78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
113#define DEBUG_TYPE "isel"
121 cl::desc(
"Insert the experimental `assertalign` node."),
126 cl::desc(
"Generate low-precision inline sequences "
127 "for some float libcalls"),
133 cl::desc(
"Set the case probability threshold for peeling the case from a "
134 "switch statement. A value greater than 100 will void this "
154 const SDValue *Parts,
unsigned NumParts,
157 std::optional<CallingConv::ID> CC);
166 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
168 std::optional<CallingConv::ID> CC = std::nullopt,
169 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
173 PartVT, ValueVT, CC))
180 assert(NumParts > 0 &&
"No parts to assemble!");
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
198 if (RoundParts > 2) {
202 PartVT, HalfVT, V, InChain);
204 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
205 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
213 if (RoundParts < NumParts) {
215 unsigned OddParts = NumParts - RoundParts;
218 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
271 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
304 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
311 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
417 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
436 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
440 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
451 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
476 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
540 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
546 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
554 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
558 assert(NumParts == 1 && PartEVT != ValueVT);
559 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
568 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
580 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
630 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
631 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
664 }
else if (PartEVT != ValueEVT) {
679 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
690 std::optional<CallingConv::ID> CallConv) {
694 const bool IsABIRegCopy = CallConv.has_value();
697 EVT PartEVT = PartVT;
698 if (PartEVT == ValueVT) {
702 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
737 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
744 "lossy conversion of vector to scalar type");
759 unsigned NumIntermediates;
763 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
768 NumIntermediates, RegisterVT);
771 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
773 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
776 "Mixing scalable and fixed vectors when copying in parts");
778 std::optional<ElementCount> DestEltCnt;
788 if (ValueVT == BuiltVectorTy) {
792 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
812 for (
unsigned i = 0; i != NumIntermediates; ++i) {
827 if (NumParts == NumIntermediates) {
830 for (
unsigned i = 0; i != NumParts; ++i)
832 }
else if (NumParts > 0) {
835 assert(NumIntermediates != 0 &&
"division by zero");
836 assert(NumParts % NumIntermediates == 0 &&
837 "Must expand into a divisible number of parts!");
838 unsigned Factor = NumParts / NumIntermediates;
839 for (
unsigned i = 0; i != NumIntermediates; ++i)
847 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
851 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
854 OS << LS << U.getTagName();
857 Twine(
"cannot lower ", Name)
863 EVT valuevt, std::optional<CallingConv::ID> CC)
869 std::optional<CallingConv::ID> CC) {
883 for (
unsigned i = 0; i != NumRegs; ++i)
884 Regs.push_back(Reg + i);
885 RegVTs.push_back(RegisterVT);
887 Reg = Reg.id() + NumRegs;
914 for (
unsigned i = 0; i != NumRegs; ++i) {
920 *Glue =
P.getValue(2);
923 Chain =
P.getValue(1);
951 EVT FromVT(MVT::Other);
955 }
else if (NumSignBits > 1) {
963 assert(FromVT != MVT::Other);
969 RegisterVT, ValueVT, V, Chain,
CallConv);
985 unsigned NumRegs =
Regs.size();
999 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1005 for (
unsigned i = 0; i != NumRegs; ++i) {
1017 if (NumRegs == 1 || Glue)
1028 Chain = Chains[NumRegs-1];
1034 unsigned MatchingIdx,
const SDLoc &dl,
1036 std::vector<SDValue> &
Ops)
const {
1041 Flag.setMatchingOp(MatchingIdx);
1042 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1050 Flag.setRegClass(RC->
getID());
1061 "No 1:1 mapping from clobbers to regs?");
1064 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1069 "If we clobbered the stack pointer, MFI should know about it.");
1078 for (
unsigned i = 0; i != NumRegs; ++i) {
1079 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1091 unsigned RegCount = std::get<0>(CountAndVT);
1092 MVT RegisterVT = std::get<1>(CountAndVT);
1109 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1111 *
DAG.getMachineFunction().getFunction().getParent());
1116 UnusedArgNodeMap.clear();
1118 PendingExports.clear();
1119 PendingConstrainedFP.clear();
1120 PendingConstrainedFPStrict.clear();
1128 DanglingDebugInfoMap.clear();
1135 if (Pending.
empty())
1141 unsigned i = 0, e = Pending.
size();
1142 for (; i != e; ++i) {
1144 if (Pending[i].
getNode()->getOperand(0) == Root)
1152 if (Pending.
size() == 1)
1171 PendingConstrainedFP.size() +
1172 PendingConstrainedFPStrict.size());
1174 PendingConstrainedFP.end());
1175 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1176 PendingConstrainedFPStrict.end());
1177 PendingConstrainedFP.clear();
1178 PendingConstrainedFPStrict.clear();
1185 PendingExports.append(PendingConstrainedFPStrict.begin(),
1186 PendingConstrainedFPStrict.end());
1187 PendingConstrainedFPStrict.clear();
1188 return updateRoot(PendingExports);
1195 assert(Variable &&
"Missing variable");
1202 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1218 if (IsParameter && FINode) {
1220 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1221 true,
DL, SDNodeOrder);
1226 FuncArgumentDbgValueKind::Declare,
N);
1229 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1230 true,
DL, SDNodeOrder);
1232 DAG.AddDbgValue(SDV, IsParameter);
1237 FuncArgumentDbgValueKind::Declare,
N)) {
1239 <<
" (could not emit func-arg dbg_value)\n");
1250 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1252 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1254 if (It->Values.isKillLocation(It->Expr)) {
1260 It->Values.hasArgList())) {
1263 FnVarLocs->getDILocalVariable(It->VariableID),
1264 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1277 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1280 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1282 assert(DLR->getLabel() &&
"Missing label");
1284 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1285 DAG.AddDbgLabel(SDV);
1289 if (SkipDbgVariableRecords)
1297 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1299 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1308 if (Values.
empty()) {
1325 SDNodeOrder, IsVariadic)) {
1336 if (
I.isTerminator()) {
1337 HandlePHINodesInSuccessorBlocks(
I.getParent());
1344 bool NodeInserted =
false;
1345 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1346 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1347 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1348 if (PCSectionsMD || MMRA) {
1349 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1350 DAG, [&](
SDNode *) { NodeInserted =
true; });
1360 if (PCSectionsMD || MMRA) {
1361 auto It = NodeMap.find(&
I);
1362 if (It != NodeMap.end()) {
1364 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1366 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1367 }
else if (NodeInserted) {
1370 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1371 <<
I.getModule()->getName() <<
"]\n";
1380void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1390#define HANDLE_INST(NUM, OPCODE, CLASS) \
1391 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1392#include "llvm/IR/Instruction.def"
1404 for (
const Value *V : Values) {
1429 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1434 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1435 DIVariable *DanglingVariable = DDI.getVariable();
1437 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1439 << printDDI(
nullptr, DDI) <<
"\n");
1445 for (
auto &DDIMI : DanglingDebugInfoMap) {
1446 DanglingDebugInfoVector &DDIV = DDIMI.second;
1450 for (
auto &DDI : DDIV)
1451 if (isMatchingDbgValue(DDI))
1454 erase_if(DDIV, isMatchingDbgValue);
1462 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1463 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1466 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1467 for (
auto &DDI : DDIV) {
1470 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1474 "Expected inlined-at fields to agree");
1483 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1484 FuncArgumentDbgValueKind::Value, Val)) {
1486 << printDDI(V, DDI) <<
"\n");
1493 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1494 << ValSDNodeOrder <<
"\n");
1495 SDV = getDbgValue(Val, Variable, Expr,
DL,
1496 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1497 DAG.AddDbgValue(SDV,
false);
1501 <<
" in EmitFuncArgumentDbgValue\n");
1503 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1507 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1508 DAG.AddDbgValue(SDV,
false);
1515 DanglingDebugInfo &DDI) {
1520 const Value *OrigV = V;
1524 unsigned SDOrder = DDI.getSDNodeOrder();
1528 bool StackValue =
true;
1553 if (!AdditionalValues.
empty())
1563 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1564 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1572 assert(OrigV &&
"V shouldn't be null");
1574 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1575 DAG.AddDbgValue(SDV,
false);
1577 << printDDI(OrigV, DDI) <<
"\n");
1594 unsigned Order,
bool IsVariadic) {
1599 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1604 for (
const Value *V : Values) {
1614 if (CE->getOpcode() == Instruction::IntToPtr) {
1633 N = UnusedArgNodeMap[V];
1638 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1639 FuncArgumentDbgValueKind::Value,
N))
1666 bool IsParamOfFunc =
1674 auto VMI =
FuncInfo.ValueMap.find(V);
1675 if (VMI !=
FuncInfo.ValueMap.end()) {
1680 V->getType(), std::nullopt);
1686 unsigned BitsToDescribe = 0;
1688 BitsToDescribe = *VarSize;
1690 BitsToDescribe = Fragment->SizeInBits;
1693 if (
Offset >= BitsToDescribe)
1696 unsigned RegisterSize = RegAndSize.second;
1697 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1698 ? BitsToDescribe -
Offset
1701 Expr,
Offset, FragmentSize);
1705 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1706 DAG.AddDbgValue(SDV,
false);
1722 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1723 false, DbgLoc, Order, IsVariadic);
1724 DAG.AddDbgValue(SDV,
false);
1730 for (
auto &Pair : DanglingDebugInfoMap)
1731 for (
auto &DDI : Pair.second)
1742 if (It !=
FuncInfo.ValueMap.end()) {
1746 DAG.getDataLayout(), InReg, Ty,
1763 if (
N.getNode())
return N;
1823 return DAG.getSplatBuildVector(
1826 return DAG.getConstant(*CI,
DL, VT);
1835 getValue(CPA->getAddrDiscriminator()),
1836 getValue(CPA->getDiscriminator()));
1852 visit(CE->getOpcode(), *CE);
1854 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1860 for (
const Use &U :
C->operands()) {
1866 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1867 Constants.push_back(
SDValue(Val, i));
1876 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1880 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1889 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1891 "Unknown struct or array constant!");
1895 unsigned NumElts = ValueVTs.
size();
1899 for (
unsigned i = 0; i != NumElts; ++i) {
1900 EVT EltVT = ValueVTs[i];
1902 Constants[i] =
DAG.getUNDEF(EltVT);
1913 return DAG.getBlockAddress(BA, VT);
1916 return getValue(Equiv->getGlobalValue());
1921 if (VT == MVT::aarch64svcount) {
1922 assert(
C->isNullValue() &&
"Can only zero this target type!");
1928 assert(
C->isNullValue() &&
"Can only zero this target type!");
1945 for (
unsigned i = 0; i != NumElements; ++i)
1973 return DAG.getFrameIndex(
1982 Inst->getType(), std::nullopt);
1996void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2009 if (IsMSVCCXX || IsCoreCLR)
2015 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2016 FuncInfo.MBB->addSuccessor(TargetMBB);
2023 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2032 DAG.getMachineFunction().setHasEHContTarget(
true);
2038 Value *ParentPad =
I.getCatchSwitchParentPad();
2041 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2044 assert(SuccessorColor &&
"No parent funclet for catchret!");
2045 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2046 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2051 DAG.getBasicBlock(SuccessorColorMBB));
2055void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2061 FuncInfo.MBB->setIsEHFuncletEntry();
2062 FuncInfo.MBB->setIsCleanupFuncletEntry();
2091 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2097 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2098 UnwindDests.back().first->setIsEHScopeEntry();
2101 UnwindDests.back().first->setIsEHFuncletEntry();
2105 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2106 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2108 if (IsMSVCCXX || IsCoreCLR)
2109 UnwindDests.back().first->setIsEHFuncletEntry();
2111 UnwindDests.back().first->setIsEHScopeEntry();
2113 NewEHPadBB = CatchSwitch->getUnwindDest();
2119 if (BPI && NewEHPadBB)
2121 EHPadBB = NewEHPadBB;
2128 auto UnwindDest =
I.getUnwindDest();
2129 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2130 BranchProbability UnwindDestProb =
2135 for (
auto &UnwindDest : UnwindDests) {
2136 UnwindDest.first->setIsEHPad();
2137 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2139 FuncInfo.MBB->normalizeSuccProbs();
2142 MachineBasicBlock *CleanupPadMBB =
2143 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2149void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2153void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2154 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2155 auto &
DL =
DAG.getDataLayout();
2167 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2184 SmallVector<uint64_t, 4>
Offsets;
2187 unsigned NumValues = ValueVTs.
size();
2190 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2191 for (
unsigned i = 0; i != NumValues; ++i) {
2198 if (MemVTs[i] != ValueVTs[i])
2200 Chains[i] =
DAG.getStore(
2208 MVT::Other, Chains);
2209 }
else if (
I.getNumOperands() != 0) {
2212 unsigned NumValues =
Types.size();
2216 const Function *
F =
I.getParent()->getParent();
2219 I.getOperand(0)->getType(),
F->getCallingConv(),
2223 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2225 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2228 LLVMContext &
Context =
F->getContext();
2229 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2231 for (
unsigned j = 0;
j != NumValues; ++
j) {
2244 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2247 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2251 if (
I.getOperand(0)->getType()->isPointerTy()) {
2253 Flags.setPointerAddrSpace(
2257 if (NeedsRegBlock) {
2258 Flags.setInConsecutiveRegs();
2259 if (j == NumValues - 1)
2260 Flags.setInConsecutiveRegsLast();
2268 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2271 for (
unsigned i = 0; i < NumParts; ++i) {
2274 VT, Types[j], 0, 0));
2284 const Function *
F =
I.getParent()->getParent();
2286 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2288 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2289 Flags.setSwiftError();
2301 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2303 DAG.getMachineFunction().getFunction().getCallingConv();
2304 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2309 "LowerReturn didn't return a valid chain!");
2320 if (V->getType()->isEmptyTy())
2324 if (VMI !=
FuncInfo.ValueMap.end()) {
2326 "Unused value assigned virtual registers!");
2339 if (
FuncInfo.isExportedInst(V))
return;
2351 if (VI->getParent() == FromBB)
2377 const BasicBlock *SrcBB = Src->getBasicBlock();
2378 const BasicBlock *DstBB = Dst->getBasicBlock();
2382 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2392 Src->addSuccessorWithoutProb(Dst);
2395 Prob = getEdgeProbability(Src, Dst);
2396 Src->addSuccessor(Dst, Prob);
2402 return I->getParent() == BB;
2426 if (CurBB == SwitchBB ||
2432 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2437 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2439 if (TM.Options.NoNaNsFPMath)
2443 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2445 SL->SwitchCases.push_back(CB);
2454 SL->SwitchCases.push_back(CB);
2462 unsigned Depth = 0) {
2471 if (Necessary !=
nullptr) {
2474 if (Necessary->contains(
I))
2493 if (
I.getNumSuccessors() != 2)
2496 if (!
I.isConditional())
2508 if (BPI !=
nullptr) {
2514 std::optional<bool> Likely;
2517 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2521 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2533 if (CostThresh <= 0)
2551 const auto &TLI =
DAG.getTargetLoweringInfo();
2558 Value *BrCond =
I.getCondition();
2559 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2560 for (
const auto *U : Ins->users()) {
2563 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2576 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2578 for (
const auto &InsPair : RhsDeps) {
2579 if (!ShouldCountInsn(InsPair.first)) {
2580 ToDrop = InsPair.first;
2584 if (ToDrop ==
nullptr)
2586 RhsDeps.erase(ToDrop);
2589 for (
const auto &InsPair : RhsDeps) {
2597 if (CostOfIncluding > CostThresh)
2623 const Value *BOpOp0, *BOpOp1;
2637 if (BOpc == Instruction::And)
2638 BOpc = Instruction::Or;
2639 else if (BOpc == Instruction::Or)
2640 BOpc = Instruction::And;
2646 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2651 TProb, FProb, InvertCond);
2661 if (
Opc == Instruction::Or) {
2682 auto NewTrueProb = TProb / 2;
2683 auto NewFalseProb = TProb / 2 + FProb;
2686 NewFalseProb, InvertCond);
2693 Probs[1], InvertCond);
2695 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2715 auto NewTrueProb = TProb + FProb / 2;
2716 auto NewFalseProb = FProb / 2;
2719 NewFalseProb, InvertCond);
2726 Probs[1], InvertCond);
2735 if (Cases.size() != 2)
return true;
2739 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2740 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2741 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2742 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2748 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2749 Cases[0].CC == Cases[1].CC &&
2752 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2754 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2761void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2767 if (
I.isUnconditional()) {
2773 if (Succ0MBB != NextBlock(BrMBB) ||
2786 const Value *CondVal =
I.getCondition();
2787 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2806 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2808 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2811 const Value *BOp0, *BOp1;
2814 Opcode = Instruction::And;
2816 Opcode = Instruction::Or;
2823 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2824 Opcode, BOp0, BOp1))) {
2826 getEdgeProbability(BrMBB, Succ0MBB),
2827 getEdgeProbability(BrMBB, Succ1MBB),
2832 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2836 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2843 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2849 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2850 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2852 SL->SwitchCases.clear();
2858 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2879 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2886 auto &TLI =
DAG.getTargetLoweringInfo();
2910 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2922 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2926 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2927 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2942 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2958 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2961 DAG.setRoot(BrCond);
2967 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2968 assert(JT.Reg &&
"Should lower JT Header first!");
2969 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
2971 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
2972 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2973 Index.getValue(1), Table, Index);
2974 DAG.setRoot(BrJumpTable);
2982 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2983 const SDLoc &dl = *JT.SL;
2989 DAG.getConstant(JTH.
First, dl, VT));
3004 JT.Reg = JumpTableReg;
3012 Sub.getValueType()),
3015 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3016 MVT::Other, CopyTo, CMP,
3017 DAG.getBasicBlock(JT.Default));
3020 if (JT.MBB != NextBlock(SwitchBB))
3021 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3022 DAG.getBasicBlock(JT.MBB));
3024 DAG.setRoot(BrCond);
3027 if (JT.MBB != NextBlock(SwitchBB))
3028 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3029 DAG.getBasicBlock(JT.MBB)));
3031 DAG.setRoot(CopyTo);
3054 if (PtrTy != PtrMemTy)
3070 auto &
DL =
DAG.getDataLayout();
3079 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3086 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3099 assert(GuardCheckFn &&
"Guard check function is null");
3110 Entry.IsInReg =
true;
3111 Args.push_back(Entry);
3117 getValue(GuardCheckFn), std::move(Args));
3119 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3120 DAG.setRoot(Result.second);
3133 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3144 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3177 auto &
DL =
DAG.getDataLayout();
3185 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3191 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3206 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3207 Entry.IsInReg =
true;
3208 Args.push_back(Entry);
3214 getValue(GuardCheckFn), std::move(Args));
3220 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3228 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3243 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3247 bool UsePtrType =
false;
3271 if (!
B.FallthroughUnreachable)
3272 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3273 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3277 if (!
B.FallthroughUnreachable) {
3285 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3286 DAG.getBasicBlock(
B.Default));
3290 if (
MBB != NextBlock(SwitchBB))
3291 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3308 if (PopCount == 1) {
3315 }
else if (PopCount == BB.
Range) {
3323 DAG.getConstant(1, dl, VT), ShiftOp);
3327 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3334 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3336 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3344 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3347 if (NextMBB != NextBlock(SwitchBB))
3348 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3349 DAG.getBasicBlock(NextMBB));
3354void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3372 const Value *Callee(
I.getCalledOperand());
3375 visitInlineAsm(
I, EHPadBB);
3380 case Intrinsic::donothing:
3382 case Intrinsic::seh_try_begin:
3383 case Intrinsic::seh_scope_begin:
3384 case Intrinsic::seh_try_end:
3385 case Intrinsic::seh_scope_end:
3391 case Intrinsic::experimental_patchpoint_void:
3392 case Intrinsic::experimental_patchpoint:
3393 visitPatchpoint(
I, EHPadBB);
3395 case Intrinsic::experimental_gc_statepoint:
3401 case Intrinsic::wasm_throw: {
3403 std::array<SDValue, 4>
Ops = {
3414 case Intrinsic::wasm_rethrow: {
3415 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3416 std::array<SDValue, 2>
Ops = {
3425 }
else if (
I.hasDeoptState()) {
3446 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3447 BranchProbability EHPadBBProb =
3453 addSuccessorWithProb(InvokeMBB, Return);
3454 for (
auto &UnwindDest : UnwindDests) {
3455 UnwindDest.first->setIsEHPad();
3456 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3462 DAG.getBasicBlock(Return)));
3465void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3466 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3473 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3478 SmallPtrSet<BasicBlock *, 8> Dests;
3479 Dests.
insert(
I.getDefaultDest());
3484 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3487 Target->setIsInlineAsmBrIndirectTarget();
3493 Target->setLabelMustBeEmitted();
3495 if (Dests.
insert(Dest).second)
3503 DAG.getBasicBlock(Return)));
3506void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3507 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3510void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3512 "Call to landingpad not in landing pad!");
3516 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3532 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3537 if (
FuncInfo.ExceptionPointerVirtReg) {
3538 Ops[0] =
DAG.getZExtOrTrunc(
3539 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3546 Ops[1] =
DAG.getZExtOrTrunc(
3547 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3554 DAG.getVTList(ValueVTs),
Ops);
3562 if (JTB.first.HeaderBB ==
First)
3563 JTB.first.HeaderBB =
Last;
3576 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3578 bool Inserted =
Done.insert(BB).second;
3583 addSuccessorWithProb(IndirectBrMBB, Succ);
3593 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3594 DAG.getTarget().Options.NoTrapAfterNoreturn))
3600void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3603 Flags.copyFMF(*FPOp);
3611void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3614 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3615 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3618 Flags.setExact(ExactOp->isExact());
3620 Flags.setDisjoint(DisjointOp->isDisjoint());
3622 Flags.copyFMF(*FPOp);
3631void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3635 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3640 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3642 "Unexpected shift type");
3652 if (
const OverflowingBinaryOperator *OFBinOp =
3654 nuw = OFBinOp->hasNoUnsignedWrap();
3655 nsw = OFBinOp->hasNoSignedWrap();
3657 if (
const PossiblyExactOperator *ExactOp =
3659 exact = ExactOp->isExact();
3662 Flags.setExact(exact);
3663 Flags.setNoSignedWrap(nsw);
3664 Flags.setNoUnsignedWrap(nuw);
3670void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3681void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3687 auto &TLI =
DAG.getTargetLoweringInfo();
3700 Flags.setSameSign(
I.hasSameSign());
3701 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3703 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3708void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3715 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3719 Flags.copyFMF(*FPMO);
3720 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3722 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3731 return isa<SelectInst>(V);
3735void SelectionDAGBuilder::visitSelect(
const User &
I) {
3739 unsigned NumValues = ValueVTs.
size();
3740 if (NumValues == 0)
return;
3750 bool IsUnaryAbs =
false;
3751 bool Negate =
false;
3755 Flags.copyFMF(*FPOp);
3757 Flags.setUnpredictable(
3762 EVT VT = ValueVTs[0];
3763 LLVMContext &Ctx = *
DAG.getContext();
3764 auto &TLI =
DAG.getTargetLoweringInfo();
3774 bool UseScalarMinMax = VT.
isVector() &&
3783 switch (SPR.Flavor) {
3789 switch (SPR.NaNBehavior) {
3802 switch (SPR.NaNBehavior) {
3846 for (
unsigned i = 0; i != NumValues; ++i) {
3852 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3855 for (
unsigned i = 0; i != NumValues; ++i) {
3859 Values[i] =
DAG.getNode(
3866 DAG.getVTList(ValueVTs), Values));
3869void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3872 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3876 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3877 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3883void SelectionDAGBuilder::visitZExt(
const User &
I) {
3887 auto &TLI =
DAG.getTargetLoweringInfo();
3892 Flags.setNonNeg(PNI->hasNonNeg());
3897 if (
Flags.hasNonNeg() &&
3906void SelectionDAGBuilder::visitSExt(
const User &
I) {
3910 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3915void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3921 Flags.copyFMF(*TruncInst);
3922 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3925 DAG.getTargetConstant(
3930void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3933 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3938void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3941 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3946void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3949 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3954void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3957 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3961 Flags.setNonNeg(PNI->hasNonNeg());
3966void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3969 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3974void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
3979void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3983 auto &TLI =
DAG.getTargetLoweringInfo();
3984 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3993void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3997 auto &TLI =
DAG.getTargetLoweringInfo();
4005void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4008 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4013 if (DestVT !=
N.getValueType())
4021 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4027void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4028 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4029 const Value *SV =
I.getOperand(0);
4034 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4036 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4042void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4043 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4050 InVec, InVal, InIdx));
4053void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4054 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4063void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4068 Mask = SVI->getShuffleMask();
4072 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4076 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4081 DAG.getVectorIdxConstant(0,
DL));
4092 unsigned MaskNumElts =
Mask.size();
4094 if (SrcNumElts == MaskNumElts) {
4100 if (SrcNumElts < MaskNumElts) {
4104 if (MaskNumElts % SrcNumElts == 0) {
4108 unsigned NumConcat = MaskNumElts / SrcNumElts;
4109 bool IsConcat =
true;
4110 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4111 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4117 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4118 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4119 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4124 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4131 for (
auto Src : ConcatSrcs) {
4144 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4145 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4161 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4162 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4164 if (Idx >= (
int)SrcNumElts)
4165 Idx -= SrcNumElts - PaddedMaskNumElts;
4173 if (MaskNumElts != PaddedMaskNumElts)
4175 DAG.getVectorIdxConstant(0,
DL));
4181 assert(SrcNumElts > MaskNumElts);
4185 int StartIdx[2] = {-1, -1};
4186 bool CanExtract =
true;
4187 for (
int Idx : Mask) {
4192 if (Idx >= (
int)SrcNumElts) {
4200 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4201 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4202 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4206 StartIdx[Input] = NewStartIdx;
4209 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4215 for (
unsigned Input = 0; Input < 2; ++Input) {
4216 SDValue &Src = Input == 0 ? Src1 : Src2;
4217 if (StartIdx[Input] < 0)
4218 Src =
DAG.getUNDEF(VT);
4221 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4226 SmallVector<int, 8> MappedOps(Mask);
4227 for (
int &Idx : MappedOps) {
4228 if (Idx >= (
int)SrcNumElts)
4229 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4234 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4243 for (
int Idx : Mask) {
4247 Res =
DAG.getUNDEF(EltVT);
4249 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4250 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4253 DAG.getVectorIdxConstant(Idx,
DL));
4263 ArrayRef<unsigned> Indices =
I.getIndices();
4264 const Value *Op0 =
I.getOperand(0);
4266 Type *AggTy =
I.getType();
4273 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4279 unsigned NumAggValues = AggValueVTs.
size();
4280 unsigned NumValValues = ValValueVTs.
size();
4284 if (!NumAggValues) {
4292 for (; i != LinearIndex; ++i)
4293 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4298 for (; i != LinearIndex + NumValValues; ++i)
4299 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4303 for (; i != NumAggValues; ++i)
4304 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4308 DAG.getVTList(AggValueVTs), Values));
4312 ArrayRef<unsigned> Indices =
I.getIndices();
4313 const Value *Op0 =
I.getOperand(0);
4315 Type *ValTy =
I.getType();
4320 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4324 unsigned NumValValues = ValValueVTs.
size();
4327 if (!NumValValues) {
4336 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4337 Values[i - LinearIndex] =
4343 DAG.getVTList(ValValueVTs), Values));
4346void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4347 Value *Op0 =
I.getOperand(0);
4353 auto &TLI =
DAG.getTargetLoweringInfo();
4358 bool IsVectorGEP =
I.getType()->isVectorTy();
4359 ElementCount VectorElementCount =
4365 const Value *Idx = GTI.getOperand();
4366 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4371 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4380 N =
DAG.getMemBasePlusOffset(
4381 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4387 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4389 TypeSize ElementSize =
4390 GTI.getSequentialElementStride(
DAG.getDataLayout());
4395 bool ElementScalable = ElementSize.
isScalable();
4401 C =
C->getSplatValue();
4404 if (CI && CI->isZero())
4406 if (CI && !ElementScalable) {
4407 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4410 if (
N.getValueType().isVector())
4411 OffsVal =
DAG.getConstant(
4414 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4421 Flags.setNoUnsignedWrap(
true);
4423 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4425 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4433 if (
N.getValueType().isVector()) {
4435 VectorElementCount);
4436 IdxN =
DAG.getSplat(VT, dl, IdxN);
4440 N =
DAG.getSplat(VT, dl,
N);
4446 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4448 SDNodeFlags ScaleFlags;
4457 if (ElementScalable) {
4458 EVT VScaleTy =
N.getValueType().getScalarType();
4460 ISD::VSCALE, dl, VScaleTy,
4461 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4462 if (
N.getValueType().isVector())
4463 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4464 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4469 if (ElementMul != 1) {
4470 if (ElementMul.isPowerOf2()) {
4471 unsigned Amt = ElementMul.logBase2();
4472 IdxN =
DAG.getNode(
ISD::SHL, dl,
N.getValueType(), IdxN,
4476 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4478 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4488 SDNodeFlags AddFlags;
4491 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4495 if (IsVectorGEP && !
N.getValueType().isVector()) {
4497 N =
DAG.getSplat(VT, dl,
N);
4508 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4513void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4520 Type *Ty =
I.getAllocatedType();
4521 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4522 auto &
DL =
DAG.getDataLayout();
4523 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4524 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4530 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4533 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4534 DAG.getVScale(dl, IntPtr,
4540 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4541 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4547 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4548 if (*Alignment <= StackAlign)
4549 Alignment = std::nullopt;
4551 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4556 DAG.getConstant(StackAlignMask, dl, IntPtr),
4561 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4565 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4567 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4575 return I.getMetadata(LLVMContext::MD_range);
4580 if (std::optional<ConstantRange> CR = CB->getRange())
4584 return std::nullopt;
4587void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4589 return visitAtomicLoad(
I);
4591 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4592 const Value *SV =
I.getOperand(0);
4597 if (Arg->hasSwiftErrorAttr())
4598 return visitLoadFromSwiftError(
I);
4602 if (Alloca->isSwiftError())
4603 return visitLoadFromSwiftError(
I);
4609 Type *Ty =
I.getType();
4613 unsigned NumValues = ValueVTs.
size();
4617 Align Alignment =
I.getAlign();
4618 AAMDNodes AAInfo =
I.getAAMetadata();
4620 bool isVolatile =
I.isVolatile();
4625 bool ConstantMemory =
false;
4632 BatchAA->pointsToConstantMemory(MemoryLocation(
4637 Root =
DAG.getEntryNode();
4638 ConstantMemory =
true;
4642 Root =
DAG.getRoot();
4653 unsigned ChainI = 0;
4654 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4670 MachinePointerInfo PtrInfo =
4672 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4673 : MachinePointerInfo();
4676 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4677 MMOFlags, AAInfo, Ranges);
4678 Chains[ChainI] =
L.getValue(1);
4680 if (MemVTs[i] != ValueVTs[i])
4681 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4686 if (!ConstantMemory) {
4696 DAG.getVTList(ValueVTs), Values));
4699void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4700 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4701 "call visitStoreToSwiftError when backend supports swifterror");
4704 SmallVector<uint64_t, 4>
Offsets;
4705 const Value *SrcV =
I.getOperand(0);
4707 SrcV->
getType(), ValueVTs, &Offsets, 0);
4708 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4709 "expect a single EVT for swifterror");
4718 SDValue(Src.getNode(), Src.getResNo()));
4719 DAG.setRoot(CopyNode);
4722void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4723 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4724 "call visitLoadFromSwiftError when backend supports swifterror");
4727 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4728 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4729 "Support volatile, non temporal, invariant for load_from_swift_error");
4731 const Value *SV =
I.getOperand(0);
4732 Type *Ty =
I.getType();
4735 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4737 I.getAAMetadata()))) &&
4738 "load_from_swift_error should not be constant memory");
4741 SmallVector<uint64_t, 4>
Offsets;
4743 ValueVTs, &Offsets, 0);
4744 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4745 "expect a single EVT for swifterror");
4755void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4757 return visitAtomicStore(
I);
4759 const Value *SrcV =
I.getOperand(0);
4760 const Value *PtrV =
I.getOperand(1);
4762 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4767 if (Arg->hasSwiftErrorAttr())
4768 return visitStoreToSwiftError(
I);
4772 if (Alloca->isSwiftError())
4773 return visitStoreToSwiftError(
I);
4780 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4781 unsigned NumValues = ValueVTs.
size();
4794 Align Alignment =
I.getAlign();
4795 AAMDNodes AAInfo =
I.getAAMetadata();
4799 unsigned ChainI = 0;
4800 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4810 MachinePointerInfo PtrInfo =
4812 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4813 : MachinePointerInfo();
4817 if (MemVTs[i] != ValueVTs[i])
4818 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4820 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4821 Chains[ChainI] = St;
4827 DAG.setRoot(StoreNode);
4830void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4831 bool IsCompressing) {
4834 auto getMaskedStoreOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
4837 Src0 =
I.getArgOperand(0);
4838 Ptr =
I.getArgOperand(1);
4840 Mask =
I.getArgOperand(3);
4842 auto getCompressingStoreOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
4845 Src0 =
I.getArgOperand(0);
4846 Ptr =
I.getArgOperand(1);
4847 Mask =
I.getArgOperand(2);
4848 Alignment =
I.getParamAlign(1).valueOrOne();
4851 Value *PtrOperand, *MaskOperand, *Src0Operand;
4854 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4856 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4866 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4869 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4870 MachinePointerInfo(PtrOperand), MMOFlags,
4873 const auto &TLI =
DAG.getTargetLoweringInfo();
4878 I.getArgOperand(0)->getType(),
true)
4884 DAG.setRoot(StoreNode);
4910 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4914 C =
C->getSplatValue();
4928 if (!
GEP ||
GEP->getParent() != CurBB)
4931 if (
GEP->getNumOperands() != 2)
4934 const Value *BasePtr =
GEP->getPointerOperand();
4935 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4941 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4946 if (ScaleVal != 1 &&
4958void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4962 const Value *
Ptr =
I.getArgOperand(1);
4967 ->getMaybeAlignValue()
4969 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4977 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4978 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4988 EVT IdxVT =
Index.getValueType();
4996 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
4998 DAG.setRoot(Scatter);
5002void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5005 auto getMaskedLoadOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
5008 Ptr =
I.getArgOperand(0);
5010 Mask =
I.getArgOperand(2);
5011 Src0 =
I.getArgOperand(3);
5013 auto getExpandingLoadOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
5016 Ptr =
I.getArgOperand(0);
5017 Alignment =
I.getParamAlign(0).valueOrOne();
5018 Mask =
I.getArgOperand(1);
5019 Src0 =
I.getArgOperand(2);
5022 Value *PtrOperand, *MaskOperand, *Src0Operand;
5025 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5027 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5035 AAMDNodes AAInfo =
I.getAAMetadata();
5042 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5045 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5048 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5049 MachinePointerInfo(PtrOperand), MMOFlags,
5052 const auto &TLI =
DAG.getTargetLoweringInfo();
5064 DAG.getMaskedLoad(VT, sdl, InChain,
Ptr,
Offset, Mask, Src0, VT, MMO,
5071void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5075 const Value *
Ptr =
I.getArgOperand(0);
5079 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5082 ->getMaybeAlignValue()
5093 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5094 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5106 EVT IdxVT =
Index.getValueType();
5115 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5131 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5133 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5136 MachineFunction &MF =
DAG.getMachineFunction();
5138 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5139 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5142 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5143 dl, MemVT, VTs, InChain,
5151 DAG.setRoot(OutChain);
5154void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5157 switch (
I.getOperation()) {
5175 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5178 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5181 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5184 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5187 NT = ISD::ATOMIC_LOAD_USUB_COND;
5190 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5199 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5202 MachineFunction &MF =
DAG.getMachineFunction();
5204 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5205 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5208 DAG.getAtomic(NT, dl, MemVT, InChain,
5215 DAG.setRoot(OutChain);
5218void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5220 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5223 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5225 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5232void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5239 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5250 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5251 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5252 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5262 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5265 DAG.setRoot(OutChain);
5268void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5276 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5286 MachineFunction &MF =
DAG.getMachineFunction();
5288 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5289 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5293 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5297 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val,
Ptr, MMO);
5300 DAG.setRoot(OutChain);
5305void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5311 bool HasChain = !
F->doesNotAccessMemory();
5313 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5320 Ops.push_back(
DAG.getRoot());
5327 TargetLowering::IntrinsicInfo
Info;
5328 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5330 DAG.getMachineFunction(),
5340 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5341 const Value *Arg =
I.getArgOperand(i);
5342 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5350 assert(CI->getBitWidth() <= 64 &&
5351 "large intrinsic immediates not handled");
5352 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5365 SDVTList VTs =
DAG.getVTList(ValueVTs);
5370 Flags.copyFMF(*FPMO);
5371 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5377 auto *Token = Bundle->Inputs[0].get();
5379 assert(
Ops.back().getValueType() != MVT::Glue &&
5380 "Did not expected another glue node here.");
5382 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5383 Ops.push_back(ConvControlToken);
5388 if (IsTgtIntrinsic) {
5393 MachinePointerInfo MPI;
5395 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5396 else if (
Info.fallbackAddressSpace)
5397 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5398 EVT MemVT =
Info.memVT;
5400 if (
Size.hasValue() && !
Size.getValue())
5402 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5403 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5404 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5408 }
else if (!HasChain) {
5410 }
else if (!
I.getType()->isVoidTy()) {
5424 if (!
I.getType()->isVoidTy()) {
5428 MaybeAlign Alignment =
I.getRetAlign();
5451 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5497 SDValue TwoToFractionalPartOfX;
5565 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5566 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5574 if (
Op.getValueType() == MVT::f32 &&
5589 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5598 if (
Op.getValueType() == MVT::f32 &&
5688 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5697 if (
Op.getValueType() == MVT::f32 &&
5781 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5785 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5794 if (
Op.getValueType() == MVT::f32 &&
5871 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5875 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5882 if (
Op.getValueType() == MVT::f32 &&
5887 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5895 bool IsExp10 =
false;
5896 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5900 IsExp10 = LHSC->isExactlyValue(Ten);
5927 unsigned Val = RHSC->getSExtValue();
5956 CurSquare, CurSquare);
5961 if (RHSC->getSExtValue() < 0)
5975 EVT VT =
LHS.getValueType();
5998 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6002 Opcode, VT, ScaleInt);
6037 switch (
N.getOpcode()) {
6041 Op.getValueType().getSizeInBits());
6066bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6073 MachineFunction &MF =
DAG.getMachineFunction();
6074 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6078 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6083 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6090 auto *NewDIExpr = FragExpr;
6097 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6100 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6101 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6105 if (Kind == FuncArgumentDbgValueKind::Value) {
6110 if (!IsInEntryBlock)
6126 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
6127 !
DL->getInlinedAt();
6129 if (!IsInPrologue && !VariableIsFunctionInputArg)
6163 if (VariableIsFunctionInputArg) {
6165 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6166 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6167 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6168 return !NodeMap[
V].getNode();
6173 bool IsIndirect =
false;
6174 std::optional<MachineOperand>
Op;
6176 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6177 if (FI != std::numeric_limits<int>::max())
6181 if (!
Op &&
N.getNode()) {
6184 if (ArgRegsAndSizes.
size() == 1)
6185 Reg = ArgRegsAndSizes.
front().first;
6188 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6195 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6199 if (!
Op &&
N.getNode()) {
6203 if (FrameIndexSDNode *FINode =
6213 for (
const auto &RegAndSize : SplitRegs) {
6217 int RegFragmentSizeInBits = RegAndSize.second;
6219 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6222 if (
Offset >= ExprFragmentSizeInBits)
6226 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6227 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6232 Expr,
Offset, RegFragmentSizeInBits);
6233 Offset += RegAndSize.second;
6236 if (!FragmentExpr) {
6237 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6239 DAG.AddDbgValue(SDV,
false);
6242 MachineInstr *NewMI =
6243 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6244 Kind != FuncArgumentDbgValueKind::Value);
6245 FuncInfo.ArgDbgValues.push_back(NewMI);
6252 if (VMI !=
FuncInfo.ValueMap.end()) {
6253 const auto &TLI =
DAG.getTargetLoweringInfo();
6254 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6255 V->getType(), std::nullopt);
6256 if (RFV.occupiesMultipleRegs()) {
6257 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6262 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6263 }
else if (ArgRegsAndSizes.
size() > 1) {
6266 splitMultiRegDbgValue(ArgRegsAndSizes);
6275 "Expected inlined-at fields to agree");
6276 MachineInstr *NewMI =
nullptr;
6279 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6281 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6285 FuncInfo.ArgDbgValues.push_back(NewMI);
6294 unsigned DbgSDNodeOrder) {
6306 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6307 false, dl, DbgSDNodeOrder);
6309 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6310 false, dl, DbgSDNodeOrder);
6315 case Intrinsic::smul_fix:
6317 case Intrinsic::umul_fix:
6319 case Intrinsic::smul_fix_sat:
6321 case Intrinsic::umul_fix_sat:
6323 case Intrinsic::sdiv_fix:
6325 case Intrinsic::udiv_fix:
6327 case Intrinsic::sdiv_fix_sat:
6329 case Intrinsic::udiv_fix_sat:
6342 "expected call_preallocated_setup Value");
6343 for (
const auto *U : PreallocatedSetup->
users()) {
6345 const Function *Fn = UseCall->getCalledFunction();
6346 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6356bool SelectionDAGBuilder::visitEntryValueDbgValue(
6366 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6367 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6369 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6370 "couldn't find an associated register for the Argument\n");
6373 Register ArgVReg = ArgIt->getSecond();
6375 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6376 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6377 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6378 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6379 DAG.AddDbgValue(SDV,
false );
6382 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6383 "couldn't find a physical register\n");
6388void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6391 switch (Intrinsic) {
6392 case Intrinsic::experimental_convergence_anchor:
6393 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6395 case Intrinsic::experimental_convergence_entry:
6396 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6398 case Intrinsic::experimental_convergence_loop: {
6400 auto *Token = Bundle->Inputs[0].get();
6401 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6408void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6409 unsigned IntrinsicID) {
6412 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6413 "Tried to lower unsupported histogram type");
6415 Value *
Ptr =
I.getOperand(0);
6419 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6420 DataLayout TargetDL =
DAG.getDataLayout();
6422 Align Alignment =
DAG.getEVTAlign(VT);
6433 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6435 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6436 MachinePointerInfo(AS),
6447 EVT IdxVT =
Index.getValueType();
6454 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6457 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6461 DAG.setRoot(Histogram);
6464void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6466 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6467 "Tried lowering invalid vector extract last");
6469 const DataLayout &Layout =
DAG.getDataLayout();
6473 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6477 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6483 EVT BoolVT =
Mask.getValueType().getScalarType();
6484 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6485 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6492void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6494 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6501 Flags.copyFMF(*FPOp);
6503 switch (Intrinsic) {
6506 visitTargetIntrinsic(
I, Intrinsic);
6508 case Intrinsic::vscale: {
6513 case Intrinsic::vastart: visitVAStart(
I);
return;
6514 case Intrinsic::vaend: visitVAEnd(
I);
return;
6515 case Intrinsic::vacopy: visitVACopy(
I);
return;
6516 case Intrinsic::returnaddress:
6521 case Intrinsic::addressofreturnaddress:
6526 case Intrinsic::sponentry:
6531 case Intrinsic::frameaddress:
6536 case Intrinsic::read_volatile_register:
6537 case Intrinsic::read_register: {
6538 Value *
Reg =
I.getArgOperand(0);
6544 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6549 case Intrinsic::write_register: {
6550 Value *
Reg =
I.getArgOperand(0);
6551 Value *RegValue =
I.getArgOperand(1);
6559 case Intrinsic::memcpy:
6560 case Intrinsic::memcpy_inline: {
6566 "memcpy_inline needs constant size");
6568 Align DstAlign = MCI.getDestAlign().valueOrOne();
6569 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6570 Align Alignment = std::min(DstAlign, SrcAlign);
6571 bool isVol = MCI.isVolatile();
6575 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6576 MCI.isForceInlined(), &
I, std::nullopt,
6577 MachinePointerInfo(
I.getArgOperand(0)),
6578 MachinePointerInfo(
I.getArgOperand(1)),
6580 updateDAGForMaybeTailCall(MC);
6583 case Intrinsic::memset:
6584 case Intrinsic::memset_inline: {
6590 "memset_inline needs constant size");
6592 Align DstAlign = MSII.getDestAlign().valueOrOne();
6593 bool isVol = MSII.isVolatile();
6596 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6597 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6598 updateDAGForMaybeTailCall(MC);
6601 case Intrinsic::memmove: {
6607 Align DstAlign = MMI.getDestAlign().valueOrOne();
6608 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6609 Align Alignment = std::min(DstAlign, SrcAlign);
6610 bool isVol = MMI.isVolatile();
6614 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6616 MachinePointerInfo(
I.getArgOperand(0)),
6617 MachinePointerInfo(
I.getArgOperand(1)),
6619 updateDAGForMaybeTailCall(MM);
6622 case Intrinsic::memcpy_element_unordered_atomic: {
6628 Type *LengthTy =
MI.getLength()->getType();
6629 unsigned ElemSz =
MI.getElementSizeInBytes();
6633 isTC, MachinePointerInfo(
MI.getRawDest()),
6634 MachinePointerInfo(
MI.getRawSource()));
6635 updateDAGForMaybeTailCall(MC);
6638 case Intrinsic::memmove_element_unordered_atomic: {
6644 Type *LengthTy =
MI.getLength()->getType();
6645 unsigned ElemSz =
MI.getElementSizeInBytes();
6649 isTC, MachinePointerInfo(
MI.getRawDest()),
6650 MachinePointerInfo(
MI.getRawSource()));
6651 updateDAGForMaybeTailCall(MC);
6654 case Intrinsic::memset_element_unordered_atomic: {
6660 Type *LengthTy =
MI.getLength()->getType();
6661 unsigned ElemSz =
MI.getElementSizeInBytes();
6665 isTC, MachinePointerInfo(
MI.getRawDest()));
6666 updateDAGForMaybeTailCall(MC);
6669 case Intrinsic::call_preallocated_setup: {
6671 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6672 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6678 case Intrinsic::call_preallocated_arg: {
6680 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6687 ISD::PREALLOCATED_ARG, sdl,
6694 case Intrinsic::eh_typeid_for: {
6697 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6698 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6703 case Intrinsic::eh_return_i32:
6704 case Intrinsic::eh_return_i64:
6705 DAG.getMachineFunction().setCallsEHReturn(
true);
6712 case Intrinsic::eh_unwind_init:
6713 DAG.getMachineFunction().setCallsUnwindInit(
true);
6715 case Intrinsic::eh_dwarf_cfa:
6720 case Intrinsic::eh_sjlj_callsite: {
6722 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6727 case Intrinsic::eh_sjlj_functioncontext: {
6729 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6732 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6736 case Intrinsic::eh_sjlj_setjmp: {
6741 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6743 DAG.setRoot(
Op.getValue(1));
6746 case Intrinsic::eh_sjlj_longjmp:
6750 case Intrinsic::eh_sjlj_setup_dispatch:
6754 case Intrinsic::masked_gather:
6755 visitMaskedGather(
I);
6757 case Intrinsic::masked_load:
6760 case Intrinsic::masked_scatter:
6761 visitMaskedScatter(
I);
6763 case Intrinsic::masked_store:
6764 visitMaskedStore(
I);
6766 case Intrinsic::masked_expandload:
6767 visitMaskedLoad(
I,
true );
6769 case Intrinsic::masked_compressstore:
6770 visitMaskedStore(
I,
true );
6772 case Intrinsic::powi:
6776 case Intrinsic::log:
6779 case Intrinsic::log2:
6783 case Intrinsic::log10:
6787 case Intrinsic::exp:
6790 case Intrinsic::exp2:
6794 case Intrinsic::pow:
6798 case Intrinsic::sqrt:
6799 case Intrinsic::fabs:
6800 case Intrinsic::sin:
6801 case Intrinsic::cos:
6802 case Intrinsic::tan:
6803 case Intrinsic::asin:
6804 case Intrinsic::acos:
6805 case Intrinsic::atan:
6806 case Intrinsic::sinh:
6807 case Intrinsic::cosh:
6808 case Intrinsic::tanh:
6809 case Intrinsic::exp10:
6810 case Intrinsic::floor:
6811 case Intrinsic::ceil:
6812 case Intrinsic::trunc:
6813 case Intrinsic::rint:
6814 case Intrinsic::nearbyint:
6815 case Intrinsic::round:
6816 case Intrinsic::roundeven:
6817 case Intrinsic::canonicalize: {
6820 switch (Intrinsic) {
6822 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6823 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6824 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6825 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6826 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6827 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6828 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6829 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6830 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6831 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6832 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6833 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6834 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6835 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6836 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6837 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6838 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6839 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6840 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6846 getValue(
I.getArgOperand(0)).getValueType(),
6850 case Intrinsic::atan2:
6852 getValue(
I.getArgOperand(0)).getValueType(),
6856 case Intrinsic::lround:
6857 case Intrinsic::llround:
6858 case Intrinsic::lrint:
6859 case Intrinsic::llrint: {
6862 switch (Intrinsic) {
6864 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6865 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6866 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6867 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6876 case Intrinsic::minnum:
6878 getValue(
I.getArgOperand(0)).getValueType(),
6882 case Intrinsic::maxnum:
6884 getValue(
I.getArgOperand(0)).getValueType(),
6888 case Intrinsic::minimum:
6890 getValue(
I.getArgOperand(0)).getValueType(),
6894 case Intrinsic::maximum:
6896 getValue(
I.getArgOperand(0)).getValueType(),
6900 case Intrinsic::minimumnum:
6902 getValue(
I.getArgOperand(0)).getValueType(),
6906 case Intrinsic::maximumnum:
6908 getValue(
I.getArgOperand(0)).getValueType(),
6912 case Intrinsic::copysign:
6914 getValue(
I.getArgOperand(0)).getValueType(),
6918 case Intrinsic::ldexp:
6920 getValue(
I.getArgOperand(0)).getValueType(),
6924 case Intrinsic::modf:
6925 case Intrinsic::sincos:
6926 case Intrinsic::sincospi:
6927 case Intrinsic::frexp: {
6929 switch (Intrinsic) {
6932 case Intrinsic::sincos:
6933 Opcode = ISD::FSINCOS;
6935 case Intrinsic::sincospi:
6936 Opcode = ISD::FSINCOSPI;
6938 case Intrinsic::modf:
6939 Opcode = ISD::FMODF;
6941 case Intrinsic::frexp:
6942 Opcode = ISD::FFREXP;
6947 SDVTList VTs =
DAG.getVTList(ValueVTs);
6949 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
6952 case Intrinsic::arithmetic_fence: {
6954 getValue(
I.getArgOperand(0)).getValueType(),
6958 case Intrinsic::fma:
6964#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6965 case Intrinsic::INTRINSIC:
6966#include "llvm/IR/ConstrainedOps.def"
6969#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6970#include "llvm/IR/VPIntrinsics.def"
6973 case Intrinsic::fptrunc_round: {
6977 std::optional<RoundingMode> RoundMode =
6985 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
6990 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
6995 case Intrinsic::fmuladd: {
7000 getValue(
I.getArgOperand(0)).getValueType(),
7016 case Intrinsic::convert_to_fp16:
7020 DAG.getTargetConstant(0, sdl,
7023 case Intrinsic::convert_from_fp16:
7026 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7029 case Intrinsic::fptosi_sat: {
7036 case Intrinsic::fptoui_sat: {
7043 case Intrinsic::set_rounding:
7044 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7049 case Intrinsic::is_fpclass: {
7050 const DataLayout DLayout =
DAG.getDataLayout();
7052 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7055 MachineFunction &MF =
DAG.getMachineFunction();
7059 Flags.setNoFPExcept(
7060 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7076 case Intrinsic::get_fpenv: {
7077 const DataLayout DLayout =
DAG.getDataLayout();
7079 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7085 ISD::GET_FPENV, sdl,
7094 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7097 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7098 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7104 case Intrinsic::set_fpenv: {
7105 const DataLayout DLayout =
DAG.getDataLayout();
7108 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7113 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7121 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7123 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7126 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7131 case Intrinsic::reset_fpenv:
7132 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7134 case Intrinsic::get_fpmode:
7136 ISD::GET_FPMODE, sdl,
7143 case Intrinsic::set_fpmode:
7144 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7148 case Intrinsic::reset_fpmode: {
7149 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7153 case Intrinsic::pcmarker: {
7155 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7158 case Intrinsic::readcyclecounter: {
7160 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7161 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7166 case Intrinsic::readsteadycounter: {
7168 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7169 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7174 case Intrinsic::bitreverse:
7176 getValue(
I.getArgOperand(0)).getValueType(),
7179 case Intrinsic::bswap:
7181 getValue(
I.getArgOperand(0)).getValueType(),
7184 case Intrinsic::cttz: {
7192 case Intrinsic::ctlz: {
7200 case Intrinsic::ctpop: {
7206 case Intrinsic::fshl:
7207 case Intrinsic::fshr: {
7208 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7212 EVT VT =
X.getValueType();
7223 case Intrinsic::sadd_sat: {
7229 case Intrinsic::uadd_sat: {
7235 case Intrinsic::ssub_sat: {
7241 case Intrinsic::usub_sat: {
7247 case Intrinsic::sshl_sat: {
7253 case Intrinsic::ushl_sat: {
7259 case Intrinsic::smul_fix:
7260 case Intrinsic::umul_fix:
7261 case Intrinsic::smul_fix_sat:
7262 case Intrinsic::umul_fix_sat: {
7270 case Intrinsic::sdiv_fix:
7271 case Intrinsic::udiv_fix:
7272 case Intrinsic::sdiv_fix_sat:
7273 case Intrinsic::udiv_fix_sat: {
7278 Op1, Op2, Op3,
DAG, TLI));
7281 case Intrinsic::smax: {
7287 case Intrinsic::smin: {
7293 case Intrinsic::umax: {
7299 case Intrinsic::umin: {
7305 case Intrinsic::abs: {
7311 case Intrinsic::scmp: {
7318 case Intrinsic::ucmp: {
7325 case Intrinsic::stacksave: {
7328 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7333 case Intrinsic::stackrestore:
7335 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7337 case Intrinsic::get_dynamic_area_offset: {
7340 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7346 case Intrinsic::stackguard: {
7347 MachineFunction &MF =
DAG.getMachineFunction();
7353 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7358 MachinePointerInfo(
Global, 0), Align,
7367 case Intrinsic::stackprotector: {
7369 MachineFunction &MF =
DAG.getMachineFunction();
7389 Chain, sdl, Src, FIN,
7396 case Intrinsic::objectsize:
7399 case Intrinsic::is_constant:
7402 case Intrinsic::annotation:
7403 case Intrinsic::ptr_annotation:
7404 case Intrinsic::launder_invariant_group:
7405 case Intrinsic::strip_invariant_group:
7410 case Intrinsic::type_test:
7411 case Intrinsic::public_type_test:
7415 case Intrinsic::assume:
7416 case Intrinsic::experimental_noalias_scope_decl:
7417 case Intrinsic::var_annotation:
7418 case Intrinsic::sideeffect:
7423 case Intrinsic::codeview_annotation: {
7425 MachineFunction &MF =
DAG.getMachineFunction();
7429 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7434 case Intrinsic::init_trampoline: {
7442 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7445 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7450 case Intrinsic::adjust_trampoline:
7455 case Intrinsic::gcroot: {
7456 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7457 "only valid in functions with gc specified, enforced by Verifier");
7459 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7466 case Intrinsic::gcread:
7467 case Intrinsic::gcwrite:
7469 case Intrinsic::get_rounding:
7475 case Intrinsic::expect:
7476 case Intrinsic::expect_with_probability:
7482 case Intrinsic::ubsantrap:
7483 case Intrinsic::debugtrap:
7484 case Intrinsic::trap: {
7485 StringRef TrapFuncName =
7486 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7487 if (TrapFuncName.
empty()) {
7488 switch (Intrinsic) {
7489 case Intrinsic::trap:
7490 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7492 case Intrinsic::debugtrap:
7493 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7495 case Intrinsic::ubsantrap:
7497 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7498 DAG.getTargetConstant(
7504 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7505 I.hasFnAttr(Attribute::NoMerge));
7509 if (Intrinsic == Intrinsic::ubsantrap) {
7510 Value *Arg =
I.getArgOperand(0);
7514 TargetLowering::CallLoweringInfo CLI(
DAG);
7515 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7517 DAG.getExternalSymbol(TrapFuncName.
data(),
7520 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7526 case Intrinsic::allow_runtime_check:
7527 case Intrinsic::allow_ubsan_check:
7531 case Intrinsic::uadd_with_overflow:
7532 case Intrinsic::sadd_with_overflow:
7533 case Intrinsic::usub_with_overflow:
7534 case Intrinsic::ssub_with_overflow:
7535 case Intrinsic::umul_with_overflow:
7536 case Intrinsic::smul_with_overflow: {
7538 switch (Intrinsic) {
7540 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7541 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7542 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7543 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7544 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7545 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7551 EVT OverflowVT = MVT::i1;
7556 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7560 case Intrinsic::prefetch: {
7573 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7575 std::nullopt, Flags);
7581 DAG.setRoot(Result);
7584 case Intrinsic::lifetime_start:
7585 case Intrinsic::lifetime_end: {
7586 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7592 if (!LifetimeObject)
7597 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7598 if (SI ==
FuncInfo.StaticAllocaMap.end())
7602 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7606 case Intrinsic::pseudoprobe: {
7614 case Intrinsic::invariant_start:
7619 case Intrinsic::invariant_end:
7622 case Intrinsic::clear_cache: {
7627 {InputChain, StartVal, EndVal});
7632 case Intrinsic::donothing:
7633 case Intrinsic::seh_try_begin:
7634 case Intrinsic::seh_scope_begin:
7635 case Intrinsic::seh_try_end:
7636 case Intrinsic::seh_scope_end:
7639 case Intrinsic::experimental_stackmap:
7642 case Intrinsic::experimental_patchpoint_void:
7643 case Intrinsic::experimental_patchpoint:
7646 case Intrinsic::experimental_gc_statepoint:
7649 case Intrinsic::experimental_gc_result:
7652 case Intrinsic::experimental_gc_relocate:
7655 case Intrinsic::instrprof_cover:
7657 case Intrinsic::instrprof_increment:
7659 case Intrinsic::instrprof_timestamp:
7661 case Intrinsic::instrprof_value_profile:
7663 case Intrinsic::instrprof_mcdc_parameters:
7665 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7667 case Intrinsic::localescape: {
7668 MachineFunction &MF =
DAG.getMachineFunction();
7669 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7673 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7679 "can only escape static allocas");
7684 TII->get(TargetOpcode::LOCAL_ESCAPE))
7692 case Intrinsic::localrecover: {
7694 MachineFunction &MF =
DAG.getMachineFunction();
7700 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7704 Value *
FP =
I.getArgOperand(1);
7710 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7715 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7721 case Intrinsic::fake_use: {
7722 Value *
V =
I.getArgOperand(0);
7727 auto FakeUseValue = [&]() ->
SDValue {
7741 if (!FakeUseValue || FakeUseValue.isUndef())
7744 Ops[1] = FakeUseValue;
7749 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7753 case Intrinsic::eh_exceptionpointer:
7754 case Intrinsic::eh_exceptioncode: {
7760 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7761 if (Intrinsic == Intrinsic::eh_exceptioncode)
7762 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7766 case Intrinsic::xray_customevent: {
7769 const auto &Triple =
DAG.getTarget().getTargetTriple();
7778 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7780 Ops.push_back(LogEntryVal);
7781 Ops.push_back(StrSizeVal);
7782 Ops.push_back(Chain);
7788 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7791 DAG.setRoot(patchableNode);
7795 case Intrinsic::xray_typedevent: {
7798 const auto &Triple =
DAG.getTarget().getTargetTriple();
7810 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7812 Ops.push_back(LogTypeId);
7813 Ops.push_back(LogEntryVal);
7814 Ops.push_back(StrSizeVal);
7815 Ops.push_back(Chain);
7821 MachineSDNode *MN =
DAG.getMachineNode(
7822 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7824 DAG.setRoot(patchableNode);
7828 case Intrinsic::experimental_deoptimize:
7831 case Intrinsic::stepvector:
7834 case Intrinsic::vector_reduce_fadd:
7835 case Intrinsic::vector_reduce_fmul:
7836 case Intrinsic::vector_reduce_add:
7837 case Intrinsic::vector_reduce_mul:
7838 case Intrinsic::vector_reduce_and:
7839 case Intrinsic::vector_reduce_or:
7840 case Intrinsic::vector_reduce_xor:
7841 case Intrinsic::vector_reduce_smax:
7842 case Intrinsic::vector_reduce_smin:
7843 case Intrinsic::vector_reduce_umax:
7844 case Intrinsic::vector_reduce_umin:
7845 case Intrinsic::vector_reduce_fmax:
7846 case Intrinsic::vector_reduce_fmin:
7847 case Intrinsic::vector_reduce_fmaximum:
7848 case Intrinsic::vector_reduce_fminimum:
7849 visitVectorReduce(
I, Intrinsic);
7852 case Intrinsic::icall_branch_funnel: {
7858 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7861 "llvm.icall.branch.funnel operand must be a GlobalValue");
7862 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7864 struct BranchFunnelTarget {
7870 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7873 if (ElemBase !=
Base)
7875 "to the same GlobalValue");
7881 "llvm.icall.branch.funnel operand must be a GlobalValue");
7887 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7888 return T1.Offset < T2.Offset;
7891 for (
auto &
T : Targets) {
7892 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7893 Ops.push_back(
T.Target);
7896 Ops.push_back(
DAG.getRoot());
7897 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7906 case Intrinsic::wasm_landingpad_index:
7912 case Intrinsic::aarch64_settag:
7913 case Intrinsic::aarch64_settag_zero: {
7914 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
7915 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7918 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
7924 case Intrinsic::amdgcn_cs_chain: {
7929 Type *RetTy =
I.getType();
7939 for (
unsigned Idx : {2, 3, 1}) {
7940 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7942 Arg.setAttributes(&
I, Idx);
7943 Args.push_back(Arg);
7946 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7947 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7948 Args[2].IsInReg =
true;
7951 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
7952 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7954 Arg.setAttributes(&
I, Idx);
7955 Args.push_back(Arg);
7958 TargetLowering::CallLoweringInfo CLI(
DAG);
7961 .setCallee(CC, RetTy, Callee, std::move(Args))
7964 .setConvergent(
I.isConvergent());
7966 std::pair<SDValue, SDValue>
Result =
7970 "Should've lowered as tail call");
7975 case Intrinsic::amdgcn_call_whole_wave: {
7977 bool isTailCall =
I.isTailCall();
7980 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
7981 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
7982 I.getArgOperand(Idx)->getType());
7983 Arg.setAttributes(&
I, Idx);
7990 Args.push_back(Arg);
7995 auto *Token = Bundle->Inputs[0].get();
7996 ConvControlToken =
getValue(Token);
7999 TargetLowering::CallLoweringInfo CLI(
DAG);
8003 getValue(
I.getArgOperand(0)), std::move(Args))
8007 .setConvergent(
I.isConvergent())
8008 .setConvergenceControlToken(ConvControlToken);
8011 std::pair<SDValue, SDValue>
Result =
8014 if (
Result.first.getNode())
8018 case Intrinsic::ptrmask: {
8034 auto HighOnes =
DAG.getNode(
8035 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8036 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8039 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8040 }
else if (
Mask.getValueType() != PtrVT)
8041 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8047 case Intrinsic::threadlocal_address: {
8051 case Intrinsic::get_active_lane_mask: {
8055 EVT ElementVT =
Index.getValueType();
8058 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8066 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8067 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8068 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8071 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8076 case Intrinsic::experimental_get_vector_length: {
8078 "Expected positive VF");
8083 EVT CountVT =
Count.getValueType();
8086 visitTargetIntrinsic(
I, Intrinsic);
8095 if (CountVT.
bitsLT(VT)) {
8100 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8110 case Intrinsic::experimental_vector_partial_reduce_add: {
8112 visitTargetIntrinsic(
I, Intrinsic);
8122 case Intrinsic::experimental_cttz_elts: {
8125 EVT OpVT =
Op.getValueType();
8128 visitTargetIntrinsic(
I, Intrinsic);
8144 ConstantRange VScaleRange(1,
true);
8173 case Intrinsic::vector_insert: {
8181 if (
Index.getValueType() != VectorIdxTy)
8182 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8189 case Intrinsic::vector_extract: {
8197 if (
Index.getValueType() != VectorIdxTy)
8198 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8204 case Intrinsic::experimental_vector_match: {
8210 EVT ResVT =
Mask.getValueType();
8216 visitTargetIntrinsic(
I, Intrinsic);
8222 for (
unsigned i = 0; i < SearchSize; ++i) {
8225 DAG.getVectorIdxConstant(i, sdl));
8234 case Intrinsic::vector_reverse:
8235 visitVectorReverse(
I);
8237 case Intrinsic::vector_splice:
8238 visitVectorSplice(
I);
8240 case Intrinsic::callbr_landingpad:
8241 visitCallBrLandingPad(
I);
8243 case Intrinsic::vector_interleave2:
8244 visitVectorInterleave(
I, 2);
8246 case Intrinsic::vector_interleave3:
8247 visitVectorInterleave(
I, 3);
8249 case Intrinsic::vector_interleave4:
8250 visitVectorInterleave(
I, 4);
8252 case Intrinsic::vector_interleave5:
8253 visitVectorInterleave(
I, 5);
8255 case Intrinsic::vector_interleave6:
8256 visitVectorInterleave(
I, 6);
8258 case Intrinsic::vector_interleave7:
8259 visitVectorInterleave(
I, 7);
8261 case Intrinsic::vector_interleave8:
8262 visitVectorInterleave(
I, 8);
8264 case Intrinsic::vector_deinterleave2:
8265 visitVectorDeinterleave(
I, 2);
8267 case Intrinsic::vector_deinterleave3:
8268 visitVectorDeinterleave(
I, 3);
8270 case Intrinsic::vector_deinterleave4:
8271 visitVectorDeinterleave(
I, 4);
8273 case Intrinsic::vector_deinterleave5:
8274 visitVectorDeinterleave(
I, 5);
8276 case Intrinsic::vector_deinterleave6:
8277 visitVectorDeinterleave(
I, 6);
8279 case Intrinsic::vector_deinterleave7:
8280 visitVectorDeinterleave(
I, 7);
8282 case Intrinsic::vector_deinterleave8:
8283 visitVectorDeinterleave(
I, 8);
8285 case Intrinsic::experimental_vector_compress:
8287 getValue(
I.getArgOperand(0)).getValueType(),
8292 case Intrinsic::experimental_convergence_anchor:
8293 case Intrinsic::experimental_convergence_entry:
8294 case Intrinsic::experimental_convergence_loop:
8295 visitConvergenceControl(
I, Intrinsic);
8297 case Intrinsic::experimental_vector_histogram_add: {
8298 visitVectorHistogram(
I, Intrinsic);
8301 case Intrinsic::experimental_vector_extract_last_active: {
8302 visitVectorExtractLastActive(
I, Intrinsic);
8305 case Intrinsic::loop_dependence_war_mask:
8311 case Intrinsic::loop_dependence_raw_mask:
8320void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8348 PendingConstrainedFP.push_back(OutChain);
8354 PendingConstrainedFPStrict.push_back(OutChain);
8359 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8361 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8366 Flags.setNoFPExcept(
true);
8369 Flags.copyFMF(*FPOp);
8374#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8375 case Intrinsic::INTRINSIC: \
8376 Opcode = ISD::STRICT_##DAGN; \
8378#include "llvm/IR/ConstrainedOps.def"
8379 case Intrinsic::experimental_constrained_fmuladd: {
8386 pushOutChain(
Mul, EB);
8409 if (TM.Options.NoNaNsFPMath)
8417 pushOutChain(Result, EB);
8424 std::optional<unsigned> ResOPC;
8426 case Intrinsic::vp_ctlz: {
8428 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8431 case Intrinsic::vp_cttz: {
8433 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8436 case Intrinsic::vp_cttz_elts: {
8438 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8441#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8442 case Intrinsic::VPID: \
8443 ResOPC = ISD::VPSD; \
8445#include "llvm/IR/VPIntrinsics.def"
8450 "Inconsistency: no SDNode available for this VPIntrinsic!");
8452 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8453 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8455 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8456 : ISD::VP_REDUCE_FMUL;
8462void SelectionDAGBuilder::visitVPLoad(
8474 Alignment =
DAG.getEVTAlign(VT);
8477 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8478 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8481 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8482 MachinePointerInfo(PtrOperand), MMOFlags,
8484 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8491void SelectionDAGBuilder::visitVPLoadFF(
8494 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8504 Alignment =
DAG.getEVTAlign(VT);
8507 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8508 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8511 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8516 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8519void SelectionDAGBuilder::visitVPGather(
8523 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8535 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8537 *Alignment, AAInfo, Ranges);
8547 EVT IdxVT =
Index.getValueType();
8553 LD =
DAG.getGatherVP(
8554 DAG.getVTList(VT, MVT::Other), VT,
DL,
8555 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8561void SelectionDAGBuilder::visitVPStore(
8565 EVT VT = OpValues[0].getValueType();
8570 Alignment =
DAG.getEVTAlign(VT);
8573 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8576 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8577 MachinePointerInfo(PtrOperand), MMOFlags,
8586void SelectionDAGBuilder::visitVPScatter(
8589 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8591 EVT VT = OpValues[0].getValueType();
8601 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8603 *Alignment, AAInfo);
8613 EVT IdxVT =
Index.getValueType();
8619 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8620 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8621 OpValues[2], OpValues[3]},
8627void SelectionDAGBuilder::visitVPStridedLoad(
8639 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8641 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8644 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8646 *Alignment, AAInfo, Ranges);
8648 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8649 OpValues[2], OpValues[3], MMO,
8657void SelectionDAGBuilder::visitVPStridedStore(
8661 EVT VT = OpValues[0].getValueType();
8667 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8670 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8672 *Alignment, AAInfo);
8676 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8684void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8685 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8696 if (TM.Options.NoNaNsFPMath)
8709 "Unexpected target EVL type");
8712 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8715 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8718void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8726 return visitVPCmp(*CmpI);
8729 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8731 SDVTList VTs =
DAG.getVTList(ValueVTs);
8737 "Unexpected target EVL type");
8741 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8743 if (
I == EVLParamPos)
8750 SDNodeFlags SDFlags;
8758 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8760 case ISD::VP_LOAD_FF:
8761 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8763 case ISD::VP_GATHER:
8764 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8766 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8767 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8770 visitVPStore(VPIntrin, OpValues);
8772 case ISD::VP_SCATTER:
8773 visitVPScatter(VPIntrin, OpValues);
8775 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8776 visitVPStridedStore(VPIntrin, OpValues);
8778 case ISD::VP_FMULADD: {
8779 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8780 SDNodeFlags SDFlags;
8785 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8788 ISD::VP_FMUL,
DL, VTs,
8789 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8791 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8792 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8797 case ISD::VP_IS_FPCLASS: {
8798 const DataLayout DLayout =
DAG.getDataLayout();
8800 auto Constant = OpValues[1]->getAsZExtVal();
8803 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8807 case ISD::VP_INTTOPTR: {
8818 case ISD::VP_PTRTOINT: {
8820 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8833 case ISD::VP_CTLZ_ZERO_UNDEF:
8835 case ISD::VP_CTTZ_ZERO_UNDEF:
8836 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8837 case ISD::VP_CTTZ_ELTS: {
8839 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8849 MachineFunction &MF =
DAG.getMachineFunction();
8857 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8858 if (CallSiteIndex) {
8872 assert(BeginLabel &&
"BeginLabel should've been set");
8874 MachineFunction &MF =
DAG.getMachineFunction();
8886 assert(
II &&
"II should've been set");
8897std::pair<SDValue, SDValue>
8911 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8914 "Non-null chain expected with non-tail call!");
8915 assert((Result.second.getNode() || !Result.first.getNode()) &&
8916 "Null value expected with tail call!");
8918 if (!Result.second.getNode()) {
8925 PendingExports.clear();
8927 DAG.setRoot(Result.second);
8945 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8953 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
8954 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8963 bool isTailCall,
bool isMustTailCall,
8966 auto &
DL =
DAG.getDataLayout();
8973 const Value *SwiftErrorVal =
nullptr;
8980 const Value *V = *
I;
8983 if (V->getType()->isEmptyTy())
8988 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9000 Args.push_back(Entry);
9011 Value *V = Bundle->Inputs[0];
9013 Entry.IsCFGuardTarget =
true;
9014 Args.push_back(Entry);
9027 "Target doesn't support calls with kcfi operand bundles.");
9035 auto *Token = Bundle->Inputs[0].get();
9036 ConvControlToken =
getValue(Token);
9042 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9054 "This target doesn't support calls with ptrauth operand bundles.");
9058 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9060 if (Result.first.getNode()) {
9074 DAG.setRoot(CopyNode);
9090 LoadTy, Builder.DAG.getDataLayout()))
9091 return Builder.getValue(LoadCst);
9097 bool ConstantMemory =
false;
9100 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9101 Root = Builder.DAG.getEntryNode();
9102 ConstantMemory =
true;
9105 Root = Builder.DAG.getRoot();
9110 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr,
9113 if (!ConstantMemory)
9114 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9120void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9123 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9134bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9135 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9136 const Value *
Size =
I.getArgOperand(2);
9139 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9145 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9149 if (Res.first.getNode()) {
9150 processIntegerCallValue(
I, Res.first,
true);
9164 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9165 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9187 switch (NumBitsToCompare) {
9199 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9212 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9213 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9217 processIntegerCallValue(
I, Cmp,
false);
9226bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9227 const Value *Src =
I.getArgOperand(0);
9228 const Value *
Char =
I.getArgOperand(1);
9229 const Value *
Length =
I.getArgOperand(2);
9231 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9232 std::pair<SDValue, SDValue> Res =
9235 MachinePointerInfo(Src));
9236 if (Res.first.getNode()) {
9250bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9255 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9256 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9258 Align Alignment = std::min(DstAlign, SrcAlign);
9267 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9268 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9269 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9271 "** memcpy should not be lowered as TailCall in mempcpy context **");
9275 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9288bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9289 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9291 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9292 std::pair<SDValue, SDValue> Res =
9295 MachinePointerInfo(Arg0),
9296 MachinePointerInfo(Arg1), isStpcpy);
9297 if (Res.first.getNode()) {
9299 DAG.setRoot(Res.second);
9311bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9312 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9314 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9315 std::pair<SDValue, SDValue> Res =
9318 MachinePointerInfo(Arg0),
9319 MachinePointerInfo(Arg1));
9320 if (Res.first.getNode()) {
9321 processIntegerCallValue(
I, Res.first,
true);
9334bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9335 const Value *Arg0 =
I.getArgOperand(0);
9337 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9338 std::pair<SDValue, SDValue> Res =
9340 getValue(Arg0), MachinePointerInfo(Arg0));
9341 if (Res.first.getNode()) {
9342 processIntegerCallValue(
I, Res.first,
false);
9355bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9356 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9358 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9359 std::pair<SDValue, SDValue> Res =
9362 MachinePointerInfo(Arg0));
9363 if (Res.first.getNode()) {
9364 processIntegerCallValue(
I, Res.first,
false);
9377bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9380 if (!
I.onlyReadsMemory())
9397bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9400 if (!
I.onlyReadsMemory())
9413void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9415 if (
I.isInlineAsm()) {
9422 if (Function *
F =
I.getCalledFunction()) {
9423 if (
F->isDeclaration()) {
9425 if (
unsigned IID =
F->getIntrinsicID()) {
9426 visitIntrinsicCall(
I, IID);
9435 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9436 F->hasName() &&
LibInfo->getLibFunc(*
F, Func) &&
9437 LibInfo->hasOptimizedCodeGen(Func)) {
9441 if (visitMemCmpBCmpCall(
I))
9444 case LibFunc_copysign:
9445 case LibFunc_copysignf:
9446 case LibFunc_copysignl:
9449 if (
I.onlyReadsMemory()) {
9460 if (visitUnaryFloatCall(
I, ISD::FABS))
9466 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9472 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9475 case LibFunc_fminimum_num:
9476 case LibFunc_fminimum_numf:
9477 case LibFunc_fminimum_numl:
9478 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9481 case LibFunc_fmaximum_num:
9482 case LibFunc_fmaximum_numf:
9483 case LibFunc_fmaximum_numl:
9484 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9490 if (visitUnaryFloatCall(
I, ISD::FSIN))
9496 if (visitUnaryFloatCall(
I, ISD::FCOS))
9502 if (visitUnaryFloatCall(
I, ISD::FTAN))
9508 if (visitUnaryFloatCall(
I, ISD::FASIN))
9514 if (visitUnaryFloatCall(
I, ISD::FACOS))
9520 if (visitUnaryFloatCall(
I, ISD::FATAN))
9524 case LibFunc_atan2f:
9525 case LibFunc_atan2l:
9526 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9532 if (visitUnaryFloatCall(
I, ISD::FSINH))
9538 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9544 if (visitUnaryFloatCall(
I, ISD::FTANH))
9550 case LibFunc_sqrt_finite:
9551 case LibFunc_sqrtf_finite:
9552 case LibFunc_sqrtl_finite:
9553 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9557 case LibFunc_floorf:
9558 case LibFunc_floorl:
9559 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9562 case LibFunc_nearbyint:
9563 case LibFunc_nearbyintf:
9564 case LibFunc_nearbyintl:
9565 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9571 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9577 if (visitUnaryFloatCall(
I, ISD::FRINT))
9581 case LibFunc_roundf:
9582 case LibFunc_roundl:
9583 if (visitUnaryFloatCall(
I, ISD::FROUND))
9587 case LibFunc_truncf:
9588 case LibFunc_truncl:
9589 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9595 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9601 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9605 case LibFunc_exp10f:
9606 case LibFunc_exp10l:
9607 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9611 case LibFunc_ldexpf:
9612 case LibFunc_ldexpl:
9613 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9616 case LibFunc_memcmp:
9617 if (visitMemCmpBCmpCall(
I))
9620 case LibFunc_mempcpy:
9621 if (visitMemPCpyCall(
I))
9624 case LibFunc_memchr:
9625 if (visitMemChrCall(
I))
9628 case LibFunc_strcpy:
9629 if (visitStrCpyCall(
I,
false))
9632 case LibFunc_stpcpy:
9633 if (visitStrCpyCall(
I,
true))
9636 case LibFunc_strcmp:
9637 if (visitStrCmpCall(
I))
9640 case LibFunc_strlen:
9641 if (visitStrLenCall(
I))
9644 case LibFunc_strnlen:
9645 if (visitStrNLenCall(
I))
9669 if (
I.hasDeoptState())
9686 const Value *Discriminator = PAB->Inputs[1];
9688 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9689 assert(Discriminator->getType()->isIntegerTy(64) &&
9690 "Invalid ptrauth discriminator");
9695 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9696 DAG.getDataLayout()))
9736 for (
const auto &Code : Codes)
9751 SDISelAsmOperandInfo &MatchingOpInfo,
9753 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9759 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9761 OpInfo.ConstraintVT);
9762 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9764 MatchingOpInfo.ConstraintVT);
9765 const bool OutOpIsIntOrFP =
9766 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9767 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9768 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9769 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9772 " with a matching output constraint of"
9773 " incompatible type!");
9775 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9782 SDISelAsmOperandInfo &OpInfo,
9795 const Value *OpVal = OpInfo.CallOperandVal;
9813 DL.getPrefTypeAlign(Ty),
false,
9816 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9819 OpInfo.CallOperand = StackSlot;
9832static std::optional<unsigned>
9834 SDISelAsmOperandInfo &OpInfo,
9835 SDISelAsmOperandInfo &RefOpInfo) {
9846 return std::nullopt;
9850 unsigned AssignedReg;
9853 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9856 return std::nullopt;
9861 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9863 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9872 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9877 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9882 OpInfo.CallOperand =
9883 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9884 OpInfo.ConstraintVT = RegVT;
9888 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9891 OpInfo.CallOperand =
9892 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9893 OpInfo.ConstraintVT = VT;
9900 if (OpInfo.isMatchingInputConstraint())
9901 return std::nullopt;
9903 EVT ValueVT = OpInfo.ConstraintVT;
9904 if (OpInfo.ConstraintVT == MVT::Other)
9908 unsigned NumRegs = 1;
9909 if (OpInfo.ConstraintVT != MVT::Other)
9924 I = std::find(
I, RC->
end(), AssignedReg);
9925 if (
I == RC->
end()) {
9928 return {AssignedReg};
9932 for (; NumRegs; --NumRegs, ++
I) {
9933 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9938 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9939 return std::nullopt;
9944 const std::vector<SDValue> &AsmNodeOperands) {
9947 for (; OperandNo; --OperandNo) {
9949 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9952 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9953 "Skipped past definitions?");
9954 CurOp +=
F.getNumOperandRegisters() + 1;
9965 explicit ExtraFlags(
const CallBase &
Call) {
9967 if (
IA->hasSideEffects())
9969 if (
IA->isAlignStack())
9976 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
9992 unsigned get()
const {
return Flags; }
10015void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10022 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10024 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10028 bool HasSideEffect =
IA->hasSideEffects();
10029 ExtraFlags ExtraInfo(
Call);
10031 for (
auto &
T : TargetConstraints) {
10032 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10033 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10035 if (OpInfo.CallOperandVal)
10036 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10038 if (!HasSideEffect)
10039 HasSideEffect = OpInfo.hasMemory(TLI);
10051 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10052 "' expects an integer constant "
10055 ExtraInfo.update(
T);
10063 if (EmitEHLabels) {
10064 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10068 if (IsCallBr || EmitEHLabels) {
10076 if (EmitEHLabels) {
10077 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10082 IA->collectAsmStrs(AsmStrs);
10085 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10093 if (OpInfo.hasMatchingInput()) {
10094 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10125 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10128 OpInfo.isIndirect =
false;
10135 !OpInfo.isIndirect) {
10136 assert((OpInfo.isMultipleAlternative ||
10138 "Can only indirectify direct input operands!");
10144 OpInfo.CallOperandVal =
nullptr;
10147 OpInfo.isIndirect =
true;
10153 std::vector<SDValue> AsmNodeOperands;
10154 AsmNodeOperands.push_back(
SDValue());
10155 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10162 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10166 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10171 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10173 SDISelAsmOperandInfo &RefOpInfo =
10174 OpInfo.isMatchingInputConstraint()
10175 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10177 const auto RegError =
10180 const MachineFunction &MF =
DAG.getMachineFunction();
10182 const char *
RegName =
TRI.getName(*RegError);
10183 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10184 "' allocated for constraint '" +
10185 Twine(OpInfo.ConstraintCode) +
10186 "' does not match required type");
10190 auto DetectWriteToReservedRegister = [&]() {
10191 const MachineFunction &MF =
DAG.getMachineFunction();
10196 emitInlineAsmError(
Call,
"write to reserved register '" +
10205 !OpInfo.isMatchingInputConstraint())) &&
10206 "Only address as input operand is allowed.");
10208 switch (OpInfo.Type) {
10214 "Failed to convert memory constraint code to constraint id.");
10218 OpFlags.setMemConstraint(ConstraintID);
10219 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10221 AsmNodeOperands.push_back(OpInfo.CallOperand);
10226 if (OpInfo.AssignedRegs.
Regs.empty()) {
10227 emitInlineAsmError(
10228 Call,
"couldn't allocate output register for constraint '" +
10229 Twine(OpInfo.ConstraintCode) +
"'");
10233 if (DetectWriteToReservedRegister())
10247 SDValue InOperandVal = OpInfo.CallOperand;
10249 if (OpInfo.isMatchingInputConstraint()) {
10254 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10255 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10256 if (OpInfo.isIndirect) {
10258 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10259 "don't know how to handle tied "
10260 "indirect register inputs");
10265 MachineFunction &MF =
DAG.getMachineFunction();
10270 MVT RegVT =
R->getSimpleValueType(0);
10271 const TargetRegisterClass *RC =
10274 :
TRI.getMinimalPhysRegClass(TiedReg);
10275 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10278 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10282 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10284 OpInfo.getMatchedOperand(), dl,
DAG,
10289 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10290 assert(
Flag.getNumOperandRegisters() == 1 &&
10291 "Unexpected number of operands");
10294 Flag.clearMemConstraint();
10295 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10296 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10298 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10309 std::vector<SDValue>
Ops;
10315 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10316 Twine(OpInfo.ConstraintCode) +
"'");
10320 emitInlineAsmError(
Call,
10321 "invalid operand for inline asm constraint '" +
10322 Twine(OpInfo.ConstraintCode) +
"'");
10328 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10335 assert((OpInfo.isIndirect ||
10337 "Operand must be indirect to be a mem!");
10340 "Memory operands expect pointer values");
10345 "Failed to convert memory constraint code to constraint id.");
10349 ResOpType.setMemConstraint(ConstraintID);
10350 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10353 AsmNodeOperands.push_back(InOperandVal);
10361 "Failed to convert memory constraint code to constraint id.");
10365 SDValue AsmOp = InOperandVal;
10369 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10375 ResOpType.setMemConstraint(ConstraintID);
10377 AsmNodeOperands.push_back(
10380 AsmNodeOperands.push_back(AsmOp);
10386 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10387 Twine(OpInfo.ConstraintCode) +
"'");
10392 if (OpInfo.isIndirect) {
10393 emitInlineAsmError(
10394 Call,
"Don't know how to handle indirect register inputs yet "
10395 "for constraint '" +
10396 Twine(OpInfo.ConstraintCode) +
"'");
10401 if (OpInfo.AssignedRegs.
Regs.empty()) {
10402 emitInlineAsmError(
Call,
10403 "couldn't allocate input reg for constraint '" +
10404 Twine(OpInfo.ConstraintCode) +
"'");
10408 if (DetectWriteToReservedRegister())
10417 0, dl,
DAG, AsmNodeOperands);
10423 if (!OpInfo.AssignedRegs.
Regs.empty())
10433 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10435 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10437 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10449 ResultTypes = StructResult->elements();
10450 else if (!CallResultType->
isVoidTy())
10451 ResultTypes =
ArrayRef(CallResultType);
10453 auto CurResultType = ResultTypes.
begin();
10454 auto handleRegAssign = [&](
SDValue V) {
10455 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10456 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10457 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10469 if (ResultVT !=
V.getValueType() &&
10472 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10473 V.getValueType().isInteger()) {
10479 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10485 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10489 if (OpInfo.AssignedRegs.
Regs.empty())
10492 switch (OpInfo.ConstraintType) {
10496 Chain, &Glue, &
Call);
10508 assert(
false &&
"Unexpected unknown constraint");
10512 if (OpInfo.isIndirect) {
10513 const Value *
Ptr = OpInfo.CallOperandVal;
10514 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10516 MachinePointerInfo(
Ptr));
10523 handleRegAssign(V);
10525 handleRegAssign(Val);
10531 if (!ResultValues.
empty()) {
10532 assert(CurResultType == ResultTypes.
end() &&
10533 "Mismatch in number of ResultTypes");
10535 "Mismatch in number of output operands in asm result");
10538 DAG.getVTList(ResultVTs), ResultValues);
10543 if (!OutChains.
empty())
10546 if (EmitEHLabels) {
10551 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10553 DAG.setRoot(Chain);
10556void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10557 const Twine &Message) {
10558 LLVMContext &Ctx = *
DAG.getContext();
10562 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10566 if (ValueVTs.
empty())
10570 for (
const EVT &VT : ValueVTs)
10571 Ops.push_back(
DAG.getUNDEF(VT));
10576void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10580 DAG.getSrcValue(
I.getArgOperand(0))));
10583void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10584 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10585 const DataLayout &
DL =
DAG.getDataLayout();
10589 DL.getABITypeAlign(
I.getType()).value());
10590 DAG.setRoot(
V.getValue(1));
10592 if (
I.getType()->isPointerTy())
10593 V =
DAG.getPtrExtOrTrunc(
10598void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10602 DAG.getSrcValue(
I.getArgOperand(0))));
10605void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10610 DAG.getSrcValue(
I.getArgOperand(0)),
10611 DAG.getSrcValue(
I.getArgOperand(1))));
10617 std::optional<ConstantRange> CR =
getRange(
I);
10619 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10622 APInt Lo = CR->getUnsignedMin();
10623 if (!
Lo.isMinValue())
10626 APInt Hi = CR->getUnsignedMax();
10627 unsigned Bits = std::max(
Hi.getActiveBits(),
10635 DAG.getValueType(SmallVT));
10636 unsigned NumVals =
Op.getNode()->getNumValues();
10642 Ops.push_back(ZExt);
10643 for (
unsigned I = 1;
I != NumVals; ++
I)
10644 Ops.push_back(
Op.getValue(
I));
10646 return DAG.getMergeValues(
Ops,
SL);
10657 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10660 Args.reserve(NumArgs);
10664 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10665 ArgI != ArgE; ++ArgI) {
10666 const Value *V =
Call->getOperand(ArgI);
10668 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10671 Entry.setAttributes(
Call, ArgI);
10672 Args.push_back(Entry);
10677 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10706 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10715 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10721void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10747 Ops.push_back(Chain);
10748 Ops.push_back(InGlue);
10755 assert(
ID.getValueType() == MVT::i64);
10757 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10758 Ops.push_back(IDConst);
10764 Ops.push_back(ShadConst);
10770 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10771 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10774 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10779 DAG.setRoot(Chain);
10782 FuncInfo.MF->getFrameInfo().setHasStackMap();
10786void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10803 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10806 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10807 SDLoc(SymbolicCallee),
10808 SymbolicCallee->getValueType(0));
10818 "Not enough arguments provided to the patchpoint intrinsic");
10821 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10825 TargetLowering::CallLoweringInfo CLI(
DAG);
10830 SDNode *CallEnd =
Result.second.getNode();
10831 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10839 "Expected a callseq node.");
10841 bool HasGlue =
Call->getGluedNode();
10866 Ops.push_back(Callee);
10872 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10873 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10876 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10881 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10892 if (IsAnyRegCC && HasDef) {
10894 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10897 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10902 NodeTys =
DAG.getVTList(ValueVTs);
10904 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10907 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
10921 if (IsAnyRegCC && HasDef) {
10924 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10930 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10933void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10935 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10938 if (
I.arg_size() > 1)
10943 SDNodeFlags SDFlags;
10947 switch (Intrinsic) {
10948 case Intrinsic::vector_reduce_fadd:
10951 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10954 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10956 case Intrinsic::vector_reduce_fmul:
10959 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10962 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10964 case Intrinsic::vector_reduce_add:
10965 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10967 case Intrinsic::vector_reduce_mul:
10968 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10970 case Intrinsic::vector_reduce_and:
10971 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10973 case Intrinsic::vector_reduce_or:
10974 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10976 case Intrinsic::vector_reduce_xor:
10977 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10979 case Intrinsic::vector_reduce_smax:
10980 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10982 case Intrinsic::vector_reduce_smin:
10983 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10985 case Intrinsic::vector_reduce_umax:
10986 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10988 case Intrinsic::vector_reduce_umin:
10989 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10991 case Intrinsic::vector_reduce_fmax:
10992 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10994 case Intrinsic::vector_reduce_fmin:
10995 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10997 case Intrinsic::vector_reduce_fmaximum:
10998 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
11000 case Intrinsic::vector_reduce_fminimum:
11001 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11014 Attrs.push_back(Attribute::SExt);
11016 Attrs.push_back(Attribute::ZExt);
11018 Attrs.push_back(Attribute::InReg);
11020 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11028std::pair<SDValue, SDValue>
11042 "Only supported for non-aggregate returns");
11045 for (
Type *Ty : RetOrigTys)
11054 RetOrigTys.
swap(OldRetOrigTys);
11055 RetVTs.
swap(OldRetVTs);
11056 Offsets.swap(OldOffsets);
11058 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11059 EVT RetVT = OldRetVTs[i];
11063 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11064 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11065 RetVTs.
append(NumRegs, RegisterVT);
11066 for (
unsigned j = 0; j != NumRegs; ++j)
11079 int DemoteStackIdx = -100;
11092 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11093 Entry.IsSRet =
true;
11094 Entry.Alignment = Alignment;
11106 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11108 if (NeedsRegBlock) {
11109 Flags.setInConsecutiveRegs();
11110 if (
I == RetVTs.
size() - 1)
11111 Flags.setInConsecutiveRegsLast();
11113 EVT VT = RetVTs[
I];
11117 for (
unsigned i = 0; i != NumRegs; ++i) {
11121 Ret.Flags.setPointer();
11122 Ret.Flags.setPointerAddrSpace(
11126 Ret.Flags.setSExt();
11128 Ret.Flags.setZExt();
11130 Ret.Flags.setInReg();
11131 CLI.
Ins.push_back(Ret);
11140 if (Arg.IsSwiftError) {
11146 CLI.
Ins.push_back(Ret);
11154 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11158 Type *FinalType = Args[i].Ty;
11159 if (Args[i].IsByVal)
11160 FinalType = Args[i].IndirectType;
11163 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11166 Type *ArgTy = OrigArgTy;
11167 if (Args[i].Ty != Args[i].OrigTy) {
11168 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11169 ArgTy = Args[i].Ty;
11174 Args[i].Node.getResNo() +
Value);
11181 Flags.setOrigAlign(OriginalAlignment);
11186 Flags.setPointer();
11189 if (Args[i].IsZExt)
11191 if (Args[i].IsSExt)
11193 if (Args[i].IsNoExt)
11195 if (Args[i].IsInReg) {
11202 Flags.setHvaStart();
11208 if (Args[i].IsSRet)
11210 if (Args[i].IsSwiftSelf)
11211 Flags.setSwiftSelf();
11212 if (Args[i].IsSwiftAsync)
11213 Flags.setSwiftAsync();
11214 if (Args[i].IsSwiftError)
11215 Flags.setSwiftError();
11216 if (Args[i].IsCFGuardTarget)
11217 Flags.setCFGuardTarget();
11218 if (Args[i].IsByVal)
11220 if (Args[i].IsByRef)
11222 if (Args[i].IsPreallocated) {
11223 Flags.setPreallocated();
11231 if (Args[i].IsInAlloca) {
11232 Flags.setInAlloca();
11241 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11242 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11243 Flags.setByValSize(FrameSize);
11246 if (
auto MA = Args[i].Alignment)
11250 }
else if (
auto MA = Args[i].Alignment) {
11253 MemAlign = OriginalAlignment;
11255 Flags.setMemAlign(MemAlign);
11256 if (Args[i].IsNest)
11259 Flags.setInConsecutiveRegs();
11262 unsigned NumParts =
11267 if (Args[i].IsSExt)
11269 else if (Args[i].IsZExt)
11274 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11279 Args[i].Ty->getPointerAddressSpace())) &&
11280 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11293 CLI.
RetZExt == Args[i].IsZExt))
11294 Flags.setReturned();
11300 for (
unsigned j = 0; j != NumParts; ++j) {
11306 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11307 if (NumParts > 1 && j == 0)
11311 if (j == NumParts - 1)
11315 CLI.
Outs.push_back(MyFlags);
11316 CLI.
OutVals.push_back(Parts[j]);
11319 if (NeedsRegBlock &&
Value == NumValues - 1)
11320 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11332 "LowerCall didn't return a valid chain!");
11334 "LowerCall emitted a return value for a tail call!");
11336 "LowerCall didn't emit the correct number of values!");
11348 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11349 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11350 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11351 "LowerCall emitted a value with the wrong type!");
11361 unsigned NumValues = RetVTs.
size();
11362 ReturnValues.
resize(NumValues);
11369 for (
unsigned i = 0; i < NumValues; ++i) {
11376 DemoteStackIdx, Offsets[i]),
11378 ReturnValues[i] = L;
11379 Chains[i] = L.getValue(1);
11386 std::optional<ISD::NodeType> AssertOp;
11391 unsigned CurReg = 0;
11392 for (
EVT VT : RetVTs) {
11398 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11406 if (ReturnValues.
empty())
11412 return std::make_pair(Res, CLI.
Chain);
11429 if (
N->getNumValues() == 1) {
11437 "Lowering returned the wrong number of results!");
11440 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11454 "Copy from a reg to the same reg!");
11455 assert(!Reg.isPhysical() &&
"Is a physreg");
11461 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11466 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11467 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11468 ExtendType = PreferredExtendIt->second;
11471 PendingExports.push_back(Chain);
11483 return A->use_empty();
11485 const BasicBlock &Entry =
A->getParent()->front();
11486 for (
const User *U :
A->users())
11495 std::pair<const AllocaInst *, const StoreInst *>>;
11507 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11509 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11510 StaticAllocas.
reserve(NumArgs * 2);
11512 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11515 V = V->stripPointerCasts();
11517 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11520 return &Iter.first->second;
11537 if (
I.isDebugOrPseudoInst())
11541 for (
const Use &U :
I.operands()) {
11542 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11543 *
Info = StaticAllocaInfo::Clobbered;
11549 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11550 *
Info = StaticAllocaInfo::Clobbered;
11553 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11554 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11560 if (*
Info != StaticAllocaInfo::Unknown)
11568 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11570 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11574 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11575 ArgCopyElisionCandidates.count(Arg)) {
11576 *
Info = StaticAllocaInfo::Clobbered;
11580 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11584 *
Info = StaticAllocaInfo::Elidable;
11585 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11590 if (ArgCopyElisionCandidates.size() == NumArgs)
11614 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11615 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11616 const AllocaInst *AI = ArgCopyIter->second.first;
11617 int FixedIndex = FINode->getIndex();
11619 int OldIndex = AllocaIndex;
11623 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11629 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11630 "greater than stack argument alignment ("
11631 <<
DebugStr(RequiredAlignment) <<
" vs "
11639 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11640 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11646 AllocaIndex = FixedIndex;
11647 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11648 for (
SDValue ArgVal : ArgVals)
11652 const StoreInst *
SI = ArgCopyIter->second.second;
11665void SelectionDAGISel::LowerArguments(
const Function &
F) {
11666 SelectionDAG &DAG =
SDB->DAG;
11667 SDLoc dl =
SDB->getCurSDLoc();
11672 if (
F.hasFnAttribute(Attribute::Naked))
11677 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11679 ISD::ArgFlagsTy
Flags;
11681 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11682 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11684 Ins.push_back(RetArg);
11692 ArgCopyElisionCandidates);
11695 for (
const Argument &Arg :
F.args()) {
11696 unsigned ArgNo = Arg.getArgNo();
11699 bool isArgValueUsed = !Arg.
use_empty();
11700 unsigned PartBase = 0;
11702 if (Arg.hasAttribute(Attribute::ByVal))
11703 FinalType = Arg.getParamByValType();
11704 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11705 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11706 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11709 EVT VT =
TLI->getValueType(
DL, ArgTy);
11710 ISD::ArgFlagsTy
Flags;
11713 Flags.setPointer();
11716 if (Arg.hasAttribute(Attribute::ZExt))
11718 if (Arg.hasAttribute(Attribute::SExt))
11720 if (Arg.hasAttribute(Attribute::InReg)) {
11727 Flags.setHvaStart();
11733 if (Arg.hasAttribute(Attribute::StructRet))
11735 if (Arg.hasAttribute(Attribute::SwiftSelf))
11736 Flags.setSwiftSelf();
11737 if (Arg.hasAttribute(Attribute::SwiftAsync))
11738 Flags.setSwiftAsync();
11739 if (Arg.hasAttribute(Attribute::SwiftError))
11740 Flags.setSwiftError();
11741 if (Arg.hasAttribute(Attribute::ByVal))
11743 if (Arg.hasAttribute(Attribute::ByRef))
11745 if (Arg.hasAttribute(Attribute::InAlloca)) {
11746 Flags.setInAlloca();
11754 if (Arg.hasAttribute(Attribute::Preallocated)) {
11755 Flags.setPreallocated();
11767 const Align OriginalAlignment(
11768 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11769 Flags.setOrigAlign(OriginalAlignment);
11772 Type *ArgMemTy =
nullptr;
11773 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11776 ArgMemTy = Arg.getPointeeInMemoryValueType();
11778 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11783 if (
auto ParamAlign = Arg.getParamStackAlign())
11784 MemAlign = *ParamAlign;
11785 else if ((ParamAlign = Arg.getParamAlign()))
11786 MemAlign = *ParamAlign;
11788 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11789 if (
Flags.isByRef())
11790 Flags.setByRefSize(MemSize);
11792 Flags.setByValSize(MemSize);
11793 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11794 MemAlign = *ParamAlign;
11796 MemAlign = OriginalAlignment;
11798 Flags.setMemAlign(MemAlign);
11800 if (Arg.hasAttribute(Attribute::Nest))
11803 Flags.setInConsecutiveRegs();
11804 if (ArgCopyElisionCandidates.count(&Arg))
11805 Flags.setCopyElisionCandidate();
11806 if (Arg.hasAttribute(Attribute::Returned))
11807 Flags.setReturned();
11809 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11810 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11811 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11812 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11813 for (
unsigned i = 0; i != NumRegs; ++i) {
11817 ISD::InputArg MyFlags(
11818 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11820 if (NumRegs > 1 && i == 0)
11821 MyFlags.Flags.setSplit();
11824 MyFlags.Flags.setOrigAlign(
Align(1));
11825 if (i == NumRegs - 1)
11826 MyFlags.Flags.setSplitEnd();
11828 Ins.push_back(MyFlags);
11830 if (NeedsRegBlock &&
Value == NumValues - 1)
11831 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11838 SDValue NewRoot =
TLI->LowerFormalArguments(
11839 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11843 "LowerFormalArguments didn't return a valid chain!");
11845 "LowerFormalArguments didn't emit the correct number of values!");
11847 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11849 "LowerFormalArguments emitted a null value!");
11851 "LowerFormalArguments emitted a value with the wrong type!");
11863 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11864 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11865 std::optional<ISD::NodeType> AssertOp;
11868 F.getCallingConv(), AssertOp);
11870 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11871 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11873 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11874 FuncInfo->DemoteRegister = SRetReg;
11876 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11884 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11885 for (
const Argument &Arg :
F.args()) {
11889 unsigned NumValues = ValueVTs.
size();
11890 if (NumValues == 0)
11897 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11898 unsigned NumParts = 0;
11899 for (EVT VT : ValueVTs)
11900 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
11901 F.getCallingConv(), VT);
11905 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11910 bool isSwiftErrorArg =
11911 TLI->supportSwiftError() &&
11912 Arg.hasAttribute(Attribute::SwiftError);
11913 if (!ArgHasUses && !isSwiftErrorArg) {
11914 SDB->setUnusedArgValue(&Arg, InVals[i]);
11917 if (FrameIndexSDNode *FI =
11919 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11922 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11923 EVT VT = ValueVTs[Val];
11924 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
11925 F.getCallingConv(), VT);
11926 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
11927 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11932 if (ArgHasUses || isSwiftErrorArg) {
11933 std::optional<ISD::NodeType> AssertOp;
11934 if (Arg.hasAttribute(Attribute::SExt))
11936 else if (Arg.hasAttribute(Attribute::ZExt))
11941 NewRoot,
F.getCallingConv(), AssertOp);
11944 if (NoFPClass !=
fcNone) {
11946 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
11948 OutVal, SDNoFPClass);
11957 if (ArgValues.
empty())
11961 if (FrameIndexSDNode *FI =
11963 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11966 SDB->getCurSDLoc());
11968 SDB->setValue(&Arg, Res);
11978 if (LoadSDNode *LNode =
11980 if (FrameIndexSDNode *FI =
11982 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12010 FuncInfo->InitializeRegForValue(&Arg);
12011 SDB->CopyToExportRegsIfNeeded(&Arg);
12015 if (!Chains.
empty()) {
12022 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12026 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12027 for (MachineFunction::VariableDbgInfo &VI :
12028 MF->getInStackSlotVariableDbgInfo()) {
12029 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12030 if (
I != ArgCopyElisionFrameIndexMap.
end())
12031 VI.updateStackSlot(
I->second);
12046SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12047 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12049 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12055 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12059 if (!SuccsHandled.
insert(SuccMBB).second)
12067 for (
const PHINode &PN : SuccBB->phis()) {
12069 if (PN.use_empty())
12073 if (PN.getType()->isEmptyTy())
12077 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12082 RegOut =
FuncInfo.CreateRegs(&PN);
12100 "Didn't codegen value into a register!??");
12110 for (EVT VT : ValueVTs) {
12112 for (
unsigned i = 0; i != NumRegisters; ++i)
12114 Reg += NumRegisters;
12134void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12136 if (MaybeTC.
getNode() !=
nullptr)
12137 DAG.setRoot(MaybeTC);
12142void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12145 MachineFunction *CurMF =
FuncInfo.MF;
12146 MachineBasicBlock *NextMBB =
nullptr;
12151 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12153 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12155 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12163 CaseCluster &
Small = *
W.FirstCluster;
12164 CaseCluster &
Big = *
W.LastCluster;
12168 const APInt &SmallValue =
Small.Low->getValue();
12169 const APInt &BigValue =
Big.Low->getValue();
12172 APInt CommonBit = BigValue ^ SmallValue;
12179 DAG.getConstant(CommonBit,
DL, VT));
12181 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12187 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12189 addSuccessorWithProb(
12190 SwitchMBB, DefaultMBB,
12194 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12201 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12202 DAG.getBasicBlock(DefaultMBB));
12204 DAG.setRoot(BrCond);
12216 [](
const CaseCluster &a,
const CaseCluster &b) {
12217 return a.Prob != b.Prob ?
12219 a.Low->getValue().slt(b.Low->getValue());
12226 if (
I->Prob >
W.LastCluster->Prob)
12228 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12236 BranchProbability DefaultProb =
W.DefaultProb;
12237 BranchProbability UnhandledProbs = DefaultProb;
12239 UnhandledProbs +=
I->Prob;
12241 MachineBasicBlock *CurMBB =
W.MBB;
12243 bool FallthroughUnreachable =
false;
12244 MachineBasicBlock *Fallthrough;
12245 if (
I ==
W.LastCluster) {
12247 Fallthrough = DefaultMBB;
12252 CurMF->
insert(BBI, Fallthrough);
12256 UnhandledProbs -=
I->Prob;
12261 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12262 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12265 MachineBasicBlock *JumpMBB =
JT->MBB;
12266 CurMF->
insert(BBI, JumpMBB);
12268 auto JumpProb =
I->Prob;
12269 auto FallthroughProb = UnhandledProbs;
12277 if (*SI == DefaultMBB) {
12278 JumpProb += DefaultProb / 2;
12279 FallthroughProb -= DefaultProb / 2;
12297 if (FallthroughUnreachable) {
12304 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12305 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12311 JT->Default = Fallthrough;
12314 if (CurMBB == SwitchMBB) {
12322 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12325 for (BitTestCase &BTC : BTB->
Cases)
12337 BTB->
Prob += DefaultProb / 2;
12341 if (FallthroughUnreachable)
12345 if (CurMBB == SwitchMBB) {
12352 const Value *
RHS, *
LHS, *MHS;
12354 if (
I->Low ==
I->High) {
12369 if (FallthroughUnreachable)
12373 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12376 if (CurMBB == SwitchMBB)
12379 SL->SwitchCases.push_back(CB);
12384 CurMBB = Fallthrough;
12388void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12389 const SwitchWorkListItem &W,
12392 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12393 "Clusters not sorted?");
12394 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12396 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12397 SL->computeSplitWorkItemInfo(W);
12402 assert(PivotCluster >
W.FirstCluster);
12403 assert(PivotCluster <=
W.LastCluster);
12408 const ConstantInt *Pivot = PivotCluster->Low;
12417 MachineBasicBlock *LeftMBB;
12418 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12419 FirstLeft->Low ==
W.GE &&
12420 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12421 LeftMBB = FirstLeft->MBB;
12423 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12424 FuncInfo.MF->insert(BBI, LeftMBB);
12426 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12434 MachineBasicBlock *RightMBB;
12435 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12436 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12437 RightMBB = FirstRight->MBB;
12439 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12440 FuncInfo.MF->insert(BBI, RightMBB);
12442 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12448 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12451 if (
W.MBB == SwitchMBB)
12454 SL->SwitchCases.push_back(CB);
12479 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12487 unsigned PeeledCaseIndex = 0;
12488 bool SwitchPeeled =
false;
12489 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12490 CaseCluster &CC = Clusters[
Index];
12491 if (CC.
Prob < TopCaseProb)
12493 TopCaseProb = CC.
Prob;
12494 PeeledCaseIndex =
Index;
12495 SwitchPeeled =
true;
12500 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12501 << TopCaseProb <<
"\n");
12506 MachineBasicBlock *PeeledSwitchMBB =
12508 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12511 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12512 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12513 nullptr,
nullptr, TopCaseProb.
getCompl()};
12514 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12516 Clusters.erase(PeeledCaseIt);
12517 for (CaseCluster &CC : Clusters) {
12519 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12520 << CC.
Prob <<
"\n");
12524 PeeledCaseProb = TopCaseProb;
12525 return PeeledSwitchMBB;
12528void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12530 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12532 Clusters.reserve(
SI.getNumCases());
12533 for (
auto I :
SI.cases()) {
12534 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12535 const ConstantInt *CaseVal =
I.getCaseValue();
12536 BranchProbability Prob =
12538 : BranchProbability(1,
SI.getNumCases() + 1);
12542 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12551 MachineBasicBlock *PeeledSwitchMBB =
12552 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12555 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12556 if (Clusters.empty()) {
12557 assert(PeeledSwitchMBB == SwitchMBB);
12559 if (DefaultMBB != NextBlock(SwitchMBB)) {
12566 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12568 SL->findBitTestClusters(Clusters, &SI);
12571 dbgs() <<
"Case clusters: ";
12572 for (
const CaseCluster &
C : Clusters) {
12578 C.Low->getValue().print(
dbgs(),
true);
12579 if (
C.Low !=
C.High) {
12581 C.High->getValue().print(
dbgs(),
true);
12588 assert(!Clusters.empty());
12592 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12596 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12599 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12601 while (!WorkList.
empty()) {
12603 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12608 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12612 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12616void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12617 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12623void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12624 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12629 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12638 SmallVector<int, 8>
Mask;
12640 for (
unsigned i = 0; i != NumElts; ++i)
12641 Mask.push_back(NumElts - 1 - i);
12646void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12655 EVT OutVT = ValueVTs[0];
12659 for (
unsigned i = 0; i != Factor; ++i) {
12660 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12662 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12668 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12670 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12678 DAG.getVTList(ValueVTs), SubVecs);
12682void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12685 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12690 for (
unsigned i = 0; i < Factor; ++i) {
12693 "Expected VTs to be the same");
12711 for (
unsigned i = 0; i < Factor; ++i)
12718void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12722 unsigned NumValues = ValueVTs.
size();
12723 if (NumValues == 0)
return;
12728 for (
unsigned i = 0; i != NumValues; ++i)
12733 DAG.getVTList(ValueVTs), Values));
12736void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12737 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12749 DAG.getSignedConstant(
12756 uint64_t Idx = (NumElts +
Imm) % NumElts;
12759 SmallVector<int, 8>
Mask;
12760 for (
unsigned i = 0; i < NumElts; ++i)
12761 Mask.push_back(Idx + i);
12789 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12790 "start of copy chain MUST be COPY");
12791 Reg =
MI->getOperand(1).getReg();
12794 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12795 MI =
MRI.def_begin(
Reg)->getParent();
12798 if (
MI->getOpcode() == TargetOpcode::COPY) {
12799 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12800 Reg =
MI->getOperand(1).getReg();
12801 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12804 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12805 "end of copy chain MUST be INLINEASM_BR");
12815void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12821 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12822 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12823 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12831 for (
auto &
T : TargetConstraints) {
12832 SDISelAsmOperandInfo OpInfo(
T);
12840 switch (OpInfo.ConstraintType) {
12851 FuncInfo.MBB->addLiveIn(OriginalDef);
12859 ResultVTs.
push_back(OpInfo.ConstraintVT);
12868 ResultVTs.
push_back(OpInfo.ConstraintVT);
12876 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li)
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.experimental.vector.partial.reduce.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
Set rounding mode.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Floating point maxnum.
@ SPF_NABS
Absolute value.
@ SPF_FMAXNUM
Floating point minnum.
@ SPF_UMIN
Signed minimum.
@ SPF_UMAX
Signed maximum.
@ SPF_SMAX
Unsigned minimum.
@ SPF_FMINNUM
Unsigned maximum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
NaN behavior not applicable.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, returns the non-NaN.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a vector value type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)