78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
113#define DEBUG_TYPE "isel"
121 cl::desc(
"Insert the experimental `assertalign` node."),
126 cl::desc(
"Generate low-precision inline sequences "
127 "for some float libcalls"),
133 cl::desc(
"Set the case probability threshold for peeling the case from a "
134 "switch statement. A value greater than 100 will void this "
154 const SDValue *Parts,
unsigned NumParts,
157 std::optional<CallingConv::ID> CC);
166 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
168 std::optional<CallingConv::ID> CC = std::nullopt,
169 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
173 PartVT, ValueVT, CC))
180 assert(NumParts > 0 &&
"No parts to assemble!");
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
198 if (RoundParts > 2) {
202 PartVT, HalfVT, V, InChain);
204 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
205 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
213 if (RoundParts < NumParts) {
215 unsigned OddParts = NumParts - RoundParts;
218 OddVT, V, InChain, CC);
234 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
245 !PartVT.
isVector() &&
"Unexpected split");
257 if (PartEVT == ValueVT)
261 ValueVT.
bitsLT(PartEVT)) {
270 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
274 if (ValueVT.
bitsLT(PartEVT)) {
279 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
294 llvm::Attribute::StrictFP)) {
296 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
303 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
308 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
309 ValueVT.
bitsLT(PartEVT)) {
310 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
318 const Twine &ErrMsg) {
321 return Ctx.emitError(ErrMsg);
324 if (CI->isInlineAsm()) {
326 *CI, ErrMsg +
", possible invalid constraint for vector type"));
329 return Ctx.emitError(
I, ErrMsg);
338 const SDValue *Parts,
unsigned NumParts,
341 std::optional<CallingConv::ID> CallConv) {
343 assert(NumParts > 0 &&
"No parts to assemble!");
344 const bool IsABIRegCopy = CallConv.has_value();
353 unsigned NumIntermediates;
358 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
359 NumIntermediates, RegisterVT);
363 NumIntermediates, RegisterVT);
366 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
368 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
371 "Part type sizes don't match!");
375 if (NumIntermediates == NumParts) {
378 for (
unsigned i = 0; i != NumParts; ++i)
380 V, InChain, CallConv);
381 }
else if (NumParts > 0) {
384 assert(NumParts % NumIntermediates == 0 &&
385 "Must expand into a divisible number of parts!");
386 unsigned Factor = NumParts / NumIntermediates;
387 for (
unsigned i = 0; i != NumIntermediates; ++i)
389 IntermediateVT, V, InChain, CallConv);
404 DL, BuiltVectorTy,
Ops);
410 if (PartEVT == ValueVT)
416 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
426 "Cannot narrow, it would be a lossy transformation");
432 if (PartEVT == ValueVT)
435 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
439 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
450 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
456 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 }
else if (ValueVT.
bitsLT(PartEVT)) {
466 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
475 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
497 std::optional<CallingConv::ID> CallConv);
504 unsigned NumParts,
MVT PartVT,
const Value *V,
505 std::optional<CallingConv::ID> CallConv = std::nullopt,
519 unsigned OrigNumParts = NumParts;
521 "Copying to an illegal type!");
527 EVT PartEVT = PartVT;
528 if (PartEVT == ValueVT) {
529 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
538 assert(NumParts == 1 &&
"Do not know what to promote to!");
539 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
545 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
549 "Unknown mismatch!");
551 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
552 if (PartVT == MVT::x86mmx)
553 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
557 assert(NumParts == 1 && PartEVT != ValueVT);
558 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
563 "Unknown mismatch!");
566 if (PartVT == MVT::x86mmx)
567 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
573 "Failed to tile the value with PartVT!");
576 if (PartEVT != ValueVT) {
578 "scalar-to-vector conversion failed");
579 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
587 if (NumParts & (NumParts - 1)) {
590 "Do not know what to expand to!");
592 unsigned RoundBits = RoundParts * PartBits;
593 unsigned OddParts = NumParts - RoundParts;
602 std::reverse(Parts + RoundParts, Parts + NumParts);
604 NumParts = RoundParts;
616 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
617 for (
unsigned i = 0; i < NumParts; i += StepSize) {
618 unsigned ThisBits = StepSize * PartBits / 2;
621 SDValue &Part1 = Parts[i+StepSize/2];
628 if (ThisBits == PartBits && ThisVT != PartVT) {
629 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
630 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
636 std::reverse(Parts, Parts + OrigNumParts);
658 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
660 "Cannot widen to illegal type");
663 }
else if (PartEVT != ValueEVT) {
678 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
689 std::optional<CallingConv::ID> CallConv) {
693 const bool IsABIRegCopy = CallConv.has_value();
696 EVT PartEVT = PartVT;
697 if (PartEVT == ValueVT) {
701 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
736 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
743 "lossy conversion of vector to scalar type");
758 unsigned NumIntermediates;
762 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
767 NumIntermediates, RegisterVT);
770 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
772 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
775 "Mixing scalable and fixed vectors when copying in parts");
777 std::optional<ElementCount> DestEltCnt;
787 if (ValueVT == BuiltVectorTy) {
791 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
811 for (
unsigned i = 0; i != NumIntermediates; ++i) {
826 if (NumParts == NumIntermediates) {
829 for (
unsigned i = 0; i != NumParts; ++i)
831 }
else if (NumParts > 0) {
834 assert(NumIntermediates != 0 &&
"division by zero");
835 assert(NumParts % NumIntermediates == 0 &&
836 "Must expand into a divisible number of parts!");
837 unsigned Factor = NumParts / NumIntermediates;
838 for (
unsigned i = 0; i != NumIntermediates; ++i)
846 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
850 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
853 OS << LS << U.getTagName();
856 Twine(
"cannot lower ", Name)
862 EVT valuevt, std::optional<CallingConv::ID> CC)
868 std::optional<CallingConv::ID> CC) {
882 for (
unsigned i = 0; i != NumRegs; ++i)
883 Regs.push_back(Reg + i);
884 RegVTs.push_back(RegisterVT);
886 Reg = Reg.id() + NumRegs;
913 for (
unsigned i = 0; i != NumRegs; ++i) {
919 *Glue =
P.getValue(2);
922 Chain =
P.getValue(1);
950 EVT FromVT(MVT::Other);
954 }
else if (NumSignBits > 1) {
962 assert(FromVT != MVT::Other);
968 RegisterVT, ValueVT, V, Chain,
CallConv);
984 unsigned NumRegs =
Regs.size();
998 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1004 for (
unsigned i = 0; i != NumRegs; ++i) {
1016 if (NumRegs == 1 || Glue)
1027 Chain = Chains[NumRegs-1];
1033 unsigned MatchingIdx,
const SDLoc &dl,
1035 std::vector<SDValue> &
Ops)
const {
1040 Flag.setMatchingOp(MatchingIdx);
1041 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1049 Flag.setRegClass(RC->
getID());
1060 "No 1:1 mapping from clobbers to regs?");
1063 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1068 "If we clobbered the stack pointer, MFI should know about it.");
1077 for (
unsigned i = 0; i != NumRegs; ++i) {
1078 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1090 unsigned RegCount = std::get<0>(CountAndVT);
1091 MVT RegisterVT = std::get<1>(CountAndVT);
1108 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1110 *
DAG.getMachineFunction().getFunction().getParent());
1115 UnusedArgNodeMap.clear();
1117 PendingExports.clear();
1118 PendingConstrainedFP.clear();
1119 PendingConstrainedFPStrict.clear();
1127 DanglingDebugInfoMap.clear();
1134 if (Pending.
empty())
1140 unsigned i = 0, e = Pending.
size();
1141 for (; i != e; ++i) {
1143 if (Pending[i].
getNode()->getOperand(0) == Root)
1151 if (Pending.
size() == 1)
1170 PendingConstrainedFP.size() +
1171 PendingConstrainedFPStrict.size());
1173 PendingConstrainedFP.end());
1174 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1175 PendingConstrainedFPStrict.end());
1176 PendingConstrainedFP.clear();
1177 PendingConstrainedFPStrict.clear();
1184 PendingExports.append(PendingConstrainedFPStrict.begin(),
1185 PendingConstrainedFPStrict.end());
1186 PendingConstrainedFPStrict.clear();
1187 return updateRoot(PendingExports);
1194 assert(Variable &&
"Missing variable");
1201 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1217 if (IsParameter && FINode) {
1219 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1220 true,
DL, SDNodeOrder);
1225 FuncArgumentDbgValueKind::Declare,
N);
1228 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1229 true,
DL, SDNodeOrder);
1231 DAG.AddDbgValue(SDV, IsParameter);
1236 FuncArgumentDbgValueKind::Declare,
N)) {
1238 <<
" (could not emit func-arg dbg_value)\n");
1249 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1251 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1253 if (It->Values.isKillLocation(It->Expr)) {
1259 It->Values.hasArgList())) {
1262 FnVarLocs->getDILocalVariable(It->VariableID),
1263 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1276 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1279 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1281 assert(DLR->getLabel() &&
"Missing label");
1283 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1284 DAG.AddDbgLabel(SDV);
1288 if (SkipDbgVariableRecords)
1296 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1298 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1307 if (Values.
empty()) {
1324 SDNodeOrder, IsVariadic)) {
1335 if (
I.isTerminator()) {
1336 HandlePHINodesInSuccessorBlocks(
I.getParent());
1343 bool NodeInserted =
false;
1344 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1345 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1346 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1347 if (PCSectionsMD || MMRA) {
1348 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1349 DAG, [&](
SDNode *) { NodeInserted =
true; });
1359 if (PCSectionsMD || MMRA) {
1360 auto It = NodeMap.find(&
I);
1361 if (It != NodeMap.end()) {
1363 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1365 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1366 }
else if (NodeInserted) {
1369 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1370 <<
I.getModule()->getName() <<
"]\n";
1379void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1389#define HANDLE_INST(NUM, OPCODE, CLASS) \
1390 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1391#include "llvm/IR/Instruction.def"
1403 for (
const Value *V : Values) {
1428 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1433 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1434 DIVariable *DanglingVariable = DDI.getVariable();
1436 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1438 << printDDI(
nullptr, DDI) <<
"\n");
1444 for (
auto &DDIMI : DanglingDebugInfoMap) {
1445 DanglingDebugInfoVector &DDIV = DDIMI.second;
1449 for (
auto &DDI : DDIV)
1450 if (isMatchingDbgValue(DDI))
1453 erase_if(DDIV, isMatchingDbgValue);
1461 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1462 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1465 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1466 for (
auto &DDI : DDIV) {
1469 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1472 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1473 "Expected inlined-at fields to agree");
1482 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1483 FuncArgumentDbgValueKind::Value, Val)) {
1485 << printDDI(V, DDI) <<
"\n");
1492 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1493 << ValSDNodeOrder <<
"\n");
1494 SDV = getDbgValue(Val, Variable, Expr,
DL,
1495 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1496 DAG.AddDbgValue(SDV,
false);
1500 <<
" in EmitFuncArgumentDbgValue\n");
1502 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1506 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1507 DAG.AddDbgValue(SDV,
false);
1514 DanglingDebugInfo &DDI) {
1519 const Value *OrigV = V;
1523 unsigned SDOrder = DDI.getSDNodeOrder();
1527 bool StackValue =
true;
1552 if (!AdditionalValues.
empty())
1562 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1563 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1571 assert(OrigV &&
"V shouldn't be null");
1573 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1574 DAG.AddDbgValue(SDV,
false);
1576 << printDDI(OrigV, DDI) <<
"\n");
1593 unsigned Order,
bool IsVariadic) {
1598 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1603 for (
const Value *V : Values) {
1613 if (CE->getOpcode() == Instruction::IntToPtr) {
1632 N = UnusedArgNodeMap[V];
1637 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1638 FuncArgumentDbgValueKind::Value,
N))
1665 bool IsParamOfFunc =
1673 auto VMI =
FuncInfo.ValueMap.find(V);
1674 if (VMI !=
FuncInfo.ValueMap.end()) {
1679 V->getType(), std::nullopt);
1685 unsigned BitsToDescribe = 0;
1687 BitsToDescribe = *VarSize;
1689 BitsToDescribe = Fragment->SizeInBits;
1692 if (
Offset >= BitsToDescribe)
1695 unsigned RegisterSize = RegAndSize.second;
1696 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1697 ? BitsToDescribe -
Offset
1700 Expr,
Offset, FragmentSize);
1704 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1705 DAG.AddDbgValue(SDV,
false);
1721 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1722 false, DbgLoc, Order, IsVariadic);
1723 DAG.AddDbgValue(SDV,
false);
1729 for (
auto &Pair : DanglingDebugInfoMap)
1730 for (
auto &DDI : Pair.second)
1741 if (It !=
FuncInfo.ValueMap.end()) {
1745 DAG.getDataLayout(), InReg, Ty,
1762 if (
N.getNode())
return N;
1822 return DAG.getSplatBuildVector(
1825 return DAG.getConstant(*CI,
DL, VT);
1834 getValue(CPA->getAddrDiscriminator()),
1835 getValue(CPA->getDiscriminator()));
1851 visit(CE->getOpcode(), *CE);
1853 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1859 for (
const Use &U :
C->operands()) {
1865 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1866 Constants.push_back(
SDValue(Val, i));
1875 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1879 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1888 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1890 "Unknown struct or array constant!");
1894 unsigned NumElts = ValueVTs.
size();
1898 for (
unsigned i = 0; i != NumElts; ++i) {
1899 EVT EltVT = ValueVTs[i];
1901 Constants[i] =
DAG.getUNDEF(EltVT);
1912 return DAG.getBlockAddress(BA, VT);
1915 return getValue(Equiv->getGlobalValue());
1920 if (VT == MVT::aarch64svcount) {
1921 assert(
C->isNullValue() &&
"Can only zero this target type!");
1927 assert(
C->isNullValue() &&
"Can only zero this target type!");
1944 for (
unsigned i = 0; i != NumElements; ++i)
1972 return DAG.getFrameIndex(
1981 Inst->getType(), std::nullopt);
1995void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2008 if (IsMSVCCXX || IsCoreCLR)
2014 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2015 FuncInfo.MBB->addSuccessor(TargetMBB);
2022 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2031 DAG.getMachineFunction().setHasEHContTarget(
true);
2037 Value *ParentPad =
I.getCatchSwitchParentPad();
2040 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2043 assert(SuccessorColor &&
"No parent funclet for catchret!");
2044 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2045 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2050 DAG.getBasicBlock(SuccessorColorMBB));
2054void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2060 FuncInfo.MBB->setIsEHFuncletEntry();
2061 FuncInfo.MBB->setIsCleanupFuncletEntry();
2090 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2096 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2097 UnwindDests.back().first->setIsEHScopeEntry();
2100 UnwindDests.back().first->setIsEHFuncletEntry();
2104 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2105 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2107 if (IsMSVCCXX || IsCoreCLR)
2108 UnwindDests.back().first->setIsEHFuncletEntry();
2110 UnwindDests.back().first->setIsEHScopeEntry();
2112 NewEHPadBB = CatchSwitch->getUnwindDest();
2118 if (BPI && NewEHPadBB)
2120 EHPadBB = NewEHPadBB;
2127 auto UnwindDest =
I.getUnwindDest();
2128 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2129 BranchProbability UnwindDestProb =
2134 for (
auto &UnwindDest : UnwindDests) {
2135 UnwindDest.first->setIsEHPad();
2136 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2138 FuncInfo.MBB->normalizeSuccProbs();
2141 MachineBasicBlock *CleanupPadMBB =
2142 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2148void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2152void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2153 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2154 auto &
DL =
DAG.getDataLayout();
2166 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2183 SmallVector<uint64_t, 4>
Offsets;
2186 unsigned NumValues = ValueVTs.
size();
2189 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2190 for (
unsigned i = 0; i != NumValues; ++i) {
2197 if (MemVTs[i] != ValueVTs[i])
2199 Chains[i] =
DAG.getStore(
2207 MVT::Other, Chains);
2208 }
else if (
I.getNumOperands() != 0) {
2211 unsigned NumValues =
Types.size();
2215 const Function *
F =
I.getParent()->getParent();
2218 I.getOperand(0)->getType(),
F->getCallingConv(),
2222 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2224 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2227 LLVMContext &
Context =
F->getContext();
2228 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2230 for (
unsigned j = 0;
j != NumValues; ++
j) {
2243 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2246 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2250 if (
I.getOperand(0)->getType()->isPointerTy()) {
2252 Flags.setPointerAddrSpace(
2256 if (NeedsRegBlock) {
2257 Flags.setInConsecutiveRegs();
2258 if (j == NumValues - 1)
2259 Flags.setInConsecutiveRegsLast();
2267 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2270 for (
unsigned i = 0; i < NumParts; ++i) {
2273 VT, Types[j], 0, 0));
2283 const Function *
F =
I.getParent()->getParent();
2285 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2287 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2288 Flags.setSwiftError();
2300 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2302 DAG.getMachineFunction().getFunction().getCallingConv();
2303 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2308 "LowerReturn didn't return a valid chain!");
2319 if (V->getType()->isEmptyTy())
2323 if (VMI !=
FuncInfo.ValueMap.end()) {
2325 "Unused value assigned virtual registers!");
2338 if (
FuncInfo.isExportedInst(V))
return;
2350 if (VI->getParent() == FromBB)
2376 const BasicBlock *SrcBB = Src->getBasicBlock();
2377 const BasicBlock *DstBB = Dst->getBasicBlock();
2381 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2391 Src->addSuccessorWithoutProb(Dst);
2394 Prob = getEdgeProbability(Src, Dst);
2395 Src->addSuccessor(Dst, Prob);
2401 return I->getParent() == BB;
2425 if (CurBB == SwitchBB ||
2431 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2436 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2438 if (TM.Options.NoNaNsFPMath)
2442 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2444 SL->SwitchCases.push_back(CB);
2453 SL->SwitchCases.push_back(CB);
2461 unsigned Depth = 0) {
2470 if (Necessary !=
nullptr) {
2473 if (Necessary->contains(
I))
2492 if (
I.getNumSuccessors() != 2)
2495 if (!
I.isConditional())
2507 if (BPI !=
nullptr) {
2513 std::optional<bool> Likely;
2516 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2520 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2532 if (CostThresh <= 0)
2550 const auto &TLI =
DAG.getTargetLoweringInfo();
2557 Value *BrCond =
I.getCondition();
2558 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2559 for (
const auto *U : Ins->users()) {
2562 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2575 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2577 for (
const auto &InsPair : RhsDeps) {
2578 if (!ShouldCountInsn(InsPair.first)) {
2579 ToDrop = InsPair.first;
2583 if (ToDrop ==
nullptr)
2585 RhsDeps.erase(ToDrop);
2588 for (
const auto &InsPair : RhsDeps) {
2596 if (CostOfIncluding > CostThresh)
2622 const Value *BOpOp0, *BOpOp1;
2636 if (BOpc == Instruction::And)
2637 BOpc = Instruction::Or;
2638 else if (BOpc == Instruction::Or)
2639 BOpc = Instruction::And;
2645 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2650 TProb, FProb, InvertCond);
2660 if (
Opc == Instruction::Or) {
2681 auto NewTrueProb = TProb / 2;
2682 auto NewFalseProb = TProb / 2 + FProb;
2685 NewFalseProb, InvertCond);
2692 Probs[1], InvertCond);
2694 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2714 auto NewTrueProb = TProb + FProb / 2;
2715 auto NewFalseProb = FProb / 2;
2718 NewFalseProb, InvertCond);
2725 Probs[1], InvertCond);
2734 if (Cases.size() != 2)
return true;
2738 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2739 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2740 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2741 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2747 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2748 Cases[0].CC == Cases[1].CC &&
2751 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2753 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2760void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2766 if (
I.isUnconditional()) {
2772 if (Succ0MBB != NextBlock(BrMBB) ||
2785 const Value *CondVal =
I.getCondition();
2786 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2805 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2807 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2810 const Value *BOp0, *BOp1;
2813 Opcode = Instruction::And;
2815 Opcode = Instruction::Or;
2822 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2823 Opcode, BOp0, BOp1))) {
2825 getEdgeProbability(BrMBB, Succ0MBB),
2826 getEdgeProbability(BrMBB, Succ1MBB),
2831 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2835 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2842 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2848 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2849 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2851 SL->SwitchCases.clear();
2857 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2878 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2885 auto &TLI =
DAG.getTargetLoweringInfo();
2909 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2921 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2925 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2926 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2941 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2957 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2960 DAG.setRoot(BrCond);
2966 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2967 assert(JT.Reg &&
"Should lower JT Header first!");
2968 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
2970 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
2971 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2972 Index.getValue(1), Table, Index);
2973 DAG.setRoot(BrJumpTable);
2981 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2982 const SDLoc &dl = *JT.SL;
2988 DAG.getConstant(JTH.
First, dl, VT));
3003 JT.Reg = JumpTableReg;
3011 Sub.getValueType()),
3014 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3015 MVT::Other, CopyTo, CMP,
3016 DAG.getBasicBlock(JT.Default));
3019 if (JT.MBB != NextBlock(SwitchBB))
3020 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3021 DAG.getBasicBlock(JT.MBB));
3023 DAG.setRoot(BrCond);
3026 if (JT.MBB != NextBlock(SwitchBB))
3027 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3028 DAG.getBasicBlock(JT.MBB)));
3030 DAG.setRoot(CopyTo);
3053 if (PtrTy != PtrMemTy)
3069 auto &
DL =
DAG.getDataLayout();
3078 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3085 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3098 assert(GuardCheckFn &&
"Guard check function is null");
3109 Entry.IsInReg =
true;
3110 Args.push_back(Entry);
3116 getValue(GuardCheckFn), std::move(Args));
3118 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3119 DAG.setRoot(Result.second);
3132 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3143 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3176 auto &
DL =
DAG.getDataLayout();
3184 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3190 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3205 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3206 Entry.IsInReg =
true;
3207 Args.push_back(Entry);
3213 getValue(GuardCheckFn), std::move(Args));
3219 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3227 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3242 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3246 bool UsePtrType =
false;
3270 if (!
B.FallthroughUnreachable)
3271 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3272 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3276 if (!
B.FallthroughUnreachable) {
3284 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3285 DAG.getBasicBlock(
B.Default));
3289 if (
MBB != NextBlock(SwitchBB))
3290 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3307 if (PopCount == 1) {
3314 }
else if (PopCount == BB.
Range) {
3322 DAG.getConstant(1, dl, VT), ShiftOp);
3326 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3333 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3335 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3343 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3346 if (NextMBB != NextBlock(SwitchBB))
3347 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3348 DAG.getBasicBlock(NextMBB));
3353void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3371 const Value *Callee(
I.getCalledOperand());
3374 visitInlineAsm(
I, EHPadBB);
3379 case Intrinsic::donothing:
3381 case Intrinsic::seh_try_begin:
3382 case Intrinsic::seh_scope_begin:
3383 case Intrinsic::seh_try_end:
3384 case Intrinsic::seh_scope_end:
3390 case Intrinsic::experimental_patchpoint_void:
3391 case Intrinsic::experimental_patchpoint:
3392 visitPatchpoint(
I, EHPadBB);
3394 case Intrinsic::experimental_gc_statepoint:
3400 case Intrinsic::wasm_throw: {
3402 std::array<SDValue, 4>
Ops = {
3413 case Intrinsic::wasm_rethrow: {
3414 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3415 std::array<SDValue, 2>
Ops = {
3424 }
else if (
I.hasDeoptState()) {
3445 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3446 BranchProbability EHPadBBProb =
3452 addSuccessorWithProb(InvokeMBB, Return);
3453 for (
auto &UnwindDest : UnwindDests) {
3454 UnwindDest.first->setIsEHPad();
3455 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3461 DAG.getBasicBlock(Return)));
3464void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3465 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3472 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3477 SmallPtrSet<BasicBlock *, 8> Dests;
3478 Dests.
insert(
I.getDefaultDest());
3483 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3486 Target->setIsInlineAsmBrIndirectTarget();
3492 Target->setLabelMustBeEmitted();
3494 if (Dests.
insert(Dest).second)
3502 DAG.getBasicBlock(Return)));
3505void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3506 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3509void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3511 "Call to landingpad not in landing pad!");
3515 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3531 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3536 if (
FuncInfo.ExceptionPointerVirtReg) {
3537 Ops[0] =
DAG.getZExtOrTrunc(
3538 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3545 Ops[1] =
DAG.getZExtOrTrunc(
3546 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3553 DAG.getVTList(ValueVTs),
Ops);
3561 if (JTB.first.HeaderBB ==
First)
3562 JTB.first.HeaderBB =
Last;
3575 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3577 bool Inserted =
Done.insert(BB).second;
3582 addSuccessorWithProb(IndirectBrMBB, Succ);
3592 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3593 DAG.getTarget().Options.NoTrapAfterNoreturn))
3599void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3602 Flags.copyFMF(*FPOp);
3610void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3613 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3614 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3617 Flags.setExact(ExactOp->isExact());
3619 Flags.setDisjoint(DisjointOp->isDisjoint());
3621 Flags.copyFMF(*FPOp);
3630void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3634 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3639 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3641 "Unexpected shift type");
3651 if (
const OverflowingBinaryOperator *OFBinOp =
3653 nuw = OFBinOp->hasNoUnsignedWrap();
3654 nsw = OFBinOp->hasNoSignedWrap();
3656 if (
const PossiblyExactOperator *ExactOp =
3658 exact = ExactOp->isExact();
3661 Flags.setExact(exact);
3662 Flags.setNoSignedWrap(nsw);
3663 Flags.setNoUnsignedWrap(nuw);
3669void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3680void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3686 auto &TLI =
DAG.getTargetLoweringInfo();
3699 Flags.setSameSign(
I.hasSameSign());
3700 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3702 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3707void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3714 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3718 Flags.copyFMF(*FPMO);
3719 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3721 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3730 return isa<SelectInst>(V);
3734void SelectionDAGBuilder::visitSelect(
const User &
I) {
3738 unsigned NumValues = ValueVTs.
size();
3739 if (NumValues == 0)
return;
3749 bool IsUnaryAbs =
false;
3750 bool Negate =
false;
3754 Flags.copyFMF(*FPOp);
3756 Flags.setUnpredictable(
3761 EVT VT = ValueVTs[0];
3762 LLVMContext &Ctx = *
DAG.getContext();
3763 auto &TLI =
DAG.getTargetLoweringInfo();
3773 bool UseScalarMinMax = VT.
isVector() &&
3782 switch (SPR.Flavor) {
3788 switch (SPR.NaNBehavior) {
3801 switch (SPR.NaNBehavior) {
3845 for (
unsigned i = 0; i != NumValues; ++i) {
3851 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3854 for (
unsigned i = 0; i != NumValues; ++i) {
3858 Values[i] =
DAG.getNode(
3865 DAG.getVTList(ValueVTs), Values));
3868void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3871 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3875 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3876 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3882void SelectionDAGBuilder::visitZExt(
const User &
I) {
3886 auto &TLI =
DAG.getTargetLoweringInfo();
3891 Flags.setNonNeg(PNI->hasNonNeg());
3896 if (
Flags.hasNonNeg() &&
3905void SelectionDAGBuilder::visitSExt(
const User &
I) {
3909 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3914void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3920 Flags.copyFMF(*TruncInst);
3921 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3924 DAG.getTargetConstant(
3929void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3932 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3937void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3940 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3945void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3948 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3953void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3956 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3960 Flags.setNonNeg(PNI->hasNonNeg());
3965void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3968 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3973void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
3978void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3982 auto &TLI =
DAG.getTargetLoweringInfo();
3983 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3992void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3996 auto &TLI =
DAG.getTargetLoweringInfo();
4004void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4007 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4012 if (DestVT !=
N.getValueType())
4020 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4026void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4027 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4028 const Value *SV =
I.getOperand(0);
4033 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4035 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4041void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4042 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4049 InVec, InVal, InIdx));
4052void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4053 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4062void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4067 Mask = SVI->getShuffleMask();
4071 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4075 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4080 DAG.getVectorIdxConstant(0,
DL));
4091 unsigned MaskNumElts =
Mask.size();
4093 if (SrcNumElts == MaskNumElts) {
4099 if (SrcNumElts < MaskNumElts) {
4103 if (MaskNumElts % SrcNumElts == 0) {
4107 unsigned NumConcat = MaskNumElts / SrcNumElts;
4108 bool IsConcat =
true;
4109 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4110 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4116 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4117 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4118 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4123 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4130 for (
auto Src : ConcatSrcs) {
4143 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4144 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4160 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4161 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4163 if (Idx >= (
int)SrcNumElts)
4164 Idx -= SrcNumElts - PaddedMaskNumElts;
4172 if (MaskNumElts != PaddedMaskNumElts)
4174 DAG.getVectorIdxConstant(0,
DL));
4180 assert(SrcNumElts > MaskNumElts);
4184 int StartIdx[2] = {-1, -1};
4185 bool CanExtract =
true;
4186 for (
int Idx : Mask) {
4191 if (Idx >= (
int)SrcNumElts) {
4199 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4200 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4201 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4205 StartIdx[Input] = NewStartIdx;
4208 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4214 for (
unsigned Input = 0; Input < 2; ++Input) {
4215 SDValue &Src = Input == 0 ? Src1 : Src2;
4216 if (StartIdx[Input] < 0)
4217 Src =
DAG.getUNDEF(VT);
4220 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4225 SmallVector<int, 8> MappedOps(Mask);
4226 for (
int &Idx : MappedOps) {
4227 if (Idx >= (
int)SrcNumElts)
4228 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4233 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4242 for (
int Idx : Mask) {
4246 Res =
DAG.getUNDEF(EltVT);
4248 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4249 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4252 DAG.getVectorIdxConstant(Idx,
DL));
4262 ArrayRef<unsigned> Indices =
I.getIndices();
4263 const Value *Op0 =
I.getOperand(0);
4265 Type *AggTy =
I.getType();
4272 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4278 unsigned NumAggValues = AggValueVTs.
size();
4279 unsigned NumValValues = ValValueVTs.
size();
4283 if (!NumAggValues) {
4291 for (; i != LinearIndex; ++i)
4292 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4297 for (; i != LinearIndex + NumValValues; ++i)
4298 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4302 for (; i != NumAggValues; ++i)
4303 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4307 DAG.getVTList(AggValueVTs), Values));
4311 ArrayRef<unsigned> Indices =
I.getIndices();
4312 const Value *Op0 =
I.getOperand(0);
4314 Type *ValTy =
I.getType();
4319 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4323 unsigned NumValValues = ValValueVTs.
size();
4326 if (!NumValValues) {
4335 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4336 Values[i - LinearIndex] =
4342 DAG.getVTList(ValValueVTs), Values));
4345void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4346 Value *Op0 =
I.getOperand(0);
4352 auto &TLI =
DAG.getTargetLoweringInfo();
4357 bool IsVectorGEP =
I.getType()->isVectorTy();
4358 ElementCount VectorElementCount =
4364 const Value *Idx = GTI.getOperand();
4365 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4370 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4379 N =
DAG.getMemBasePlusOffset(
4380 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4386 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4388 TypeSize ElementSize =
4389 GTI.getSequentialElementStride(
DAG.getDataLayout());
4394 bool ElementScalable = ElementSize.
isScalable();
4400 C =
C->getSplatValue();
4403 if (CI && CI->isZero())
4405 if (CI && !ElementScalable) {
4406 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4409 if (
N.getValueType().isVector())
4410 OffsVal =
DAG.getConstant(
4413 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4420 Flags.setNoUnsignedWrap(
true);
4422 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4424 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4432 if (
N.getValueType().isVector()) {
4434 VectorElementCount);
4435 IdxN =
DAG.getSplat(VT, dl, IdxN);
4439 N =
DAG.getSplat(VT, dl,
N);
4445 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4447 SDNodeFlags ScaleFlags;
4456 if (ElementScalable) {
4457 EVT VScaleTy =
N.getValueType().getScalarType();
4459 ISD::VSCALE, dl, VScaleTy,
4460 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4461 if (
N.getValueType().isVector())
4462 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4463 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4468 if (ElementMul != 1) {
4469 if (ElementMul.isPowerOf2()) {
4470 unsigned Amt = ElementMul.logBase2();
4473 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4476 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4478 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4488 SDNodeFlags AddFlags;
4491 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4495 if (IsVectorGEP && !
N.getValueType().isVector()) {
4497 N =
DAG.getSplat(VT, dl,
N);
4508 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4513void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4520 Type *Ty =
I.getAllocatedType();
4521 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4522 auto &
DL =
DAG.getDataLayout();
4523 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4524 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4530 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4533 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4534 DAG.getVScale(dl, IntPtr,
4540 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4541 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4547 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4548 if (*Alignment <= StackAlign)
4549 Alignment = std::nullopt;
4551 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4556 DAG.getConstant(StackAlignMask, dl, IntPtr),
4561 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4565 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4567 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4575 return I.getMetadata(LLVMContext::MD_range);
4580 if (std::optional<ConstantRange> CR = CB->getRange())
4584 return std::nullopt;
4587void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4589 return visitAtomicLoad(
I);
4591 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4592 const Value *SV =
I.getOperand(0);
4597 if (Arg->hasSwiftErrorAttr())
4598 return visitLoadFromSwiftError(
I);
4602 if (Alloca->isSwiftError())
4603 return visitLoadFromSwiftError(
I);
4609 Type *Ty =
I.getType();
4613 unsigned NumValues = ValueVTs.
size();
4617 Align Alignment =
I.getAlign();
4618 AAMDNodes AAInfo =
I.getAAMetadata();
4620 bool isVolatile =
I.isVolatile();
4625 bool ConstantMemory =
false;
4632 BatchAA->pointsToConstantMemory(MemoryLocation(
4637 Root =
DAG.getEntryNode();
4638 ConstantMemory =
true;
4642 Root =
DAG.getRoot();
4653 unsigned ChainI = 0;
4654 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4670 MachinePointerInfo PtrInfo =
4672 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4673 : MachinePointerInfo();
4676 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4677 MMOFlags, AAInfo, Ranges);
4678 Chains[ChainI] =
L.getValue(1);
4680 if (MemVTs[i] != ValueVTs[i])
4681 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4686 if (!ConstantMemory) {
4696 DAG.getVTList(ValueVTs), Values));
4699void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4700 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4701 "call visitStoreToSwiftError when backend supports swifterror");
4704 SmallVector<uint64_t, 4>
Offsets;
4705 const Value *SrcV =
I.getOperand(0);
4707 SrcV->
getType(), ValueVTs, &Offsets, 0);
4708 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4709 "expect a single EVT for swifterror");
4718 SDValue(Src.getNode(), Src.getResNo()));
4719 DAG.setRoot(CopyNode);
4722void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4723 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4724 "call visitLoadFromSwiftError when backend supports swifterror");
4727 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4728 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4729 "Support volatile, non temporal, invariant for load_from_swift_error");
4731 const Value *SV =
I.getOperand(0);
4732 Type *Ty =
I.getType();
4735 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4737 I.getAAMetadata()))) &&
4738 "load_from_swift_error should not be constant memory");
4741 SmallVector<uint64_t, 4>
Offsets;
4743 ValueVTs, &Offsets, 0);
4744 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4745 "expect a single EVT for swifterror");
4755void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4757 return visitAtomicStore(
I);
4759 const Value *SrcV =
I.getOperand(0);
4760 const Value *PtrV =
I.getOperand(1);
4762 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4767 if (Arg->hasSwiftErrorAttr())
4768 return visitStoreToSwiftError(
I);
4772 if (Alloca->isSwiftError())
4773 return visitStoreToSwiftError(
I);
4780 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4781 unsigned NumValues = ValueVTs.
size();
4794 Align Alignment =
I.getAlign();
4795 AAMDNodes AAInfo =
I.getAAMetadata();
4799 unsigned ChainI = 0;
4800 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4810 MachinePointerInfo PtrInfo =
4812 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4813 : MachinePointerInfo();
4817 if (MemVTs[i] != ValueVTs[i])
4818 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4820 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4821 Chains[ChainI] = St;
4827 DAG.setRoot(StoreNode);
4830void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4831 bool IsCompressing) {
4834 auto getMaskedStoreOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
4837 Src0 =
I.getArgOperand(0);
4838 Ptr =
I.getArgOperand(1);
4840 Mask =
I.getArgOperand(3);
4842 auto getCompressingStoreOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
4845 Src0 =
I.getArgOperand(0);
4846 Ptr =
I.getArgOperand(1);
4847 Mask =
I.getArgOperand(2);
4848 Alignment =
I.getParamAlign(1).valueOrOne();
4851 Value *PtrOperand, *MaskOperand, *Src0Operand;
4854 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4856 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4866 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4869 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4870 MachinePointerInfo(PtrOperand), MMOFlags,
4873 const auto &TLI =
DAG.getTargetLoweringInfo();
4878 I.getArgOperand(0)->getType(),
true)
4884 DAG.setRoot(StoreNode);
4910 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4914 C =
C->getSplatValue();
4928 if (!
GEP ||
GEP->getParent() != CurBB)
4931 if (
GEP->getNumOperands() != 2)
4934 const Value *BasePtr =
GEP->getPointerOperand();
4935 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4941 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4946 if (ScaleVal != 1 &&
4958void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4962 const Value *
Ptr =
I.getArgOperand(1);
4967 ->getMaybeAlignValue()
4969 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4977 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4978 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4988 EVT IdxVT =
Index.getValueType();
4996 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
4998 DAG.setRoot(Scatter);
5002void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5005 auto getMaskedLoadOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
5008 Ptr =
I.getArgOperand(0);
5010 Mask =
I.getArgOperand(2);
5011 Src0 =
I.getArgOperand(3);
5013 auto getExpandingLoadOps = [&](Value *&
Ptr, Value *&
Mask, Value *&Src0,
5016 Ptr =
I.getArgOperand(0);
5017 Alignment =
I.getParamAlign(0).valueOrOne();
5018 Mask =
I.getArgOperand(1);
5019 Src0 =
I.getArgOperand(2);
5022 Value *PtrOperand, *MaskOperand, *Src0Operand;
5025 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5027 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5035 AAMDNodes AAInfo =
I.getAAMetadata();
5042 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5045 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5048 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5049 MachinePointerInfo(PtrOperand), MMOFlags,
5052 const auto &TLI =
DAG.getTargetLoweringInfo();
5064 DAG.getMaskedLoad(VT, sdl, InChain,
Ptr,
Offset, Mask, Src0, VT, MMO,
5071void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5075 const Value *
Ptr =
I.getArgOperand(0);
5079 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5082 ->getMaybeAlignValue()
5093 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5094 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5106 EVT IdxVT =
Index.getValueType();
5115 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5131 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5133 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5136 MachineFunction &MF =
DAG.getMachineFunction();
5138 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5139 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5142 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5143 dl, MemVT, VTs, InChain,
5151 DAG.setRoot(OutChain);
5154void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5157 switch (
I.getOperation()) {
5175 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5178 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5181 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5184 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5187 NT = ISD::ATOMIC_LOAD_USUB_COND;
5190 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5199 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5202 MachineFunction &MF =
DAG.getMachineFunction();
5204 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5205 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5208 DAG.getAtomic(NT, dl, MemVT, InChain,
5215 DAG.setRoot(OutChain);
5218void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5220 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5223 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5225 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5232void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5239 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5250 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5251 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5252 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5262 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5265 DAG.setRoot(OutChain);
5268void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5276 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5286 MachineFunction &MF =
DAG.getMachineFunction();
5288 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5289 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5293 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5297 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val,
Ptr, MMO);
5300 DAG.setRoot(OutChain);
5305void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5311 bool HasChain = !
F->doesNotAccessMemory();
5313 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5320 Ops.push_back(
DAG.getRoot());
5327 TargetLowering::IntrinsicInfo
Info;
5328 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5330 DAG.getMachineFunction(),
5340 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5341 const Value *Arg =
I.getArgOperand(i);
5342 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5350 assert(CI->getBitWidth() <= 64 &&
5351 "large intrinsic immediates not handled");
5352 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5365 SDVTList VTs =
DAG.getVTList(ValueVTs);
5370 Flags.copyFMF(*FPMO);
5371 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5377 auto *Token = Bundle->Inputs[0].get();
5379 assert(
Ops.back().getValueType() != MVT::Glue &&
5380 "Did not expected another glue node here.");
5382 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5383 Ops.push_back(ConvControlToken);
5388 if (IsTgtIntrinsic) {
5393 MachinePointerInfo MPI;
5395 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5396 else if (
Info.fallbackAddressSpace)
5397 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5398 EVT MemVT =
Info.memVT;
5400 if (
Size.hasValue() && !
Size.getValue())
5402 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5403 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5404 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5408 }
else if (!HasChain) {
5410 }
else if (!
I.getType()->isVoidTy()) {
5424 if (!
I.getType()->isVoidTy()) {
5428 MaybeAlign Alignment =
I.getRetAlign();
5451 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5492 SDValue TwoToFractionalPartOfX;
5560 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5561 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5569 if (
Op.getValueType() == MVT::f32 &&
5584 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5593 if (
Op.getValueType() == MVT::f32 &&
5683 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5692 if (
Op.getValueType() == MVT::f32 &&
5776 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5780 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5789 if (
Op.getValueType() == MVT::f32 &&
5866 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5870 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5877 if (
Op.getValueType() == MVT::f32 &&
5882 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5890 bool IsExp10 =
false;
5891 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5895 IsExp10 = LHSC->isExactlyValue(Ten);
5922 unsigned Val = RHSC->getSExtValue();
5951 CurSquare, CurSquare);
5956 if (RHSC->getSExtValue() < 0)
5970 EVT VT =
LHS.getValueType();
5993 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5997 Opcode, VT, ScaleInt);
6032 switch (
N.getOpcode()) {
6036 Op.getValueType().getSizeInBits());
6061bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6068 MachineFunction &MF =
DAG.getMachineFunction();
6069 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6073 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6078 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6085 auto *NewDIExpr = FragExpr;
6092 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6095 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6096 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6100 if (Kind == FuncArgumentDbgValueKind::Value) {
6105 if (!IsInEntryBlock)
6121 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6122 !
DL->getInlinedAt();
6124 if (!IsInPrologue && !VariableIsFunctionInputArg)
6158 if (VariableIsFunctionInputArg) {
6160 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6161 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6162 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6163 return !NodeMap[
V].getNode();
6168 bool IsIndirect =
false;
6169 std::optional<MachineOperand>
Op;
6171 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6172 if (FI != std::numeric_limits<int>::max())
6176 if (!
Op &&
N.getNode()) {
6179 if (ArgRegsAndSizes.
size() == 1)
6180 Reg = ArgRegsAndSizes.
front().first;
6183 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6190 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6194 if (!
Op &&
N.getNode()) {
6198 if (FrameIndexSDNode *FINode =
6208 for (
const auto &RegAndSize : SplitRegs) {
6212 int RegFragmentSizeInBits = RegAndSize.second;
6214 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6217 if (
Offset >= ExprFragmentSizeInBits)
6221 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6222 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6227 Expr,
Offset, RegFragmentSizeInBits);
6228 Offset += RegAndSize.second;
6231 if (!FragmentExpr) {
6232 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6234 DAG.AddDbgValue(SDV,
false);
6237 MachineInstr *NewMI =
6238 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6239 Kind != FuncArgumentDbgValueKind::Value);
6240 FuncInfo.ArgDbgValues.push_back(NewMI);
6247 if (VMI !=
FuncInfo.ValueMap.end()) {
6248 const auto &TLI =
DAG.getTargetLoweringInfo();
6249 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6250 V->getType(), std::nullopt);
6251 if (RFV.occupiesMultipleRegs()) {
6252 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6257 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6258 }
else if (ArgRegsAndSizes.
size() > 1) {
6261 splitMultiRegDbgValue(ArgRegsAndSizes);
6270 "Expected inlined-at fields to agree");
6271 MachineInstr *NewMI =
nullptr;
6274 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6276 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6280 FuncInfo.ArgDbgValues.push_back(NewMI);
6289 unsigned DbgSDNodeOrder) {
6301 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6302 false, dl, DbgSDNodeOrder);
6304 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6305 false, dl, DbgSDNodeOrder);
6310 case Intrinsic::smul_fix:
6312 case Intrinsic::umul_fix:
6314 case Intrinsic::smul_fix_sat:
6316 case Intrinsic::umul_fix_sat:
6318 case Intrinsic::sdiv_fix:
6320 case Intrinsic::udiv_fix:
6322 case Intrinsic::sdiv_fix_sat:
6324 case Intrinsic::udiv_fix_sat:
6337 "expected call_preallocated_setup Value");
6338 for (
const auto *U : PreallocatedSetup->
users()) {
6340 const Function *Fn = UseCall->getCalledFunction();
6341 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6351bool SelectionDAGBuilder::visitEntryValueDbgValue(
6361 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6362 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6364 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6365 "couldn't find an associated register for the Argument\n");
6368 Register ArgVReg = ArgIt->getSecond();
6370 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6371 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6372 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6373 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6374 DAG.AddDbgValue(SDV,
false );
6377 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6378 "couldn't find a physical register\n");
6383void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6386 switch (Intrinsic) {
6387 case Intrinsic::experimental_convergence_anchor:
6388 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6390 case Intrinsic::experimental_convergence_entry:
6391 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6393 case Intrinsic::experimental_convergence_loop: {
6395 auto *Token = Bundle->Inputs[0].get();
6396 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6403void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6404 unsigned IntrinsicID) {
6407 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6408 "Tried to lower unsupported histogram type");
6410 Value *
Ptr =
I.getOperand(0);
6414 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6415 DataLayout TargetDL =
DAG.getDataLayout();
6417 Align Alignment =
DAG.getEVTAlign(VT);
6428 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6430 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6431 MachinePointerInfo(AS),
6442 EVT IdxVT =
Index.getValueType();
6449 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6452 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6456 DAG.setRoot(Histogram);
6459void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6461 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6462 "Tried lowering invalid vector extract last");
6464 const DataLayout &Layout =
DAG.getDataLayout();
6468 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6472 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6478 EVT BoolVT =
Mask.getValueType().getScalarType();
6479 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6480 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6487void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6489 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6496 Flags.copyFMF(*FPOp);
6498 switch (Intrinsic) {
6501 visitTargetIntrinsic(
I, Intrinsic);
6503 case Intrinsic::vscale: {
6508 case Intrinsic::vastart: visitVAStart(
I);
return;
6509 case Intrinsic::vaend: visitVAEnd(
I);
return;
6510 case Intrinsic::vacopy: visitVACopy(
I);
return;
6511 case Intrinsic::returnaddress:
6516 case Intrinsic::addressofreturnaddress:
6521 case Intrinsic::sponentry:
6526 case Intrinsic::frameaddress:
6531 case Intrinsic::read_volatile_register:
6532 case Intrinsic::read_register: {
6533 Value *
Reg =
I.getArgOperand(0);
6539 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6544 case Intrinsic::write_register: {
6545 Value *
Reg =
I.getArgOperand(0);
6546 Value *RegValue =
I.getArgOperand(1);
6554 case Intrinsic::memcpy:
6555 case Intrinsic::memcpy_inline: {
6561 "memcpy_inline needs constant size");
6563 Align DstAlign = MCI.getDestAlign().valueOrOne();
6564 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6565 Align Alignment = std::min(DstAlign, SrcAlign);
6566 bool isVol = MCI.isVolatile();
6570 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6571 MCI.isForceInlined(), &
I, std::nullopt,
6572 MachinePointerInfo(
I.getArgOperand(0)),
6573 MachinePointerInfo(
I.getArgOperand(1)),
6575 updateDAGForMaybeTailCall(MC);
6578 case Intrinsic::memset:
6579 case Intrinsic::memset_inline: {
6585 "memset_inline needs constant size");
6587 Align DstAlign = MSII.getDestAlign().valueOrOne();
6588 bool isVol = MSII.isVolatile();
6591 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6592 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6593 updateDAGForMaybeTailCall(MC);
6596 case Intrinsic::memmove: {
6602 Align DstAlign = MMI.getDestAlign().valueOrOne();
6603 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6604 Align Alignment = std::min(DstAlign, SrcAlign);
6605 bool isVol = MMI.isVolatile();
6609 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6611 MachinePointerInfo(
I.getArgOperand(0)),
6612 MachinePointerInfo(
I.getArgOperand(1)),
6614 updateDAGForMaybeTailCall(MM);
6617 case Intrinsic::memcpy_element_unordered_atomic: {
6623 Type *LengthTy =
MI.getLength()->getType();
6624 unsigned ElemSz =
MI.getElementSizeInBytes();
6628 isTC, MachinePointerInfo(
MI.getRawDest()),
6629 MachinePointerInfo(
MI.getRawSource()));
6630 updateDAGForMaybeTailCall(MC);
6633 case Intrinsic::memmove_element_unordered_atomic: {
6639 Type *LengthTy =
MI.getLength()->getType();
6640 unsigned ElemSz =
MI.getElementSizeInBytes();
6644 isTC, MachinePointerInfo(
MI.getRawDest()),
6645 MachinePointerInfo(
MI.getRawSource()));
6646 updateDAGForMaybeTailCall(MC);
6649 case Intrinsic::memset_element_unordered_atomic: {
6655 Type *LengthTy =
MI.getLength()->getType();
6656 unsigned ElemSz =
MI.getElementSizeInBytes();
6660 isTC, MachinePointerInfo(
MI.getRawDest()));
6661 updateDAGForMaybeTailCall(MC);
6664 case Intrinsic::call_preallocated_setup: {
6666 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6667 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6673 case Intrinsic::call_preallocated_arg: {
6675 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6682 ISD::PREALLOCATED_ARG, sdl,
6689 case Intrinsic::eh_typeid_for: {
6692 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6693 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6698 case Intrinsic::eh_return_i32:
6699 case Intrinsic::eh_return_i64:
6700 DAG.getMachineFunction().setCallsEHReturn(
true);
6707 case Intrinsic::eh_unwind_init:
6708 DAG.getMachineFunction().setCallsUnwindInit(
true);
6710 case Intrinsic::eh_dwarf_cfa:
6715 case Intrinsic::eh_sjlj_callsite: {
6717 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6722 case Intrinsic::eh_sjlj_functioncontext: {
6724 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6727 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6731 case Intrinsic::eh_sjlj_setjmp: {
6736 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6738 DAG.setRoot(
Op.getValue(1));
6741 case Intrinsic::eh_sjlj_longjmp:
6745 case Intrinsic::eh_sjlj_setup_dispatch:
6749 case Intrinsic::masked_gather:
6750 visitMaskedGather(
I);
6752 case Intrinsic::masked_load:
6755 case Intrinsic::masked_scatter:
6756 visitMaskedScatter(
I);
6758 case Intrinsic::masked_store:
6759 visitMaskedStore(
I);
6761 case Intrinsic::masked_expandload:
6762 visitMaskedLoad(
I,
true );
6764 case Intrinsic::masked_compressstore:
6765 visitMaskedStore(
I,
true );
6767 case Intrinsic::powi:
6771 case Intrinsic::log:
6774 case Intrinsic::log2:
6778 case Intrinsic::log10:
6782 case Intrinsic::exp:
6785 case Intrinsic::exp2:
6789 case Intrinsic::pow:
6793 case Intrinsic::sqrt:
6794 case Intrinsic::fabs:
6795 case Intrinsic::sin:
6796 case Intrinsic::cos:
6797 case Intrinsic::tan:
6798 case Intrinsic::asin:
6799 case Intrinsic::acos:
6800 case Intrinsic::atan:
6801 case Intrinsic::sinh:
6802 case Intrinsic::cosh:
6803 case Intrinsic::tanh:
6804 case Intrinsic::exp10:
6805 case Intrinsic::floor:
6806 case Intrinsic::ceil:
6807 case Intrinsic::trunc:
6808 case Intrinsic::rint:
6809 case Intrinsic::nearbyint:
6810 case Intrinsic::round:
6811 case Intrinsic::roundeven:
6812 case Intrinsic::canonicalize: {
6815 switch (Intrinsic) {
6817 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6818 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6819 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6820 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6821 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6822 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6823 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6824 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6825 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6826 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6827 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6828 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6829 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6830 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6831 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6832 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6833 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6834 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6835 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6841 getValue(
I.getArgOperand(0)).getValueType(),
6845 case Intrinsic::atan2:
6847 getValue(
I.getArgOperand(0)).getValueType(),
6851 case Intrinsic::lround:
6852 case Intrinsic::llround:
6853 case Intrinsic::lrint:
6854 case Intrinsic::llrint: {
6857 switch (Intrinsic) {
6859 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6860 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6861 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6862 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6871 case Intrinsic::minnum:
6873 getValue(
I.getArgOperand(0)).getValueType(),
6877 case Intrinsic::maxnum:
6879 getValue(
I.getArgOperand(0)).getValueType(),
6883 case Intrinsic::minimum:
6885 getValue(
I.getArgOperand(0)).getValueType(),
6889 case Intrinsic::maximum:
6891 getValue(
I.getArgOperand(0)).getValueType(),
6895 case Intrinsic::minimumnum:
6897 getValue(
I.getArgOperand(0)).getValueType(),
6901 case Intrinsic::maximumnum:
6903 getValue(
I.getArgOperand(0)).getValueType(),
6907 case Intrinsic::copysign:
6909 getValue(
I.getArgOperand(0)).getValueType(),
6913 case Intrinsic::ldexp:
6915 getValue(
I.getArgOperand(0)).getValueType(),
6919 case Intrinsic::modf:
6920 case Intrinsic::sincos:
6921 case Intrinsic::sincospi:
6922 case Intrinsic::frexp: {
6924 switch (Intrinsic) {
6927 case Intrinsic::sincos:
6928 Opcode = ISD::FSINCOS;
6930 case Intrinsic::sincospi:
6931 Opcode = ISD::FSINCOSPI;
6933 case Intrinsic::modf:
6934 Opcode = ISD::FMODF;
6936 case Intrinsic::frexp:
6937 Opcode = ISD::FFREXP;
6942 SDVTList VTs =
DAG.getVTList(ValueVTs);
6944 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
6947 case Intrinsic::arithmetic_fence: {
6949 getValue(
I.getArgOperand(0)).getValueType(),
6953 case Intrinsic::fma:
6959#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6960 case Intrinsic::INTRINSIC:
6961#include "llvm/IR/ConstrainedOps.def"
6964#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6965#include "llvm/IR/VPIntrinsics.def"
6968 case Intrinsic::fptrunc_round: {
6972 std::optional<RoundingMode> RoundMode =
6980 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
6985 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
6990 case Intrinsic::fmuladd: {
6995 getValue(
I.getArgOperand(0)).getValueType(),
7011 case Intrinsic::convert_to_fp16:
7015 DAG.getTargetConstant(0, sdl,
7018 case Intrinsic::convert_from_fp16:
7021 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7024 case Intrinsic::fptosi_sat: {
7031 case Intrinsic::fptoui_sat: {
7038 case Intrinsic::set_rounding:
7039 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7044 case Intrinsic::is_fpclass: {
7045 const DataLayout DLayout =
DAG.getDataLayout();
7047 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7050 MachineFunction &MF =
DAG.getMachineFunction();
7054 Flags.setNoFPExcept(
7055 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7071 case Intrinsic::get_fpenv: {
7072 const DataLayout DLayout =
DAG.getDataLayout();
7074 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7080 ISD::GET_FPENV, sdl,
7089 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7092 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7093 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7099 case Intrinsic::set_fpenv: {
7100 const DataLayout DLayout =
DAG.getDataLayout();
7103 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7108 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7116 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7118 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7121 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7126 case Intrinsic::reset_fpenv:
7127 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7129 case Intrinsic::get_fpmode:
7131 ISD::GET_FPMODE, sdl,
7138 case Intrinsic::set_fpmode:
7139 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7143 case Intrinsic::reset_fpmode: {
7144 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7148 case Intrinsic::pcmarker: {
7150 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7153 case Intrinsic::readcyclecounter: {
7155 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7156 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7161 case Intrinsic::readsteadycounter: {
7163 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7164 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7169 case Intrinsic::bitreverse:
7171 getValue(
I.getArgOperand(0)).getValueType(),
7174 case Intrinsic::bswap:
7176 getValue(
I.getArgOperand(0)).getValueType(),
7179 case Intrinsic::cttz: {
7187 case Intrinsic::ctlz: {
7195 case Intrinsic::ctpop: {
7201 case Intrinsic::fshl:
7202 case Intrinsic::fshr: {
7203 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7207 EVT VT =
X.getValueType();
7218 case Intrinsic::sadd_sat: {
7224 case Intrinsic::uadd_sat: {
7230 case Intrinsic::ssub_sat: {
7236 case Intrinsic::usub_sat: {
7242 case Intrinsic::sshl_sat: {
7248 case Intrinsic::ushl_sat: {
7254 case Intrinsic::smul_fix:
7255 case Intrinsic::umul_fix:
7256 case Intrinsic::smul_fix_sat:
7257 case Intrinsic::umul_fix_sat: {
7265 case Intrinsic::sdiv_fix:
7266 case Intrinsic::udiv_fix:
7267 case Intrinsic::sdiv_fix_sat:
7268 case Intrinsic::udiv_fix_sat: {
7273 Op1, Op2, Op3,
DAG, TLI));
7276 case Intrinsic::smax: {
7282 case Intrinsic::smin: {
7288 case Intrinsic::umax: {
7294 case Intrinsic::umin: {
7300 case Intrinsic::abs: {
7306 case Intrinsic::scmp: {
7313 case Intrinsic::ucmp: {
7320 case Intrinsic::stacksave: {
7323 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7328 case Intrinsic::stackrestore:
7330 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7332 case Intrinsic::get_dynamic_area_offset: {
7335 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7341 case Intrinsic::stackguard: {
7342 MachineFunction &MF =
DAG.getMachineFunction();
7348 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7353 MachinePointerInfo(
Global, 0), Align,
7362 case Intrinsic::stackprotector: {
7364 MachineFunction &MF =
DAG.getMachineFunction();
7384 Chain, sdl, Src, FIN,
7391 case Intrinsic::objectsize:
7394 case Intrinsic::is_constant:
7397 case Intrinsic::annotation:
7398 case Intrinsic::ptr_annotation:
7399 case Intrinsic::launder_invariant_group:
7400 case Intrinsic::strip_invariant_group:
7405 case Intrinsic::type_test:
7406 case Intrinsic::public_type_test:
7410 case Intrinsic::assume:
7411 case Intrinsic::experimental_noalias_scope_decl:
7412 case Intrinsic::var_annotation:
7413 case Intrinsic::sideeffect:
7418 case Intrinsic::codeview_annotation: {
7420 MachineFunction &MF =
DAG.getMachineFunction();
7424 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7429 case Intrinsic::init_trampoline: {
7437 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7440 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7445 case Intrinsic::adjust_trampoline:
7450 case Intrinsic::gcroot: {
7451 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7452 "only valid in functions with gc specified, enforced by Verifier");
7454 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7461 case Intrinsic::gcread:
7462 case Intrinsic::gcwrite:
7464 case Intrinsic::get_rounding:
7470 case Intrinsic::expect:
7471 case Intrinsic::expect_with_probability:
7477 case Intrinsic::ubsantrap:
7478 case Intrinsic::debugtrap:
7479 case Intrinsic::trap: {
7480 StringRef TrapFuncName =
7481 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7482 if (TrapFuncName.
empty()) {
7483 switch (Intrinsic) {
7484 case Intrinsic::trap:
7485 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7487 case Intrinsic::debugtrap:
7488 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7490 case Intrinsic::ubsantrap:
7492 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7493 DAG.getTargetConstant(
7499 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7500 I.hasFnAttr(Attribute::NoMerge));
7504 if (Intrinsic == Intrinsic::ubsantrap) {
7505 Value *Arg =
I.getArgOperand(0);
7509 TargetLowering::CallLoweringInfo CLI(
DAG);
7510 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7512 DAG.getExternalSymbol(TrapFuncName.
data(),
7515 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7521 case Intrinsic::allow_runtime_check:
7522 case Intrinsic::allow_ubsan_check:
7526 case Intrinsic::uadd_with_overflow:
7527 case Intrinsic::sadd_with_overflow:
7528 case Intrinsic::usub_with_overflow:
7529 case Intrinsic::ssub_with_overflow:
7530 case Intrinsic::umul_with_overflow:
7531 case Intrinsic::smul_with_overflow: {
7533 switch (Intrinsic) {
7535 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7536 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7537 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7538 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7539 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7540 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7546 EVT OverflowVT = MVT::i1;
7551 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7555 case Intrinsic::prefetch: {
7568 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7570 std::nullopt, Flags);
7576 DAG.setRoot(Result);
7579 case Intrinsic::lifetime_start:
7580 case Intrinsic::lifetime_end: {
7581 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7587 if (!LifetimeObject)
7592 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7593 if (SI ==
FuncInfo.StaticAllocaMap.end())
7597 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7601 case Intrinsic::pseudoprobe: {
7609 case Intrinsic::invariant_start:
7614 case Intrinsic::invariant_end:
7617 case Intrinsic::clear_cache: {
7622 {InputChain, StartVal, EndVal});
7627 case Intrinsic::donothing:
7628 case Intrinsic::seh_try_begin:
7629 case Intrinsic::seh_scope_begin:
7630 case Intrinsic::seh_try_end:
7631 case Intrinsic::seh_scope_end:
7634 case Intrinsic::experimental_stackmap:
7637 case Intrinsic::experimental_patchpoint_void:
7638 case Intrinsic::experimental_patchpoint:
7641 case Intrinsic::experimental_gc_statepoint:
7644 case Intrinsic::experimental_gc_result:
7647 case Intrinsic::experimental_gc_relocate:
7650 case Intrinsic::instrprof_cover:
7652 case Intrinsic::instrprof_increment:
7654 case Intrinsic::instrprof_timestamp:
7656 case Intrinsic::instrprof_value_profile:
7658 case Intrinsic::instrprof_mcdc_parameters:
7660 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7662 case Intrinsic::localescape: {
7663 MachineFunction &MF =
DAG.getMachineFunction();
7664 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7668 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7674 "can only escape static allocas");
7679 TII->get(TargetOpcode::LOCAL_ESCAPE))
7687 case Intrinsic::localrecover: {
7689 MachineFunction &MF =
DAG.getMachineFunction();
7695 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7699 Value *
FP =
I.getArgOperand(1);
7705 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7710 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7716 case Intrinsic::fake_use: {
7717 Value *
V =
I.getArgOperand(0);
7722 auto FakeUseValue = [&]() ->
SDValue {
7736 if (!FakeUseValue || FakeUseValue.isUndef())
7739 Ops[1] = FakeUseValue;
7744 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7748 case Intrinsic::eh_exceptionpointer:
7749 case Intrinsic::eh_exceptioncode: {
7755 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7756 if (Intrinsic == Intrinsic::eh_exceptioncode)
7757 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7761 case Intrinsic::xray_customevent: {
7764 const auto &Triple =
DAG.getTarget().getTargetTriple();
7773 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7775 Ops.push_back(LogEntryVal);
7776 Ops.push_back(StrSizeVal);
7777 Ops.push_back(Chain);
7783 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7786 DAG.setRoot(patchableNode);
7790 case Intrinsic::xray_typedevent: {
7793 const auto &Triple =
DAG.getTarget().getTargetTriple();
7805 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7807 Ops.push_back(LogTypeId);
7808 Ops.push_back(LogEntryVal);
7809 Ops.push_back(StrSizeVal);
7810 Ops.push_back(Chain);
7816 MachineSDNode *MN =
DAG.getMachineNode(
7817 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7819 DAG.setRoot(patchableNode);
7823 case Intrinsic::experimental_deoptimize:
7826 case Intrinsic::stepvector:
7829 case Intrinsic::vector_reduce_fadd:
7830 case Intrinsic::vector_reduce_fmul:
7831 case Intrinsic::vector_reduce_add:
7832 case Intrinsic::vector_reduce_mul:
7833 case Intrinsic::vector_reduce_and:
7834 case Intrinsic::vector_reduce_or:
7835 case Intrinsic::vector_reduce_xor:
7836 case Intrinsic::vector_reduce_smax:
7837 case Intrinsic::vector_reduce_smin:
7838 case Intrinsic::vector_reduce_umax:
7839 case Intrinsic::vector_reduce_umin:
7840 case Intrinsic::vector_reduce_fmax:
7841 case Intrinsic::vector_reduce_fmin:
7842 case Intrinsic::vector_reduce_fmaximum:
7843 case Intrinsic::vector_reduce_fminimum:
7844 visitVectorReduce(
I, Intrinsic);
7847 case Intrinsic::icall_branch_funnel: {
7853 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7856 "llvm.icall.branch.funnel operand must be a GlobalValue");
7857 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7859 struct BranchFunnelTarget {
7865 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7868 if (ElemBase !=
Base)
7870 "to the same GlobalValue");
7876 "llvm.icall.branch.funnel operand must be a GlobalValue");
7882 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7883 return T1.Offset < T2.Offset;
7886 for (
auto &
T : Targets) {
7887 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7888 Ops.push_back(
T.Target);
7891 Ops.push_back(
DAG.getRoot());
7892 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7901 case Intrinsic::wasm_landingpad_index:
7907 case Intrinsic::aarch64_settag:
7908 case Intrinsic::aarch64_settag_zero: {
7909 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
7910 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7913 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
7919 case Intrinsic::amdgcn_cs_chain: {
7924 Type *RetTy =
I.getType();
7934 for (
unsigned Idx : {2, 3, 1}) {
7935 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7937 Arg.setAttributes(&
I, Idx);
7938 Args.push_back(Arg);
7941 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7942 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7943 Args[2].IsInReg =
true;
7946 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
7947 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7949 Arg.setAttributes(&
I, Idx);
7950 Args.push_back(Arg);
7953 TargetLowering::CallLoweringInfo CLI(
DAG);
7956 .setCallee(CC, RetTy, Callee, std::move(Args))
7959 .setConvergent(
I.isConvergent());
7961 std::pair<SDValue, SDValue>
Result =
7965 "Should've lowered as tail call");
7970 case Intrinsic::amdgcn_call_whole_wave: {
7972 bool isTailCall =
I.isTailCall();
7975 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
7976 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
7977 I.getArgOperand(Idx)->getType());
7978 Arg.setAttributes(&
I, Idx);
7985 Args.push_back(Arg);
7990 auto *Token = Bundle->Inputs[0].get();
7991 ConvControlToken =
getValue(Token);
7994 TargetLowering::CallLoweringInfo CLI(
DAG);
7998 getValue(
I.getArgOperand(0)), std::move(Args))
8002 .setConvergent(
I.isConvergent())
8003 .setConvergenceControlToken(ConvControlToken);
8006 std::pair<SDValue, SDValue>
Result =
8009 if (
Result.first.getNode())
8013 case Intrinsic::ptrmask: {
8029 auto HighOnes =
DAG.getNode(
8030 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8031 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8034 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8035 }
else if (
Mask.getValueType() != PtrVT)
8036 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8042 case Intrinsic::threadlocal_address: {
8046 case Intrinsic::get_active_lane_mask: {
8050 EVT ElementVT =
Index.getValueType();
8053 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8061 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8062 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8063 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8066 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8071 case Intrinsic::experimental_get_vector_length: {
8073 "Expected positive VF");
8078 EVT CountVT =
Count.getValueType();
8081 visitTargetIntrinsic(
I, Intrinsic);
8090 if (CountVT.
bitsLT(VT)) {
8095 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8105 case Intrinsic::vector_partial_reduce_add: {
8107 visitTargetIntrinsic(
I, Intrinsic);
8117 case Intrinsic::experimental_cttz_elts: {
8120 EVT OpVT =
Op.getValueType();
8123 visitTargetIntrinsic(
I, Intrinsic);
8139 ConstantRange VScaleRange(1,
true);
8168 case Intrinsic::vector_insert: {
8176 if (
Index.getValueType() != VectorIdxTy)
8177 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8184 case Intrinsic::vector_extract: {
8192 if (
Index.getValueType() != VectorIdxTy)
8193 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8199 case Intrinsic::experimental_vector_match: {
8205 EVT ResVT =
Mask.getValueType();
8211 visitTargetIntrinsic(
I, Intrinsic);
8217 for (
unsigned i = 0; i < SearchSize; ++i) {
8220 DAG.getVectorIdxConstant(i, sdl));
8229 case Intrinsic::vector_reverse:
8230 visitVectorReverse(
I);
8232 case Intrinsic::vector_splice:
8233 visitVectorSplice(
I);
8235 case Intrinsic::callbr_landingpad:
8236 visitCallBrLandingPad(
I);
8238 case Intrinsic::vector_interleave2:
8239 visitVectorInterleave(
I, 2);
8241 case Intrinsic::vector_interleave3:
8242 visitVectorInterleave(
I, 3);
8244 case Intrinsic::vector_interleave4:
8245 visitVectorInterleave(
I, 4);
8247 case Intrinsic::vector_interleave5:
8248 visitVectorInterleave(
I, 5);
8250 case Intrinsic::vector_interleave6:
8251 visitVectorInterleave(
I, 6);
8253 case Intrinsic::vector_interleave7:
8254 visitVectorInterleave(
I, 7);
8256 case Intrinsic::vector_interleave8:
8257 visitVectorInterleave(
I, 8);
8259 case Intrinsic::vector_deinterleave2:
8260 visitVectorDeinterleave(
I, 2);
8262 case Intrinsic::vector_deinterleave3:
8263 visitVectorDeinterleave(
I, 3);
8265 case Intrinsic::vector_deinterleave4:
8266 visitVectorDeinterleave(
I, 4);
8268 case Intrinsic::vector_deinterleave5:
8269 visitVectorDeinterleave(
I, 5);
8271 case Intrinsic::vector_deinterleave6:
8272 visitVectorDeinterleave(
I, 6);
8274 case Intrinsic::vector_deinterleave7:
8275 visitVectorDeinterleave(
I, 7);
8277 case Intrinsic::vector_deinterleave8:
8278 visitVectorDeinterleave(
I, 8);
8280 case Intrinsic::experimental_vector_compress:
8282 getValue(
I.getArgOperand(0)).getValueType(),
8287 case Intrinsic::experimental_convergence_anchor:
8288 case Intrinsic::experimental_convergence_entry:
8289 case Intrinsic::experimental_convergence_loop:
8290 visitConvergenceControl(
I, Intrinsic);
8292 case Intrinsic::experimental_vector_histogram_add: {
8293 visitVectorHistogram(
I, Intrinsic);
8296 case Intrinsic::experimental_vector_extract_last_active: {
8297 visitVectorExtractLastActive(
I, Intrinsic);
8300 case Intrinsic::loop_dependence_war_mask:
8306 case Intrinsic::loop_dependence_raw_mask:
8315void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8343 PendingConstrainedFP.push_back(OutChain);
8349 PendingConstrainedFPStrict.push_back(OutChain);
8354 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8356 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8361 Flags.setNoFPExcept(
true);
8364 Flags.copyFMF(*FPOp);
8369#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8370 case Intrinsic::INTRINSIC: \
8371 Opcode = ISD::STRICT_##DAGN; \
8373#include "llvm/IR/ConstrainedOps.def"
8374 case Intrinsic::experimental_constrained_fmuladd: {
8381 pushOutChain(
Mul, EB);
8404 if (TM.Options.NoNaNsFPMath)
8412 pushOutChain(Result, EB);
8419 std::optional<unsigned> ResOPC;
8421 case Intrinsic::vp_ctlz: {
8423 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8426 case Intrinsic::vp_cttz: {
8428 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8431 case Intrinsic::vp_cttz_elts: {
8433 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8436#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8437 case Intrinsic::VPID: \
8438 ResOPC = ISD::VPSD; \
8440#include "llvm/IR/VPIntrinsics.def"
8445 "Inconsistency: no SDNode available for this VPIntrinsic!");
8447 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8448 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8450 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8451 : ISD::VP_REDUCE_FMUL;
8457void SelectionDAGBuilder::visitVPLoad(
8469 Alignment =
DAG.getEVTAlign(VT);
8472 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8473 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8476 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8477 MachinePointerInfo(PtrOperand), MMOFlags,
8479 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8486void SelectionDAGBuilder::visitVPLoadFF(
8489 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8499 Alignment =
DAG.getEVTAlign(VT);
8502 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8503 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8506 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8511 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8514void SelectionDAGBuilder::visitVPGather(
8518 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8530 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8532 *Alignment, AAInfo, Ranges);
8542 EVT IdxVT =
Index.getValueType();
8548 LD =
DAG.getGatherVP(
8549 DAG.getVTList(VT, MVT::Other), VT,
DL,
8550 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8556void SelectionDAGBuilder::visitVPStore(
8560 EVT VT = OpValues[0].getValueType();
8565 Alignment =
DAG.getEVTAlign(VT);
8568 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8571 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8572 MachinePointerInfo(PtrOperand), MMOFlags,
8581void SelectionDAGBuilder::visitVPScatter(
8584 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8586 EVT VT = OpValues[0].getValueType();
8596 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8598 *Alignment, AAInfo);
8608 EVT IdxVT =
Index.getValueType();
8614 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8615 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8616 OpValues[2], OpValues[3]},
8622void SelectionDAGBuilder::visitVPStridedLoad(
8634 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8636 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8639 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8641 *Alignment, AAInfo, Ranges);
8643 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8644 OpValues[2], OpValues[3], MMO,
8652void SelectionDAGBuilder::visitVPStridedStore(
8656 EVT VT = OpValues[0].getValueType();
8662 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8665 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8667 *Alignment, AAInfo);
8671 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8679void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8680 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8691 if (TM.Options.NoNaNsFPMath)
8704 "Unexpected target EVL type");
8707 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8710 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8713void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8721 return visitVPCmp(*CmpI);
8724 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8726 SDVTList VTs =
DAG.getVTList(ValueVTs);
8732 "Unexpected target EVL type");
8736 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8738 if (
I == EVLParamPos)
8745 SDNodeFlags SDFlags;
8753 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8755 case ISD::VP_LOAD_FF:
8756 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8758 case ISD::VP_GATHER:
8759 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8761 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8762 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8765 visitVPStore(VPIntrin, OpValues);
8767 case ISD::VP_SCATTER:
8768 visitVPScatter(VPIntrin, OpValues);
8770 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8771 visitVPStridedStore(VPIntrin, OpValues);
8773 case ISD::VP_FMULADD: {
8774 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8775 SDNodeFlags SDFlags;
8780 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8783 ISD::VP_FMUL,
DL, VTs,
8784 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8786 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8787 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8792 case ISD::VP_IS_FPCLASS: {
8793 const DataLayout DLayout =
DAG.getDataLayout();
8795 auto Constant = OpValues[1]->getAsZExtVal();
8798 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8802 case ISD::VP_INTTOPTR: {
8813 case ISD::VP_PTRTOINT: {
8815 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8828 case ISD::VP_CTLZ_ZERO_UNDEF:
8830 case ISD::VP_CTTZ_ZERO_UNDEF:
8831 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8832 case ISD::VP_CTTZ_ELTS: {
8834 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8844 MachineFunction &MF =
DAG.getMachineFunction();
8852 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8853 if (CallSiteIndex) {
8867 assert(BeginLabel &&
"BeginLabel should've been set");
8869 MachineFunction &MF =
DAG.getMachineFunction();
8881 assert(
II &&
"II should've been set");
8892std::pair<SDValue, SDValue>
8906 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8909 "Non-null chain expected with non-tail call!");
8910 assert((Result.second.getNode() || !Result.first.getNode()) &&
8911 "Null value expected with tail call!");
8913 if (!Result.second.getNode()) {
8920 PendingExports.clear();
8922 DAG.setRoot(Result.second);
8940 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8948 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
8949 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8958 bool isTailCall,
bool isMustTailCall,
8961 auto &
DL =
DAG.getDataLayout();
8968 const Value *SwiftErrorVal =
nullptr;
8975 const Value *V = *
I;
8978 if (V->getType()->isEmptyTy())
8983 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8995 Args.push_back(Entry);
9006 Value *V = Bundle->Inputs[0];
9008 Entry.IsCFGuardTarget =
true;
9009 Args.push_back(Entry);
9022 "Target doesn't support calls with kcfi operand bundles.");
9030 auto *Token = Bundle->Inputs[0].get();
9031 ConvControlToken =
getValue(Token);
9037 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9049 "This target doesn't support calls with ptrauth operand bundles.");
9053 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9055 if (Result.first.getNode()) {
9069 DAG.setRoot(CopyNode);
9085 LoadTy, Builder.DAG.getDataLayout()))
9086 return Builder.getValue(LoadCst);
9092 bool ConstantMemory =
false;
9095 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9096 Root = Builder.DAG.getEntryNode();
9097 ConstantMemory =
true;
9100 Root = Builder.DAG.getRoot();
9105 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr,
9108 if (!ConstantMemory)
9109 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9115void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9118 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9129bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9130 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9131 const Value *
Size =
I.getArgOperand(2);
9134 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9140 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9144 if (Res.first.getNode()) {
9145 processIntegerCallValue(
I, Res.first,
true);
9159 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9160 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9182 switch (NumBitsToCompare) {
9194 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9207 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9208 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9212 processIntegerCallValue(
I, Cmp,
false);
9221bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9222 const Value *Src =
I.getArgOperand(0);
9223 const Value *
Char =
I.getArgOperand(1);
9224 const Value *
Length =
I.getArgOperand(2);
9226 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9227 std::pair<SDValue, SDValue> Res =
9230 MachinePointerInfo(Src));
9231 if (Res.first.getNode()) {
9245bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9250 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9251 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9253 Align Alignment = std::min(DstAlign, SrcAlign);
9262 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9263 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9264 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9266 "** memcpy should not be lowered as TailCall in mempcpy context **");
9270 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9283bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9284 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9286 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9287 std::pair<SDValue, SDValue> Res =
9290 MachinePointerInfo(Arg0),
9291 MachinePointerInfo(Arg1), isStpcpy);
9292 if (Res.first.getNode()) {
9294 DAG.setRoot(Res.second);
9306bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9307 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9309 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9310 std::pair<SDValue, SDValue> Res =
9313 MachinePointerInfo(Arg0),
9314 MachinePointerInfo(Arg1));
9315 if (Res.first.getNode()) {
9316 processIntegerCallValue(
I, Res.first,
true);
9329bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9330 const Value *Arg0 =
I.getArgOperand(0);
9332 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9333 std::pair<SDValue, SDValue> Res =
9335 getValue(Arg0), MachinePointerInfo(Arg0));
9336 if (Res.first.getNode()) {
9337 processIntegerCallValue(
I, Res.first,
false);
9350bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9351 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9353 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9354 std::pair<SDValue, SDValue> Res =
9357 MachinePointerInfo(Arg0));
9358 if (Res.first.getNode()) {
9359 processIntegerCallValue(
I, Res.first,
false);
9372bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9375 if (!
I.onlyReadsMemory())
9392bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9395 if (!
I.onlyReadsMemory())
9408void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9410 if (
I.isInlineAsm()) {
9417 if (Function *
F =
I.getCalledFunction()) {
9418 if (
F->isDeclaration()) {
9420 if (
unsigned IID =
F->getIntrinsicID()) {
9421 visitIntrinsicCall(
I, IID);
9430 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9431 F->hasName() &&
LibInfo->getLibFunc(*
F, Func) &&
9432 LibInfo->hasOptimizedCodeGen(Func)) {
9436 if (visitMemCmpBCmpCall(
I))
9439 case LibFunc_copysign:
9440 case LibFunc_copysignf:
9441 case LibFunc_copysignl:
9444 if (
I.onlyReadsMemory()) {
9455 if (visitUnaryFloatCall(
I, ISD::FABS))
9461 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9467 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9470 case LibFunc_fminimum_num:
9471 case LibFunc_fminimum_numf:
9472 case LibFunc_fminimum_numl:
9473 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9476 case LibFunc_fmaximum_num:
9477 case LibFunc_fmaximum_numf:
9478 case LibFunc_fmaximum_numl:
9479 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9485 if (visitUnaryFloatCall(
I, ISD::FSIN))
9491 if (visitUnaryFloatCall(
I, ISD::FCOS))
9497 if (visitUnaryFloatCall(
I, ISD::FTAN))
9503 if (visitUnaryFloatCall(
I, ISD::FASIN))
9509 if (visitUnaryFloatCall(
I, ISD::FACOS))
9515 if (visitUnaryFloatCall(
I, ISD::FATAN))
9519 case LibFunc_atan2f:
9520 case LibFunc_atan2l:
9521 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9527 if (visitUnaryFloatCall(
I, ISD::FSINH))
9533 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9539 if (visitUnaryFloatCall(
I, ISD::FTANH))
9545 case LibFunc_sqrt_finite:
9546 case LibFunc_sqrtf_finite:
9547 case LibFunc_sqrtl_finite:
9548 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9552 case LibFunc_floorf:
9553 case LibFunc_floorl:
9554 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9557 case LibFunc_nearbyint:
9558 case LibFunc_nearbyintf:
9559 case LibFunc_nearbyintl:
9560 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9566 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9572 if (visitUnaryFloatCall(
I, ISD::FRINT))
9576 case LibFunc_roundf:
9577 case LibFunc_roundl:
9578 if (visitUnaryFloatCall(
I, ISD::FROUND))
9582 case LibFunc_truncf:
9583 case LibFunc_truncl:
9584 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9590 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9596 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9600 case LibFunc_exp10f:
9601 case LibFunc_exp10l:
9602 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9606 case LibFunc_ldexpf:
9607 case LibFunc_ldexpl:
9608 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9611 case LibFunc_memcmp:
9612 if (visitMemCmpBCmpCall(
I))
9615 case LibFunc_mempcpy:
9616 if (visitMemPCpyCall(
I))
9619 case LibFunc_memchr:
9620 if (visitMemChrCall(
I))
9623 case LibFunc_strcpy:
9624 if (visitStrCpyCall(
I,
false))
9627 case LibFunc_stpcpy:
9628 if (visitStrCpyCall(
I,
true))
9631 case LibFunc_strcmp:
9632 if (visitStrCmpCall(
I))
9635 case LibFunc_strlen:
9636 if (visitStrLenCall(
I))
9639 case LibFunc_strnlen:
9640 if (visitStrNLenCall(
I))
9664 if (
I.hasDeoptState())
9681 const Value *Discriminator = PAB->Inputs[1];
9683 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9684 assert(Discriminator->getType()->isIntegerTy(64) &&
9685 "Invalid ptrauth discriminator");
9690 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9691 DAG.getDataLayout()))
9731 for (
const auto &Code : Codes)
9746 SDISelAsmOperandInfo &MatchingOpInfo,
9748 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9754 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9756 OpInfo.ConstraintVT);
9757 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9759 MatchingOpInfo.ConstraintVT);
9760 const bool OutOpIsIntOrFP =
9761 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9762 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9763 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9764 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9767 " with a matching output constraint of"
9768 " incompatible type!");
9770 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9777 SDISelAsmOperandInfo &OpInfo,
9790 const Value *OpVal = OpInfo.CallOperandVal;
9808 DL.getPrefTypeAlign(Ty),
false,
9811 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9814 OpInfo.CallOperand = StackSlot;
9827static std::optional<unsigned>
9829 SDISelAsmOperandInfo &OpInfo,
9830 SDISelAsmOperandInfo &RefOpInfo) {
9841 return std::nullopt;
9845 unsigned AssignedReg;
9848 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9851 return std::nullopt;
9856 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9858 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9867 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9872 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9877 OpInfo.CallOperand =
9878 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9879 OpInfo.ConstraintVT = RegVT;
9883 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9886 OpInfo.CallOperand =
9887 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9888 OpInfo.ConstraintVT = VT;
9895 if (OpInfo.isMatchingInputConstraint())
9896 return std::nullopt;
9898 EVT ValueVT = OpInfo.ConstraintVT;
9899 if (OpInfo.ConstraintVT == MVT::Other)
9903 unsigned NumRegs = 1;
9904 if (OpInfo.ConstraintVT != MVT::Other)
9919 I = std::find(
I, RC->
end(), AssignedReg);
9920 if (
I == RC->
end()) {
9923 return {AssignedReg};
9927 for (; NumRegs; --NumRegs, ++
I) {
9928 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9933 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9934 return std::nullopt;
9939 const std::vector<SDValue> &AsmNodeOperands) {
9942 for (; OperandNo; --OperandNo) {
9944 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9947 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9948 "Skipped past definitions?");
9949 CurOp +=
F.getNumOperandRegisters() + 1;
9960 explicit ExtraFlags(
const CallBase &
Call) {
9962 if (
IA->hasSideEffects())
9964 if (
IA->isAlignStack())
9971 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
9987 unsigned get()
const {
return Flags; }
10010void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10017 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10019 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10023 bool HasSideEffect =
IA->hasSideEffects();
10024 ExtraFlags ExtraInfo(
Call);
10026 for (
auto &
T : TargetConstraints) {
10027 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10028 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10030 if (OpInfo.CallOperandVal)
10031 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10033 if (!HasSideEffect)
10034 HasSideEffect = OpInfo.hasMemory(TLI);
10046 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10047 "' expects an integer constant "
10050 ExtraInfo.update(
T);
10058 if (EmitEHLabels) {
10059 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10063 if (IsCallBr || EmitEHLabels) {
10071 if (EmitEHLabels) {
10072 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10077 IA->collectAsmStrs(AsmStrs);
10080 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10088 if (OpInfo.hasMatchingInput()) {
10089 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10120 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10123 OpInfo.isIndirect =
false;
10130 !OpInfo.isIndirect) {
10131 assert((OpInfo.isMultipleAlternative ||
10133 "Can only indirectify direct input operands!");
10139 OpInfo.CallOperandVal =
nullptr;
10142 OpInfo.isIndirect =
true;
10148 std::vector<SDValue> AsmNodeOperands;
10149 AsmNodeOperands.push_back(
SDValue());
10150 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10157 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10161 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10166 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10168 SDISelAsmOperandInfo &RefOpInfo =
10169 OpInfo.isMatchingInputConstraint()
10170 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10172 const auto RegError =
10175 const MachineFunction &MF =
DAG.getMachineFunction();
10177 const char *
RegName =
TRI.getName(*RegError);
10178 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10179 "' allocated for constraint '" +
10180 Twine(OpInfo.ConstraintCode) +
10181 "' does not match required type");
10185 auto DetectWriteToReservedRegister = [&]() {
10186 const MachineFunction &MF =
DAG.getMachineFunction();
10191 emitInlineAsmError(
Call,
"write to reserved register '" +
10200 !OpInfo.isMatchingInputConstraint())) &&
10201 "Only address as input operand is allowed.");
10203 switch (OpInfo.Type) {
10209 "Failed to convert memory constraint code to constraint id.");
10213 OpFlags.setMemConstraint(ConstraintID);
10214 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10216 AsmNodeOperands.push_back(OpInfo.CallOperand);
10221 if (OpInfo.AssignedRegs.
Regs.empty()) {
10222 emitInlineAsmError(
10223 Call,
"couldn't allocate output register for constraint '" +
10224 Twine(OpInfo.ConstraintCode) +
"'");
10228 if (DetectWriteToReservedRegister())
10242 SDValue InOperandVal = OpInfo.CallOperand;
10244 if (OpInfo.isMatchingInputConstraint()) {
10249 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10250 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10251 if (OpInfo.isIndirect) {
10253 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10254 "don't know how to handle tied "
10255 "indirect register inputs");
10260 MachineFunction &MF =
DAG.getMachineFunction();
10265 MVT RegVT =
R->getSimpleValueType(0);
10266 const TargetRegisterClass *RC =
10269 :
TRI.getMinimalPhysRegClass(TiedReg);
10270 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10273 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10277 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10279 OpInfo.getMatchedOperand(), dl,
DAG,
10284 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10285 assert(
Flag.getNumOperandRegisters() == 1 &&
10286 "Unexpected number of operands");
10289 Flag.clearMemConstraint();
10290 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10291 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10293 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10304 std::vector<SDValue>
Ops;
10310 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10311 Twine(OpInfo.ConstraintCode) +
"'");
10315 emitInlineAsmError(
Call,
10316 "invalid operand for inline asm constraint '" +
10317 Twine(OpInfo.ConstraintCode) +
"'");
10323 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10330 assert((OpInfo.isIndirect ||
10332 "Operand must be indirect to be a mem!");
10335 "Memory operands expect pointer values");
10340 "Failed to convert memory constraint code to constraint id.");
10344 ResOpType.setMemConstraint(ConstraintID);
10345 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10348 AsmNodeOperands.push_back(InOperandVal);
10356 "Failed to convert memory constraint code to constraint id.");
10360 SDValue AsmOp = InOperandVal;
10364 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10370 ResOpType.setMemConstraint(ConstraintID);
10372 AsmNodeOperands.push_back(
10375 AsmNodeOperands.push_back(AsmOp);
10381 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10382 Twine(OpInfo.ConstraintCode) +
"'");
10387 if (OpInfo.isIndirect) {
10388 emitInlineAsmError(
10389 Call,
"Don't know how to handle indirect register inputs yet "
10390 "for constraint '" +
10391 Twine(OpInfo.ConstraintCode) +
"'");
10396 if (OpInfo.AssignedRegs.
Regs.empty()) {
10397 emitInlineAsmError(
Call,
10398 "couldn't allocate input reg for constraint '" +
10399 Twine(OpInfo.ConstraintCode) +
"'");
10403 if (DetectWriteToReservedRegister())
10412 0, dl,
DAG, AsmNodeOperands);
10418 if (!OpInfo.AssignedRegs.
Regs.empty())
10428 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10430 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10432 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10444 ResultTypes = StructResult->elements();
10445 else if (!CallResultType->
isVoidTy())
10446 ResultTypes =
ArrayRef(CallResultType);
10448 auto CurResultType = ResultTypes.
begin();
10449 auto handleRegAssign = [&](
SDValue V) {
10450 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10451 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10452 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10464 if (ResultVT !=
V.getValueType() &&
10467 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10468 V.getValueType().isInteger()) {
10474 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10480 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10484 if (OpInfo.AssignedRegs.
Regs.empty())
10487 switch (OpInfo.ConstraintType) {
10491 Chain, &Glue, &
Call);
10503 assert(
false &&
"Unexpected unknown constraint");
10507 if (OpInfo.isIndirect) {
10508 const Value *
Ptr = OpInfo.CallOperandVal;
10509 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10511 MachinePointerInfo(
Ptr));
10518 handleRegAssign(V);
10520 handleRegAssign(Val);
10526 if (!ResultValues.
empty()) {
10527 assert(CurResultType == ResultTypes.
end() &&
10528 "Mismatch in number of ResultTypes");
10530 "Mismatch in number of output operands in asm result");
10533 DAG.getVTList(ResultVTs), ResultValues);
10538 if (!OutChains.
empty())
10541 if (EmitEHLabels) {
10546 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10548 DAG.setRoot(Chain);
10551void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10552 const Twine &Message) {
10553 LLVMContext &Ctx = *
DAG.getContext();
10557 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10561 if (ValueVTs.
empty())
10565 for (
const EVT &VT : ValueVTs)
10566 Ops.push_back(
DAG.getUNDEF(VT));
10571void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10575 DAG.getSrcValue(
I.getArgOperand(0))));
10578void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10579 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10580 const DataLayout &
DL =
DAG.getDataLayout();
10584 DL.getABITypeAlign(
I.getType()).value());
10585 DAG.setRoot(
V.getValue(1));
10587 if (
I.getType()->isPointerTy())
10588 V =
DAG.getPtrExtOrTrunc(
10593void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10597 DAG.getSrcValue(
I.getArgOperand(0))));
10600void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10605 DAG.getSrcValue(
I.getArgOperand(0)),
10606 DAG.getSrcValue(
I.getArgOperand(1))));
10612 std::optional<ConstantRange> CR =
getRange(
I);
10614 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10617 APInt Lo = CR->getUnsignedMin();
10618 if (!
Lo.isMinValue())
10621 APInt Hi = CR->getUnsignedMax();
10622 unsigned Bits = std::max(
Hi.getActiveBits(),
10630 DAG.getValueType(SmallVT));
10631 unsigned NumVals =
Op.getNode()->getNumValues();
10637 Ops.push_back(ZExt);
10638 for (
unsigned I = 1;
I != NumVals; ++
I)
10639 Ops.push_back(
Op.getValue(
I));
10641 return DAG.getMergeValues(
Ops,
SL);
10652 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10655 Args.reserve(NumArgs);
10659 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10660 ArgI != ArgE; ++ArgI) {
10661 const Value *V =
Call->getOperand(ArgI);
10663 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10666 Entry.setAttributes(
Call, ArgI);
10667 Args.push_back(Entry);
10672 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10701 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10710 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10716void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10742 Ops.push_back(Chain);
10743 Ops.push_back(InGlue);
10750 assert(
ID.getValueType() == MVT::i64);
10752 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10753 Ops.push_back(IDConst);
10759 Ops.push_back(ShadConst);
10765 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10766 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10769 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10774 DAG.setRoot(Chain);
10777 FuncInfo.MF->getFrameInfo().setHasStackMap();
10781void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10798 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10801 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10802 SDLoc(SymbolicCallee),
10803 SymbolicCallee->getValueType(0));
10813 "Not enough arguments provided to the patchpoint intrinsic");
10816 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10820 TargetLowering::CallLoweringInfo CLI(
DAG);
10825 SDNode *CallEnd =
Result.second.getNode();
10826 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10834 "Expected a callseq node.");
10836 bool HasGlue =
Call->getGluedNode();
10861 Ops.push_back(Callee);
10867 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10868 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10871 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10876 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10887 if (IsAnyRegCC && HasDef) {
10889 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10892 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10897 NodeTys =
DAG.getVTList(ValueVTs);
10899 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10902 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
10916 if (IsAnyRegCC && HasDef) {
10919 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10925 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10928void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10930 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10933 if (
I.arg_size() > 1)
10938 SDNodeFlags SDFlags;
10942 switch (Intrinsic) {
10943 case Intrinsic::vector_reduce_fadd:
10946 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10949 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10951 case Intrinsic::vector_reduce_fmul:
10954 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10957 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10959 case Intrinsic::vector_reduce_add:
10960 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10962 case Intrinsic::vector_reduce_mul:
10963 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10965 case Intrinsic::vector_reduce_and:
10966 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10968 case Intrinsic::vector_reduce_or:
10969 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10971 case Intrinsic::vector_reduce_xor:
10972 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10974 case Intrinsic::vector_reduce_smax:
10975 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10977 case Intrinsic::vector_reduce_smin:
10978 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10980 case Intrinsic::vector_reduce_umax:
10981 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10983 case Intrinsic::vector_reduce_umin:
10984 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10986 case Intrinsic::vector_reduce_fmax:
10987 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10989 case Intrinsic::vector_reduce_fmin:
10990 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10992 case Intrinsic::vector_reduce_fmaximum:
10993 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10995 case Intrinsic::vector_reduce_fminimum:
10996 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11009 Attrs.push_back(Attribute::SExt);
11011 Attrs.push_back(Attribute::ZExt);
11013 Attrs.push_back(Attribute::InReg);
11015 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11023std::pair<SDValue, SDValue>
11037 "Only supported for non-aggregate returns");
11040 for (
Type *Ty : RetOrigTys)
11049 RetOrigTys.
swap(OldRetOrigTys);
11050 RetVTs.
swap(OldRetVTs);
11051 Offsets.swap(OldOffsets);
11053 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11054 EVT RetVT = OldRetVTs[i];
11058 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11059 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11060 RetVTs.
append(NumRegs, RegisterVT);
11061 for (
unsigned j = 0; j != NumRegs; ++j)
11074 int DemoteStackIdx = -100;
11087 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11088 Entry.IsSRet =
true;
11089 Entry.Alignment = Alignment;
11101 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11103 if (NeedsRegBlock) {
11104 Flags.setInConsecutiveRegs();
11105 if (
I == RetVTs.
size() - 1)
11106 Flags.setInConsecutiveRegsLast();
11108 EVT VT = RetVTs[
I];
11112 for (
unsigned i = 0; i != NumRegs; ++i) {
11116 Ret.Flags.setPointer();
11117 Ret.Flags.setPointerAddrSpace(
11121 Ret.Flags.setSExt();
11123 Ret.Flags.setZExt();
11125 Ret.Flags.setInReg();
11126 CLI.
Ins.push_back(Ret);
11135 if (Arg.IsSwiftError) {
11141 CLI.
Ins.push_back(Ret);
11149 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11153 Type *FinalType = Args[i].Ty;
11154 if (Args[i].IsByVal)
11155 FinalType = Args[i].IndirectType;
11158 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11161 Type *ArgTy = OrigArgTy;
11162 if (Args[i].Ty != Args[i].OrigTy) {
11163 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11164 ArgTy = Args[i].Ty;
11169 Args[i].Node.getResNo() +
Value);
11176 Flags.setOrigAlign(OriginalAlignment);
11181 Flags.setPointer();
11184 if (Args[i].IsZExt)
11186 if (Args[i].IsSExt)
11188 if (Args[i].IsNoExt)
11190 if (Args[i].IsInReg) {
11197 Flags.setHvaStart();
11203 if (Args[i].IsSRet)
11205 if (Args[i].IsSwiftSelf)
11206 Flags.setSwiftSelf();
11207 if (Args[i].IsSwiftAsync)
11208 Flags.setSwiftAsync();
11209 if (Args[i].IsSwiftError)
11210 Flags.setSwiftError();
11211 if (Args[i].IsCFGuardTarget)
11212 Flags.setCFGuardTarget();
11213 if (Args[i].IsByVal)
11215 if (Args[i].IsByRef)
11217 if (Args[i].IsPreallocated) {
11218 Flags.setPreallocated();
11226 if (Args[i].IsInAlloca) {
11227 Flags.setInAlloca();
11236 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11237 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11238 Flags.setByValSize(FrameSize);
11241 if (
auto MA = Args[i].Alignment)
11245 }
else if (
auto MA = Args[i].Alignment) {
11248 MemAlign = OriginalAlignment;
11250 Flags.setMemAlign(MemAlign);
11251 if (Args[i].IsNest)
11254 Flags.setInConsecutiveRegs();
11257 unsigned NumParts =
11262 if (Args[i].IsSExt)
11264 else if (Args[i].IsZExt)
11269 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11274 Args[i].Ty->getPointerAddressSpace())) &&
11275 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11288 CLI.
RetZExt == Args[i].IsZExt))
11289 Flags.setReturned();
11295 for (
unsigned j = 0; j != NumParts; ++j) {
11301 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11302 if (NumParts > 1 && j == 0)
11306 if (j == NumParts - 1)
11310 CLI.
Outs.push_back(MyFlags);
11311 CLI.
OutVals.push_back(Parts[j]);
11314 if (NeedsRegBlock &&
Value == NumValues - 1)
11315 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11327 "LowerCall didn't return a valid chain!");
11329 "LowerCall emitted a return value for a tail call!");
11331 "LowerCall didn't emit the correct number of values!");
11343 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11344 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11345 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11346 "LowerCall emitted a value with the wrong type!");
11356 unsigned NumValues = RetVTs.
size();
11357 ReturnValues.
resize(NumValues);
11364 for (
unsigned i = 0; i < NumValues; ++i) {
11371 DemoteStackIdx, Offsets[i]),
11373 ReturnValues[i] = L;
11374 Chains[i] = L.getValue(1);
11381 std::optional<ISD::NodeType> AssertOp;
11386 unsigned CurReg = 0;
11387 for (
EVT VT : RetVTs) {
11393 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11401 if (ReturnValues.
empty())
11407 return std::make_pair(Res, CLI.
Chain);
11424 if (
N->getNumValues() == 1) {
11432 "Lowering returned the wrong number of results!");
11435 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11449 "Copy from a reg to the same reg!");
11450 assert(!Reg.isPhysical() &&
"Is a physreg");
11456 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11461 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11462 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11463 ExtendType = PreferredExtendIt->second;
11466 PendingExports.push_back(Chain);
11478 return A->use_empty();
11480 const BasicBlock &Entry =
A->getParent()->front();
11481 for (
const User *U :
A->users())
11490 std::pair<const AllocaInst *, const StoreInst *>>;
11502 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11504 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11505 StaticAllocas.
reserve(NumArgs * 2);
11507 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11510 V = V->stripPointerCasts();
11512 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11515 return &Iter.first->second;
11532 if (
I.isDebugOrPseudoInst())
11536 for (
const Use &U :
I.operands()) {
11537 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11538 *
Info = StaticAllocaInfo::Clobbered;
11544 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11545 *
Info = StaticAllocaInfo::Clobbered;
11548 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11549 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11555 if (*
Info != StaticAllocaInfo::Unknown)
11563 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11565 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11569 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11570 ArgCopyElisionCandidates.count(Arg)) {
11571 *
Info = StaticAllocaInfo::Clobbered;
11575 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11579 *
Info = StaticAllocaInfo::Elidable;
11580 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11585 if (ArgCopyElisionCandidates.size() == NumArgs)
11609 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11610 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11611 const AllocaInst *AI = ArgCopyIter->second.first;
11612 int FixedIndex = FINode->getIndex();
11614 int OldIndex = AllocaIndex;
11618 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11624 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11625 "greater than stack argument alignment ("
11626 <<
DebugStr(RequiredAlignment) <<
" vs "
11634 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11635 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11641 AllocaIndex = FixedIndex;
11642 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11643 for (
SDValue ArgVal : ArgVals)
11647 const StoreInst *
SI = ArgCopyIter->second.second;
11660void SelectionDAGISel::LowerArguments(
const Function &
F) {
11661 SelectionDAG &DAG =
SDB->DAG;
11662 SDLoc dl =
SDB->getCurSDLoc();
11667 if (
F.hasFnAttribute(Attribute::Naked))
11672 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11674 ISD::ArgFlagsTy
Flags;
11676 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11677 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11679 Ins.push_back(RetArg);
11687 ArgCopyElisionCandidates);
11690 for (
const Argument &Arg :
F.args()) {
11691 unsigned ArgNo = Arg.getArgNo();
11694 bool isArgValueUsed = !Arg.
use_empty();
11695 unsigned PartBase = 0;
11697 if (Arg.hasAttribute(Attribute::ByVal))
11698 FinalType = Arg.getParamByValType();
11699 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11700 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11701 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11704 EVT VT =
TLI->getValueType(
DL, ArgTy);
11705 ISD::ArgFlagsTy
Flags;
11708 Flags.setPointer();
11711 if (Arg.hasAttribute(Attribute::ZExt))
11713 if (Arg.hasAttribute(Attribute::SExt))
11715 if (Arg.hasAttribute(Attribute::InReg)) {
11722 Flags.setHvaStart();
11728 if (Arg.hasAttribute(Attribute::StructRet))
11730 if (Arg.hasAttribute(Attribute::SwiftSelf))
11731 Flags.setSwiftSelf();
11732 if (Arg.hasAttribute(Attribute::SwiftAsync))
11733 Flags.setSwiftAsync();
11734 if (Arg.hasAttribute(Attribute::SwiftError))
11735 Flags.setSwiftError();
11736 if (Arg.hasAttribute(Attribute::ByVal))
11738 if (Arg.hasAttribute(Attribute::ByRef))
11740 if (Arg.hasAttribute(Attribute::InAlloca)) {
11741 Flags.setInAlloca();
11749 if (Arg.hasAttribute(Attribute::Preallocated)) {
11750 Flags.setPreallocated();
11762 const Align OriginalAlignment(
11763 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11764 Flags.setOrigAlign(OriginalAlignment);
11767 Type *ArgMemTy =
nullptr;
11768 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11771 ArgMemTy = Arg.getPointeeInMemoryValueType();
11773 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11778 if (
auto ParamAlign = Arg.getParamStackAlign())
11779 MemAlign = *ParamAlign;
11780 else if ((ParamAlign = Arg.getParamAlign()))
11781 MemAlign = *ParamAlign;
11783 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11784 if (
Flags.isByRef())
11785 Flags.setByRefSize(MemSize);
11787 Flags.setByValSize(MemSize);
11788 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11789 MemAlign = *ParamAlign;
11791 MemAlign = OriginalAlignment;
11793 Flags.setMemAlign(MemAlign);
11795 if (Arg.hasAttribute(Attribute::Nest))
11798 Flags.setInConsecutiveRegs();
11799 if (ArgCopyElisionCandidates.count(&Arg))
11800 Flags.setCopyElisionCandidate();
11801 if (Arg.hasAttribute(Attribute::Returned))
11802 Flags.setReturned();
11804 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11805 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11806 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11807 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11808 for (
unsigned i = 0; i != NumRegs; ++i) {
11812 ISD::InputArg MyFlags(
11813 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11815 if (NumRegs > 1 && i == 0)
11816 MyFlags.Flags.setSplit();
11819 MyFlags.Flags.setOrigAlign(
Align(1));
11820 if (i == NumRegs - 1)
11821 MyFlags.Flags.setSplitEnd();
11823 Ins.push_back(MyFlags);
11825 if (NeedsRegBlock &&
Value == NumValues - 1)
11826 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11833 SDValue NewRoot =
TLI->LowerFormalArguments(
11834 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11838 "LowerFormalArguments didn't return a valid chain!");
11840 "LowerFormalArguments didn't emit the correct number of values!");
11842 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11844 "LowerFormalArguments emitted a null value!");
11846 "LowerFormalArguments emitted a value with the wrong type!");
11858 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11859 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11860 std::optional<ISD::NodeType> AssertOp;
11863 F.getCallingConv(), AssertOp);
11865 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11866 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11868 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11869 FuncInfo->DemoteRegister = SRetReg;
11871 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11879 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11880 for (
const Argument &Arg :
F.args()) {
11884 unsigned NumValues = ValueVTs.
size();
11885 if (NumValues == 0)
11892 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11893 unsigned NumParts = 0;
11894 for (EVT VT : ValueVTs)
11895 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
11896 F.getCallingConv(), VT);
11900 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11905 bool isSwiftErrorArg =
11906 TLI->supportSwiftError() &&
11907 Arg.hasAttribute(Attribute::SwiftError);
11908 if (!ArgHasUses && !isSwiftErrorArg) {
11909 SDB->setUnusedArgValue(&Arg, InVals[i]);
11912 if (FrameIndexSDNode *FI =
11914 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11917 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11918 EVT VT = ValueVTs[Val];
11919 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
11920 F.getCallingConv(), VT);
11921 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
11922 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11927 if (ArgHasUses || isSwiftErrorArg) {
11928 std::optional<ISD::NodeType> AssertOp;
11929 if (Arg.hasAttribute(Attribute::SExt))
11931 else if (Arg.hasAttribute(Attribute::ZExt))
11936 NewRoot,
F.getCallingConv(), AssertOp);
11939 if (NoFPClass !=
fcNone) {
11941 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
11943 OutVal, SDNoFPClass);
11952 if (ArgValues.
empty())
11956 if (FrameIndexSDNode *FI =
11958 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11961 SDB->getCurSDLoc());
11963 SDB->setValue(&Arg, Res);
11973 if (LoadSDNode *LNode =
11975 if (FrameIndexSDNode *FI =
11977 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12005 FuncInfo->InitializeRegForValue(&Arg);
12006 SDB->CopyToExportRegsIfNeeded(&Arg);
12010 if (!Chains.
empty()) {
12017 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12021 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12022 for (MachineFunction::VariableDbgInfo &VI :
12023 MF->getInStackSlotVariableDbgInfo()) {
12024 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12025 if (
I != ArgCopyElisionFrameIndexMap.
end())
12026 VI.updateStackSlot(
I->second);
12041SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12042 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12044 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12050 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12054 if (!SuccsHandled.
insert(SuccMBB).second)
12062 for (
const PHINode &PN : SuccBB->phis()) {
12064 if (PN.use_empty())
12068 if (PN.getType()->isEmptyTy())
12072 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12077 RegOut =
FuncInfo.CreateRegs(&PN);
12095 "Didn't codegen value into a register!??");
12105 for (EVT VT : ValueVTs) {
12107 for (
unsigned i = 0; i != NumRegisters; ++i)
12109 Reg += NumRegisters;
12129void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12131 if (MaybeTC.
getNode() !=
nullptr)
12132 DAG.setRoot(MaybeTC);
12137void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12140 MachineFunction *CurMF =
FuncInfo.MF;
12141 MachineBasicBlock *NextMBB =
nullptr;
12146 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12148 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12150 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12158 CaseCluster &
Small = *
W.FirstCluster;
12159 CaseCluster &
Big = *
W.LastCluster;
12163 const APInt &SmallValue =
Small.Low->getValue();
12164 const APInt &BigValue =
Big.Low->getValue();
12167 APInt CommonBit = BigValue ^ SmallValue;
12174 DAG.getConstant(CommonBit,
DL, VT));
12176 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12182 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12184 addSuccessorWithProb(
12185 SwitchMBB, DefaultMBB,
12189 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12196 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12197 DAG.getBasicBlock(DefaultMBB));
12199 DAG.setRoot(BrCond);
12211 [](
const CaseCluster &a,
const CaseCluster &b) {
12212 return a.Prob != b.Prob ?
12214 a.Low->getValue().slt(b.Low->getValue());
12221 if (
I->Prob >
W.LastCluster->Prob)
12223 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12231 BranchProbability DefaultProb =
W.DefaultProb;
12232 BranchProbability UnhandledProbs = DefaultProb;
12234 UnhandledProbs +=
I->Prob;
12236 MachineBasicBlock *CurMBB =
W.MBB;
12238 bool FallthroughUnreachable =
false;
12239 MachineBasicBlock *Fallthrough;
12240 if (
I ==
W.LastCluster) {
12242 Fallthrough = DefaultMBB;
12247 CurMF->
insert(BBI, Fallthrough);
12251 UnhandledProbs -=
I->Prob;
12256 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12257 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12260 MachineBasicBlock *JumpMBB =
JT->MBB;
12261 CurMF->
insert(BBI, JumpMBB);
12263 auto JumpProb =
I->Prob;
12264 auto FallthroughProb = UnhandledProbs;
12272 if (*SI == DefaultMBB) {
12273 JumpProb += DefaultProb / 2;
12274 FallthroughProb -= DefaultProb / 2;
12292 if (FallthroughUnreachable) {
12299 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12300 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12306 JT->Default = Fallthrough;
12309 if (CurMBB == SwitchMBB) {
12317 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12320 for (BitTestCase &BTC : BTB->
Cases)
12332 BTB->
Prob += DefaultProb / 2;
12336 if (FallthroughUnreachable)
12340 if (CurMBB == SwitchMBB) {
12347 const Value *
RHS, *
LHS, *MHS;
12349 if (
I->Low ==
I->High) {
12364 if (FallthroughUnreachable)
12368 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12371 if (CurMBB == SwitchMBB)
12374 SL->SwitchCases.push_back(CB);
12379 CurMBB = Fallthrough;
12383void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12384 const SwitchWorkListItem &W,
12387 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12388 "Clusters not sorted?");
12389 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12391 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12392 SL->computeSplitWorkItemInfo(W);
12397 assert(PivotCluster >
W.FirstCluster);
12398 assert(PivotCluster <=
W.LastCluster);
12403 const ConstantInt *Pivot = PivotCluster->Low;
12412 MachineBasicBlock *LeftMBB;
12413 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12414 FirstLeft->Low ==
W.GE &&
12415 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12416 LeftMBB = FirstLeft->MBB;
12418 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12419 FuncInfo.MF->insert(BBI, LeftMBB);
12421 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12429 MachineBasicBlock *RightMBB;
12430 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12431 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12432 RightMBB = FirstRight->MBB;
12434 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12435 FuncInfo.MF->insert(BBI, RightMBB);
12437 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12443 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12446 if (
W.MBB == SwitchMBB)
12449 SL->SwitchCases.push_back(CB);
12474 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12482 unsigned PeeledCaseIndex = 0;
12483 bool SwitchPeeled =
false;
12484 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12485 CaseCluster &CC = Clusters[
Index];
12486 if (CC.
Prob < TopCaseProb)
12488 TopCaseProb = CC.
Prob;
12489 PeeledCaseIndex =
Index;
12490 SwitchPeeled =
true;
12495 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12496 << TopCaseProb <<
"\n");
12501 MachineBasicBlock *PeeledSwitchMBB =
12503 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12506 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12507 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12508 nullptr,
nullptr, TopCaseProb.
getCompl()};
12509 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12511 Clusters.erase(PeeledCaseIt);
12512 for (CaseCluster &CC : Clusters) {
12514 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12515 << CC.
Prob <<
"\n");
12519 PeeledCaseProb = TopCaseProb;
12520 return PeeledSwitchMBB;
12523void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12525 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12527 Clusters.reserve(
SI.getNumCases());
12528 for (
auto I :
SI.cases()) {
12529 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12530 const ConstantInt *CaseVal =
I.getCaseValue();
12531 BranchProbability Prob =
12533 : BranchProbability(1,
SI.getNumCases() + 1);
12537 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12546 MachineBasicBlock *PeeledSwitchMBB =
12547 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12550 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12551 if (Clusters.empty()) {
12552 assert(PeeledSwitchMBB == SwitchMBB);
12554 if (DefaultMBB != NextBlock(SwitchMBB)) {
12561 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12563 SL->findBitTestClusters(Clusters, &SI);
12566 dbgs() <<
"Case clusters: ";
12567 for (
const CaseCluster &
C : Clusters) {
12573 C.Low->getValue().print(
dbgs(),
true);
12574 if (
C.Low !=
C.High) {
12576 C.High->getValue().print(
dbgs(),
true);
12583 assert(!Clusters.empty());
12587 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12591 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12594 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12596 while (!WorkList.
empty()) {
12598 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12603 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12607 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12611void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12612 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12618void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12619 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12624 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12633 SmallVector<int, 8>
Mask;
12635 for (
unsigned i = 0; i != NumElts; ++i)
12636 Mask.push_back(NumElts - 1 - i);
12641void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12650 EVT OutVT = ValueVTs[0];
12654 for (
unsigned i = 0; i != Factor; ++i) {
12655 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12657 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12663 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12665 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12673 DAG.getVTList(ValueVTs), SubVecs);
12677void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12680 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12685 for (
unsigned i = 0; i < Factor; ++i) {
12688 "Expected VTs to be the same");
12706 for (
unsigned i = 0; i < Factor; ++i)
12713void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12717 unsigned NumValues = ValueVTs.
size();
12718 if (NumValues == 0)
return;
12723 for (
unsigned i = 0; i != NumValues; ++i)
12728 DAG.getVTList(ValueVTs), Values));
12731void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12732 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12744 DAG.getSignedConstant(
12751 uint64_t Idx = (NumElts +
Imm) % NumElts;
12754 SmallVector<int, 8>
Mask;
12755 for (
unsigned i = 0; i < NumElts; ++i)
12756 Mask.push_back(Idx + i);
12784 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12785 "start of copy chain MUST be COPY");
12786 Reg =
MI->getOperand(1).getReg();
12789 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12790 MI =
MRI.def_begin(
Reg)->getParent();
12793 if (
MI->getOpcode() == TargetOpcode::COPY) {
12794 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12795 Reg =
MI->getOperand(1).getReg();
12796 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12799 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12800 "end of copy chain MUST be INLINEASM_BR");
12810void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12816 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12817 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12818 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12826 for (
auto &
T : TargetConstraints) {
12827 SDISelAsmOperandInfo OpInfo(
T);
12835 switch (OpInfo.ConstraintType) {
12846 FuncInfo.MBB->addLiveIn(OriginalDef);
12854 ResultVTs.
push_back(OpInfo.ConstraintVT);
12863 ResultVTs.
push_back(OpInfo.ConstraintVT);
12871 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li)
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.vector.partial.reduce.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
LOOP_DEPENDENCE_WAR_MASK(PtrA, PtrB, EltSize) - Compute a lane mask describing write-after-read (WAR) loop dependences between the two pointers.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple value type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)