#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

  /// Helper function to access this as a T.
  const T *thisT() const { return static_cast<const T *>(this); }
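thisT() is the usual CRTP downcast: every cost query below calls through it so that a target's overrides are picked up without virtual dispatch. A minimal, self-contained sketch of the pattern follows; the names CostModelBase, MyTargetCostModel, getOpCost and getTotalCost are illustrative only, not LLVM APIs.

#include <iostream>

// CRTP dispatch as used by thisT(): the base class calls hooks on the
// most-derived type without any virtual functions.
template <typename T> class CostModelBase {
protected:
  const T *thisT() const { return static_cast<const T *>(this); }

public:
  int getTotalCost() const {
    return 2 * thisT()->getOpCost(); // resolves to the derived override
  }
  int getOpCost() const { return 1; } // default implementation
};

class MyTargetCostModel : public CostModelBase<MyTargetCostModel> {
public:
  int getOpCost() const { return 3; } // target-specific override
};

int main() {
  MyTargetCostModel M;
  std::cout << M.getTotalCost() << "\n"; // prints 6, not 2
}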
  // From getBroadcastShuffleOverhead: a broadcast is costed as one extract
  // of lane 0 plus one insert per result lane i.
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                      CostKind, 0, nullptr, nullptr);
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                      CostKind, i, nullptr, nullptr);

  // From getPermuteShuffleOverhead: a general permute is costed as one
  // insert plus one extract per element i.
  Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                      CostKind, i, nullptr, nullptr);
  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                      CostKind, i, nullptr, nullptr);
137 "Can only extract subvectors from vectors");
139 assert((!isa<FixedVectorType>(VTy) ||
140 (
Index + NumSubElts) <=
142 "SK_ExtractSubvector index out of range");
148 for (
int i = 0; i != NumSubElts; ++i) {
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
165 "Can only insert subvectors into vectors");
167 assert((!isa<FixedVectorType>(VTy) ||
168 (
Index + NumSubElts) <=
170 "SK_InsertSubvector index out of range");
176 for (
int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
CostKind,
181 i +
Index,
nullptr,
nullptr);
  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
  InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                              Align Alignment,
                                              bool VariableMask,
                                              bool IsGatherScatter,
                                              TTI::TargetCostKind CostKind,
                                              unsigned AddressSpace = 0) const {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(DataTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(DataTy);
    unsigned VF = VT->getNumElements();

    // AddrExtractCost: for gathers/scatters, the per-lane cost of extracting
    // the pointers from the address vector (elided here).
    InstructionCost AddrExtractCost = 0;
    // ...
    InstructionCost MemoryOpCost =
        VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
                                      AddressSpace, CostKind);
    // The cost of packing the lanes back into (or out of) a vector.
    InstructionCost PackingCost = getScalarizationOverhead(
        VT, Opcode != Instruction::Store, Opcode == Instruction::Store,
        CostKind);
    // A variable mask turns every lane into a branch-and-phi diamond.
    InstructionCost ConditionalCost = 0;
    if (VariableMask)
      ConditionalCost =
          VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
                thisT()->getCFInstrCost(Instruction::PHI, CostKind));

    return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
  }
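The decomposition is easy to reproduce as plain arithmetic: a masked load of VF lanes that must be scalarized pays VF memory ops, VF packing ops, and, when the mask is variable, a branch plus a phi per lane. The unit costs below are made-up placeholders, not real target numbers.

#include <cstdio>

int main() {
  const unsigned VF = 4;          // vector factor
  const unsigned MemOp = 1;       // cost of one scalar load
  const unsigned Pack = 1;        // cost of one insertelement
  const unsigned Br = 1, Phi = 1; // control-flow costs
  unsigned MemoryOpCost = VF * MemOp;
  unsigned PackingCost = VF * Pack;
  unsigned ConditionalCost = VF * (Br + Phi); // only when the mask varies
  std::printf("total = %u\n", MemoryOpCost + PackingCost + ConditionalCost);
}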
  /// From isSplatMask: a single pass over the mask, remembering the first
  /// defined source lane (SplatIdx) and comparing every later defined lane
  /// against it. P comes from enumerate(Mask): P.index() is the mask
  /// position, P.value() the selected source lane.
  bool IsCompared = false;
  auto IsSplatElt = [&](const auto &P) {
    // A trailing undef element cannot establish the splat on its own.
    if (P.value() == PoisonMaskElem)
      return P.index() != Mask.size() - 1 || IsCompared;
    if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
      return false;
    if (SplatIdx == -1) {
      SplatIdx = P.value();
      return P.index() != Mask.size() - 1;
    }
    IsCompared = true;
    return SplatIdx == P.value();
  };
  // ...
  /// Cost a multiple-result intrinsic (e.g. sincos) as a call to a vector
  /// math library function, if the target library info has a mapping for it.
  std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
      const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
      RTLIB::Libcall LC,
      std::optional<unsigned> CallRetElementIndex = {}) const {
    Type *RetTy = ICA.getReturnType();
    const TargetLibraryInfo *LibInfo = ICA.getLibInfo();
    if (!LibInfo || !isa<StructType>(RetTy))
      return std::nullopt;
    // ...
    // Find a vector variant of the libcall, trying an unmasked mapping first
    // and a masked one second.
    const VecDesc *VD = nullptr;
    for (bool Masked : {false, true}) {
      if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
        break;
    }
    if (!VD)
      return std::nullopt;
    // ...
    // Shuffle/extract cost for the element returned directly by the call:
    //   thisT()->getShuffleCost(..., VecTy, {}, CostKind, 0, nullptr, {});
    // Every struct element that is not returned directly by the call is
    // written through an out-pointer and must be loaded back.
    for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
      if (Idx == CallRetElementIndex)
        continue;
      Cost += thisT()->getMemoryOpCost(
          Instruction::Load, VectorTy, thisT()->DL.getABITypeAlign(VectorTy),
          0, CostKind);
    }
    return Cost;
  }

  // From filterConstantAndDuplicatedOperands: constants and repeated operands
  // add no scalarization overhead, so they are dropped.
  for (const Value *Op : Args) {
    if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
      continue;
    // ...
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) const override {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(
        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    const TargetMachine &TM = getTLI()->getTargetMachine();

    const FeatureBitset &CallerBits =
        TM.getSubtargetImpl(*Caller)->getFeatureBits();
    const FeatureBitset &CalleeBits =
        TM.getSubtargetImpl(*Callee)->getFeatureBits();

    // Inline a callee if its target-features are a subset of the caller's
    // target-features.
    return (CallerBits & CalleeBits) == CalleeBits;
  }
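The compatibility test is a plain subset check on feature bit vectors; std::bitset reproduces it outside LLVM:

#include <bitset>
#include <iostream>

int main() {
  // Callee is inline-compatible when every feature it requires is also
  // enabled in the caller: (Caller & Callee) == Callee.
  std::bitset<8> CallerBits("01101110"), CalleeBits("00101010");
  std::cout << ((CallerBits & CalleeBits) == CalleeBits) << "\n"; // 1
}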
  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const override {
    // ...
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const override {
    return nullptr;
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) const override {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    AM.ScalableOffset = ScalableOffset;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;
      // Otherwise check whether the corresponding truncating store is legal.
      // ...
    };
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
  }

  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override {
    // ...
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const override {
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
    // ...

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test.
    if (N <= DL.getIndexSizeInBits(0u)) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());
      // ...
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) +
          1;
      // Check whether the range of clusters is dense enough for a jump table.
      if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }
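A worked example of the jump-table sizing: Range spans from the smallest to the largest case value, so a sparse switch inflates the table even when the case count N is small.

#include <cstdint>
#include <iostream>

int main() {
  int64_t MinCaseVal = 10, MaxCaseVal = 1000;
  uint64_t Range = uint64_t(MaxCaseVal - MinCaseVal) + 1; // 991 table entries
  unsigned N = 4;                                         // only 4 cases
  std::cout << "table entries: " << Range
            << ", density: " << double(N) / double(Range) << "\n";
}

This is why isSuitableForJumpTable also looks at the density of the clusters rather than just the number of cases.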
  bool shouldBuildRelLookupTables() const override {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    // If non-PIC mode, do not generate a relative lookup table.
    if (!TM.isPositionIndependent())
      return false;

    const Triple &TargetTriple = TM.getTargetTriple();
    // ...
  }
  bool preferToKeepConstantsAttached(const Instruction &Inst,
                                     const Function &Fn) const override {
    switch (Inst.getOpcode()) {
    default:
      break;
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem: {
      // Hoisting the divisor constant can pessimize these ops if the target
      // would otherwise lower the division to a cheaper sequence.
      // ...
      break;
    }
    }
    return false;
  }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override {
    // Derive the partial-unrolling budget from the scheduling model when it
    // reports a loop micro-op buffer size.
    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: do not unroll loops that contain calls.
    for (BasicBlock *BB : L->blocks()) {
      for (Instruction &I : *BB) {
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
          if (const Function *F = cast<CallBase>(I).getCalledFunction())
            if (!thisT()->isLoweredToCall(F))
              continue;
          if (ORE)
            ORE->emit([&]() {
              return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
                                        L->getHeader())
                     << "advising against unrolling the loop because it "
                        "contains a call";
            });
          return;
        }
      }
    }
    // ...
  }
  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
    return BaseT::instCombineIntrinsic(IC, II);
  }

  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const override {
    return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
  }

  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override {
    return BaseT::simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }
  virtual std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
    return std::optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));
    if (TargetResult)
      return TargetResult;
    return BaseT::getCacheAssociativity(Level);
  }

  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const override {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }
  std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(
      VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
      TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
      ArrayRef<Value *> VL = {}) const override {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);
    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           (VL.empty() || VL.size() == Ty->getNumElements()) &&
           "Vector size mismatch");

    InstructionCost Cost = 0;
    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert) {
        Value *InsertedVal = VL.empty() ? nullptr : VL[i];
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                            CostKind, i, nullptr, InsertedVal);
      }
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                            CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }
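Only demanded lanes contribute cost. The same counting in standalone C++, with a bitmask standing in for DemandedElts and unit insert/extract costs as placeholders:

#include <bitset>
#include <iostream>

int main() {
  std::bitset<8> DemandedElts("10110100"); // four lanes demanded
  const unsigned InsertCost = 1, ExtractCost = 1;
  unsigned Cost = 0;
  for (unsigned i = 0; i < DemandedElts.size(); ++i)
    if (DemandedElts[i])
      Cost += InsertCost + ExtractCost; // Insert and Extract both requested
  std::cout << Cost << "\n"; // 8: four demanded lanes, two ops each
}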
  bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                          unsigned ScalarOpdIdx) const override {
    return false;
  }

  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                              int OpdIdx) const override {
    return OpdIdx == -1;
  }

  bool isTargetIntrinsicWithStructReturnOverloadAtField(
      Intrinsic::ID ID, int RetIdx) const override {
    return RetIdx == 0;
  }
  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                           bool Extract,
                                           TTI::TargetCostKind CostKind) const {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);
    APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind);
  }
  /// Estimate the overhead of scalarizing an instruction's operands.
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const override {
    InstructionCost Cost = 0;
    for (Type *Ty : Tys) {
      // Skip types that carry no scalarization work.
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;
      if (auto *VecTy = dyn_cast<VectorType>(Ty))
        Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
                                         /*Extract*/ true, CostKind);
    }
    return Cost;
  }

  /// Estimate the overhead of scalarizing the inputs and outputs of an
  /// instruction, with return type RetTy and arguments Args of type Tys.
  InstructionCost
  getScalarizationOverhead(VectorType *RetTy, ArrayRef<const Value *> Args,
                           ArrayRef<Type *> Tys,
                           TTI::TargetCostKind CostKind) const {
    InstructionCost Cost = getScalarizationOverhead(
        RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
    Cost += getOperandsScalarizationOverhead(
        filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
    return Cost;
  }
      // From getTypeLegalizationCost's conversion loop: stop once a
      // legalization step makes no progress, so non-converging types cannot
      // loop forever.
      if (MTy == LK.second)
        break;
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    // ...
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply by the
      // type-legalization overhead.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, assume the code is twice as
      // expensive.
      return LT.first * 2 * OpCost;
    }

    // An 'Expand' of URem and SRem is special because it may default to
    // expanding the operation into a sequence of sub-operations,
    // i.e. X % Y -> X - (X / Y) * Y.
    if (ISD == ISD::UREM || ISD == ISD::SREM) {
      bool IsSigned = ISD == ISD::SREM;
      // ...
      unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
      InstructionCost DivCost = thisT()->getArithmeticInstrCost(
          DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
      InstructionCost MulCost =
          thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
      InstructionCost SubCost =
          thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
      return DivCost + MulCost + SubCost;
    }

    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    // Else, assume that we need to scalarize this op.
    if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
      InstructionCost Cost = thisT()->getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info);
      SmallVector<Type *> Tys(Args.size(), Ty);
      Cost += getScalarizationOverhead(VTy, Args, Tys, CostKind);
      return Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
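The URem/SRem expansion relies on the identity X % Y == X - (X / Y) * Y, which holds by definition for C-style truncated division; that is why its cost is one divide plus one multiply plus one subtract. A runnable check:

#include <cassert>

int main() {
  for (int X = -20; X <= 20; ++X)
    for (int Y : {1, 2, 3, 7})
      assert(X % Y == X - (X / Y) * Y);
}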
  TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
                                              ArrayRef<int> Mask,
                                              VectorType *SrcTy, int &Index,
                                              VectorType *&SubTy) const {
    int NumDstElts = Mask.size();
    // ...
    switch (Kind) {
    case TTI::SK_PermuteSingleSrc: {
      if (isSplatMask(Mask, NumSrcElts, Index))
        return TTI::SK_Broadcast;
      if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
          (Index + NumDstElts) <= NumSrcElts) {
        SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
        return TTI::SK_ExtractSubvector;
      }
      break;
    }
    case TTI::SK_PermuteTwoSrc: {
      // If the mask only touches the first source, degrade to a
      // single-source kind.
      if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
        return improveShuffleKindFromMask(TTI::SK_PermuteSingleSrc, Mask,
                                          SrcTy, Index, SubTy);
      int NumSubElts;
      if (ShuffleVectorInst::isInsertSubvectorMask(Mask, NumSrcElts,
                                                   NumSubElts, Index)) {
        if (Index + NumSubElts > NumSrcElts)
          return Kind;
        SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
        return TTI::SK_InsertSubvector;
      }
      break;
    }
    default:
      break;
    }
    return Kind;
  }
  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override {
    switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
    case TTI::SK_Broadcast:
      if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
        return getBroadcastShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_Select:
    case TTI::SK_Splice:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
        return getPermuteShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(SrcTy, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  }
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<InstructionCost, MVT> SrcLT =
        thisT()->getTypeLegalizationCost(Src);
    std::pair<InstructionCost, MVT> DstLT =
        thisT()->getTypeLegalizationCost(Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::BitCast:
      // Bitcast between types that are legalized to the same type is free,
      // and assume int-to/from-ptr of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      // ...
      if (DstLT.first == SrcLT.first &&
          TLI->isOperationLegalOrPromote(ISD, DstLT.second))
        return 0;
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // If the operation is legal then assume it costs 1; otherwise assume
      // an illegal scalar conversion is expensive.
      // ...
    }

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, then the check is
      // simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;
        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;
        // Just check the op cost: if the operation is legal then assume it
        // costs 1 times the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the
      // cost of casting the original vector twice, plus the split overhead.
      // (SplitSrc/SplitDst: whether legalization splits the source or
      // destination vector in half; their computation is elided here.)
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
          DstVTy->getElementCount().isVector()) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        const T *TTI = thisT();
        // If both types need to be split then the split is free.
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }

      // Scalarization cost is Invalid, can't assume any num elements.
      if (isa<ScalableVectorType>(DstVTy))
        return InstructionCost::getInvalid();

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(DstVTy, /*Insert*/ true,
                                      /*Extract*/ true, CostKind) +
             Num * Cost;
    }
    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars; we need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast) {
      // Illegal bitcasts are done by storing and loading from a stack slot.
      // ...
    }
    // ...
  }

  InstructionCost
  getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                           unsigned Index,
                           TTI::TargetCostKind CostKind) const override {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::CastContextHint::None, CostKind);
  }
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle other cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred,
                                       CostKind, Op1Info, Op2Info, I);

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<InstructionCost, MVT> LT =
        thisT()->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1, times the legalization
      // overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the operation is scalarized.
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      if (isa<ScalableVectorType>(ValTy))
        return InstructionCost::getInvalid();

      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
          CostKind, Op1Info, Op2Info, I);
      // Multiple scalar invocations plus the insert/extract overhead.
      return getScalarizationOverhead(ValVTy, /*Insert*/ true,
                                      /*Extract*/ false, CostKind) +
             Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override {
    return 1;
  }

  /// \param ScalarUserAndIdx encodes the information about extracts from a
  /// vector with 'Scalar' being the value being extracted, 'User' being the
  /// user of the extract (nullptr if not known before vectorization) and
  /// 'Idx' being the extract lane.
  InstructionCost getVectorInstrCost(
      unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
      Value *Scalar,
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx)
      const override {
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
                                       nullptr);
  }

  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) const override {
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
    if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    }
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index,
                                       Op0, Op1);
  }

  InstructionCost
  getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val,
                                   TTI::TargetCostKind CostKind,
                                   unsigned Index) const override {
    unsigned NewIndex = -1;
    if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
      assert(Index < FVTy->getNumElements() &&
             "Unexpected index from end of vector");
      NewIndex = FVTy->getNumElements() - 1 - Index;
    }
    return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex,
                                       nullptr, nullptr);
  }
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) const override {
    assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
           "Unexpected size of DemandedDstElts.");
    // ...
    // The replication shuffle is modelled as extracting every demanded
    // source lane and inserting every demanded destination lane.
    InstructionCost Cost = 0;
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
                                              /*Insert*/ false,
                                              /*Extract*/ true, CostKind);
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
                                              /*Insert*/ true,
                                              /*Extract*/ false, CostKind);
    return Cost;
  }
  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override {
    assert(!Src->isVoidTy() && "Invalid type");
    // ...
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
    InstructionCost Cost = LT.first;

    if (Src->isVectorTy() &&
        TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
                            LT.second.getSizeInBits())) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store
      // is legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // Account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(
            cast<VectorType>(Src), Opcode != Instruction::Store,
            Opcode == Instruction::Store, CostKind);
      }
    }
    return Cost;
  }
  InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind) const override {
    // TODO: Pass on AddressSpace when we have test coverage.
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                       CostKind);
  }

  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) const override {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }

  InstructionCost
  getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy,
                                bool VariableMask, Align Alignment,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I = nullptr) const override {
    // Treat expand load/compress store as gather/scatter operations.
    // TODO: implement more precise cost estimation for these intrinsics.
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }

  InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) const override {
    // For a target without strided memory operations (or an illegal operation
    // type on one that has them), assume we lower to a gather/scatter
    // operation, which may in turn be scalarized.
    return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                           Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(VecTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(VecTy);

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);

    // Firstly, the cost of the load/store operation.
    InstructionCost Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
                                            AddressSpace, CostKind);
    else
      Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
                                      CostKind);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
      // The number of loads of a legal type needed to represent a load of
      // the unlegalized vector type.
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
    }

    // Then add the cost of the interleave operation itself.
    assert(Indices.size() <= Factor &&
           "Interleaved memory op has too many members");

    const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
    const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);

    APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
      // The interleave cost is extracting the demanded elements from the
      // wide vector and inserting them into the sub-vectors.
      InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert*/ true, /*Extract*/ false,
          CostKind);
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ false,
                                                /*Extract*/ true, CostKind);
    } else {
      // The interleave cost is extracting elements from the sub-vectors and
      // inserting them into the wide vector.
      InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts, /*Insert*/ false, /*Extract*/ true,
          CostKind);
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ true,
                                                /*Extract*/ false, CostKind);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());

    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
        CostKind);

    // The Gaps mask is invariant and created outside the loop, so its
    // creation cost is not accounted for here. But if we have both a gaps
    // mask and a variable mask, we need one extra AND to merge them.
    if (UseMaskForGaps) {
      auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);
    }

    return Cost;
  }
  /// Get intrinsic cost based on arguments.
  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override {
    // VP intrinsics should have the same cost as their non-vp counterparts.
    std::optional<unsigned> FOp =
        VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
    if (FOp) {
      if (ICA.getID() == Intrinsic::vp_load) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        unsigned AS = 0;
        if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
                                        AS, CostKind);
      }
      if (ICA.getID() == Intrinsic::vp_store) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        unsigned AS = 0;
        if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
          AS = PtrTy->getAddressSpace();
        return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
                                        AS, CostKind);
      }
      if (VPBinOpIntrinsic::isVPBinOp(ICA.getID()) ||
          ICA.getID() == Intrinsic::vp_fneg) {
        return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
                                               CostKind);
      }
      if (VPCastIntrinsic::isVPCast(ICA.getID())) {
        return thisT()->getCastInstrCost(
            *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
            TTI::CastContextHint::None, CostKind);
      }
      if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
        // We can only handle vp_cmp intrinsics with underlying instructions.
        auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
        return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
                                           ICA.getReturnType(),
                                           UI->getPredicate(), CostKind);
      }
      if (ICA.getID() == Intrinsic::vp_scatter) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        bool VarMask = !isa<Constant>(ICA.getArgs()[2]);
        return thisT()->getGatherScatterOpCost(
            Instruction::Store, ICA.getArgTypes()[0], ICA.getArgs()[1],
            VarMask, Alignment, CostKind, nullptr);
      }
      if (ICA.getID() == Intrinsic::vp_gather) {
        Align Alignment;
        if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
          Alignment = VPI->getPointerAlignment().valueOrOne();
        bool VarMask = !isa<Constant>(ICA.getArgs()[1]);
        return thisT()->getGatherScatterOpCost(
            Instruction::Load, ICA.getReturnType(), ICA.getArgs()[0], VarMask,
            Alignment, CostKind, nullptr);
      }
      if (ICA.getID() == Intrinsic::vp_select ||
          ICA.getID() == Intrinsic::vp_merge) {
        // ...
      }
    }

    std::optional<Intrinsic::ID> FID =
        VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
    // ...
    if (ICA.getID() == Intrinsic::experimental_vp_reverse)
      FID = Intrinsic::vector_reverse;
    if (FID) {
      // The non-vp version has the same argument types except for the mask
      // and vector length.
      assert(ICA.getArgTypes().size() >= 2 &&
             "Expected VPIntrinsic to have Mask and Vector Length args and "
             "types");
      ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);

      // VPReduction intrinsics have a start-value argument that their non-vp
      // counterparts do not, except for the fadd and fmul counterparts.
      if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
          *FID != Intrinsic::vector_reduce_fadd &&
          *FID != Intrinsic::vector_reduce_fmul)
        NewTys = NewTys.drop_front();

      IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
                                     ICA.getFlags());
      return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
    }
    // ...
    switch (IID) {
    default:
      break;

    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          // Square-and-multiply: (ActiveBits - 1) squarings plus
          // (PopCount - 1) multiplies, i.e. ActiveBits + PopCount - 2 fmuls.
          APInt Exponent = RHSC->getValue().abs();
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost = (ActiveBits + PopCount - 2) *
                                 thisT()->getArithmeticInstrCost(
                                     Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
                                                    CostKind);
          return Cost;
        }
      }
      break;
    case Intrinsic::cttz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::ctlz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());
    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             ICA.getArgTypes()[0], Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_compressstore: {
      const Value *Data = Args[0];
      const Value *Mask = Args[2];
      Align Alignment = I->getParamAlign(1).valueOrOne();
      return thisT()->getExpandCompressMemoryOpCost(
          Instruction::Store, Data->getType(), !isa<Constant>(Mask), Alignment,
          CostKind, I);
    }
    case Intrinsic::masked_expandload: {
      const Value *Mask = Args[1];
      Align Alignment = I->getParamAlign(0).valueOrOne();
      return thisT()->getExpandCompressMemoryOpCost(Instruction::Load, RetTy,
                                                    !isa<Constant>(Mask),
                                                    Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_store: {
      const Value *Data = Args[0];
      const Value *Ptr = Args[1];
      const Value *Mask = Args[3];
      const Value *EVL = Args[4];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
      Align Alignment =
          I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getStridedMemoryOpCost(Instruction::Store,
                                             Data->getType(), Ptr, VarMask,
                                             Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vp_strided_load: {
      const Value *Ptr = Args[0];
      const Value *Mask = Args[2];
      const Value *EVL = Args[3];
      bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
      Type *EltTy = cast<VectorType>(RetTy)->getElementType();
      Align Alignment =
          I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
      return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::stepvector: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      // The cost of materialising a constant integer vector.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::vector_extract: {
      // FIXME: Handle the case where a scalable vector is extracted from a
      // scalable vector.
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
      return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
                                     cast<VectorType>(RetTy),
                                     cast<VectorType>(Args[0]->getType()), {},
                                     CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_insert: {
      // FIXME: Handle the case where a scalable vector is inserted into a
      // scalable vector.
      if (isa<ScalableVectorType>(Args[1]->getType()))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
          cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
          cast<VectorType>(Args[1]->getType()));
    }
    case Intrinsic::vector_splice: {
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
                                     cast<VectorType>(Args[0]->getType()), {},
                                     CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      // ...
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
                                              CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                              CostKind);
      // A non-constant shift amount requires a modulo by the bit width
      // (URem here; the signed variant of the expansion uses SRem).
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::URem, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      // For non-rotates (X != Y) the shift-by-zero case must be guarded.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy,
                                            CondTy, CmpInst::ICMP_EQ, CostKind);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                            CondTy, CmpInst::ICMP_EQ, CostKind);
      }
      return Cost;
    }
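The sequence being costed is the standard funnel-shift lowering. A self-contained fshl for 32-bit values, including the select that guards the shift-by-zero case (where BW - Amt would be an out-of-range shift):

#include <cassert>
#include <cstdint>

uint32_t fshl(uint32_t X, uint32_t Y, uint32_t Z) {
  const uint32_t BW = 32;
  uint32_t Amt = Z % BW;                             // urem
  return Amt == 0 ? X                                // icmp + select
                  : (X << Amt) | (Y >> (BW - Amt));  // shl, lshr, sub, or
}

int main() {
  assert(fshl(0x80000001u, 0x80000000u, 1) == 0x3u);
  assert(fshl(0x12345678u, 0x9ABCDEF0u, 0) == 0x12345678u);
}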
    case Intrinsic::experimental_cttz_elts: {
      EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
      // If we're not expanding the intrinsic then we assume this is cheap to
      // implement.
      if (!getTLI()->shouldExpandCttzElements(ArgType))
        return getTypeLegalizationCost(RetTy).first;

      // TODO: The costs below reflect the expansion code in
      // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
      // favour of compile time.
      bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
      // ...
      if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
        // ...

      // NewEltTy: the smallest sensible integer element type for the
      // expansion (its selection is elided here).
      auto *NewVecTy = VectorType::get(
          NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());

      // Expansion: stepvector, sub, sext, and, umax-reduce, sub.
      IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
                                           FMF);
      InstructionCost Cost =
          thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);

      Cost += thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy,
                                              CostKind);
      Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
                                        cast<VectorType>(Args[0]->getType()),
                                        TTI::CastContextHint::None, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy,
                                              CostKind);

      IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
                                         NewEltTy, NewVecTy, FMF, I, 1);
      Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy,
                                              CostKind);
      return Cost;
    }
    case Intrinsic::get_active_lane_mask:
    case Intrinsic::experimental_vector_match:
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin:
      return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
    case Intrinsic::modf:
    case Intrinsic::sincos:
    case Intrinsic::sincospi: {
      Type *Ty = getContainedTypes(RetTy).front();
      EVT VT = getTLI()->getValueType(DL, Ty);
      // Map the intrinsic to the corresponding RTLIB libcall for VT
      // (the per-intrinsic mapping lines are elided here).
      RTLIB::Libcall LC = /* ... */;

      std::optional<unsigned> CallRetElementIndex;
      // The first element of the modf result is returned directly by the
      // libcall, so it does not need an extra load.
      if (ICA.getID() == Intrinsic::modf)
        CallRetElementIndex = 0;

      if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
              ICA, CostKind, LC, CallRetElementIndex))
        return *Cost;
      break;
    }
    }

    // Assume we need to scalarize this intrinsic.
    InstructionCost ScalarizationCost = InstructionCost::getInvalid();
    if (RetVF.isVector() && !RetVF.isScalable()) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy()) {
        ScalarizationCost += getScalarizationOverhead(
            cast<VectorType>(VectorTy), /*Insert*/ true, /*Extract*/ false,
            CostKind);
      }
      ScalarizationCost += getOperandsScalarizationOverhead(
          filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
          CostKind);
    }

    IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
                                  ScalarizationCost);
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  }
  /// Get intrinsic cost based on argument types.
  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind) const {
    // ...
    VectorType *VecOpTy = nullptr;
    if (!Tys.empty()) {
      // The vector reduction operand is operand 0 except for fadd/fmul,
      // whose operand 0 is the accumulator.
      unsigned VecTyIndex = 0;
      if (IID == Intrinsic::vector_reduce_fadd ||
          IID == Intrinsic::vector_reduce_fmul)
        VecTyIndex = 1;
      assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
      VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
    }

    // Library call cost - other than size, make it expensive.
    unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      // Assume that we need to scalarize this intrinsic.
      InstructionCost ScalarizationCost =
          SkipScalarizationCost ? ScalarizationCostPassed : 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(
              RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
        ScalarCalls = std::max(
            ScalarCalls, cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (Type *Ty : Tys) {
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(
              ScalarCalls, cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Map each intrinsic to its ISD node so legality can be queried below.
    case Intrinsic::sqrt:         ISD = ISD::FSQRT;        break;
    case Intrinsic::sin:          ISD = ISD::FSIN;         break;
    case Intrinsic::cos:          ISD = ISD::FCOS;         break;
    case Intrinsic::sincos:       ISD = ISD::FSINCOS;      break;
    case Intrinsic::sincospi:     ISD = ISD::FSINCOSPI;    break;
    case Intrinsic::modf:         ISD = ISD::FMODF;        break;
    case Intrinsic::tan:          ISD = ISD::FTAN;         break;
    case Intrinsic::asin:         ISD = ISD::FASIN;        break;
    case Intrinsic::acos:         ISD = ISD::FACOS;        break;
    case Intrinsic::atan:         ISD = ISD::FATAN;        break;
    case Intrinsic::atan2:        ISD = ISD::FATAN2;       break;
    case Intrinsic::sinh:         ISD = ISD::FSINH;        break;
    case Intrinsic::cosh:         ISD = ISD::FCOSH;        break;
    case Intrinsic::tanh:         ISD = ISD::FTANH;        break;
    case Intrinsic::exp:          ISD = ISD::FEXP;         break;
    case Intrinsic::exp2:         ISD = ISD::FEXP2;        break;
    case Intrinsic::exp10:        ISD = ISD::FEXP10;       break;
    case Intrinsic::log:          ISD = ISD::FLOG;         break;
    case Intrinsic::log10:        ISD = ISD::FLOG10;       break;
    case Intrinsic::log2:         ISD = ISD::FLOG2;        break;
    case Intrinsic::ldexp:        ISD = ISD::FLDEXP;       break;
    case Intrinsic::fabs:         ISD = ISD::FABS;         break;
    case Intrinsic::canonicalize: ISD = ISD::FCANONICALIZE; break;
    case Intrinsic::minnum:       ISD = ISD::FMINNUM;      break;
    case Intrinsic::maxnum:       ISD = ISD::FMAXNUM;      break;
    case Intrinsic::minimum:      ISD = ISD::FMINIMUM;     break;
    case Intrinsic::maximum:      ISD = ISD::FMAXIMUM;     break;
    case Intrinsic::minimumnum:   ISD = ISD::FMINIMUMNUM;  break;
    case Intrinsic::maximumnum:   ISD = ISD::FMAXIMUMNUM;  break;
    case Intrinsic::copysign:     ISD = ISD::FCOPYSIGN;    break;
    case Intrinsic::floor:        ISD = ISD::FFLOOR;       break;
    case Intrinsic::ceil:         ISD = ISD::FCEIL;        break;
    case Intrinsic::trunc:        ISD = ISD::FTRUNC;       break;
    case Intrinsic::nearbyint:    ISD = ISD::FNEARBYINT;   break;
    case Intrinsic::rint:         ISD = ISD::FRINT;        break;
    case Intrinsic::lrint:        ISD = ISD::LRINT;        break;
    case Intrinsic::llrint:       ISD = ISD::LLRINT;       break;
    case Intrinsic::round:        ISD = ISD::FROUND;       break;
    case Intrinsic::roundeven:    ISD = ISD::FROUNDEVEN;   break;
    case Intrinsic::lround:       ISD = ISD::LROUND;       break;
    case Intrinsic::llround:      ISD = ISD::LLROUND;      break;
    case Intrinsic::pow:          ISD = ISD::FPOW;         break;
    case Intrinsic::fma:          ISD = ISD::FMA;          break;
    case Intrinsic::fmuladd:      ISD = ISD::FMA;          break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISD = ISD::STRICT_FMA;
      break;
    // These intrinsics compile to nothing.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::experimental_vp_strided_store: {
      auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getStridedMemoryOpCost(
          Instruction::Store, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
          Alignment, CostKind, ICA.getInst());
    }
    case Intrinsic::experimental_vp_strided_load: {
      auto *Ty = cast<VectorType>(ICA.getReturnType());
      Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
      return thisT()->getStridedMemoryOpCost(
          Instruction::Load, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
          Alignment, CostKind, ICA.getInst());
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
          CostKind);
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(
          getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
      return thisT()->getMinMaxReductionCost(
          getMinMaxReductionIntrinsicOp(IID), VecOpTy, ICA.getFlags(),
          CostKind);
    case Intrinsic::experimental_vector_match: {
      auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
      auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
      unsigned SearchSize = NeedleTy->getNumElements();

      // If we're not expanding the intrinsic then we assume this is cheap to
      // implement.
      EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
      if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
        return getTypeLegalizationCost(SearchTy).first;

      // Approximate the cost of the expansion: extract each needle element,
      // splat it across the search vector, compare, and OR the partial
      // results, finally ANDing with the input mask.
      InstructionCost Cost = 0;
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement,
                                          NeedleTy, CostKind, 1, nullptr,
                                          nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
                                          CostKind, 0, nullptr, nullptr);
      // ...
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy,
                                          RetTy, CmpInst::ICMP_EQ, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
      return Cost;
    }
    case Intrinsic::vector_reverse:
      return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
                                     cast<VectorType>(ICA.getArgTypes()[0]),
                                     {}, CostKind, 0, nullptr);
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umax:
    case Intrinsic::experimental_vector_histogram_umin: {
      // A histogram is modelled per lane: extract the bucket pointer, load
      // the bucket, update it, and store it back.
      auto *PtrsTy = cast<VectorType>(ICA.getArgTypes()[0]);
      Type *EltTy = ICA.getArgTypes()[1];
      Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
      InstructionCost Cost = 0;
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
                                          CostKind, 1, nullptr, nullptr);
      Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
                                       CostKind);
      switch (IID) {
      default:
        llvm_unreachable("Unhandled histogram update operation.");
      case Intrinsic::experimental_vector_histogram_add:
        Cost +=
            thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
        break;
      case Intrinsic::experimental_vector_histogram_uadd_sat: {
        IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
        Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
        break;
      }
      case Intrinsic::experimental_vector_histogram_umax: {
        IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
        Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
        break;
      }
      case Intrinsic::experimental_vector_histogram_umin: {
        IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
        Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
        break;
      }
      }
      Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
                                       CostKind);
      // ...
      return Cost;
    }
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

      // If we're not expanding the intrinsic then we assume this is cheap to
      // implement.
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
        return getTypeLegalizationCost(RetTy).first;

      // Otherwise the expansion is a saturating add followed by a compare.
      Type *ExpRetTy = VectorType::get(
          ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
      IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {ExpRetTy});
      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy,
                                          RetTy, CmpInst::ICMP_ULT, CostKind);
      return Cost;
    }
    case Intrinsic::experimental_memset_pattern:
      // ...
      break;
    // Map the remaining intrinsics to their ISD nodes.
    case Intrinsic::abs:        ISD = ISD::ABS;     break;
    case Intrinsic::fshl:       ISD = ISD::FSHL;    break;
    case Intrinsic::fshr:       ISD = ISD::FSHR;    break;
    case Intrinsic::smax:       ISD = ISD::SMAX;    break;
    case Intrinsic::smin:       ISD = ISD::SMIN;    break;
    case Intrinsic::umax:       ISD = ISD::UMAX;    break;
    case Intrinsic::umin:       ISD = ISD::UMIN;    break;
    case Intrinsic::sadd_sat:   ISD = ISD::SADDSAT; break;
    case Intrinsic::ssub_sat:   ISD = ISD::SSUBSAT; break;
    case Intrinsic::uadd_sat:   ISD = ISD::UADDSAT; break;
    case Intrinsic::usub_sat:   ISD = ISD::USUBSAT; break;
    case Intrinsic::smul_fix:   ISD = ISD::SMULFIX; break;
    case Intrinsic::umul_fix:   ISD = ISD::UMULFIX; break;
    case Intrinsic::sadd_with_overflow: ISD = ISD::SADDO; break;
    case Intrinsic::ssub_with_overflow: ISD = ISD::SSUBO; break;
    case Intrinsic::uadd_with_overflow: ISD = ISD::UADDO; break;
    case Intrinsic::usub_with_overflow: ISD = ISD::USUBO; break;
    case Intrinsic::smul_with_overflow: ISD = ISD::SMULO; break;
    case Intrinsic::umul_with_overflow: ISD = ISD::UMULO; break;
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
      std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
      if (!SrcLT.first.isValid() || !RetLT.first.isValid())
        return InstructionCost::getInvalid();
      ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
                                         : ISD::FP_TO_UINT_SAT;
      break;
    }
    case Intrinsic::ctpop:
      ISD = ISD::CTPOP;
      // In case of legalization use TCC_Expensive: cheaper than a library
      // call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    case Intrinsic::ctlz:       ISD = ISD::CTLZ;       break;
    case Intrinsic::cttz:       ISD = ISD::CTTZ;       break;
    case Intrinsic::bswap:      ISD = ISD::BSWAP;      break;
    case Intrinsic::bitreverse: ISD = ISD::BITREVERSE; break;
    case Intrinsic::ucmp:       ISD = ISD::UCMP;       break;
    case Intrinsic::scmp:       ISD = ISD::SCMP;       break;
    }
    auto *ST = dyn_cast<StructType>(RetTy);
    Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);

    if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
        TLI->isFAbsFree(LT.second))
      return 0;

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. If the type is split to
      // multiple registers, assume that there is some overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return (LT.first * 2);
      else
        return (LT.first * 1);
    } else if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, assume the code is twice as
      // expensive.
      return (LT.first * 2);
    }

    switch (IID) {
    case Intrinsic::fmuladd: {
      // If there is no FMA, an fmuladd decomposes into an fmul and an fadd.
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    }
    case Intrinsic::experimental_constrained_fmuladd: {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }
    case Intrinsic::smin:
    case Intrinsic::smax:
    case Intrinsic::umin:
    case Intrinsic::umax: {
      // minmax(X,Y) = select(icmp(X,Y),X,Y)
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      CmpInst::Predicate Pred =
          IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                          CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      //   Add:
      //   Overflow -> (Result < LHS) ^ (RHS < 0)
      //   Sub:
      //   Overflow -> (Result < LHS) ^ (RHS > 0)
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
                                          CmpInst::ICMP_SGT, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
                                              CostKind);
      return Cost;
    }
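The signed case costs one add, two compares and one xor because overflow for sadd is (Result < LHS) ^ (RHS < 0). A runnable check (the cast through uint32_t makes the wrapping add well defined):

#include <cassert>
#include <cstdint>

int main() {
  int32_t LHS = 0x7FFFFFF0, RHS = 0x20;
  int32_t Result = int32_t(uint32_t(LHS) + uint32_t(RHS)); // wrapping add
  assert((Result < LHS) ^ (RHS < 0)); // overflowed

  LHS = 5; RHS = -7;
  Result = int32_t(uint32_t(LHS) + uint32_t(RHS));
  assert(!((Result < LHS) ^ (RHS < 0))); // no overflow
}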
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
                                    ? CmpInst::ICMP_ULT
                                    : CmpInst::ICMP_UGT;

      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                          OverflowTy, Pred, CostKind);
      return Cost;
    }
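Unsigned overflow is cheaper, one add and a single compare, since uadd overflows exactly when the result wraps below an operand:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xFFFFFFF0u, Y = 0x20u;
  uint32_t Sum = X + Y;
  bool Overflow = Sum < X; // icmp ult Sum, X
  assert(Overflow);
  bool Underflow = X < Y;  // the corresponding usub.with.overflow test
  assert(!Underflow);
}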
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      Type *MulTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
      Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
      bool IsSigned = IID == Intrinsic::smul_with_overflow;

      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost +=
          2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, ExtTy, CostKind,
          {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});

      if (IsSigned)
        Cost += thisT()->getArithmeticInstrCost(
            Instruction::AShr, MulTy, CostKind,
            {TTI::OK_AnyValue, TTI::OP_None},
            {TTI::OK_UniformConstantValue, TTI::OP_None});

      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
      return Cost;
    }
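The widening expansion can be reproduced directly: multiply in twice the width, truncate, and compare the high half against the sign bits of the low half (the AShr by BW-1 in the signed case):

#include <cassert>
#include <cstdint>

int main() {
  int32_t X = 0x40000000, Y = 3;
  int64_t Wide = int64_t(X) * int64_t(Y);               // mul in ExtTy
  int32_t Lo = int32_t(Wide);                           // trunc
  bool Overflow = (Wide >> 32) != (int64_t(Lo) >> 31);  // hi != sext(sign(lo))
  assert(Overflow); // 0x40000000 * 3 does not fit in int32
}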
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      // Assume a default expansion.
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                      CmpInst::BAD_ICMP_PREDICATE, CostKind);
      return Cost;
    }
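The resulting uadd.sat lowering is the familiar add-then-clamp sequence, an overflow-reporting add followed by a select that clamps to all-ones:

#include <cassert>
#include <cstdint>

uint32_t uadd_sat(uint32_t X, uint32_t Y) {
  uint32_t Sum = X + Y;
  return Sum < X ? UINT32_MAX : Sum; // select on the overflow bit
}

int main() {
  assert(uadd_sat(0xFFFFFFF0u, 0x100u) == UINT32_MAX);
  assert(uadd_sat(1u, 2u) == 3u);
}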
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
      Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost +=
          2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(
          Instruction::Shl, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      // ...
      return Cost;
    }
    case Intrinsic::abs: {
      // abs(X) = select(icmp(X,0),X,sub(0,X))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                          CondTy, Pred, CostKind);
      // TODO: Should we add an OperandValueProperties::OP_Zero property?
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Sub, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      return Cost;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      // Type-based variant of the funnel-shift expansion above:
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
                                          CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy,
                                          CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                              CostKind);
      // A non-constant shift amount requires a modulo by the bit width
      // (URem here; the signed variant of the expansion uses SRem).
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::URem, RetTy, CostKind,
          {TTI::OK_UniformConstantValue, TTI::OP_None});
      // Shift-by-zero must be guarded with a cmp+select.
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          CmpInst::ICMP_EQ, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                          CondTy, CmpInst::ICMP_EQ, CostKind);
      return Cost;
    }
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (Tys.empty())
        break;
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;

      // fptosi_sat is clamp(fptosi(x)), with extra NaN handling for the
      // signed form.
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
          TTI::CastContextHint::None, CostKind);
      if (IsSigned) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO,
            CostKind);
      }
      return Cost;
    }
    case Intrinsic::ucmp:
    case Intrinsic::scmp: {
      Type *CmpTy = Tys[0];
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      InstructionCost Cost =
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
                                      CmpIntrinsic::getGTPredicate(IID),
                                      CostKind) +
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
                                      CmpIntrinsic::getLTPredicate(IID),
                                      CostKind);

      EVT VT = TLI->getValueType(DL, CmpTy, true);
      if (TLI->shouldExpandCmpUsingSelects(VT)) {
        // x < y ? -1 : (x > y ? 1 : 0)
        Cost += 2 * thisT()->getCmpSelInstrCost(
                        BinaryOperator::Select, RetTy, CondTy,
                        CmpInst::BAD_ICMP_PREDICATE, CostKind);
      } else {
        // zext(x > y) - zext(x < y)
        Cost +=
            2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
                                          TTI::CastContextHint::None,
                                          CostKind);
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
                                                CostKind);
      }
      return Cost;
    }
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum: {
      // Without native support, expand to fcanonicalize on both operands
      // followed by fmaxnum/fminnum.
      // ...
      IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
                                                 RetTy, {RetTy});
      InstructionCost FCanonicalizeCost =
          thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
      return LT.first + FCanonicalizeCost * 2;
    }
    // Else, assume that we need to scalarize this intrinsic. For math
    // builtins this will emit a costly libcall, adding call overhead and
    // spills. Make it very expensive.
    // Scalable vectors cannot be scalarized, so return Invalid.
    if (any_of(concat<Type *const>(RetVTys, Tys),
               [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
      return InstructionCost::getInvalid();

    InstructionCost ScalarizationCost = ScalarizationCostPassed;
    if (!SkipScalarizationCost) {
      ScalarizationCost = 0;
      for (Type *RetVTy : RetVTys) {
        ScalarizationCost += getScalarizationOverhead(
            cast<VectorType>(RetVTy), /*Insert*/ true, /*Extract*/ false,
            CostKind);
      }
    }

    unsigned ScalarCalls = 1;
    SmallVector<Type *, 4> ScalarTys;
    for (Type *Ty : Tys) {
      if (Ty->isVectorTy())
        Ty = Ty->getScalarType();
      ScalarTys.push_back(Ty);
    }
    IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
    InstructionCost ScalarCost =
        thisT()->getIntrinsicInstrCost(Attrs, CostKind);
    for (Type *Ty : Tys) {
      if (auto *VTy = dyn_cast<VectorType>(Ty)) {
        if (!SkipScalarizationCost)
          ScalarizationCost += getScalarizationOverhead(
              VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(VTy)->getNumElements());
      }
    }
    return ScalarCalls * ScalarCost + ScalarizationCost;

    // This is going to be turned into a library call: make it expensive.
    return SingleCallCost;
  }
  unsigned getNumberOfParts(Type *Tp) const override {
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
    if (!LT.first.isValid())
      return 0;
    // Try to find the actual number of parts for non-power-of-2 element
    // counts as ceil(num-of-elements / num-of-subtype-elements).
    if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
        FTp && LT.second.isFixedLengthVector() &&
        !has_single_bit(FTp->getNumElements())) {
      if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
              EVT(LT.second).getTypeForEVT(Tp->getContext()));
          SubTp && SubTp->getElementType() == FTp->getElementType())
        return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
    }
    return LT.first.getValue();
  }
  /// Try to calculate arithmetic and shuffle op costs for reduction
  /// intrinsics.
  InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
                                       TTI::TargetCostKind CostKind) const {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
        ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
        NumVecElts >= 2) {
      // An i1 or-reduction is bitcast to iN and compared against 0; an i1
      // and-reduction is bitcast to iN and compared against all-ones.
      Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
                                         CmpInst::makeCmpResultType(ValTy),
                                         CmpInst::BAD_ICMP_PREDICATE,
                                         CostKind);
    }
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost ArithCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, SubTy,
                                             Ty, {}, CostKind, NumVecElts,
                                             SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The remaining reduction levels operate on a single legal-width
    // register, so each costs one shuffle plus one arithmetic op.
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 Ty, {}, CostKind, 0, Ty);
    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }
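The tree reduction being priced is the classic log2(N) shuffle-and-op pyramid plus a final extract of lane 0; in scalar form:

#include <cassert>

int main() {
  int V[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  for (int Width = 8 / 2; Width >= 1; Width /= 2) // log2(8) = 3 levels
    for (int i = 0; i < Width; ++i)
      V[i] += V[i + Width];                       // shuffle + add per level
  assert(V[0] == 36);                             // extractelement lane 0
}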
  /// Try to calculate the cost of performing strict (in-order) reductions,
  /// which involves doing a sequence of floating point additions in lane
  /// order, starting with an initial value.
  InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
                                          TTI::TargetCostKind CostKind) const {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    auto *VTy = cast<FixedVectorType>(Ty);
    InstructionCost ExtractCost = getScalarizationOverhead(
        VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
    InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
        Opcode, VTy->getElementType(), CostKind);
    ArithCost *= VTy->getNumElements();

    return ExtractCost + ArithCost;
  }
  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override {
    assert(Ty && "Unknown reduction vector type");
    if (TTI::requiresOrderedReduction(FMF))
      return getOrderedReductionCost(Opcode, Ty, CostKind);
    return getTreeReductionCost(Opcode, Ty, CostKind);
  }

  /// Try to calculate op costs for min/max reduction operations.
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost MinMaxCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, SubTy,
                                             Ty, {}, CostKind, NumVecElts,
                                             SubTy);
      IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
      MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The remaining levels run on a single legal-width register.
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 Ty, {}, CostKind, 0, Ty);
    IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
    MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
    // The last min/max is in a vector register and was counted above, so we
    // only need a single extractelement.
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *Ty, std::optional<FastMathFlags> FMF,
                           TTI::TargetCostKind CostKind) const override {
    if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
        FTy && IsUnsigned && Opcode == Instruction::Add &&
        FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
      // Represent vector_reduce_add(ZExt(<n x i1>)) as
      // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
      auto *IntTy =
          IntegerType::get(ResTy->getContext(), FTy->getNumElements());
      IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy}, FMF);
      return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getIntrinsicInstrCost(ICA, CostKind);
    }

    // Without any native support, this is equivalent to the cost of
    // vecreduce.opcode(ext(Ty A)).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost =
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);

    return RedCost + ExtCost;
  }

  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty,
                         TTI::TargetCostKind CostKind) const override {
    // Without any native support, this is equivalent to the cost of
    // vecreduce.add(mul(ext(Ty A), ext(Ty B))).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost = thisT()->getArithmeticReductionCost(
        Instruction::Add, ExtTy, std::nullopt, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);
    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);

    return RedCost + MulCost + 2 * ExtCost;
  }
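The mul-accumulate reduction models a widening dot product: extend both inputs, multiply in the wider type, then add-reduce. Targets with a dot-product instruction can report a much cheaper cost. The scalar equivalent:

#include <cassert>
#include <cstdint>

int main() {
  uint8_t A[4] = {200, 100, 50, 25}, B[4] = {2, 3, 4, 5};
  uint32_t Acc = 0;
  for (int i = 0; i < 4; ++i)
    Acc += uint32_t(A[i]) * uint32_t(B[i]); // zext, zext, mul, reduce.add
  assert(Acc == 1025u);
}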
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
mir Rename Register Operands
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static unsigned getNumElements(Type *Ty)
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Base class which can be used to help build a TTI implementation.
InstructionCost getFPOpCost(Type *Ty) const override
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const override
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override
Try to calculate op costs for min/max reduction operations.
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override
bool shouldBuildLookupTables() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override
Estimate the overhead of scalarizing an instruction.
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isProfitableToHoist(Instruction *I) const override
unsigned getNumberOfParts(Type *Tp) const override
virtual bool enableWritePrefetching() const override
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const override
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
bool useAA() const override
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override
bool isLegalAddScalableImmediate(int64_t Imm) const override
unsigned getAssumedAddrSpace(const Value *V) const override
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override
bool areInlineCompatible(const Function *Caller, const Function *Callee) const override
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override
bool haveFastSqrt(Type *Ty) const override
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override
unsigned adjustInliningThreshold(const CallBase *CB) const override
unsigned getInliningThresholdMultiplier() const override
InstructionCost getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
bool shouldBuildRelLookupTables() const override
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override
unsigned getEpilogueVectorizationMinVF() const override
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override
InstructionCost getVectorSplitCost() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
std::optional< unsigned > getMaxVScale() const override
unsigned getFlatAddressSpace() const override
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Compute a cost of the given call instruction.
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Get intrinsic cost based on argument types.
bool hasBranchDivergence(const Function *F=nullptr) const override
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override
bool shouldDropLSRSolutionIfLessProfitable() const override
int getInlinerVectorBonusPercent() const override
virtual unsigned getPrefetchDistance() const override
bool isVScaleKnownToBeAPowerOfTwo() const override
InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
virtual unsigned getCacheLineSize() const override
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
bool isLegalAddImmediate(int64_t imm) const override
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override
unsigned getMaxInterleaveFactor(ElementCount VF) const override
bool isSingleThreaded() const override
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
virtual bool shouldPrefetchAddressSpace(unsigned AS) const override
virtual ~BasicTTIImplBase()=default
bool isProfitableLSRChainElement(Instruction *I) const override
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override
std::optional< unsigned > getVScaleForTuning() const override
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override
Get intrinsic cost based on arguments.
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override
InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override
bool isSourceOfDivergence(const Value *V) const override
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
bool isAlwaysUniform(const Value *V) const override
bool isLegalICmpImmediate(int64_t imm) const override
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override
unsigned getRegUsageForType(Type *Ty) const override
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override
bool isTypeLegal(Type *Ty) const override
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) const override
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override
virtual unsigned getMaxPrefetchIterationsAhead() const override
InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override
Estimate the overhead of scalarizing an instruction's operands.
bool isNumRegsMajorCostOfLSR() const override
Concrete BasicTTIImpl that can be used if no further customization is needed.
size_type count() const
count - Returns the number of bits which are set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
CmpInst::Predicate getLTPredicate() const
CmpInst::Predicate getGTPredicate() const
This class represents a range of values.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the gi...
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getFixed(ScalarTy MinVal)
constexpr bool isScalar() const
Exactly one element.
Convenience struct for specifying and reasoning about fast-math flags.
Container class for subtarget features.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
AttributeList getAttributes() const
Return the attribute list for this Function.
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
FastMathFlags getFlags() const
const TargetLibraryInfo * getLibInfo() const
const SmallVectorImpl< Type * > & getArgTypes() const
Type * getReturnType() const
bool skipScalarizationCost() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
Intrinsic::ID getID() const
bool isTypeBasedOnly() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Represents a single loop in the control flow graph.
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
virtual bool enableWritePrefetching() const
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
virtual std::optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associatvity for the given level of cache.
virtual std::optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exactly one source vector.
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then extracting an original-width vector starting from the splice point.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
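A hedged sketch of chaining the static ShuffleVectorInst classifiers above to name a constant mask; the ordering and the labels are illustrative only.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"

const char *classifyMask(llvm::ArrayRef<int> Mask, int NumSrcElts) {
  int Index, NumSubElts;
  if (llvm::ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
    return "reverse";
  if (llvm::ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
    return "splat of lane 0";
  if (llvm::ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index))
    return "extract subvector";
  if (llvm::ShuffleVectorInst::isInsertSubvectorMask(Mask, NumSrcElts,
                                                     NumSubElts, Index))
    return "insert subvector";
  return "generic shuffle";
}
// e.g. classifyMask({3, 2, 1, 0}, 4) yields "reverse".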
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
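A brief usage sketch of the pattern these two containers enable: SmallPtrSet::insert reports first-time insertion via the bool of its returned pair, so de-duplicating while preserving order is a one-pass loop. Names are illustrative.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

llvm::SmallVector<const int *, 8>
firstOccurrences(llvm::ArrayRef<const int *> Items) {
  llvm::SmallPtrSet<const int *, 8> Seen;
  llvm::SmallVector<const int *, 8> Unique;
  for (const int *P : Items)
    if (Seen.insert(P).second) // true only on first insertion
      Unique.push_back(P);
  return Unique;
}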
StackOffset holds a fixed and a scalable offset in bytes.
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
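A minimal sketch of composing a StackOffset; the fixed and scalable parts are kept separate because the scalable part is scaled by the runtime value of vscale. Assumes StackOffset's additive operators.

#include "llvm/Support/TypeSize.h"

llvm::StackOffset slotOffset(int64_t FixedBytes, int64_t ScalableBytes) {
  return llvm::StackOffset::getFixed(FixedBytes) +
         llvm::StackOffset::getScalable(ScalableBytes);
}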
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from the GlobalISel pipeline.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
@ TypeScalarizeScalableVector
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases over a Range of values.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vector elements.
virtual bool shouldExpandCmpUsingSelects(EVT VT) const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean types?
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests unique destinations, with Low and High as its lowest and highest case values.
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
virtual bool isProfitableToHoist(Instruction *I) const
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return a pair that represents the legalization kind (first) that needs to happen to EVT (second) in order to type-legalize it.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type: either it is already legal (return 'Legal'), we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool isLegalAddScalableImmediate(int64_t) const
Return true if adding the specified scalable immediate is legal, that is, the target has add instructions which can add a register with the immediate.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
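A hedged sketch tying the hooks above together in the usual legality-query pattern: translate the IR opcode to an ISD opcode, map the IR type to an EVT, then ask the target. The helper name is illustrative.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

bool isLegalOrCustomOn(const llvm::TargetLoweringBase &TLI,
                       const llvm::DataLayout &DL, unsigned IROpcode,
                       llvm::Type *Ty) {
  int ISDOp = TLI.InstructionOpcodeToISD(IROpcode);
  if (!ISDOp) // no ISD equivalent for this IR opcode
    return false;
  llvm::EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  return VT.isSimple() && TLI.isOperationLegalOrCustom(ISDOp, VT);
}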
Primary interface to the complete machine description for the target machine.
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
If the specified predicate checks whether a generic pointer falls within a specified address space, return that generic pointer and the address space being queried.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space, return that address space.
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine, etc.).
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
LLVM_ABI bool isArch64Bit() const
Test whether the architecture is 64-bit.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Value * getOperand(unsigned i) const
static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)
static LLVM_ABI bool isVPCast(Intrinsic::ID ID)
static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)
static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)
static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Provides info so a possible vectorization of a function can be computed.
Base class of all SIMD vector types.
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same element type.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same width and scale.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign as the operand.
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer type given in operand 1.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isTargetIntrinsic(ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
LLVM_ABI Libcall getSINCOSPI(EVT RetVT)
getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getMODF(EVT RetVT)
getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getSINCOS(EVT RetVT)
getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.
template class LLVM_TEMPLATE_ABI opt< unsigned >
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...).
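A short usage sketch for the two range helpers above; the containers are illustrative.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

int weightedSum(const llvm::SmallVector<int, 4> &A,
                const llvm::SmallVector<int, 4> &B) {
  int Sum = 0;
  for (auto [Idx, Val] : llvm::enumerate(A)) // (index, element) pairs
    Sum += static_cast<int>(Idx) * Val;
  for (auto [X, Y] : llvm::zip_equal(A, B))  // asserts equal lengths
    Sum += X * Y;
  return Sum;
}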
Type * toScalarizedTy(Type *Ty)
A helper for converting vectorized types to scalarized (non-vector) types.
LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
bool isVectorizedTy(Type *Ty)
Returns true if Ty is a vector type or a struct of vector types where all vector types share the same VF.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
ElementCount getVectorizedTypeVF(Type *Ty)
Returns the number of vector elements for a vectorized type.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
constexpr int PoisonMaskElem
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
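Worked examples for the three MathExtras helpers above; the expected values in the comments and asserts are plain arithmetic.

#include "llvm/Support/MathExtras.h"

static_assert(llvm::isPowerOf2_32(64), "64 == 2^6");
static_assert(llvm::divideCeil(10, 4) == 3, "ceil(10/4) == 3");

unsigned floorLog2(unsigned V) {
  return llvm::Log2_32(V); // e.g. Log2_32(40) == 5 (floor of log2)
}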
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
constexpr unsigned BitWidth
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
cl::opt< unsigned > PartialUnrollingThreshold
LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)
Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching element count.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
ElementCount getVectorElementCount() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
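A minimal sketch of the EVT/IR-type round-trip the entries above describe, for a type that is always simple; the function name is illustrative.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"

llvm::Type *i32RoundTrip(llvm::LLVMContext &Ctx) {
  llvm::EVT VT = llvm::EVT::getIntegerVT(Ctx, 32); // simple EVT (MVT::i32)
  return VT.getTypeForEVT(Ctx);                    // back to the IR i32 type
}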
Attributes of a target dependent hardware loop.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.