#include <system_error>

using namespace lowertypetests;

#define DEBUG_TYPE "lowertypetests"

STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");
100 "lowertypetests-avoid-reuse",
101 cl::desc(
"Try to avoid reuse of byte array addresses using aliases"),
105 "lowertypetests-summary-action",
106 cl::desc(
"What to do with the summary when running this pass"),
108 clEnumValN(PassSummaryAction::Import,
"import",
109 "Import typeid resolutions from summary and globals"),
110 clEnumValN(PassSummaryAction::Export,
"export",
111 "Export typeid resolutions to summary and globals")),
115 "lowertypetests-read-summary",
116 cl::desc(
"Read summary from given YAML file before running pass"),
120 "lowertypetests-write-summary",
121 cl::desc(
"Write summary to given YAML file after running pass"),
126 cl::desc(
"Simply drop type test sequences"),
128 "Do not drop any type tests"),
130 "Drop type test assume sequences"),
132 "Drop all type test sequences")),
  std::vector<uint64_t> &Fragment = Fragments.back();
  for (auto ObjIndex : F) {
    if (OldFragmentIndex == 0) {
      Fragment.push_back(ObjIndex);

    std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];

  unsigned ReqSize = AllocByteOffset + BitSize;
  if (Bytes.size() < ReqSize)
    Bytes.resize(ReqSize);

  AllocMask = 1 << Bit;
  Bytes[AllocByteOffset + B] |= AllocMask;

  if (F->isDeclarationForLinker())

  auto *CI = mdconst::extract_or_null<ConstantInt>(
      F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
  if (!CI || !CI->isZero())

  return F->hasFnAttribute("cfi-canonical-jump-table");

struct ByteArrayInfo {
  std::set<uint64_t> Bits;

class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  bool IsJumpTableCanonical;

                                  bool IsJumpTableCanonical, bool IsExported,
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->NTypes = Types.size();
    GTM->IsJumpTableCanonical = IsJumpTableCanonical;
    GTM->IsExported = IsExported;

    return IsJumpTableCanonical;

  bool isExported() const {

struct ICallBranchFunnel final

    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->UniqueId = UniqueId;

    return getTrailingObjects(NTargets);
struct ScopedSaveAliaseesAndUsed {
  std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
  std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;

  void collectAndEraseUsedFunctions(Module &M,

      GV->eraseFromParent();

        return isa<Function>(GV);

  ScopedSaveAliaseesAndUsed(Module &M) : M(M) {

    collectAndEraseUsedFunctions(M, Used, false);
    collectAndEraseUsedFunctions(M, CompilerUsed, true);

    for (auto &GA : M.aliases()) {
      if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
        FunctionAliases.push_back({&GA, F});

    for (auto &GI : M.ifuncs())
      if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
        ResolverIFuncs.push_back({&GI, F});

  ~ScopedSaveAliaseesAndUsed() {

    for (auto P : FunctionAliases)
      P.first->setAliasee(P.second);

    for (auto P : ResolverIFuncs) {
      P.first->setResolver(P.second);
class LowerTypeTestsModule {

  bool CanUseArmJumpTable = false, CanUseThumbBWJumpTable = false;

  int HasBranchTargetEnforcement = -1;

  PointerType *PtrTy = PointerType::getUnqual(M.getContext());

  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;

  struct TypeIdLowering {

  std::vector<ByteArrayInfo> ByteArrayInfos;

  Function *WeakInitializerFn = nullptr;

  bool shouldExportConstantsAsAbsoluteSymbols();
  TypeIdLowering importTypeId(StringRef TypeId);

  ByteArrayInfo *createByteArray(BitSetInfo &BSI);
  void allocateByteArrays();

  void lowerTypeTestCalls(

                          const TypeIdLowering &TIL);

  bool hasBranchTargetEnforcement();

                             bool IsJumpTableCanonical);

  void findGlobalVariableUsersOf(Constant *C,

  void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);

  void replaceDirectCalls(Value *Old, Value *New);

  bool isFunctionAnnotation(Value *V) const {
    return FunctionAnnotations.contains(V);
  for (const auto &GlobalAndOffset : GlobalLayout) {
    for (MDNode *Type : GlobalAndOffset.first->types()) {
      if (Type->getOperand(1) != TypeId)

              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())

  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);

      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
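// Editor's note: the commented sketch below is not part of the original file;
// it is an illustrative scalar equivalent of the masked bit test built above,
// assuming Bits has a power-of-two width BitWidth:
//
//   bool maskedBitTest(uint64_t Bits, uint64_t BitOffset, unsigned BitWidth) {
//     unsigned BitIndex = BitOffset & (BitWidth - 1); // BitOffset mod BitWidth
//     return (Bits & (1ull << BitIndex)) != 0;
//   }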
ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;

  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;

void LowerTypeTestsModule::allocateByteArrays() {

              [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                return BAI1.BitSize > BAI2.BitSize;

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    BAI->MaskGlobal->replaceAllUsesWith(
    BAI->MaskGlobal->eraseFromParent();

      *BAI->MaskPtr = Mask;

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
        ByteArrayConst->getType(), ByteArray, Idxs);

    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();

  ByteArraySizeBytes = BAB.Bytes.size();
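// Editor's note (not part of the original file): allocateByteArrays sorts the
// byte arrays largest-first and packs them into one shared buffer, so up to
// eight type identifiers (one per bit position) can overlap in the same bytes;
// each type identifier then gets a byte offset plus a one-bit mask instead of
// a private array.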
                                                const TypeIdLowering &TIL,

                              "bits_use", ByteArray, &M);

    Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);

    return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
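// Editor's note: the commented snippet below is not part of the original file;
// it shows, in scalar form, what the byte-array variant of createBitSetTest
// computes (ByteArray and Mask come from the TypeIdLowering):
//
//   bool byteArrayBitTest(const uint8_t *ByteArray, uint64_t BitOffset,
//                         uint8_t Mask) {
//     return (ByteArray[BitOffset] & Mask) != 0;
//   }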
  if (auto GV = dyn_cast<GlobalObject>(V)) {

    GV->getMetadata(LLVMContext::MD_type, Types);

      if (Type->getOperand(1) != TypeId)

              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getIndexSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)

    if (Op->getOpcode() == Instruction::Select)
                                               const TypeIdLowering &TIL) {

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);

  Value *BitOffset = B.CreateIntrinsic(IntPtrTy, Intrinsic::fshr,
                                       {PtrOffset, PtrOffset, TIL.AlignLog2});

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

    return OffsetInRange;

  if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))

                                      Br->getMetadata(LLVMContext::MD_prof));

      for (auto &Phi : Else->phis())
        Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

      return createBitSetTest(ThenB, TIL, BitOffset);

  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  B.SetInsertPoint(CI);

  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
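// Editor's note (illustrative sketch, not part of the original file): for the
// non-trivial resolution kinds, the check built above is roughly, in scalar
// form,
//
//   uint64_t PtrOffset = PtrAsInt - OffsetedGlobalAsInt;
//   uint64_t BitOffset = rotr(PtrOffset, AlignLog2); // fshr with equal args
//   bool OffsetInRange = BitOffset <= SizeM1;        // also fails if misaligned
//
// and, when the bit set is not all-ones, the conditional block additionally
// tests bit BitOffset against the inline bits or the byte array.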
void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(

  std::vector<Constant *> GlobalInits;

  for (GlobalTypeMember *G : Globals) {
    auto *GV = cast<GlobalVariable>(G->getGlobal());

        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    MaxAlign = std::max(MaxAlign, Alignment);

    GlobalLayout[G] = GVOffset;

    GlobalInits.push_back(

    GlobalInits.push_back(GV->getInitializer());
    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    CurOffset = GVOffset + InitSize;

    if (DesiredPadding > 32)
      DesiredPadding = alignTo(InitSize, 32) - InitSize;

  auto *CombinedGlobal =

  CombinedGlobal->setAlignment(MaxAlign);

  lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);

  for (unsigned I = 0; I != Globals.size(); ++I) {

    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);

        "", CombinedGlobalElemPtr, &M);
bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {

                                              const TypeIdLowering &TIL) {

        "__typeid_" + TypeId + "_" + Name, C, &M);

    if (shouldExportConstantsAsAbsoluteSymbols())

    Storage = cast<ConstantInt>(C)->getZExtValue();

  ExportGlobal("global_addr", TIL.OffsetedGlobal);

    ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
    ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);

    uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;

    ExportGlobal("byte_array", TIL.TheByteArray);
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal("bit_mask", TIL.BitMask);

    ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
LowerTypeTestsModule::TypeIdLowering
LowerTypeTestsModule::importTypeId(StringRef TypeId) {

        ("__typeid_" + TypeId + "_" + Name).str(), Int8Arr0Ty);

    if (!shouldExportConstantsAsAbsoluteSymbols()) {
          ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
      if (!isa<IntegerType>(Ty))

    auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
    if (isa<IntegerType>(Ty))

    if (GV->getMetadata(LLVMContext::MD_absolute_symbol))

      SetAbsRange(~0ull, ~0ull);

      SetAbsRange(0, 1ull << AbsWidth);

  auto *GV = ImportGlobal("global_addr");

  TIL.OffsetedGlobal = GV;

    TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, IntPtrTy);

    TIL.TheByteArray = ImportGlobal("byte_array");
    TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, PtrTy);

    TIL.InlineBits = ImportConstant(

void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
  auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));

  auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());

  TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
  Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
void LowerTypeTestsModule::maybeReplaceComdat(Function *F,

      F->getComdat()->getName() == OriginalName) {
    Comdat *OldComdat = F->getComdat();
    Comdat *NewComdat = M.getOrInsertComdat(F->getName());

void LowerTypeTestsModule::importFunction(Function *F,

  assert(F->getType()->getAddressSpace() == 0);

  std::string Name = std::string(F->getName());

  if (F->isDSOLocal()) {
                                     F->getAddressSpace(),

    replaceDirectCalls(F, RealF);

                             F->getAddressSpace(), Name + ".cfi_jt", &M);

    F->setName(Name + ".cfi");
    maybeReplaceComdat(F, Name);

                         F->getAddressSpace(), Name, &M);

    for (auto &U : F->uses()) {
      if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
        std::string AliasName = A->getName().str() + ".cfi";

                               F->getAddressSpace(), "", &M);

        A->replaceAllUsesWith(AliasDecl);
        A->setName(AliasName);

  if (F->hasExternalWeakLinkage())

  F->setVisibility(Visibility);
void LowerTypeTestsModule::lowerTypeTestCalls(

    BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);

      if (auto MDS = dyn_cast<MDString>(TypeId))
        dbgs() << MDS->getString() << ": ";

        dbgs() << "<unnamed>: ";

    ByteArrayInfo *BAI = nullptr;

        Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, GlobalOffset)),
    TIL.AlignLog2 = ConstantInt::get(IntPtrTy, BSI.AlignLog2);
    TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);

      for (auto Bit : BSI.Bits)

      if (InlineBits == 0)

        TIL.InlineBits = ConstantInt::get(
            (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);

      ++NumByteArraysCreated;
      BAI = createByteArray(BSI);
      TIL.TheByteArray = BAI->ByteArray;
      TIL.BitMask = BAI->MaskGlobal;

    TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];

    if (TIUI.IsExported) {
      uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
        BAI->MaskPtr = MaskPtr;

    for (CallInst *CI : TIUI.CallSites) {
      ++NumTypeTestCallsLowered;
      Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);

  if (Type->getNumOperands() != 2)

  if (isa<GlobalVariable>(GO) && GO->hasSection())
        "A member of a type identifier may not have an explicit section");

  auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));

  auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
bool LowerTypeTestsModule::hasBranchTargetEnforcement() {
  if (HasBranchTargetEnforcement == -1) {

    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("branch-target-enforcement")))
      HasBranchTargetEnforcement = (BTE->getZExtValue() != 0);

      HasBranchTargetEnforcement = 0;

  return HasBranchTargetEnforcement;

LowerTypeTestsModule::getJumpTableEntrySize(Triple::ArchType JumpTableArch) {
  switch (JumpTableArch) {

    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      if (MD->getZExtValue())

    if (CanUseThumbBWJumpTable) {
      if (hasBranchTargetEnforcement())

    if (hasBranchTargetEnforcement())

LowerTypeTestsModule::createJumpTableEntryAsm(Triple::ArchType JumpTableArch) {

    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      Endbr = !MD->isZero();

      AsmOS << (JumpTableArch == Triple::x86 ? "endbr32\n" : "endbr64\n");
    AsmOS << "jmp ${0:c}@plt\n";

      AsmOS << ".balign 16, 0xcc\n";

      AsmOS << "int3\nint3\nint3\n";

    if (hasBranchTargetEnforcement())

    if (!CanUseThumbBWJumpTable) {

      AsmOS << "push {r0,r1}\n"
            << "0: add r0, r0, pc\n"
            << "str r0, [sp, #4]\n"
            << "1: .word $0 - (0b + 4)\n";

      if (hasBranchTargetEnforcement())

      AsmOS << "b.w $0\n";

    AsmOS << "tail $0@plt\n";

    AsmOS << "pcalau12i $$t0, %pc_hi20($0)\n"
          << "jirl $$r0, $$t0, %pc_lo12($0)\n";
void LowerTypeTestsModule::buildBitSetsFromFunctions(

    buildBitSetsFromFunctionsNative(TypeIds, Functions);

    buildBitSetsFromFunctionsWASM(TypeIds, Functions);

void LowerTypeTestsModule::moveInitializerToModuleConstructor(

  if (WeakInitializerFn == nullptr) {

        M.getDataLayout().getProgramAddressSpace(),
        "__cfi_global_var_init", &M);

    WeakInitializerFn->setSection(
            ? "__TEXT,__StaticInit,regular,pure_instructions"

  IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());

void LowerTypeTestsModule::findGlobalVariableUsersOf(

  for (auto *U : C->users()) {
    if (auto *GV = dyn_cast<GlobalVariable>(U))

    else if (auto *C2 = dyn_cast<Constant>(U))
      findGlobalVariableUsersOf(C2, Out);

void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(

  findGlobalVariableUsersOf(F, GlobalVarUsers);
  for (auto *GV : GlobalVarUsers) {
    if (GV == GlobalAnnotation)

    moveInitializerToModuleConstructor(GV);

                       F->getAddressSpace(), "", &M);
  replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);

    auto *InsertPt = dyn_cast<Instruction>(U.getUser());
    assert(InsertPt && "Non-instruction users should have been eliminated");
    auto *PN = dyn_cast<PHINode>(InsertPt);

      InsertPt = PN->getIncomingBlock(U)->getTerminator();

      PN->setIncomingValueForBlock(InsertPt->getParent(), Select);

  Attribute TFAttr = F->getFnAttribute("target-features");

      if (Feature == "-thumb-mode")

      else if (Feature == "+thumb-mode")

  if (!CanUseThumbBWJumpTable && CanUseArmJumpTable) {

    unsigned ArmCount = 0, ThumbCount = 0;
    for (const auto GTM : Functions) {
      if (!GTM->isJumpTableCanonical()) {

      Function *F = cast<Function>(GTM->getGlobal());
void LowerTypeTestsModule::createJumpTable(

  InlineAsm *JumpTableAsm = createJumpTableEntryAsm(JumpTableArch);

  bool areAllEntriesNounwind = true;
  for (GlobalTypeMember *GTM : Functions) {
    if (!llvm::cast<llvm::Function>(GTM->getGlobal())
             ->hasFnAttribute(llvm::Attribute::NoUnwind)) {
      areAllEntriesNounwind = false;

    IRB.CreateCall(JumpTableAsm, GTM->getGlobal());

  IRB.CreateUnreachable();

  F->setAlignment(Align(getJumpTableEntrySize(JumpTableArch)));

  F->addFnAttr(Attribute::Naked);

    F->addFnAttr("target-features", "-thumb-mode");

    if (hasBranchTargetEnforcement()) {

      F->addFnAttr("target-features", "+thumb-mode,+pacbti");

      F->addFnAttr("target-features", "+thumb-mode");
    if (CanUseThumbBWJumpTable) {

      F->addFnAttr("target-cpu", "cortex-a8");

  if (F->hasFnAttribute("branch-target-enforcement"))
    F->removeFnAttr("branch-target-enforcement");
  if (F->hasFnAttribute("sign-return-address"))
    F->removeFnAttr("sign-return-address");

    F->addFnAttr("target-features", "-c,-relax");

  F->addFnAttr(Attribute::NoCfCheck);

  if (areAllEntriesNounwind)
    F->addFnAttr(Attribute::NoUnwind);

  F->addFnAttr(Attribute::NoInline);
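// Editor's note (not part of the original file): the jump table is emitted as
// a single naked, noinline, nocf_check function consisting only of the inline
// asm entries above, so the entries stay contiguous and the address of entry I
// can be computed as JumpTable + I * EntrySize.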
void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(

  unsigned EntrySize = getJumpTableEntrySize(JumpTableArch);
  for (unsigned I = 0; I != Functions.size(); ++I)
    GlobalLayout[Functions[I]] = I * EntrySize;

      M.getDataLayout().getProgramAddressSpace(),
      ".cfi.jumptable", &M);

  lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);

  for (unsigned I = 0; I != Functions.size(); ++I) {
    Function *F = cast<Function>(Functions[I]->getGlobal());
    bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();

        JumpTableType, JumpTable,
                         ConstantInt::get(IntPtrTy, I)});

    const bool IsExported = Functions[I]->isExported();
    if (!IsJumpTableCanonical) {

                              F->getName() + ".cfi_jt",
                              CombinedGlobalElemPtr, &M);

      if (IsJumpTableCanonical)

    if (!IsJumpTableCanonical) {
      if (F->hasExternalWeakLinkage())
        replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
                                               IsJumpTableCanonical);

        replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);

      assert(F->getType()->getAddressSpace() == 0);

                              CombinedGlobalElemPtr, &M);

      F->setName(FAlias->getName() + ".cfi");
      maybeReplaceComdat(F, FAlias->getName());

      replaceCfiUses(F, FAlias, IsJumpTableCanonical);
      if (!F->hasLocalLinkage())

  createJumpTable(JumpTableFn, Functions, JumpTableArch);

void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(

  for (GlobalTypeMember *GTM : Functions) {
    Function *F = cast<Function>(GTM->getGlobal());

    if (!F->hasAddressTaken())

            ConstantInt::get(Int64Ty, IndirectIndex))));
    F->setMetadata("wasm.index", MD);

    GlobalLayout[GTM] = IndirectIndex++;
void LowerTypeTestsModule::buildBitSetsFromDisjointSet(

  for (unsigned I = 0; I != TypeIds.size(); ++I)
    TypeIdIndices[TypeIds[I]] = I;

  std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
  unsigned GlobalIndex = 0;

  for (GlobalTypeMember *GTM : Globals) {

      auto I = TypeIdIndices.find(Type->getOperand(1));
      if (I != TypeIdIndices.end())
        TypeMembers[I->second].insert(GlobalIndex);

    GlobalIndices[GTM] = GlobalIndex;

  for (ICallBranchFunnel *JT : ICallBranchFunnels) {
    TypeMembers.emplace_back();
    std::set<uint64_t> &TMSet = TypeMembers.back();
    for (GlobalTypeMember *T : JT->targets())
      TMSet.insert(GlobalIndices[T]);

                       const std::set<uint64_t> &O2) {
              return O1.size() < O2.size();

  for (auto &&MemSet : TypeMembers)
    GLB.addFragment(MemSet);

      Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
  std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
  auto OGTMI = OrderedGTMs.begin();
  for (auto &&F : GLB.Fragments) {

      if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
            "variables and functions");
      *OGTMI++ = Globals[Offset];

    buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);

    buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
LowerTypeTestsModule::LowerTypeTestsModule(

    : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),

  assert(!(ExportSummary && ImportSummary));
  Triple TargetTriple(M.getTargetTriple());
  Arch = TargetTriple.getArch();

    CanUseArmJumpTable = true;

      if (F.isDeclaration())

        CanUseArmJumpTable = true;

        CanUseThumbBWJumpTable = true;

  OS = TargetTriple.getOS();
  ObjectFormat = TargetTriple.getObjectFormat();

  GlobalAnnotation = M.getGlobalVariable("llvm.global.annotations");
  if (GlobalAnnotation && GlobalAnnotation->hasInitializer()) {
        cast<ConstantArray>(GlobalAnnotation->getInitializer());
    FunctionAnnotations.insert_range(CA->operands());

  yaml::Input In(ReadSummaryFile->getBuffer());

  LowerTypeTestsModule(

    yaml::Output Out(OS);

  auto *Usr = dyn_cast<CallInst>(U.getUser());

  auto *CB = dyn_cast<CallBase>(Usr);
  if (CB && CB->isCallee(&U))
void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
                                          bool IsJumpTableCanonical) {

    if (isa<NoCFIValue>(U.getUser()))

    if (isFunctionAnnotation(U.getUser()))

    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {

  for (auto *C : Constants)
    C->handleOperandChange(Old, New);

void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {

                          bool ShouldDropAll) {

    auto *CI = cast<CallInst>(U.getUser());

      if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
        Assume->eraseFromParent();

        return isa<PHINode>(U);
bool LowerTypeTestsModule::lower() {

  if (DropTypeTests != DropTestKind::None) {
    bool ShouldDropAll = DropTypeTests == DropTestKind::All;

    if (PublicTypeTestFunc)

    if (TypeTestFunc || PublicTypeTestFunc) {

  if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
      (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
      !ExportSummary && !ImportSummary)

  if (ImportSummary) {

        importTypeTest(cast<CallInst>(U.getUser()));

    if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
          "unexpected call to llvm.icall.branch.funnel during import phase");

      if (F.hasLocalLinkage())

      ScopedSaveAliaseesAndUsed S(M);
      for (auto *F : Defs)
        importFunction(F, true);
      for (auto *F : Decls)
        importFunction(F, false);

  GlobalClassesTy GlobalClasses;

    std::vector<GlobalTypeMember *> RefGlobals;

  unsigned CurUniqueId = 0;

  const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;

  struct ExportedFunctionInfo {

  if (ExportSummary) {
    NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
    if (CfiFunctionsMD) {

      for (auto &I : *ExportSummary)
        for (auto &GVS : I.second.SummaryList)

            for (const auto &Ref : GVS->refs()) {

              for (auto &RefGVS : Ref.getSummaryList())
                if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
                  AddressTaken.insert(Alias->getAliaseeGUID());

        if (AddressTaken.count(GUID))

        auto VI = ExportSummary->getValueInfo(GUID);

        for (auto &I : VI.getSummaryList())
          if (auto Alias = dyn_cast<AliasSummary>(I.get()))
            if (AddressTaken.count(Alias->getAliaseeGUID()))

      for (auto *FuncMD : CfiFunctionsMD->operands()) {
        assert(FuncMD->getNumOperands() >= 2);

            cast<MDString>(FuncMD->getOperand(0))->getString();

            cast<ConstantAsMetadata>(FuncMD->getOperand(1))
                ->getUniqueInteger()

          if (!ExportSummary->isGUIDLive(GUID))

          if (auto VI = ExportSummary->getValueInfo(GUID))
            for (const auto &GVS : VI.getSummaryList())

        auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});

          P.first->second = {Linkage, FuncMD};

      for (const auto &P : ExportedFunctions) {

        MDNode *FuncMD = P.second.FuncMD;

        if (F && F->hasLocalLinkage()) {

          F->setName(F->getName() + ".1");

              GlobalVariable::ExternalLinkage,
              M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);

        if (F->hasAvailableExternallyLinkage()) {

          F->setComdat(nullptr);

        if (F->isDeclaration()) {

          F->eraseMetadata(LLVMContext::MD_type);

          F->addMetadata(LLVMContext::MD_type,
  struct AliasToCreate {

    std::string TargetName;

  std::vector<AliasToCreate> AliasesToCreate;

  if (ExportSummary) {
    if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
      for (auto *AliasMD : AliasesMD->operands()) {

        for (Metadata *MD : AliasMD->operands()) {
          auto *MDS = dyn_cast<MDString>(MD);

          if (!ExportedFunctions.count(AliasName))

          auto *AliasF = M.getFunction(AliasName);

        if (Aliases.empty())

        for (unsigned I = 1; I != Aliases.size(); ++I) {
          auto *AliasF = Aliases[I];
          ExportedFunctions.erase(AliasF->getName());
          AliasesToCreate.push_back(
              {AliasF, std::string(Aliases[0]->getName())});

    bool IsJumpTableCanonical = false;
    bool IsExported = false;
    if (Function *F = dyn_cast<Function>(&GO)) {

      if (auto It = ExportedFunctions.find(F->getName());
          It != ExportedFunctions.end()) {

      } else if (!F->hasAddressTaken()) {
        if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())

    auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,

    GlobalTypeMembers[&GO] = GTM;

      verifyTypeMDNode(&GO, Type);
      auto &Info = TypeIdInfo[Type->getOperand(1)];
      Info.UniqueId = ++CurUniqueId;
      Info.RefGlobals.push_back(GTM);

  auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {

    auto Ins = TypeIdUsers.insert({TypeId, {}});

      auto &GCI = GlobalClasses.insert(TypeId);
      GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);

      for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
        CurSet = GlobalClasses.unionSets(
            CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));

    return Ins.first->second;

    for (const Use &U : TypeTestFunc->uses()) {
      auto CI = cast<CallInst>(U.getUser());

      for (const Use &CIU : CI->uses()) {
        if (isa<AssumeInst>(CIU.getUser()))

        OnlyAssumeUses = false;

      auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));

      auto TypeId = TypeIdMDVal->getMetadata();
      AddTypeIdUse(TypeId).CallSites.push_back(CI);

  if (ICallBranchFunnelFunc) {
    for (const Use &U : ICallBranchFunnelFunc->uses()) {

            "llvm.icall.branch.funnel not supported on this target");

      auto CI = cast<CallInst>(U.getUser());

      std::vector<GlobalTypeMember *> Targets;

      GlobalClassesTy::member_iterator CurSet;
      for (unsigned I = 1; I != CI->arg_size(); I += 2) {

               "Expected branch funnel operand to be global value");

        GlobalTypeMember *GTM = GlobalTypeMembers[Base];
        Targets.push_back(GTM);
        GlobalClassesTy::member_iterator NewSet =
            GlobalClasses.findLeader(GlobalClasses.insert(GTM));

          CurSet = GlobalClasses.unionSets(CurSet, NewSet);

      GlobalClasses.unionSets(
          CurSet, GlobalClasses.findLeader(
                      GlobalClasses.insert(ICallBranchFunnel::create(
                          Alloc, CI, Targets, ++CurUniqueId))));
  if (ExportSummary) {

    for (auto &P : TypeIdInfo) {
      if (auto *TypeId = dyn_cast<MDString>(P.first))
                             TypeId->getString())]

    for (auto &P : *ExportSummary) {
      for (auto &S : P.second.SummaryList) {
        if (!ExportSummary->isGlobalValueLive(S.get()))

        if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))

              AddTypeIdUse(MD).IsExported = true;

  if (GlobalClasses.empty())

    ScopedSaveAliaseesAndUsed S(M);

    for (const auto &C : GlobalClasses) {

      ++NumTypeIdDisjointSets;

      std::vector<Metadata *> TypeIds;
      std::vector<GlobalTypeMember *> Globals;
      std::vector<ICallBranchFunnel *> ICallBranchFunnels;
      for (auto M : GlobalClasses.members(*C)) {
        if (isa<Metadata *>(M))
          TypeIds.push_back(cast<Metadata *>(M));
        else if (isa<GlobalTypeMember *>(M))
          Globals.push_back(cast<GlobalTypeMember *>(M));

          ICallBranchFunnels.push_back(cast<ICallBranchFunnel *>(M));

        return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;

                  [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
                    return F1->UniqueId < F2->UniqueId;

      buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);

  allocateByteArrays();

  for (auto A : AliasesToCreate) {
    auto *Target = M.getNamedValue(A.TargetName);
    if (!isa<GlobalAlias>(Target))

    AliasGA->setVisibility(A.Alias->getVisibility());
    AliasGA->setLinkage(A.Alias->getLinkage());
    AliasGA->takeName(A.Alias);
    A.Alias->replaceAllUsesWith(AliasGA);
    A.Alias->eraseFromParent();

  if (ExportSummary) {
    if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
      for (auto *Symver : SymversMD->operands()) {
        assert(Symver->getNumOperands() >= 2);

            cast<MDString>(Symver->getOperand(0))->getString();
        StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();

        if (!ExportedFunctions.count(SymbolName))

        M.appendModuleInlineAsm(
            (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
    Changed = LowerTypeTestsModule::runForTesting(M, AM);

        LowerTypeTestsModule(M, AM, ExportSummary, ImportSummary, DropTypeTests)

  bool Changed = false;

  for (auto &GV : M.globals()) {

    auto MaySimplifyPtr = [&](Value *Ptr) {
      if (auto *GV = dyn_cast<GlobalValue>(Ptr))
        if (auto *CFIGV = M.getNamedValue((GV->getName() + ".cfi").str()))

    auto MaySimplifyInt = [&](Value *Op) {
      auto *PtrAsInt = dyn_cast<ConstantExpr>(Op);
      if (!PtrAsInt || PtrAsInt->getOpcode() != Instruction::PtrToInt)

      return MaySimplifyPtr(PtrAsInt->getOperand(0));

      if (auto *CI = dyn_cast<ICmpInst>(U)) {

      auto *CE = dyn_cast<ConstantExpr>(U);
      if (!CE || CE->getOpcode() != Instruction::PtrToInt)

        auto *CE = dyn_cast<ConstantExpr>(U.getUser());
        if (U.getOperandNo() == 0 && CE &&
            CE->getOpcode() == Instruction::Sub &&
            MaySimplifyInt(CE->getOperand(1))) {

          CE->replaceAllUsesWith(ConstantInt::get(CE->getType(), 0));

        auto *CI = dyn_cast<ICmpInst>(U.getUser());
        if (U.getOperandNo() == 1 && CI &&