#include "llvm/IR/IntrinsicsAMDGPU.h"

class AMDGPUCodeGenPassBuilder
    : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
  using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;

  AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
                           const CGPassBuilderOption &Opts,
                           PassInstrumentationCallbacks *PIC);

  void addIRPasses(AddIRPass &) const;
  void addCodeGenPrepare(AddIRPass &) const;
  void addPreISel(AddIRPass &addPass) const;
  void addILPOpts(AddMachinePass &) const;
  void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
  Error addInstSelector(AddMachinePass &) const;
  void addPreRewrite(AddMachinePass &) const;
  void addMachineSSAOptimization(AddMachinePass &) const;
  void addPostRegAlloc(AddMachinePass &) const;
  void addPreEmitPass(AddMachinePass &) const;
  void addPreEmitRegAlloc(AddMachinePass &) const;
  Error addRegAssignmentOptimized(AddMachinePass &) const;
  void addPreRegAlloc(AddMachinePass &) const;
  void addOptimizedRegAlloc(AddMachinePass &) const;
  void addPreSched2(AddMachinePass &) const;

  void addEarlyCSEOrGVNPass(AddIRPass &) const;
  void addStraightLineScalarOptimizationPasses(AddIRPass &) const;
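// Note: AMDGPUCodeGenPassBuilder drives the new-pass-manager codegen pipeline
// for the GCN target, while the legacy pipeline is built by GCNPassConfig
// further down in this file. The two install largely the same AMDGPU-specific
// IR and machine passes at the corresponding extension points.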
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}

  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}

  WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
static SGPRRegisterRegAlloc
    defaultSGPRRegAlloc("default",
                        "pick SGPR register allocator based on -O option",
                        useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
    SGPRRegAlloc("sgpr-regalloc", cl::Hidden,
                 cl::init(&useDefaultRegisterAllocator),
                 cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
    VGPRRegAlloc("vgpr-regalloc", cl::Hidden,
                 cl::init(&useDefaultRegisterAllocator),
                 cl::desc("Register allocator to use for VGPRs"));

static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<WWMRegisterRegAlloc>>
    WWMRegAlloc("wwm-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for WWM registers"));
static void initializeDefaultSGPRRegisterAllocatorOnce() {
  SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
}

static void initializeDefaultWWMRegisterAllocatorOnce() {
  WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
}
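// These initializers run under llvm::call_once from the
// GCNPassConfig::create*AllocPass helpers below, so each register class gets
// its default allocator registered exactly once.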
static FunctionPass *createBasicSGPRRegisterAllocator() { return createBasicRegisterAllocator(onlyAllocateSGPRs); }
static FunctionPass *createGreedySGPRRegisterAllocator() { return createGreedyRegisterAllocator(onlyAllocateSGPRs); }
static FunctionPass *createBasicVGPRRegisterAllocator() { return createBasicRegisterAllocator(onlyAllocateVGPRs); }
static FunctionPass *createGreedyVGPRRegisterAllocator() { return createGreedyRegisterAllocator(onlyAllocateVGPRs); }
static FunctionPass *createGreedyWWMRegisterAllocator() { return createGreedyRegisterAllocator(onlyAllocateWWMRegs); }
static SGPRRegisterRegAlloc
    basicRegAllocSGPR("basic", "basic register allocator",
                      createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc
    greedyRegAllocSGPR("greedy", "greedy register allocator",
                       createGreedySGPRRegisterAllocator);
static SGPRRegisterRegAlloc
    fastRegAllocSGPR("fast", "fast register allocator",
                     createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc
    basicRegAllocVGPR("basic", "basic register allocator",
                      createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc
    greedyRegAllocVGPR("greedy", "greedy register allocator",
                       createGreedyVGPRRegisterAllocator);
static VGPRRegisterRegAlloc
    fastRegAllocVGPR("fast", "fast register allocator",
                     createFastVGPRRegisterAllocator);

static WWMRegisterRegAlloc
    basicRegAllocWWMReg("basic", "basic register allocator",
                        createBasicWWMRegisterAllocator);
static WWMRegisterRegAlloc
    greedyRegAllocWWMReg("greedy", "greedy register allocator",
                         createGreedyWWMRegisterAllocator);
static WWMRegisterRegAlloc
    fastRegAllocWWMReg("fast", "fast register allocator",
                       createFastWWMRegisterAllocator);
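// Illustration (not part of this file): with the registrations above in place,
// each register class's allocator can be overridden on the llc command line,
// e.g.
//   llc -mtriple=amdgcn ... -sgpr-regalloc=greedy -vgpr-regalloc=fast -wwm-regalloc=basic
// The "default" choice picks an allocator based on the -O level, as
// defaultSGPRRegAlloc above describes.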
static cl::opt<bool> EnableEarlyIfConversion(
    "amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"),
    cl::init(false));

static cl::opt<bool> OptExecMaskPreRA(
    "amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
    cl::desc("Run pre-RA exec mask optimizations"), cl::init(true));

static cl::opt<bool> LowerCtorDtor(
    "amdgpu-lower-global-ctor-dtor",
    cl::desc("Lower GPU ctor / dtors to globals on the device."),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableLoadStoreVectorizer(
    "amdgpu-load-store-vectorizer",
    cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden);

static cl::opt<bool> ScalarizeGlobal(
    "amdgpu-scalarize-global-loads",
    cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden);

static cl::opt<bool> InternalizeSymbols(
    "amdgpu-internalize-symbols",
    cl::desc("Enable elimination of non-kernel functions and unused globals"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EarlyInlineAll(
    "amdgpu-early-inline-all", cl::desc("Inline all functions early"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
    "amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true));

static cl::opt<bool> EnableDPPCombine(
    "amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true));

static cl::opt<bool> EnableAMDGPUAliasAnalysis(
    "enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"),
    cl::init(true));

static cl::opt<bool> EnableLibCallSimplify(
    "amdgpu-simplify-libcall",
    cl::desc("Enable amdgpu library simplifications"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
    "amdgpu-ir-lower-kernel-arguments",
    cl::desc("Lower kernel argument loads in IR pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableRegReassign(
    "amdgpu-reassign-regs",
    cl::desc("Enable register reassign optimizations on gfx10+"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

static cl::opt<bool> EnableSIModeRegisterPass(
    "amdgpu-mode-register", cl::desc("Enable mode register pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableInsertDelayAlu(
    "amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableVOPD(
    "amdgpu-enable-vopd",
    cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableDCEInRA(
    "amdgpu-dce-in-ra", cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
    "amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableSwLowerLDS(
    "amdgpu-enable-sw-lower-lds",
    cl::desc("Enable lowering of lds to global memory pass "
             "and asan instrument resulting IR."),
    cl::init(true), cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds",
    cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoopPrefetch(
    "amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"),
    cl::Hidden, cl::init(false));

static cl::opt<std::string> AMDGPUSchedStrategy(
    "amdgpu-sched-strategy",
    cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden,
    cl::init(""));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
    "amdgpu-enable-hipstdpar",
    cl::desc("Enable HIP Standard Parallelism Offload support"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnableAMDGPUAttributor(
    "amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> NewRegBankSelect(
    "new-reg-bank-select",
    cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
             "regbankselect"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> HasClosedWorldAssumption(
    "amdgpu-link-time-closed-world",
    cl::desc("Whether has closed-world assumption at link time"),
    cl::init(false), cl::Hidden);
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

// Shared pattern in the create*MachineScheduler factories: store clustering is
// added only if (ST.shouldClusterStores()); the memory-clause scheduler builds
// its DAG around std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C).

static MachineSchedRegistry GCNMaxOccupancySchedRegistry(
    "gcn-max-occupancy", "Run GCN scheduler to maximize occupancy",
    createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
    "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
    createGCNMaxMemoryClauseMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
726 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
727 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
736 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
737 "-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
738 "v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
739 "v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
763 std::optional<Reloc::Model>
RM,
764 std::optional<CodeModel::Model> CM,
786 Attribute GPUAttr =
F.getFnAttribute(
"target-cpu");
791 Attribute FSAttr =
F.getFnAttribute(
"target-features");
  if (ST.shouldClusterStores())

  return F->isDeclaration() || F->getName().starts_with("__asan_") ||
         F->getName().starts_with("__sanitizer_") ||
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName),
          inconvertibleErrorCode());
    }
  }
#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"

  PB.registerScalarOptimizerLateEPCallback(
  PB.registerVectorizerEndEPCallback(
  PB.registerPipelineEarlySimplificationEPCallback(
  PB.registerPeepholeEPCallback(
  PB.registerCGSCCOptimizerLateEPCallback(
  PB.registerFullLinkTimeOptimizationLastEPCallback(

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        if (FilterName == "wwm")
          return onlyAllocateWWMRegs;
        return nullptr;
      });
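// Note: the "sgpr"/"vgpr"/"wwm" filter names registered here are the ones the
// new-pass-manager greedy allocator runs refer to below, e.g.
// RAGreedyPass({onlyAllocateSGPRs, "sgpr"}) in addRegAssignmentOptimized.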
                                             unsigned DestAS) const {

      !Arg->hasByRefAttr())

    const auto *Ptr = LD->getPointerOperand();

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_is_shared:
  case Intrinsic::amdgcn_is_private:
  return std::pair(nullptr, -1);
      const_cast<Value *>(V),
  return std::pair(nullptr, -1);
1095 Module &M,
unsigned NumParts,
1096 function_ref<
void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1107 PB.registerModuleAnalyses(
MAM);
1108 PB.registerFunctionAnalyses(
FAM);
1124 std::optional<Reloc::Model>
RM,
1125 std::optional<CodeModel::Model> CM,
1137 auto &
I = SubtargetMap[SubtargetKey];
1143 I = std::make_unique<GCNSubtarget>(
TargetTriple, GPU, FS, *
this);
1160 AMDGPUCodeGenPassBuilder CGPB(*
this, Opts,
PIC);
1161 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
  if (ST.enableSIScheduler())

      C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");

  if (SchedStrategy == "max-ilp")
  if (SchedStrategy == "max-memory-clause")
  if (SchedStrategy == "iterative-ilp")
  if (SchedStrategy == "iterative-minreg")
  if (SchedStrategy == "iterative-maxocc")
  if (ST.shouldClusterStores())

  setRequiresCodeGenSCCOrder(true);
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createWWMRegAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
  if ((TM.getTargetTriple().isAMDGCN()) &&

  if (TM.getTargetTriple().isAMDGCN()) {

  if (TM->getTargetTriple().isAMDGCN() &&

  if (TM->getTargetTriple().isAMDGCN()) {
bool GCNPassConfig::addPreISel() {
void GCNPassConfig::addMachineSSAOptimization() {
bool GCNPassConfig::addILPOpts() {
bool GCNPassConfig::addInstSelector() {
bool GCNPassConfig::addIRTranslator() {
void GCNPassConfig::addPreLegalizeMachineIR() {
bool GCNPassConfig::addLegalizeMachineIR() {
void GCNPassConfig::addPreRegBankSelect() {
bool GCNPassConfig::addRegBankSelect() {
void GCNPassConfig::addPreGlobalInstructionSelect() {
bool GCNPassConfig::addGlobalInstructionSelect() {
void GCNPassConfig::addFastRegAlloc() {
void GCNPassConfig::addPreRegAlloc() {
void GCNPassConfig::addOptimizedRegAlloc() {
bool GCNPassConfig::addPreRewrite() {
FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
                  initializeDefaultSGPRRegisterAllocatorOnce);

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
                  initializeDefaultVGPRRegisterAllocatorOnce);
    return createGreedyVGPRRegisterAllocator();
    return createFastVGPRRegisterAllocator();

FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
                  initializeDefaultWWMRegisterAllocatorOnce);
    return createGreedyWWMRegisterAllocator();
    return createFastWWMRegisterAllocator();

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
    "and -vgpr-regalloc";
bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(createSGPRAllocPass(false));
  addPass(createWWMRegAllocPass(false));
  addPass(createVGPRAllocPass(false));

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(createSGPRAllocPass(true));
  addPass(createWWMRegAllocPass(true));
  addPass(createVGPRAllocPass(true));
void GCNPassConfig::addPostRegAlloc() {
void GCNPassConfig::addPreSched2() {
void GCNPassConfig::addPreEmitPass() {
void GCNPassConfig::addPostBBSections() {

  return new GCNPassConfig(*this, PM);
1858 if (MFI->Occupancy == 0) {
1860 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1866 SourceRange =
RegName.SourceRange;
1879 if (parseOptionalRegister(YamlMFI.
VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1882 if (parseOptionalRegister(YamlMFI.
SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1886 MFI->LongBranchReservedReg))
1895 "incorrect register class for field",
RegName.Value,
1897 SourceRange =
RegName.SourceRange;
1901 if (parseRegister(YamlMFI.
ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1906 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1907 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1911 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1912 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1916 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1917 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1923 if (parseRegister(YamlReg, ParsedReg))
1930 MFI->
setFlag(Info->VReg, Info->Flags);
1932 for (
const auto &[
_, Info] : PFS.
VRegInfos) {
1933 MFI->
setFlag(Info->VReg, Info->Flags);
1938 if (parseRegister(YamlRegStr, ParsedReg))
1940 MFI->SpillPhysVGPRs.push_back(ParsedReg);
  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {

    if (A->IsRegister) {
      SourceRange = A->RegisterName.SourceRange;
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;

      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
  if (ST.hasIEEEMode())
  if (ST.hasDX10ClampMode())

AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
  Opt.MISchedPostRA = true;
  Opt.RequiresCodeGenSCCOrder = true;
2066 disablePass<StackMapLivenessPass, FuncletLayoutPass,
2070void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass)
const {
2107 addStraightLineScalarOptimizationPasses(addPass);
2122 Base::addIRPasses(addPass);
2137 addEarlyCSEOrGVNPass(addPass);
2140void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass)
const {
2164 addPass.requireCGSCCOrder();
2168 Base::addCodeGenPrepare(addPass);
2180void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass)
const {
2218void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass)
const {
2222 Base::addILPOpts(addPass);
2225void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2226 CreateMCStreamer)
const {
2230Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass)
const {
2237void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass)
const {
2243void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2244 AddMachinePass &addPass)
const {
2245 Base::addMachineSSAOptimization(addPass);
2262void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2263 AddMachinePass &addPass)
const {
2272 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2298 Base::addOptimizedRegAlloc(addPass);
2301void AMDGPUCodeGenPassBuilder::addPreRegAlloc(AddMachinePass &addPass)
const {
2306Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2307 AddMachinePass &addPass)
const {
2312 addPass(RAGreedyPass({onlyAllocateSGPRs,
"sgpr"}));
2332 addPass(RAGreedyPass({onlyAllocateWWMRegs,
"wwm"}));
2338 addPass(RAGreedyPass({onlyAllocateVGPRs,
"vgpr"}));
2341 addPreRewrite(addPass);
2348void AMDGPUCodeGenPassBuilder::addPostRegAlloc(AddMachinePass &addPass)
const {
2352 Base::addPostRegAlloc(addPass);
2355void AMDGPUCodeGenPassBuilder::addPreSched2(AddMachinePass &addPass)
const {
2361void AMDGPUCodeGenPassBuilder::addPreEmitPass(AddMachinePass &addPass)
const {
2402bool AMDGPUCodeGenPassBuilder::isPassEnabled(
const cl::opt<bool> &Opt,
2406 if (
TM.getOptLevel() < Level)
2411void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass)
const {
2418void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2419 AddIRPass &addPass)
const {
2431 addEarlyCSEOrGVNPass(addPass);
unsigned const MachineRegisterInfo * MRI
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static std::string computeDataLayout(const Triple &TT, const MCTargetOptions &Options, bool LittleEndian)
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
static std::unique_ptr< TargetLoweringObjectFile > createTLOF(const Triple &TT)
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
Analyzes how many registers and other resources are used by functions.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry("gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause", createGCNMaxMemoryClauseMachineScheduler)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSwLowerLDS("amdgpu-enable-sw-lower-lds", cl::desc("Enable lowering of lds to global memory pass " "and asan instrument resulting IR."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > HasClosedWorldAssumption("amdgpu-link-time-closed-world", cl::desc("Whether has closed-world assumption at link time"), cl::init(false), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< std::string > AMDGPUSchedStrategy("amdgpu-sched-strategy", cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden, cl::init(""))
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
Interfaces for producing common pass manager configurations.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_EXTERNAL_VISIBILITY
This file provides the interface for a simple, fast CSE pass.
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
Register const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
CGSCCAnalysisManager CGAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
SI Machine Scheduler interface.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
static cl::opt< cl::boolOrDefault > EnableGlobalISelOption("global-isel", cl::Hidden, cl::desc("Enable the \"global\" instruction selector"))
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
void addStraightLineScalarOptimizationPasses()
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addEarlyCSEOrGVNPass()
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
std::unique_ptr< TargetLoweringObjectFile > TLOF
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
~AMDGPUTargetMachine() override
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
static bool EnableFunctionCalls
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
static bool EnableLowerModuleLDS
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class provides access to building LLVM's passes.
CodeGenTargetMachineImpl(const Target &T, StringRef DataLayoutString, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOptLevel OL)
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
static ErrorSuccess success()
Create a success value.
Tagged union holding either a T or a Error.
FunctionPass class - This class is used to implement most global optimizations.
@ SCHEDULE_LEGACYMAXOCCUPANCY
const SIRegisterInfo * getRegisterInfo() const override
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override
Similar to createMachineScheduler but used when postRA machine scheduling is enabled.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Pass to remove unused function declarations.
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Converts loops into loop-closed SSA form.
Performs Loop Invariant Code Motion Pass.
This pass implements the localization mechanism described at the top of this file.
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
A Module instance is used to store all the information related to an LLVM module.
static LLVM_ABI const OptimizationLevel O0
Disable as many optimizations as possible.
static LLVM_ABI const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
@ ExternalSymbolCallEntry
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
FunctionPass *(*)() FunctionPassCtor
Wrapper class representing virtual and physical registers.
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
void reserveWWMRegister(Register Reg)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Represents a location in source code.
Represents a range in source code.
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
const TargetRegisterInfo * TRI
Target processor register info.
Move instructions into successor blocks when possible.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
void append(StringRef RHS)
Append from a StringRef.
unsigned getMainFileID() const
const MemoryBuffer * getMemoryBuffer(unsigned i) const
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
constexpr bool empty() const
empty - Check if the string is empty.
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetPassConfig(TargetMachine &TM, PassManagerBase &PM)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
LLVM Value Representation.
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI llvm::cl::opt< bool > NoKernelInfoEndLTO
This file defines the TargetMachine class.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_READNONE constexpr bool isModuleEntryFunctionCC(CallingConv::ID CC)
LLVM_READNONE constexpr bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
template class LLVM_TEMPLATE_ABI opt< bool >
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
This is an optimization pass for GlobalISel generic memory operations.
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
LLVM_ABI FunctionPass * createFlattenCFGPass()
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
LLVM_ABI char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
LLVM_ABI char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
LLVM_ABI Pass * createLCSSAPass()
void initializeAMDGPUMarkLastScratchLoadLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluLegacyPass(PassRegistry &)
void initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry &)
char & GCNPreRAOptimizationsID
LLVM_ABI char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeSIInsertHardClausesLegacyPass(PassRegistry &)
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
void initializeGCNPreRAOptimizationsLegacyPass(PassRegistry &)
void initializeSILowerWWMCopiesLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeGCNRewritePartialRegUsesLegacyPass(llvm::PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
char & GCNRewritePartialRegUsesID
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
LLVM_ABI std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
void initializeAMDGPULowerVGPREncodingLegacyPass(PassRegistry &)
char & AMDGPUWaitSGPRHazardsLegacyID
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
LLVM_ABI Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
LLVM_ABI FunctionPass * createNaryReassociatePass()
char & AMDGPUReserveWWMRegsLegacyID
void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &)
LLVM_ABI char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & SIOptimizeExecMaskingLegacyID
LLVM_ABI char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUExportKernelRuntimeHandlesLegacyPass()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeAMDGPUAsmPrinterPass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
PassManager< LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &, CGSCCUpdateResult & > CGSCCPassManager
The CGSCC pass manager.
LLVM_ABI std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Target & getTheR600Target()
The target for R600 GPUs.
LLVM_ABI char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
LLVM_ABI Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
LLVM_ABI char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
LLVM_ABI Pass * createLICMPass()
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
LLVM_ABI char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
@ FullLTOPreLink
Full LTO prelink phase.
@ FullLTOPostLink
Full LTO postlink (backend compile) phase.
@ ThinLTOPreLink
ThinLTO prelink (summary) phase.
char & AMDGPUUnifyDivergentExitNodesID
void initializeAMDGPUPrepareAGPRAllocLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
LLVM_ABI char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
void initializeSIModeRegisterLegacyPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
void initializeAMDGPUPreloadKernelArgumentsLegacyPass(PassRegistry &)
char & SILateBranchLoweringPassID
LLVM_ABI char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
LLVM_ABI FunctionPass * createSinkingPass()
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeSIMemoryLegalizerLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerIntrinsicsLegacyPass()
void initializeR600MachineCFGStructurizerPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
char & GCNDPPCombineLegacyID
PassManager< Module > ModulePassManager
Convenience typedef for a pass manager over modules.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUTargetMach...
LLVM_ABI char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
void initializeGCNPreRALongBranchRegLegacyPass(PassRegistry &)
char & SILowerWWMCopiesLegacyID
LLVM_ABI FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
LLVM_ABI FunctionPass * createFixIrreduciblePass()
void initializeR600EmitClauseMarkersPass(PassRegistry &)
LLVM_ABI char & FuncletLayoutID
This pass lays out funclets contiguously.
LLVM_ABI char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
void initializeAMDGPUExportKernelRuntimeHandlesLegacyPass(PassRegistry &)
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
CodeGenOptLevel
Code generation optimization level.
void initializeSIInsertWaitcntsLegacyPass(PassRegistry &)
ModulePass * createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *)
ModulePass * createAMDGPUPrintfRuntimeBinding()
LLVM_ABI char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
LLVM_ABI Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringLegacyPass(PassRegistry &)
void initializeSILowerControlFlowLegacyPass(PassRegistry &)
void initializeSIFormMemoryClausesLegacyPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
Error make_error(ArgTs &&... Args)
Make a Error instance representing failure using the given error info type.
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
void initializeAMDGPUReserveWWMRegsLegacyPass(PassRegistry &)
char & SIPreEmitPeepholeID
char & SIPostRABundlerLegacyID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
LLVM_ABI FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
LLVM_ABI void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
char & SILowerControlFlowLegacyID
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeGCNCreateVOPDLegacyPass(PassRegistry &)
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
void initializeSIFixVGPRCopiesLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
void initializeAMDGPULowerIntrinsicsLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
void initializeAMDGPURewriteAGPRCopyMFMALegacyPass(PassRegistry &)
void initializeSIPostRABundlerLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankLegalizePass()
LLVM_ABI char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
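A sketch of the usual hook point for this mutation: a target's TargetPassConfig subclass (MyPassConfig is hypothetical here) overrides createMachineScheduler and attaches the clustering mutation to a generic live scheduler:
llvm::ScheduleDAGInstrs *
MyPassConfig::createMachineScheduler(llvm::MachineSchedContext *C) const {
  llvm::ScheduleDAGMILive *DAG = llvm::createGenericSchedLive(C);
  // Cluster neighbouring memory loads so they can be scheduled back to back.
  DAG->addMutation(llvm::createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}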
PassManager< Function > FunctionPassManager
Convenience typedef for a pass manager over functions.
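A minimal new-pass-manager sketch, assuming the standard PassBuilder analysis registration and EarlyCSE as a stand-in function pass:
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"

void runOnFunction(llvm::Function &F) {
  llvm::PassBuilder PB;
  llvm::FunctionAnalysisManager FAM;
  PB.registerFunctionAnalyses(FAM); // make the default function analyses available

  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::EarlyCSEPass(/*UseMemorySSA=*/false));
  FPM.run(F, FAM);
}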
LLVM_ABI char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void initializeSIOptimizeExecMaskingLegacyPass(PassRegistry &)
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
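A short sketch of one-time initialization guarded by llvm::call_once (the function and flag names are illustrative):
#include "llvm/Support/Threading.h"

static llvm::once_flag InitFlag;

void ensureInitialized() {
  llvm::call_once(InitFlag, [] {
    // Runs exactly once, even if ensureInitialized() races on several threads.
  });
}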
FunctionPass * createSILowerI1CopiesLegacyPass()
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & SIPeepholeSDWALegacyID
LLVM_ABI char & VirtRegRewriterID
VirtRegRewriter pass.
char & SIFoldOperandsLegacyID
void initializeGCNNSAReassignLegacyPass(PassRegistry &)
char & AMDGPUPrepareAGPRAllocLegacyID
LLVM_ABI FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
FunctionToLoopPassAdaptor createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA=false, bool UseBlockFrequencyInfo=false, bool UseBranchProbabilityInfo=false)
A function to deduce a loop pass type and wrap it in the templated adaptor.
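A short sketch, assuming LoopRotate as the wrapped loop pass, of adapting a loop pass so it can sit in a function pipeline:
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Scalar/LoopRotation.h"

void addRotation(llvm::FunctionPassManager &FPM) {
  // The adaptor deduces the loop pass type and runs it over every loop in the function.
  FPM.addPass(llvm::createFunctionToLoopPassAdaptor(llvm::LoopRotatePass(),
                                                    /*UseMemorySSA=*/false));
}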
void initializeR600VectorRegMergerPass(PassRegistry &)
char & AMDGPURewriteAGPRCopyMFMALegacyID
char & AMDGPULowerVGPREncodingLegacyID
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
LLVM_ABI char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
LLVM_ABI FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
LLVM_ABI FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
void initializeSIWholeQuadModeLegacyPass(PassRegistry &)
LLVM_ABI char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
void initializeAMDGPUResourceUsageAnalysisWrapperPassPass(PassRegistry &)
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
LLVM_ABI char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
LLVM_ABI char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
void initializeSIPreEmitPeepholeLegacyPass(PassRegistry &)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
char & AMDGPUPerfHintAnalysisLegacyID
LLVM_ABI ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
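A hedged sketch of hooking an extra alias-analysis result into AAResults via this callback; MyAAWrapperPass and its getResult() accessor stand for whatever ImmutablePass a target actually provides:
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/LegacyPassManager.h"

void addExternalAA(llvm::legacy::PassManagerBase &PM) {
  PM.add(llvm::createExternalAAWrapperPass(
      [](llvm::Pass &P, llvm::Function &F, llvm::AAResults &AAR) {
        // MyAAWrapperPass is hypothetical: an ImmutablePass owning the extra AA result.
        if (auto *WP = P.getAnalysisIfAvailable<MyAAWrapperPass>())
          AAR.addAAResult(WP->getResult());
      }));
}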
char & GCNPreRALongBranchRegID
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
ArgDescriptor PrivateSegmentBuffer
ArgDescriptor WorkGroupIDY
ArgDescriptor WorkGroupIDZ
ArgDescriptor PrivateSegmentSize
ArgDescriptor ImplicitArgPtr
ArgDescriptor PrivateSegmentWaveByteOffset
ArgDescriptor WorkGroupInfo
ArgDescriptor WorkItemIDZ
ArgDescriptor WorkItemIDY
ArgDescriptor LDSKernelId
ArgDescriptor KernargSegmentPtr
ArgDescriptor WorkItemIDX
ArgDescriptor FlatScratchInit
ArgDescriptor DispatchPtr
ArgDescriptor ImplicitBufferPtr
ArgDescriptor WorkGroupIDX
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
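An illustrative sketch of the three factories above; the register, the 10-bit masks, and the stack offset are assumptions chosen for illustration, not the actual calling-convention code:
#include "AMDGPUArgumentUsageInfo.h"

// Pack three 10-bit work-item IDs into one incoming register, and mark a
// fourth value as passed on the stack (values chosen for illustration only).
void describeArgs(llvm::Register Reg, llvm::AMDGPUFunctionArgInfo &Info) {
  Info.WorkItemIDX = llvm::ArgDescriptor::createRegister(Reg, 0x3FFu);
  Info.WorkItemIDY = llvm::ArgDescriptor::createArg(Info.WorkItemIDX, 0x3FFu << 10);
  Info.WorkItemIDZ = llvm::ArgDescriptor::createArg(Info.WorkItemIDX, 0x3FFu << 20);
  Info.WorkGroupInfo = llvm::ArgDescriptor::createStack(/*Offset=*/0);
}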
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
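A small sketch of the corresponding DenormalMode values, assuming llvm/ADT/FloatingPointMode.h, where these kinds are defined:
#include "llvm/ADT/FloatingPointMode.h"

// Both inputs and outputs follow IEEE-754: denormal values are preserved.
llvm::DenormalMode KeepDenormals = llvm::DenormalMode::getIEEE();

// Flush result denormals to a sign-preserving zero, but leave inputs alone.
llvm::DenormalMode FlushOutputs(llvm::DenormalMode::PreserveSign,  // Output
                                llvm::DenormalMode::IEEE);         // Input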
A simple and fast domtree-based CSE pass.
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
static FuncInfoTy * create(BumpPtrAllocator &Allocator, const Function &F, const SubtargetTy *STI)
Factory function: default behavior is to call new using the supplied allocator.
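A hedged sketch of what the default factory amounts to: placement-new out of the supplied BumpPtrAllocator (the function and type names here are illustrative):
template <typename FuncInfoTy, typename SubtargetTy>
static FuncInfoTy *createFuncInfo(llvm::BumpPtrAllocator &Allocator,
                                  const llvm::Function &F,
                                  const SubtargetTy *STI) {
  // Storage comes from the allocator, so nothing runs the destructor at a
  // specific point; the info object should not rely on one.
  return new (Allocator.Allocate<FuncInfoTy>()) FuncInfoTy(F, STI);
}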
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
StringMap< VRegInfo * > VRegInfosNamed
DenseMap< Register, VRegInfo * > VRegInfos
RegisterTargetMachine - Helper template for registering a target machine implementation,...
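A sketch of the registration idiom, assuming a hypothetical MyGPUTargetMachine and getTheMyGPUTarget(); targets typically do this from their LLVMInitialize<Target>TargetMachine entry point:
#include "llvm/MC/TargetRegistry.h"

extern "C" void LLVMInitializeMyGPUTargetMachine() {
  // Registers the factory so the requested triple can construct this machine.
  llvm::RegisterTargetMachine<MyGPUTargetMachine> X(getTheMyGPUTarget());
}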
A utility pass template to force an analysis result to be available.
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
StringValue SGPRForEXECCopy
SmallVector< StringValue > WWMReservedRegs
StringValue FrameOffsetReg
StringValue LongBranchReservedReg
StringValue VGPRForAGPRCopy
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
StringValue ScratchRSrcReg
StringValue StackPtrOffsetReg
bool FP64FP16OutputDenormals
bool FP64FP16InputDenormals
A wrapper around std::string which contains a source range that's being set during parsing.