AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both the AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for the new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
23#include "AMDGPUIGroupLP.h"
24#include "AMDGPUISelDAGToDAG.h"
26#include "AMDGPUMacroFusion.h"
33#include "AMDGPUSplitModule.h"
38#include "GCNDPPCombine.h"
40#include "GCNNSAReassign.h"
44#include "GCNSchedStrategy.h"
45#include "GCNVOPDUtils.h"
46#include "R600.h"
47#include "R600TargetMachine.h"
48#include "SIFixSGPRCopies.h"
49#include "SIFixVGPRCopies.h"
50#include "SIFoldOperands.h"
51#include "SIFormMemoryClauses.h"
53#include "SILowerControlFlow.h"
54#include "SILowerSGPRSpills.h"
55#include "SILowerWWMCopies.h"
57#include "SIMachineScheduler.h"
61#include "SIPeepholeSDWA.h"
62#include "SIPostRABundler.h"
65#include "SIWholeQuadMode.h"
85#include "llvm/CodeGen/Passes.h"
89#include "llvm/IR/IntrinsicsAMDGPU.h"
90#include "llvm/IR/PassManager.h"
99#include "llvm/Transforms/IPO.h"
124#include <optional>
125
126using namespace llvm;
127using namespace llvm::PatternMatch;
128
129namespace {
130//===----------------------------------------------------------------------===//
131// AMDGPU CodeGen Pass Builder interface.
132//===----------------------------------------------------------------------===//
133
134class AMDGPUCodeGenPassBuilder
135 : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
136 using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;
137
138public:
139 AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
140 const CGPassBuilderOption &Opts,
141 PassInstrumentationCallbacks *PIC);
142
143 void addIRPasses(AddIRPass &) const;
144 void addCodeGenPrepare(AddIRPass &) const;
145 void addPreISel(AddIRPass &addPass) const;
146 void addILPOpts(AddMachinePass &) const;
147 void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
148 Error addInstSelector(AddMachinePass &) const;
149 void addPreRewrite(AddMachinePass &) const;
150 void addMachineSSAOptimization(AddMachinePass &) const;
151 void addPostRegAlloc(AddMachinePass &) const;
152 void addPreEmitPass(AddMachinePass &) const;
153 void addPreEmitRegAlloc(AddMachinePass &) const;
154 Error addRegAssignmentOptimized(AddMachinePass &) const;
155 void addPreRegAlloc(AddMachinePass &) const;
156 void addOptimizedRegAlloc(AddMachinePass &) const;
157 void addPreSched2(AddMachinePass &) const;
158
159 /// Check if a pass is enabled given \p Opt option. The option always
160 /// overrides the default if explicitly used. Otherwise its default value is
161 /// used, provided the pipeline runs at optimization level \p Level or above.
162 bool isPassEnabled(const cl::opt<bool> &Opt,
163 CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
164 void addEarlyCSEOrGVNPass(AddIRPass &) const;
165 void addStraightLineScalarOptimizationPasses(AddIRPass &) const;
166};
167
168class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
169public:
170 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
171 : RegisterRegAllocBase(N, D, C) {}
172};
173
174class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
175public:
176 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
177 : RegisterRegAllocBase(N, D, C) {}
178};
179
180class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
181public:
182 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
183 : RegisterRegAllocBase(N, D, C) {}
184};
185
186static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
187 const MachineRegisterInfo &MRI,
188 const Register Reg) {
189 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
190 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
191}
192
193static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
194 const MachineRegisterInfo &MRI,
195 const Register Reg) {
196 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
197 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
198}
199
200static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
201 const MachineRegisterInfo &MRI,
202 const Register Reg) {
203 const SIMachineFunctionInfo *MFI =
204 MRI.getMF().getInfo<SIMachineFunctionInfo>();
205 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
206 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
207 MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
208}
209
210/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
211static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
212
213/// A dummy default pass factory indicates whether the register allocator is
214/// overridden on the command line.
215static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
216static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
217static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
218
219static SGPRRegisterRegAlloc
220defaultSGPRRegAlloc("default",
221 "pick SGPR register allocator based on -O option",
222 useDefaultRegisterAllocator);
223
224static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
225 RegisterPassParser<SGPRRegisterRegAlloc>>
226SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
227 cl::desc("Register allocator to use for SGPRs"));
228
229static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
230 RegisterPassParser<VGPRRegisterRegAlloc>>
231VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
232 cl::desc("Register allocator to use for VGPRs"));
233
234static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
235 RegisterPassParser<WWMRegisterRegAlloc>>
236 WWMRegAlloc("wwm-regalloc", cl::Hidden,
237 cl::init(&useDefaultRegisterAllocator),
238 cl::desc("Register allocator to use for WWM registers"));
239
240static void initializeDefaultSGPRRegisterAllocatorOnce() {
241 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
242
243 if (!Ctor) {
244 Ctor = SGPRRegAlloc;
245 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
246 }
247}
248
249static void initializeDefaultVGPRRegisterAllocatorOnce() {
250 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
251
252 if (!Ctor) {
253 Ctor = VGPRRegAlloc;
254 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
255 }
256}
257
258static void initializeDefaultWWMRegisterAllocatorOnce() {
259 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
260
261 if (!Ctor) {
262 Ctor = WWMRegAlloc;
263 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
264 }
265}
266
267static FunctionPass *createBasicSGPRRegisterAllocator() {
268 return createBasicRegisterAllocator(onlyAllocateSGPRs);
269}
270
271static FunctionPass *createGreedySGPRRegisterAllocator() {
272 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
273}
274
275static FunctionPass *createFastSGPRRegisterAllocator() {
276 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
277}
278
279static FunctionPass *createBasicVGPRRegisterAllocator() {
280 return createBasicRegisterAllocator(onlyAllocateVGPRs);
281}
282
283static FunctionPass *createGreedyVGPRRegisterAllocator() {
284 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
285}
286
287static FunctionPass *createFastVGPRRegisterAllocator() {
288 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
289}
290
291static FunctionPass *createBasicWWMRegisterAllocator() {
292 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
293}
294
295static FunctionPass *createGreedyWWMRegisterAllocator() {
296 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
297}
298
299static FunctionPass *createFastWWMRegisterAllocator() {
300 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
301}
302
303static SGPRRegisterRegAlloc basicRegAllocSGPR(
304 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
305static SGPRRegisterRegAlloc greedyRegAllocSGPR(
306 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
307
308static SGPRRegisterRegAlloc fastRegAllocSGPR(
309 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
310
311
312static VGPRRegisterRegAlloc basicRegAllocVGPR(
313 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
314static VGPRRegisterRegAlloc greedyRegAllocVGPR(
315 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
316
317static VGPRRegisterRegAlloc fastRegAllocVGPR(
318 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
319static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
320 "basic register allocator",
321 createBasicWWMRegisterAllocator);
322static WWMRegisterRegAlloc
323 greedyRegAllocWWMReg("greedy", "greedy register allocator",
324 createGreedyWWMRegisterAllocator);
325static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
326 createFastWWMRegisterAllocator);
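// --- Editor's illustration (not part of the original file) -----------------
// Minimal sketch of how the dummy useDefaultRegisterAllocator factory is
// consulted (it mirrors createSGPRAllocPass further down in this file): an
// explicit -sgpr-regalloc/-vgpr-regalloc/-wwm-regalloc choice on the command
// line wins, otherwise the -O based default applies.
static FunctionPass *pickSGPRAllocatorSketch(bool Optimized) {
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor(); // the user picked an allocator explicitly
  return Optimized ? createGreedySGPRRegisterAllocator()
                   : createFastSGPRRegisterAllocator();
}
// ---------------------------------------------------------------------------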
327
328static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
329 return Phase == ThinOrFullLTOPhase::ThinLTOPreLink ||
330 Phase == ThinOrFullLTOPhase::FullLTOPreLink;
331}
332} // anonymous namespace
333
334static cl::opt<bool>
335EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
336 cl::desc("Run early if-conversion"),
337 cl::init(false));
338
339static cl::opt<bool>
340OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
341 cl::desc("Run pre-RA exec mask optimizations"),
342 cl::init(true));
343
344static cl::opt<bool>
345 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
346 cl::desc("Lower GPU ctor / dtors to globals on the device."),
347 cl::init(true), cl::Hidden);
348
349// Option to disable vectorizer for tests.
350static cl::opt<bool> EnableLoadStoreVectorizer(
351 "amdgpu-load-store-vectorizer",
352 cl::desc("Enable load store vectorizer"),
353 cl::init(true),
354 cl::Hidden);
355
356// Option to control global loads scalarization
357static cl::opt<bool> ScalarizeGlobal(
358 "amdgpu-scalarize-global-loads",
359 cl::desc("Enable global load scalarization"),
360 cl::init(true),
361 cl::Hidden);
362
363// Option to run internalize pass.
364static cl::opt<bool> InternalizeSymbols(
365 "amdgpu-internalize-symbols",
366 cl::desc("Enable elimination of non-kernel functions and unused globals"),
367 cl::init(false),
368 cl::Hidden);
369
370// Option to inline all early.
371static cl::opt<bool> EarlyInlineAll(
372 "amdgpu-early-inline-all",
373 cl::desc("Inline all functions early"),
374 cl::init(false),
375 cl::Hidden);
376
377static cl::opt<bool> RemoveIncompatibleFunctions(
378 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
379 cl::desc("Enable removal of functions when they "
380 "use features not supported by the target GPU"),
381 cl::init(true));
382
383static cl::opt<bool> EnableSDWAPeephole(
384 "amdgpu-sdwa-peephole",
385 cl::desc("Enable SDWA peepholer"),
386 cl::init(true));
387
388static cl::opt<bool> EnableDPPCombine(
389 "amdgpu-dpp-combine",
390 cl::desc("Enable DPP combiner"),
391 cl::init(true));
392
393// Enable address space based alias analysis
394static cl::opt<bool> EnableAMDGPUAliasAnalysis("amdgpu-aa", cl::Hidden,
395 cl::desc("Enable AMDGPU Alias Analysis"),
396 cl::init(true));
397
398// Enable lib calls simplifications
399static cl::opt<bool> EnableLibCallSimplify(
400 "amdgpu-simplify-libcall",
401 cl::desc("Enable amdgpu library simplifications"),
402 cl::init(true),
403 cl::Hidden);
404
405static cl::opt<bool> EnableLowerKernelArguments(
406 "amdgpu-ir-lower-kernel-arguments",
407 cl::desc("Lower kernel argument loads in IR pass"),
408 cl::init(true),
409 cl::Hidden);
410
411static cl::opt<bool> EnableRegReassign(
412 "amdgpu-reassign-regs",
413 cl::desc("Enable register reassign optimizations on gfx10+"),
414 cl::init(true),
415 cl::Hidden);
416
417static cl::opt<bool> OptVGPRLiveRange(
418 "amdgpu-opt-vgpr-liverange",
419 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
420 cl::init(true), cl::Hidden);
421
422static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
423 "amdgpu-atomic-optimizer-strategy",
424 cl::desc("Select DPP or Iterative strategy for scan"),
425 cl::init(ScanOptions::Iterative),
426 cl::values(
427 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
428 clEnumValN(ScanOptions::Iterative, "Iterative",
429 "Use Iterative approach for scan"),
430 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
431
432// Enable Mode register optimization
433static cl::opt<bool> EnableSIModeRegisterPass(
434 "amdgpu-mode-register",
435 cl::desc("Enable mode register pass"),
436 cl::init(true),
437 cl::Hidden);
438
439// Enable GFX11+ s_delay_alu insertion
440static cl::opt<bool>
441 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
442 cl::desc("Enable s_delay_alu insertion"),
443 cl::init(true), cl::Hidden);
444
445// Enable GFX11+ VOPD
446static cl::opt<bool>
447 EnableVOPD("amdgpu-enable-vopd",
448 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
449 cl::init(true), cl::Hidden);
450
451// Option is used in lit tests to prevent deadcoding of patterns inspected.
452static cl::opt<bool>
453EnableDCEInRA("amdgpu-dce-in-ra",
454 cl::init(true), cl::Hidden,
455 cl::desc("Enable machine DCE inside regalloc"));
456
457static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
458 cl::desc("Adjust wave priority"),
459 cl::init(false), cl::Hidden);
460
462 "amdgpu-scalar-ir-passes",
463 cl::desc("Enable scalar IR passes"),
464 cl::init(true),
465 cl::Hidden);
466
467static cl::opt<bool>
468 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
469 cl::desc("Enable lowering of lds to global memory pass "
470 "and asan instrument resulting IR."),
471 cl::init(true), cl::Hidden);
472
474 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
476 cl::Hidden);
477
479 "amdgpu-enable-pre-ra-optimizations",
480 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
481 cl::Hidden);
482
484 "amdgpu-enable-promote-kernel-arguments",
485 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
486 cl::Hidden, cl::init(true));
487
489 "amdgpu-enable-image-intrinsic-optimizer",
490 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
491 cl::Hidden);
492
493static cl::opt<bool>
494 EnableLoopPrefetch("amdgpu-loop-prefetch",
495 cl::desc("Enable loop data prefetch on AMDGPU"),
496 cl::Hidden, cl::init(false));
497
498static cl::opt<std::string>
499 AMDGPUSchedStrategy("amdgpu-sched-strategy",
500 cl::desc("Select custom AMDGPU scheduling strategy."),
501 cl::Hidden, cl::init(""));
502
503static cl::opt<bool> EnableRewritePartialRegUses(
504 "amdgpu-enable-rewrite-partial-reg-uses",
505 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
506 cl::Hidden);
507
508static cl::opt<bool> EnableHipStdPar(
509 "amdgpu-enable-hipstdpar",
510 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
511 cl::Hidden);
512
513static cl::opt<bool>
514 EnableAMDGPUAttributor("amdgpu-attributor-enable",
515 cl::desc("Enable AMDGPUAttributorPass"),
516 cl::init(true), cl::Hidden);
517
519 "new-reg-bank-select",
520 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
521 "regbankselect"),
522 cl::init(false), cl::Hidden);
523
525 "amdgpu-link-time-closed-world",
526 cl::desc("Whether has closed-world assumption at link time"),
527 cl::init(false), cl::Hidden);
528
529extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
530 // Register the target
531 RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
532 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
533
616}
617
618static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
619 return std::make_unique<AMDGPUTargetObjectFile>();
620}
621
625
626static ScheduleDAGInstrs *
627createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
628 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
629 ScheduleDAGMILive *DAG =
630 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
631 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
632 if (ST.shouldClusterStores())
633 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
635 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
636 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
637 return DAG;
638}
639
640static ScheduleDAGInstrs *
641createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
642 ScheduleDAGMILive *DAG =
643 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
645 return DAG;
646}
647
648static ScheduleDAGInstrs *
649createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
650 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
651 ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
652 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
653 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
654 if (ST.shouldClusterStores())
655 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
656 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
657 return DAG;
658}
659
660static ScheduleDAGInstrs *
661createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
662 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
663 auto *DAG = new GCNIterativeScheduler(
664 C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
665 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
666 if (ST.shouldClusterStores())
667 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
669 return DAG;
670}
671
678
679static ScheduleDAGInstrs *
680createIterativeILPMachineScheduler(MachineSchedContext *C) {
681 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
682 auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
683 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
684 if (ST.shouldClusterStores())
685 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
686 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
688 return DAG;
689}
690
691static MachineSchedRegistry
692SISchedRegistry("si", "Run SI's custom scheduler",
693 createSIMachineScheduler);
694
695static MachineSchedRegistry
696GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
697 "Run GCN scheduler to maximize occupancy",
698 createGCNMaxOccupancyMachineScheduler);
699
700static MachineSchedRegistry
701 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
702 createGCNMaxILPMachineScheduler);
703
704static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry(
705 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
706 createGCNMaxMemoryClauseMachineScheduler);
707
708static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
709 "gcn-iterative-max-occupancy-experimental",
710 "Run GCN scheduler to maximize occupancy (experimental)",
711 createIterativeGCNMaxOccupancyMachineScheduler);
712
713static MachineSchedRegistry GCNMinRegSchedRegistry(
714 "gcn-iterative-minreg",
715 "Run GCN iterative scheduler for minimal register usage (experimental)",
716 createMinRegScheduler);
717
718static MachineSchedRegistry GCNILPSchedRegistry(
719 "gcn-iterative-ilp",
720 "Run GCN iterative scheduler for ILP scheduling (experimental)",
721 createIterativeILPMachineScheduler);
722
723static StringRef computeDataLayout(const Triple &TT) {
724 if (TT.getArch() == Triple::r600) {
725 // 32-bit pointers.
726 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
727 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
728 }
729
730 // 32-bit private, local, and region pointers. 64-bit global, constant and
731 // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
732 // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
733 // (address space 7), and 128-bit non-integral buffer resources (address
734 // space 8) which cannot be non-trivially accessed by LLVM memory operations
735 // like getelementptr.
736 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
737 "-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
738 "v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
739 "v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
740}
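// --- Editor's illustration (not part of the original file) -----------------
// Hedged sketch: the string built above can be handed to llvm::DataLayout to
// query per-address-space pointer properties, e.g. the 160-bit fat buffer
// pointers of address space 7 described in the comment (assumes the usual
// llvm/IR/DataLayout.h include).
static void queryAMDGCNPointerSizesSketch() {
  DataLayout DL(computeDataLayout(Triple("amdgcn-amd-amdhsa")));
  unsigned BufferFatPtrBits = DL.getPointerSizeInBits(/*AS=*/7); // 160 bits
  unsigned PrivatePtrBits = DL.getPointerSizeInBits(/*AS=*/5);   // 32 bits
  (void)BufferFatPtrBits;
  (void)PrivatePtrBits;
}
// ---------------------------------------------------------------------------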
741
742static StringRef getGPUOrDefault(const Triple &TT,
743 StringRef GPU) {
744 if (!GPU.empty())
745 return GPU;
746
747 // Need to default to a target with flat support for HSA.
748 if (TT.isAMDGCN())
749 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
750
751 return "r600";
752}
753
754static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
755 // The AMDGPU toolchain only supports generating shared objects, so we
756 // must always use PIC.
757 return Reloc::PIC_;
758}
759
760AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
761 StringRef CPU, StringRef FS,
762 const TargetOptions &Options,
763 std::optional<Reloc::Model> RM,
764 std::optional<CodeModel::Model> CM,
765 CodeGenOptLevel OptLevel)
767 T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
771 initAsmInfo();
772 if (TT.isAMDGCN()) {
773 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
774 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
775 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
776 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
777 }
778}
779
782
784
785StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
786 Attribute GPUAttr = F.getFnAttribute("target-cpu");
787 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
788}
789
790StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
791 Attribute FSAttr = F.getFnAttribute("target-features");
792
793 return FSAttr.isValid() ? FSAttr.getValueAsString()
794 : getTargetFeatureString();
795}
796
799 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
801 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
802 if (ST.shouldClusterStores())
803 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
804 return DAG;
805}
806
807/// Predicate for Internalize pass.
808static bool mustPreserveGV(const GlobalValue &GV) {
809 if (const Function *F = dyn_cast<Function>(&GV))
810 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
811 F->getName().starts_with("__sanitizer_") ||
812 AMDGPU::isEntryFunctionCC(F->getCallingConv());
813
814 GV.removeDeadConstantUsers();
815 return !GV.use_empty();
816}
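// --- Editor's illustration (not part of the original file) -----------------
// Hedged sketch of how this predicate is typically consumed: it is passed to
// the Internalize pass (see the InternalizeSymbols handling in the pass
// builder callbacks) so kernels, sanitizer hooks and still-used globals keep
// their linkage, after which GlobalDCE can delete what became unreferenced.
// Assumes llvm/Transforms/IPO/Internalize.h and GlobalDCE.h are available.
static void addInternalizationSketch(ModulePassManager &MPM) {
  MPM.addPass(InternalizePass(mustPreserveGV));
  MPM.addPass(GlobalDCEPass());
}
// ---------------------------------------------------------------------------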
817
821
822static Expected<ScanOptions>
823parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
824 if (Params.empty())
825 return AMDGPUAtomicOptimizerStrategy;
826 Params.consume_front("strategy=");
827 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
828 .Case("dpp", ScanOptions::DPP)
829 .Cases("iterative", "", ScanOptions::Iterative)
830 .Case("none", ScanOptions::None)
831 .Default(std::nullopt);
832 if (Result)
833 return *Result;
834 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
835}
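// --- Editor's note (not part of the original file) --------------------------
// This parser backs pass-pipeline parameter syntax of the rough form
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' ...
// where an empty or missing parameter falls back to the Iterative strategy.
// ---------------------------------------------------------------------------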
836
837static Expected<AMDGPUAttributorOptions>
838parseAMDGPUAttributorPassOptions(StringRef Params) {
839 AMDGPUAttributorOptions Result;
840 while (!Params.empty()) {
841 StringRef ParamName;
842 std::tie(ParamName, Params) = Params.split(';');
843 if (ParamName == "closed-world") {
844 Result.IsClosedWorld = true;
845 } else {
846 return make_error<StringError>(
847 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
848 .str(),
849 inconvertibleErrorCode());
850 }
851 }
852 return Result;
853}
854
855void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
856
857#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
858#include "llvm/Passes/TargetPassRegistry.inc"
859
860 PB.registerScalarOptimizerLateEPCallback(
861 [](FunctionPassManager &FPM, OptimizationLevel Level) {
862 if (Level == OptimizationLevel::O0)
863 return;
864
866 });
867
868 PB.registerVectorizerEndEPCallback(
869 [](FunctionPassManager &FPM, OptimizationLevel Level) {
870 if (Level == OptimizationLevel::O0)
871 return;
872
874 });
875
876 PB.registerPipelineEarlySimplificationEPCallback(
879 if (!isLTOPreLink(Phase)) {
880 // When we are not using -fgpu-rdc, we can run accelerator code
881 // selection relatively early, but still after linking to prevent
882 // eager removal of potentially reachable symbols.
883 if (EnableHipStdPar) {
886 }
888 }
889
890 if (Level == OptimizationLevel::O0)
891 return;
892
893 // We don't want to run internalization at per-module stage.
897 }
898
901 });
902
903 PB.registerPeepholeEPCallback(
904 [](FunctionPassManager &FPM, OptimizationLevel Level) {
905 if (Level == OptimizationLevel::O0)
906 return;
907
911 });
912
913 PB.registerCGSCCOptimizerLateEPCallback(
914 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
915 if (Level == OptimizationLevel::O0)
916 return;
917
919
920 // Add promote kernel arguments pass to the opt pipeline right before
921 // infer address spaces which is needed to do actual address space
922 // rewriting.
923 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
926
927 // Add infer address spaces pass to the opt pipeline after inlining
928 // but before SROA to increase SROA opportunities.
930
931 // This should run after inlining to have any chance of doing
932 // anything, and before other cleanup optimizations.
934
935 if (Level != OptimizationLevel::O0) {
936 // Promote alloca to vector before SROA and loop unroll. If we
937 // manage to eliminate allocas before unroll we may choose to unroll
938 // less.
940 }
941
942 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
943 });
944
945 // FIXME: Why is AMDGPUAttributor not in CGSCC?
946 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
947 OptimizationLevel Level,
948 ThinOrFullLTOPhase Phase) {
949 if (Level != OptimizationLevel::O0) {
950 if (!isLTOPreLink(Phase)) {
951 AMDGPUAttributorOptions Opts;
952 MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
953 }
954 }
955 });
956
957 PB.registerFullLinkTimeOptimizationLastEPCallback(
958 [this](ModulePassManager &PM, OptimizationLevel Level) {
959 // When we are using -fgpu-rdc, we can only run accelerator code
960 // selection after linking; otherwise we would end up removing
961 // potentially reachable symbols that were exported as external in other
962 // modules.
963 if (EnableHipStdPar) {
966 }
967 // We want to support the -lto-partitions=N option as "best effort".
968 // For that, we need to lower LDS earlier in the pipeline before the
969 // module is partitioned for codegen.
971 PM.addPass(AMDGPUSwLowerLDSPass(*this));
974 if (Level != OptimizationLevel::O0) {
975 // We only want to run this with O2 or higher since inliner and SROA
976 // don't run in O1.
977 if (Level != OptimizationLevel::O1) {
978 PM.addPass(
980 }
981 // Do we really need internalization in LTO?
982 if (InternalizeSymbols) {
985 }
989 Opt.IsClosedWorld = true;
992 }
993 }
994 if (!NoKernelInfoEndLTO) {
995 FunctionPassManager FPM;
996 FPM.addPass(KernelInfoPrinter(this));
997 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
998 }
999 });
1000
1001 PB.registerRegClassFilterParsingCallback(
1002 [](StringRef FilterName) -> RegAllocFilterFunc {
1003 if (FilterName == "sgpr")
1004 return onlyAllocateSGPRs;
1005 if (FilterName == "vgpr")
1006 return onlyAllocateVGPRs;
1007 if (FilterName == "wwm")
1008 return onlyAllocateWWMRegs;
1009 return nullptr;
1010 });
1011}
1012
1013int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
1014 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1015 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1016 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1017 ? -1
1018 : 0;
1019}
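// --- Editor's illustration (not part of the original file) -----------------
// Small usage sketch: the "null" sentinel differs per address space, so code
// materializing null pointers must ask the target instead of assuming zero
// (getNullPointerValue is a static member declared in AMDGPUTargetMachine.h).
static bool usesAllOnesNullSketch(unsigned AS) {
  // LOCAL, PRIVATE and REGION use -1; every other address space uses 0.
  return AMDGPUTargetMachine::getNullPointerValue(AS) == -1;
}
// ---------------------------------------------------------------------------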
1020
1021bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
1022 unsigned DestAS) const {
1023 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
1024 AMDGPU::isFlatGlobalAddrSpace(DestAS);
1025}
1026
1027unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
1028 if (auto *Arg = dyn_cast<Argument>(V);
1029 Arg &&
1030 AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
1031 !Arg->hasByRefAttr())
1032 return AMDGPUAS::GLOBAL_ADDRESS;
1033
1034 const auto *LD = dyn_cast<LoadInst>(V);
1035 if (!LD) // TODO: Handle invariant load like constant.
1036 return AMDGPUAS::UNDEFINED_ADDRESS_SPACE;
1037
1038 // It must be a generic pointer loaded.
1039 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
1040
1041 const auto *Ptr = LD->getPointerOperand();
1042 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
1043 return AMDGPUAS::UNDEFINED_ADDRESS_SPACE;
1044 // For a generic pointer loaded from the constant memory, it could be assumed
1045 // as a global pointer since the constant memory is only populated on the
1046 // host side. As implied by the offload programming model, only global
1047 // pointers could be referenced on the host side.
1048 return AMDGPUAS::GLOBAL_ADDRESS;
1049}
1050
1051std::pair<const Value *, unsigned>
1053 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1054 switch (II->getIntrinsicID()) {
1055 case Intrinsic::amdgcn_is_shared:
1056 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
1057 case Intrinsic::amdgcn_is_private:
1058 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
1059 default:
1060 break;
1061 }
1062 return std::pair(nullptr, -1);
1063 }
1064 // Check the global pointer predication based on
1065 // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
1066 // the order of 'is_shared' and 'is_private' is not significant.
1067 Value *Ptr;
1068 if (match(
1069 const_cast<Value *>(V),
1070 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
1071 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
1072 m_Deferred(Ptr))))))
1073 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
1074
1075 return std::pair(nullptr, -1);
1076}
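// --- Editor's note (not part of the original file) --------------------------
// The pattern matched above corresponds to IR of roughly this shape:
//   %s  = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %ns = xor i1 %s, true
//   %pv = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %np = xor i1 %pv, true
//   %g  = and i1 %ns, %np        ; operand order may be swapped
// so under a branch guarded by %g the pointer %p can be refined to the global
// address space.
// ---------------------------------------------------------------------------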
1077
1078unsigned
1079AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
1080 switch (Kind) {
1081 case PseudoSourceValue::Stack:
1082 case PseudoSourceValue::FixedStack:
1083 return AMDGPUAS::PRIVATE_ADDRESS;
1084 case PseudoSourceValue::ConstantPool:
1085 case PseudoSourceValue::GOT:
1086 case PseudoSourceValue::JumpTable:
1087 case PseudoSourceValue::GlobalValueCallEntry:
1088 case PseudoSourceValue::ExternalSymbolCallEntry:
1089 return AMDGPUAS::CONSTANT_ADDRESS;
1090 }
1091 return AMDGPUAS::FLAT_ADDRESS;
1092}
1093
1094bool AMDGPUTargetMachine::splitModule(
1095 Module &M, unsigned NumParts,
1096 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1097 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
1098 // but all current users of this API don't have one ready and would need to
1099 // create one anyway. Let's hide the boilerplate for now to keep it simple.
1100
1101 LoopAnalysisManager LAM;
1102 FunctionAnalysisManager FAM;
1103 CGSCCAnalysisManager CGAM;
1104 ModuleAnalysisManager MAM;
1105
1106 PassBuilder PB(this);
1107 PB.registerModuleAnalyses(MAM);
1108 PB.registerFunctionAnalyses(FAM);
1109 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
1110
1111 ModulePassManager MPM;
1112 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
1113 MPM.run(M, MAM);
1114 return true;
1115}
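// --- Editor's illustration (not part of the original file) -----------------
// Hedged usage sketch for the splitting entry point above: a hypothetical
// driver requests N partitions and receives each one through the callback.
static void splitForParallelCodegenSketch(AMDGPUTargetMachine &TM, Module &M) {
  TM.splitModule(M, /*NumParts=*/4, [](std::unique_ptr<Module> Part) {
    // A real driver would codegen or serialize each partition here.
    (void)Part;
  });
}
// ---------------------------------------------------------------------------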
1116
1117//===----------------------------------------------------------------------===//
1118// GCN Target Machine (SI+)
1119//===----------------------------------------------------------------------===//
1120
1121GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
1122 StringRef CPU, StringRef FS,
1123 const TargetOptions &Options,
1124 std::optional<Reloc::Model> RM,
1125 std::optional<CodeModel::Model> CM,
1126 CodeGenOptLevel OL, bool JIT)
1127 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1128
1129const TargetSubtargetInfo *
1130GCNTargetMachine::getSubtargetImpl(const Function &F) const {
1131 StringRef GPU = getGPUName(F);
1132 StringRef FS = getFeatureString(F);
1133
1134 SmallString<128> SubtargetKey(GPU);
1135 SubtargetKey.append(FS);
1136
1137 auto &I = SubtargetMap[SubtargetKey];
1138 if (!I) {
1139 // This needs to be done before we create a new subtarget since any
1140 // creation will depend on the TM and the code generation flags on the
1141 // function that reside in TargetOptions.
1143 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1144 }
1145
1146 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1147
1148 return I.get();
1149}
1150
1153 return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
1154}
1155
1158 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1160 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1161 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1162}
1163
1164ScheduleDAGInstrs *
1165GCNTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
1166 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1167 if (ST.enableSIScheduler())
1168 return createSIMachineScheduler(C);
1169
1170 Attribute SchedStrategyAttr =
1171 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1172 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1173 ? SchedStrategyAttr.getValueAsString()
1174 : AMDGPUSchedStrategy;
1175
1176 if (SchedStrategy == "max-ilp")
1177 return createGCNMaxILPMachineScheduler(C);
1178
1179 if (SchedStrategy == "max-memory-clause")
1180 return createGCNMaxMemoryClauseMachineScheduler(C);
1181
1182 if (SchedStrategy == "iterative-ilp")
1183 return createIterativeILPMachineScheduler(C);
1184
1185 if (SchedStrategy == "iterative-minreg")
1186 return createMinRegScheduler(C);
1187
1188 if (SchedStrategy == "iterative-maxocc")
1189 return createIterativeGCNMaxOccupancyMachineScheduler(C);
1190
1191 return createGCNMaxOccupancyMachineScheduler(C);
1192}
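// --- Editor's note (not part of the original file) --------------------------
// As the lookup above shows, the scheduler can be pinned per function with an
// IR attribute such as "amdgpu-sched-strategy"="max-ilp", which takes
// precedence over the global -amdgpu-sched-strategy command line option.
// ---------------------------------------------------------------------------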
1193
1194ScheduleDAGInstrs *
1195GCNTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
1196 ScheduleDAGMI *DAG =
1197 new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
1198 /*RemoveKillFlags=*/true);
1200 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1201 if (ST.shouldClusterStores())
1202 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1204 if ((EnableVOPD.getNumOccurrences() ||
1205 getOptLevel() >= CodeGenOptLevel::Less) &&
1206 EnableVOPD)
1207 DAG->addMutation(createVOPDPairingMutation());
1208 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
1209 return DAG;
1210}
1211//===----------------------------------------------------------------------===//
1212// AMDGPU Legacy Pass Setup
1213//===----------------------------------------------------------------------===//
1214
1215std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1216 return getStandardCSEConfigForOpt(TM->getOptLevel());
1217}
1218
1219namespace {
1220
1221class GCNPassConfig final : public AMDGPUPassConfig {
1222public:
1223 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1224 : AMDGPUPassConfig(TM, PM) {
1225 // It is necessary to know the register usage of the entire call graph. We
1226 // allow calls without EnableAMDGPUFunctionCalls if they are marked
1227 // noinline, so this is always required.
1228 setRequiresCodeGenSCCOrder(true);
1229 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1230 }
1231
1232 GCNTargetMachine &getGCNTargetMachine() const {
1233 return getTM<GCNTargetMachine>();
1234 }
1235
1236 bool addPreISel() override;
1237 void addMachineSSAOptimization() override;
1238 bool addILPOpts() override;
1239 bool addInstSelector() override;
1240 bool addIRTranslator() override;
1241 void addPreLegalizeMachineIR() override;
1242 bool addLegalizeMachineIR() override;
1243 void addPreRegBankSelect() override;
1244 bool addRegBankSelect() override;
1245 void addPreGlobalInstructionSelect() override;
1246 bool addGlobalInstructionSelect() override;
1247 void addPreRegAlloc() override;
1248 void addFastRegAlloc() override;
1249 void addOptimizedRegAlloc() override;
1250
1251 FunctionPass *createSGPRAllocPass(bool Optimized);
1252 FunctionPass *createVGPRAllocPass(bool Optimized);
1253 FunctionPass *createWWMRegAllocPass(bool Optimized);
1254 FunctionPass *createRegAllocPass(bool Optimized) override;
1255
1256 bool addRegAssignAndRewriteFast() override;
1257 bool addRegAssignAndRewriteOptimized() override;
1258
1259 bool addPreRewrite() override;
1260 void addPostRegAlloc() override;
1261 void addPreSched2() override;
1262 void addPreEmitPass() override;
1263 void addPostBBSections() override;
1264};
1265
1266} // end anonymous namespace
1267
1268AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
1269 : TargetPassConfig(TM, PM) {
1270 // Exceptions and StackMaps are not supported, so these passes will never do
1271 // anything.
1272 disablePass(&StackMapLivenessID);
1273 disablePass(&FuncletLayoutID);
1274 // Garbage collection is not supported.
1275 disablePass(&GCLoweringID);
1276 disablePass(&ShadowStackGCLoweringID);
1277}
1278
1285
1290 // ReassociateGEPs exposes more opportunities for SLSR. See
1291 // the example in reassociate-geps-and-slsr.ll.
1293 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1294 // EarlyCSE can reuse.
1296 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1298 // NaryReassociate on GEPs creates redundant common expressions, so run
1299 // EarlyCSE after it.
1301}
1302
1303void AMDGPUPassConfig::addIRPasses() {
1304 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
1305
1306 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
1308
1309 // There is no reason to run these.
1313
1315 if (LowerCtorDtor)
1317
1320
1321 // This can be disabled by passing ::Disable here or on the command line
1322 // with --expand-variadics-override=disable.
1324
1325 // Function calls are not supported, so make sure we inline everything.
1328
1329 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1330 if (TM.getTargetTriple().getArch() == Triple::r600)
1332
1333 // Make enqueued block runtime handles externally visible.
1335
1336 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1337 if (EnableSwLowerLDS)
1339
1340 // Runs before PromoteAlloca so the latter can account for function uses
1343 }
1344
1345 // Run atomic optimizer before Atomic Expand
1346 if ((TM.getTargetTriple().isAMDGCN()) &&
1347 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1350 }
1351
1353
1354 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1356
1359
1363 AAResults &AAR) {
1364 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1365 AAR.addAAResult(WrapperPass->getResult());
1366 }));
1367 }
1368
1369 if (TM.getTargetTriple().isAMDGCN()) {
1370 // TODO: May want to move later or split into an early and late one.
1372 }
1373
1374 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1375 // have expanded.
1376 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1378 }
1379
1381
1382 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1383 // example, GVN can combine
1384 //
1385 // %0 = add %a, %b
1386 // %1 = add %b, %a
1387 //
1388 // and
1389 //
1390 // %0 = shl nsw %a, 2
1391 // %1 = shl %a, 2
1392 //
1393 // but EarlyCSE can do neither of them.
1396}
1397
1398void AMDGPUPassConfig::addCodeGenPrepare() {
1399 if (TM->getTargetTriple().isAMDGCN() &&
1400 TM->getOptLevel() > CodeGenOptLevel::None)
1402
1403 if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
1405
1406 if (TM->getTargetTriple().isAMDGCN()) {
1407 // This lowering has been placed after codegenprepare to take advantage of
1408 // address mode matching (which is why it isn't put with the LDS lowerings).
1409 // It could be placed anywhere before uniformity annotations (an analysis
1410 // that it changes by splitting up fat pointers into their components)
1411 // but has been put before switch lowering and CFG flattening so that those
1412 // passes can run on the more optimized control flow this pass creates in
1413 // many cases.
1414 //
1415 // FIXME: This should ideally be put after the LoadStoreVectorizer.
1416 // However, due to some annoying facts about ResourceUsageAnalysis,
1417 // (especially as exercised in the resource-usage-dead-function test),
1418 // we need all the function passes codegenprepare all the way through
1419 // said resource usage analysis to run on the call graph produced
1420 // before codegenprepare runs (because codegenprepare will knock some
1421 // nodes out of the graph, which leads to function-level passes not
1422 // being run on them, which causes crashes in the resource usage analysis).
1425 // In accordance with the above FIXME, manually force all the
1426 // function-level passes into a CGSCCPassManager.
1427 addPass(new DummyCGSCCPass());
1428 }
1429
1431
1434
1435 // LowerSwitch pass may introduce unreachable blocks that can
1436 // cause unexpected behavior for subsequent passes. Placing it
1437 // here seems better, as these blocks would get cleaned up by
1438 // UnreachableBlockElim inserted next in the pass flow.
1440}
1441
1442bool AMDGPUPassConfig::addPreISel() {
1443 if (TM->getOptLevel() > CodeGenOptLevel::None)
1444 addPass(createFlattenCFGPass());
1445 return false;
1446}
1447
1452
1453bool AMDGPUPassConfig::addGCPasses() {
1454 // Do nothing. GC is not supported.
1455 return false;
1456}
1457
1458//===----------------------------------------------------------------------===//
1459// GCN Legacy Pass Setup
1460//===----------------------------------------------------------------------===//
1461
1462bool GCNPassConfig::addPreISel() {
1464
1465 if (TM->getOptLevel() > CodeGenOptLevel::None)
1466 addPass(createSinkingPass());
1467
1468 if (TM->getOptLevel() > CodeGenOptLevel::None)
1470
1471 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1472 // regions formed by them.
1474 addPass(createFixIrreduciblePass());
1475 addPass(createUnifyLoopExitsPass());
1476 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1477
1480 // TODO: Move this right after structurizeCFG to avoid extra divergence
1481 // analysis. This depends on stopping SIAnnotateControlFlow from making
1482 // control flow modifications.
1484
1485 // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
1486 // with -new-reg-bank-select and without any of the fallback options.
1488 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
1489 addPass(createLCSSAPass());
1490
1491 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1493
1494 return false;
1495}
1496
1497void GCNPassConfig::addMachineSSAOptimization() {
1499
1500 // We want to fold operands after PeepholeOptimizer has run (or as part of
1501 // it), because it will eliminate extra copies making it easier to fold the
1502 // real source operand. We want to eliminate dead instructions after, so that
1503 // we see fewer uses of the copies. We then need to clean up the dead
1504 // instructions leftover after the operands are folded as well.
1505 //
1506 // XXX - Can we get away without running DeadMachineInstructionElim again?
1507 addPass(&SIFoldOperandsLegacyID);
1508 if (EnableDPPCombine)
1509 addPass(&GCNDPPCombineLegacyID);
1511 if (isPassEnabled(EnableSDWAPeephole)) {
1512 addPass(&SIPeepholeSDWALegacyID);
1513 addPass(&EarlyMachineLICMID);
1514 addPass(&MachineCSELegacyID);
1515 addPass(&SIFoldOperandsLegacyID);
1516 }
1519}
1520
1521bool GCNPassConfig::addILPOpts() {
1523 addPass(&EarlyIfConverterLegacyID);
1524
1526 return false;
1527}
1528
1529bool GCNPassConfig::addInstSelector() {
1531 addPass(&SIFixSGPRCopiesLegacyID);
1533 return false;
1534}
1535
1536bool GCNPassConfig::addIRTranslator() {
1537 addPass(new IRTranslator(getOptLevel()));
1538 return false;
1539}
1540
1541void GCNPassConfig::addPreLegalizeMachineIR() {
1542 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1543 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1544 addPass(new Localizer());
1545}
1546
1547bool GCNPassConfig::addLegalizeMachineIR() {
1548 addPass(new Legalizer());
1549 return false;
1550}
1551
1552void GCNPassConfig::addPreRegBankSelect() {
1553 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1554 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1556}
1557
1558bool GCNPassConfig::addRegBankSelect() {
1559 if (NewRegBankSelect) {
1562 } else {
1563 addPass(new RegBankSelect());
1564 }
1565 return false;
1566}
1567
1568void GCNPassConfig::addPreGlobalInstructionSelect() {
1569 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1570 addPass(createAMDGPURegBankCombiner(IsOptNone));
1571}
1572
1573bool GCNPassConfig::addGlobalInstructionSelect() {
1574 addPass(new InstructionSelect(getOptLevel()));
1575 return false;
1576}
1577
1578void GCNPassConfig::addFastRegAlloc() {
1579 // FIXME: We have to disable the verifier here because of PHIElimination +
1580 // TwoAddressInstructions disabling it.
1581
1582 // This must be run immediately after phi elimination and before
1583 // TwoAddressInstructions, otherwise the processing of the tied operand of
1584 // SI_ELSE will introduce a copy of the tied operand source after the else.
1585 insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);
1586
1587 insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1588
1589 TargetPassConfig::addFastRegAlloc();
1590}
1591
1592void GCNPassConfig::addPreRegAlloc() {
1593 if (getOptLevel() != CodeGenOptLevel::None)
1595}
1596
1597void GCNPassConfig::addOptimizedRegAlloc() {
1598 if (EnableDCEInRA)
1600
1601 // FIXME: when an instruction has a Killed operand, and the instruction is
1602 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1603 // the register in LiveVariables, this would trigger a failure in verifier,
1604 // we should fix it and enable the verifier.
1605 if (OptVGPRLiveRange)
1607
1608 // This must be run immediately after phi elimination and before
1609 // TwoAddressInstructions, otherwise the processing of the tied operand of
1610 // SI_ELSE will introduce a copy of the tied operand source after the else.
1612
1615
1616 if (isPassEnabled(EnablePreRAOptimizations))
1618
1619 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1620 // instructions that cause scheduling barriers.
1622
1623 if (OptExecMaskPreRA)
1625
1626 // This is not an essential optimization and it has a noticeable impact on
1627 // compilation time, so we only enable it from O2.
1628 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1630
1632}
1633
1634bool GCNPassConfig::addPreRewrite() {
1635 if (EnableRegReassign)
1636 addPass(&GCNNSAReassignID);
1637
1639 return true;
1640}
1641
1642FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1643 // Initialize the global default.
1644 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1645 initializeDefaultSGPRRegisterAllocatorOnce);
1646
1647 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1648 if (Ctor != useDefaultRegisterAllocator)
1649 return Ctor();
1650
1651 if (Optimized)
1652 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1653
1654 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1655}
1656
1657FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1658 // Initialize the global default.
1659 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1660 initializeDefaultVGPRRegisterAllocatorOnce);
1661
1662 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1663 if (Ctor != useDefaultRegisterAllocator)
1664 return Ctor();
1665
1666 if (Optimized)
1667 return createGreedyVGPRRegisterAllocator();
1668
1669 return createFastVGPRRegisterAllocator();
1670}
1671
1672FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1673 // Initialize the global default.
1674 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1675 initializeDefaultWWMRegisterAllocatorOnce);
1676
1677 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1678 if (Ctor != useDefaultRegisterAllocator)
1679 return Ctor();
1680
1681 if (Optimized)
1682 return createGreedyWWMRegisterAllocator();
1683
1684 return createFastWWMRegisterAllocator();
1685}
1686
1687FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1688 llvm_unreachable("should not be used");
1689}
1690
1692 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1693 "and -vgpr-regalloc";
1694
1695bool GCNPassConfig::addRegAssignAndRewriteFast() {
1696 if (!usingDefaultRegAlloc())
1697 report_fatal_error(RegAllocOptNotSupportedMessage);
1698
1699 addPass(&GCNPreRALongBranchRegID);
1700
1701 addPass(createSGPRAllocPass(false));
1702
1703 // Equivalent of PEI for SGPRs.
1704 addPass(&SILowerSGPRSpillsLegacyID);
1705
1706 // To allocate WWM registers used in whole quad mode operations (for shaders).
1707 addPass(&SIPreAllocateWWMRegsLegacyID);
1708
1709 // For allocating other wwm register operands.
1710 addPass(createWWMRegAllocPass(false));
1711
1712 addPass(&SILowerWWMCopiesLegacyID);
1714
1715 // For allocating per-thread VGPRs.
1716 addPass(createVGPRAllocPass(false));
1717
1718 return true;
1719}
1720
1721bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1722 if (!usingDefaultRegAlloc())
1723 report_fatal_error(RegAllocOptNotSupportedMessage);
1724
1725 addPass(&GCNPreRALongBranchRegID);
1726
1727 addPass(createSGPRAllocPass(true));
1728
1729 // Commit allocated register changes. This is mostly necessary because too
1730 // many things rely on the use lists of the physical registers, such as the
1731 // verifier. This is only necessary with allocators which use LiveIntervals,
1732 // since FastRegAlloc does the replacements itself.
1733 addPass(createVirtRegRewriter(false));
1734
1735 // At this point, the sgpr-regalloc has been done and it is good to have the
1736 // stack slot coloring to try to optimize the SGPR spill stack indices before
1737 // attempting the custom SGPR spill lowering.
1738 addPass(&StackSlotColoringID);
1739
1740 // Equivalent of PEI for SGPRs.
1741 addPass(&SILowerSGPRSpillsLegacyID);
1742
1743 // To allocate WWM registers used in whole quad mode operations (for shaders).
1744 addPass(&SIPreAllocateWWMRegsLegacyID);
1745
1746 // For allocating other whole wave mode registers.
1747 addPass(createWWMRegAllocPass(true));
1748 addPass(&SILowerWWMCopiesLegacyID);
1749 addPass(createVirtRegRewriter(false));
1751
1752 // For allocating per-thread VGPRs.
1753 addPass(createVGPRAllocPass(true));
1754
1755 addPreRewrite();
1756 addPass(&VirtRegRewriterID);
1757
1759
1760 return true;
1761}
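// --- Editor's note (not part of the original file) --------------------------
// Net effect of the sequence above: SGPRs, then WWM registers, then per-thread
// VGPRs are assigned by three separate allocator runs, with SGPR spill
// lowering and stack-slot coloring in between. This split is why a plain
// -regalloc option is rejected for amdgcn (see RegAllocOptNotSupportedMessage).
// ---------------------------------------------------------------------------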
1762
1763void GCNPassConfig::addPostRegAlloc() {
1764 addPass(&SIFixVGPRCopiesID);
1765 if (getOptLevel() > CodeGenOptLevel::None)
1766 addPass(&SIOptimizeExecMaskingLegacyID);
1767 TargetPassConfig::addPostRegAlloc();
1768}
1769
1770void GCNPassConfig::addPreSched2() {
1771 if (TM->getOptLevel() > CodeGenOptLevel::None)
1772 addPass(&SIShrinkInstructionsLegacyID);
1773 addPass(&SIPostRABundlerLegacyID);
1774}
1775
1776void GCNPassConfig::addPreEmitPass() {
1777 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1778 addPass(&GCNCreateVOPDID);
1779 addPass(createSIMemoryLegalizerPass());
1780 addPass(createSIInsertWaitcntsPass());
1781
1782 addPass(createSIModeRegisterPass());
1783
1784 if (getOptLevel() > CodeGenOptLevel::None)
1785 addPass(&SIInsertHardClausesID);
1786
1787 addPass(&SILateBranchLoweringPassID);
1788 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1789 addPass(createAMDGPUSetWavePriorityPass());
1790 if (getOptLevel() > CodeGenOptLevel::None)
1791 addPass(&SIPreEmitPeepholeID);
1792 // The hazard recognizer that runs as part of the post-ra scheduler does not
1793 // guarantee to be able to handle all hazards correctly. This is because if there
1794 // are multiple scheduling regions in a basic block, the regions are scheduled
1795 // bottom up, so when we begin to schedule a region we don't know what
1796 // instructions were emitted directly before it.
1797 //
1798 // Here we add a stand-alone hazard recognizer pass which can handle all
1799 // cases.
1800 addPass(&PostRAHazardRecognizerID);
1801
1803
1805
1806 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1807 addPass(&AMDGPUInsertDelayAluID);
1808
1809 addPass(&BranchRelaxationPassID);
1810}
1811
1812void GCNPassConfig::addPostBBSections() {
1813 // We run this later to avoid passes like livedebugvalues and BBSections
1814 // having to deal with the apparent multi-entry functions we may generate.
1816}
1817
1819 return new GCNPassConfig(*this, PM);
1820}
1821
1827
1834
1838
1845
1846bool GCNTargetMachine::parseMachineFunctionInfo(
1847 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1848 SMDiagnostic &Error, SMRange &SourceRange) const {
1849 const yaml::SIMachineFunctionInfo &YamlMFI =
1850 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1851 MachineFunction &MF = PFS.MF;
1852 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1853 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1854
1855 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1856 return true;
1857
1858 if (MFI->Occupancy == 0) {
1859 // Fixup the subtarget dependent default value.
1860 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1861 }
1862
1863 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1864 Register TempReg;
1865 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1866 SourceRange = RegName.SourceRange;
1867 return true;
1868 }
1869 RegVal = TempReg;
1870
1871 return false;
1872 };
1873
1874 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1875 Register &RegVal) {
1876 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1877 };
1878
1879 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1880 return true;
1881
1882 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1883 return true;
1884
1885 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1886 MFI->LongBranchReservedReg))
1887 return true;
1888
1889 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1890 // Create a diagnostic for the register string literal.
1891 const MemoryBuffer &Buffer =
1892 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1893 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1894 RegName.Value.size(), SourceMgr::DK_Error,
1895 "incorrect register class for field", RegName.Value,
1896 {}, {});
1897 SourceRange = RegName.SourceRange;
1898 return true;
1899 };
1900
1901 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1902 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1903 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1904 return true;
1905
1906 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1907 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1908 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1909 }
1910
1911 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1912 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1913 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1914 }
1915
1916 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1917 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1918 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1919 }
1920
1921 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1922 Register ParsedReg;
1923 if (parseRegister(YamlReg, ParsedReg))
1924 return true;
1925
1926 MFI->reserveWWMRegister(ParsedReg);
1927 }
1928
1929 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1930 MFI->setFlag(Info->VReg, Info->Flags);
1931 }
1932 for (const auto &[_, Info] : PFS.VRegInfos) {
1933 MFI->setFlag(Info->VReg, Info->Flags);
1934 }
1935
1936 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1937 Register ParsedReg;
1938 if (parseRegister(YamlRegStr, ParsedReg))
1939 return true;
1940 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1941 }
1942
1943 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1944 const TargetRegisterClass &RC,
1945 ArgDescriptor &Arg, unsigned UserSGPRs,
1946 unsigned SystemSGPRs) {
1947 // Skip parsing if it's not present.
1948 if (!A)
1949 return false;
1950
1951 if (A->IsRegister) {
1952 Register Reg;
1953 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1954 SourceRange = A->RegisterName.SourceRange;
1955 return true;
1956 }
1957 if (!RC.contains(Reg))
1958 return diagnoseRegisterClass(A->RegisterName);
1959 Arg = ArgDescriptor::createRegister(Reg);
1960 } else
1961 Arg = ArgDescriptor::createStack(A->StackOffset);
1962 // Check and apply the optional mask.
1963 if (A->Mask)
1964 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1965
1966 MFI->NumUserSGPRs += UserSGPRs;
1967 MFI->NumSystemSGPRs += SystemSGPRs;
1968 return false;
1969 };
1970
1971 if (YamlMFI.ArgInfo &&
1972 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1973 AMDGPU::SGPR_128RegClass,
1974 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1975 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1976 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1977 2, 0) ||
1978 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1979 MFI->ArgInfo.QueuePtr, 2, 0) ||
1980 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1981 AMDGPU::SReg_64RegClass,
1982 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1983 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1984 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1985 2, 0) ||
1986 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1987 AMDGPU::SReg_64RegClass,
1988 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1989 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1990 AMDGPU::SGPR_32RegClass,
1991 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1992 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1993 AMDGPU::SGPR_32RegClass,
1994 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1995 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1996 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1997 0, 1) ||
1998 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1999 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
2000 0, 1) ||
2001 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
2002 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
2003 0, 1) ||
2004 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
2005 AMDGPU::SGPR_32RegClass,
2006 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
2007 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
2008 AMDGPU::SGPR_32RegClass,
2009 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
2010 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
2011 AMDGPU::SReg_64RegClass,
2012 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
2013 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
2014 AMDGPU::SReg_64RegClass,
2015 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
2016 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
2017 AMDGPU::VGPR_32RegClass,
2018 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
2019 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
2020 AMDGPU::VGPR_32RegClass,
2021 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
2022 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
2023 AMDGPU::VGPR_32RegClass,
2024 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
2025 return true;
2026
2027 if (ST.hasIEEEMode())
2028 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
2029 if (ST.hasDX10ClampMode())
2030 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
2031
2032 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
2033 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
2034 ? DenormalMode::IEEE
2035 : DenormalMode::PreserveSign;
2036 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
2037 ? DenormalMode::IEEE
2038 : DenormalMode::PreserveSign;
2039
2040 MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
2041 ? DenormalMode::IEEE
2042 : DenormalMode::PreserveSign;
2043 MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
2044 ? DenormalMode::IEEE
2045 : DenormalMode::PreserveSign;
2046
2047 if (YamlMFI.HasInitWholeWave)
2048 MFI->setInitWholeWave();
2049
2050 return false;
2051}
2052
2053//===----------------------------------------------------------------------===//
2054// AMDGPU CodeGen Pass Builder interface.
2055//===----------------------------------------------------------------------===//
2056
2057AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
2058 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
2059 PassInstrumentationCallbacks *PIC)
2060 : CodeGenPassBuilder(TM, Opts, PIC) {
2061 Opt.MISchedPostRA = true;
2062 Opt.RequiresCodeGenSCCOrder = true;
2063 // Exceptions and StackMaps are not supported, so these passes will never do
2064 // anything.
2065 // Garbage collection is not supported.
2066 disablePass<StackMapLivenessPass, FuncletLayoutPass,
2067 ShadowStackGCLoweringPass>();
2068}
2069
2070void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
2071 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
2073
2075 if (LowerCtorDtor)
2076 addPass(AMDGPUCtorDtorLoweringPass());
2077
2078 if (isPassEnabled(EnableImageIntrinsicOptimizer))
2080
2081 // This can be disabled by passing ::Disable here or on the command line
2082 // with --expand-variadics-override=disable.
2084
2085 addPass(AMDGPUAlwaysInlinePass());
2086 addPass(AlwaysInlinerPass());
2087
2089
2090 if (EnableSwLowerLDS)
2091 addPass(AMDGPUSwLowerLDSPass(TM));
2092
2093 // Runs before PromoteAlloca so the latter can account for function uses
2095 addPass(AMDGPULowerModuleLDSPass(TM));
2096
2097 // Run atomic optimizer before Atomic Expand
2098 if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
2099 AMDGPUAtomicOptimizerStrategy != ScanOptions::None)
2100 addPass(AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy));
2101
2102 addPass(AtomicExpandPass(&TM));
2103
2104 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2105 addPass(AMDGPUPromoteAllocaPass(TM));
2106 if (isPassEnabled(EnableScalarIRPasses))
2107 addStraightLineScalarOptimizationPasses(addPass);
2108
2109 // TODO: Handle EnableAMDGPUAliasAnalysis
2110
2111 // TODO: May want to move later or split into an early and late one.
2112 addPass(AMDGPUCodeGenPreparePass(TM));
2113
2114 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
2115 // have expanded.
2116 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2118 /*UseMemorySSA=*/true));
2119 }
2120 }
2121
2122 Base::addIRPasses(addPass);
2123
2124 // EarlyCSE is not always strong enough to clean up what LSR produces. For
2125 // example, GVN can combine
2126 //
2127 // %0 = add %a, %b
2128 // %1 = add %b, %a
2129 //
2130 // and
2131 //
2132 // %0 = shl nsw %a, 2
2133 // %1 = shl %a, 2
2134 //
2135 // but EarlyCSE can do neither of them.
2136 if (isPassEnabled(EnableScalarIRPasses))
2137 addEarlyCSEOrGVNPass(addPass);
2138}
2139
2140void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
2141 if (TM.getOptLevel() > CodeGenOptLevel::None)
2143
2144 if (EnableLowerKernelArguments)
2145 addPass(AMDGPULowerKernelArgumentsPass(TM));
2146
2147 // This lowering has been placed after codegenprepare to take advantage of
2148 // address mode matching (which is why it isn't put with the LDS lowerings).
2149 // It could be placed anywhere before uniformity annotations (an analysis
2150 // that it changes by splitting up fat pointers into their components)
2151 // but has been put before switch lowering and CFG flattening so that those
2152 // passes can run on the more optimized control flow this pass creates in
2153 // many cases.
2154 //
2155 // FIXME: This should ideally be put after the LoadStoreVectorizer.
2156 // However, due to some annoying facts about ResourceUsageAnalysis,
2157 // (especially as exercised in the resource-usage-dead-function test),
2158 // we need all the function passes from codegenprepare all the way through
2159 // said resource usage analysis to run on the call graph produced
2160 // before codegenprepare runs (because codegenprepare will knock some
2161 // nodes out of the graph, which leads to function-level passes not
2162 // being run on them, which causes crashes in the resource usage analysis).
2163 addPass(AMDGPULowerBufferFatPointersPass(TM));
2164 addPass.requireCGSCCOrder();
2165
2166 addPass(AMDGPULowerIntrinsicsPass(TM));
2167
2168 Base::addCodeGenPrepare(addPass);
2169
2170 if (isPassEnabled(EnableLoadStoreVectorizer))
2171 addPass(LoadStoreVectorizerPass());
2172
2173 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
2174 // behavior for subsequent passes. Placing it here seems better, as these
2175 // blocks would get cleaned up by UnreachableBlockElim inserted next in the
2176 // pass flow.
2177 addPass(LowerSwitchPass());
2178}
2179
2180void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2181
2182 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2183 addPass(FlattenCFGPass());
2184 addPass(SinkingPass());
2185 addPass(AMDGPULateCodeGenPreparePass(TM));
2186 }
2187
2188 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2189 // regions formed by them.
2190
2191 addPass(AMDGPUUnifyDivergentExitNodesPass());
2192 addPass(FixIrreduciblePass());
2193 addPass(UnifyLoopExitsPass());
2194 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2195
2196 addPass(AMDGPUAnnotateUniformValuesPass());
2197
2198 addPass(SIAnnotateControlFlowPass(TM));
2199
2200 // TODO: Move this right after structurizeCFG to avoid extra divergence
2201 // analysis. This depends on stopping SIAnnotateControlFlow from making
2202 // control flow modifications.
2203 addPass(AMDGPURewriteUndefForPHIPass());
2204
2206 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2207 addPass(LCSSAPass());
2208
2209 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2210 addPass(AMDGPUPerfHintAnalysisPass(TM));
2211
2212 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2213 // isn't this in addInstSelector?
2214 addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(),
2215 /*Force=*/true);
2216}
2217
2218void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2219 if (EnableEarlyIfConversion)
2220 addPass(EarlyIfConverterPass());
2221
2222 Base::addILPOpts(addPass);
2223}
2224
2225void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2226 CreateMCStreamer) const {
2227 // TODO: Add AsmPrinter.
2228}
2229
2230Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2231 addPass(AMDGPUISelDAGToDAGPass(TM));
2232 addPass(SIFixSGPRCopiesPass());
2233 addPass(SILowerI1CopiesPass());
2234 return Error::success();
2235}
2236
2237void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass) const {
2238 if (EnableRegReassign) {
2239 addPass(GCNNSAReassignPass());
2240 }
2241}
2242
2243void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2244 AddMachinePass &addPass) const {
2245 Base::addMachineSSAOptimization(addPass);
2246
2247 addPass(SIFoldOperandsPass());
2248 if (EnableDPPCombine) {
2249 addPass(GCNDPPCombinePass());
2250 }
2251 addPass(SILoadStoreOptimizerPass());
2252 if (isPassEnabled(EnableSDWAPeephole)) {
2253 addPass(SIPeepholeSDWAPass());
2254 addPass(EarlyMachineLICMPass());
2255 addPass(MachineCSEPass());
2256 addPass(SIFoldOperandsPass());
2257 }
2258 addPass(DeadMachineInstructionElimPass());
2259 addPass(SIShrinkInstructionsPass());
2260}
2261
2262void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2263 AddMachinePass &addPass) const {
2264 if (EnableDCEInRA)
2265 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2266
2267 // FIXME: when an instruction has a Killed operand, and the instruction is
2268 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
2269 // the register in LiveVariables, this would trigger a failure in verifier,
2270 // we should fix it and enable the verifier.
2271 if (OptVGPRLiveRange)
2272 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2273 SIOptimizeVGPRLiveRangePass());
2274
2275 // This must be run immediately after phi elimination and before
2276 // TwoAddressInstructions, otherwise the processing of the tied operand of
2277 // SI_ELSE will introduce a copy of the tied operand source after the else.
2278 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2279
2280 if (EnableRewritePartialRegUses)
2281 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2282
2283 if (isPassEnabled(EnablePreRAOptimizations))
2284 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2285
2286 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2287 // instructions that cause scheduling barriers.
2288 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2289
2290 if (OptExecMaskPreRA)
2291 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2292
2293 // This is not an essential optimization and it has a noticeable impact on
2294 // compilation time, so we only enable it from O2.
2295 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2296 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2297
2298 Base::addOptimizedRegAlloc(addPass);
2299}
2300
2301void AMDGPUCodeGenPassBuilder::addPreRegAlloc(AddMachinePass &addPass) const {
2302 if (getOptLevel() != CodeGenOptLevel::None)
2303 addPass(AMDGPUPrepareAGPRAllocPass());
2304}
2305
2306Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2307 AddMachinePass &addPass) const {
2308 // TODO: Check --regalloc-npm option
2309
2310 addPass(GCNPreRALongBranchRegPass());
2311
2312 addPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}));
2313
2314 // Commit allocated register changes. This is mostly necessary because too
2315 // many things rely on the use lists of the physical registers, such as the
2316 // verifier. This is only necessary with allocators which use LiveIntervals,
2317 // since FastRegAlloc does the replacements itself.
2318 addPass(VirtRegRewriterPass(false));
2319
2320 // At this point, the sgpr-regalloc has been done and it is good to have the
2321 // stack slot coloring to try to optimize the SGPR spill stack indices before
2322 // attempting the custom SGPR spill lowering.
2323 addPass(StackSlotColoringPass());
2324
2325 // Equivalent of PEI for SGPRs.
2326 addPass(SILowerSGPRSpillsPass());
2327
2328 // To allocate wwm registers used in whole quad mode operations (for shaders).
2329 addPass(SIPreAllocateWWMRegsPass());
2330
2331 // For allocating other wwm register operands.
2332 addPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}));
2333 addPass(SILowerWWMCopiesPass());
2334 addPass(VirtRegRewriterPass(false));
2335 addPass(AMDGPUReserveWWMRegsPass());
2336
2337 // For allocating per-thread VGPRs.
2338 addPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}));
2339
2340
2341 addPreRewrite(addPass);
2342 addPass(VirtRegRewriterPass(true));
2343
2344 addPass(AMDGPUMarkLastScratchLoadPass());
2345 return Error::success();
2346}
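The three RAGreedyPass runs above differ only in the filter callback and short name they are constructed with ("sgpr", "wwm", "vgpr"). The following is a minimal sketch of such a filter, written in the spirit of the onlyAllocateSGPRs helper defined earlier in this file rather than copied from it: the callback decides, per virtual register, whether the current allocation run should claim it.

#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
using namespace llvm;

// Sketch only: claim a virtual register for the "sgpr" run when it belongs to
// an SGPR register class; VGPR/AGPR registers are left to the later
// "wwm" and "vgpr" runs.
static bool onlyAllocateSGPRsSketch(const TargetRegisterInfo &TRI,
                                    const MachineRegisterInfo &MRI,
                                    Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

Running the allocator several times with different filters is what allows SILowerSGPRSpills and the WWM register handling above to be interleaved between the allocation stages.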
2347
2348void AMDGPUCodeGenPassBuilder::addPostRegAlloc(AddMachinePass &addPass) const {
2349 addPass(SIFixVGPRCopiesPass());
2350 if (TM.getOptLevel() > CodeGenOptLevel::None)
2351 addPass(SIOptimizeExecMaskingPass());
2352 Base::addPostRegAlloc(addPass);
2353}
2354
2355void AMDGPUCodeGenPassBuilder::addPreSched2(AddMachinePass &addPass) const {
2356 if (TM.getOptLevel() > CodeGenOptLevel::None)
2357 addPass(SIShrinkInstructionsPass());
2358 addPass(SIPostRABundlerPass());
2359}
2360
2361void AMDGPUCodeGenPassBuilder::addPreEmitPass(AddMachinePass &addPass) const {
2362 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2363 addPass(GCNCreateVOPDPass());
2364 }
2365
2366 addPass(SIMemoryLegalizerPass());
2367 addPass(SIInsertWaitcntsPass());
2368
2369 // TODO: addPass(SIModeRegisterPass());
2370
2371 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2372 // TODO: addPass(SIInsertHardClausesPass());
2373 }
2374
2375 addPass(SILateBranchLoweringPass());
2376
2377 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2378 addPass(AMDGPUSetWavePriorityPass());
2379
2380 if (TM.getOptLevel() > CodeGenOptLevel::None)
2381 addPass(SIPreEmitPeepholePass());
2382
2383 // The hazard recognizer that runs as part of the post-ra scheduler does not
2384 // guarantee to be able to handle all hazards correctly. This is because if there
2385 // are multiple scheduling regions in a basic block, the regions are scheduled
2386 // bottom up, so when we begin to schedule a region we don't know what
2387 // instructions were emitted directly before it.
2388 //
2389 // Here we add a stand-alone hazard recognizer pass which can handle all
2390 // cases.
2391 addPass(PostRAHazardRecognizerPass());
2392 addPass(AMDGPUWaitSGPRHazardsPass());
2393 addPass(AMDGPULowerVGPREncodingPass());
2394
2395 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2396 addPass(AMDGPUInsertDelayAluPass());
2397 }
2398
2399 addPass(BranchRelaxationPass());
2400}
2401
2402bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2403 CodeGenOptLevel Level) const {
2404 if (Opt.getNumOccurrences())
2405 return Opt;
2406 if (TM.getOptLevel() < Level)
2407 return false;
2408 return Opt;
2409}
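A usage sketch for the rule above, using a hypothetical option name that is not defined in this file: an explicit command-line setting always wins, while the built-in default only takes effect once the optimization level reaches the requested minimum.

#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Hypothetical toggle, for illustration only; not an option from this file.
static cl::opt<bool> EnableFooOpt("amdgpu-enable-foo",
                                  cl::desc("Enable the hypothetical Foo step"),
                                  cl::init(true), cl::Hidden);

// The same decision isPassEnabled() would make for a step gated at -O2: an
// explicit command-line occurrence is final; otherwise the cl::init(true)
// default applies only from CodeGenOptLevel::Default upward.
static bool shouldRunFoo(CodeGenOptLevel OptLevel) {
  if (EnableFooOpt.getNumOccurrences())
    return EnableFooOpt;
  if (OptLevel < CodeGenOptLevel::Default)
    return false;
  return EnableFooOpt;
}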
2410
2411void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
2412 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2413 addPass(GVNPass());
2414 else
2415 addPass(EarlyCSEPass());
2416}
2417
2418void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2419 AddIRPass &addPass) const {
2420 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2421 addPass(LoopDataPrefetchPass());
2422
2423 addPass(SeparateConstOffsetFromGEPPass());
2424
2425 // ReassociateGEPs exposes more opportunities for SLSR. See
2426 // the example in reassociate-geps-and-slsr.ll.
2427 addPass(StraightLineStrengthReducePass());
2428
2429 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2430 // EarlyCSE can reuse.
2431 addEarlyCSEOrGVNPass(addPass);
2432
2433 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2434 addPass(NaryReassociatePass());
2435
2436 // NaryReassociate on GEPs creates redundant common expressions, so run
2437 // EarlyCSE after it.
2438 addPass(EarlyCSEPass());
2439}
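For context on how the hooks in this builder are consumed: the new-pass-manager pipeline is materialized through GCNTargetMachine::buildCodeGenPipeline, which instantiates AMDGPUCodeGenPassBuilder and lets it populate a ModulePassManager. Below is a minimal driver-side sketch, assuming an already-created GCNTargetMachine, output stream, and pass-instrumentation callbacks, and omitting the analysis-manager registration a real driver performs through PassBuilder.

#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/CGPassBuilderOption.h"
#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Populates MPM with the AMDGPU codegen pipeline assembled by the add* hooks
// in this file. Analysis-manager setup and the surrounding driver logic are
// intentionally left out of this sketch.
static Error buildAMDGPUCodeGenPipelineSketch(GCNTargetMachine &TM,
                                              raw_pwrite_stream &OS,
                                              ModulePassManager &MPM,
                                              PassInstrumentationCallbacks &PIC) {
  CGPassBuilderOption Opts = getCGPassBuilderOption();
  return TM.buildCodeGenPipeline(MPM, OS, /*DwoOut=*/nullptr,
                                 CodeGenFileType::AssemblyFile, Opts, &PIC);
}

A real embedder would register module, CGSCC, function, and loop analysis managers and cross-register their proxies before running MPM over the module.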