//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains both the AMDGPU target machine and the CodeGen pass
/// builder. The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs in the legacy pass manager
/// pipeline. The CodeGen pass builder handles the pass pipeline for the new
/// pass manager.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUSplitModule.h"
#include "GCNDPPCombine.h"
#include "GCNNSAReassign.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIFixSGPRCopies.h"
#include "SIFixVGPRCopies.h"
#include "SIFoldOperands.h"
#include "SIFormMemoryClauses.h"
#include "SILowerControlFlow.h"
#include "SILowerSGPRSpills.h"
#include "SILowerWWMCopies.h"
#include "SIMachineScheduler.h"
#include "SIPeepholeSDWA.h"
#include "SIPostRABundler.h"
#include "SIWholeQuadMode.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

class AMDGPUCodeGenPassBuilder
    : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
  using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;

public:
  AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
                           const CGPassBuilderOption &Opts,
                           PassInstrumentationCallbacks *PIC);

  void addIRPasses(AddIRPass &) const;
  void addCodeGenPrepare(AddIRPass &) const;
  void addPreISel(AddIRPass &addPass) const;
  void addILPOpts(AddMachinePass &) const;
  void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
  Error addInstSelector(AddMachinePass &) const;
  void addPreRewrite(AddMachinePass &) const;
  void addMachineSSAOptimization(AddMachinePass &) const;
  void addPostRegAlloc(AddMachinePass &) const;
  void addPreEmitPass(AddMachinePass &) const;
  void addPreEmitRegAlloc(AddMachinePass &) const;
  Error addRegAssignmentOptimized(AddMachinePass &) const;
  void addPreRegAlloc(AddMachinePass &) const;
  void addOptimizedRegAlloc(AddMachinePass &) const;
  void addPreSched2(AddMachinePass &) const;

  /// Check if a pass is enabled given \p Opt option. The option always
  /// overrides the default if used explicitly. Otherwise the default is used,
  /// provided the pass is meant to run at an optimization level of at least
  /// \p Level.
  bool isPassEnabled(const cl::opt<bool> &Opt,
                     CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
  void addEarlyCSEOrGVNPass(AddIRPass &) const;
  void addStraightLineScalarOptimizationPasses(AddIRPass &) const;
};

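// Illustrative example (not from this file): a pass gated on a cl::opt that
// should default to "on from -O1" is queried as
//
//   if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
//     addPass(...);
//
// i.e. an explicit -amdgpu-enable-vopd[=false] always wins, and otherwise the
// pass runs only when the codegen optimization level is at least ::Less.
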
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
public:
  WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                const Register Reg) {
  const SIMachineFunctionInfo *MFI =
      MRI.getMF().getInfo<SIMachineFunctionInfo>();
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
         MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
}
208
/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<WWMRegisterRegAlloc>>
    WWMRegAlloc("wwm-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for WWM registers"));

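// Illustrative usage (hypothetical command line, not part of this file): the
// three allocators can be selected independently, e.g.
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -wwm-regalloc=basic \
//       -vgpr-regalloc=fast foo.ll
// Leaving an option at "default" picks the allocator based on -O, via the
// initializeDefault*RegisterAllocatorOnce functions below.
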
static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static void initializeDefaultWWMRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = WWMRegAlloc;
    WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static FunctionPass *createBasicWWMRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createGreedyWWMRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
}

static FunctionPass *createFastWWMRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);

static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
                                               "basic register allocator",
                                               createBasicWWMRegisterAllocator);
static WWMRegisterRegAlloc
    greedyRegAllocWWMReg("greedy", "greedy register allocator",
                         createGreedyWWMRegisterAllocator);
static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
                                              createFastWWMRegisterAllocator);

static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
  return Phase == ThinOrFullLTOPhase::FullLTOPreLink ||
         Phase == ThinOrFullLTOPhase::ThinLTOPreLink;
}
} // anonymous namespace

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
    "amdgpu-load-store-vectorizer",
    cl::desc("Enable load store vectorizer"),
    cl::init(true),
    cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
    "amdgpu-scalarize-global-loads",
    cl::desc("Enable global load scalarization"),
    cl::init(true),
    cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
    "amdgpu-internalize-symbols",
    cl::desc("Enable elimination of non-kernel functions and unused globals"),
    cl::init(false),
    cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
    "amdgpu-early-inline-all",
    cl::desc("Inline all functions early"),
    cl::init(false),
    cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
    "amdgpu-sdwa-peephole",
    cl::desc("Enable SDWA peepholer"),
    cl::init(true));

static cl::opt<bool> EnableDPPCombine(
    "amdgpu-dpp-combine",
    cl::desc("Enable DPP combiner"),
    cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
    cl::desc("Enable AMDGPU Alias Analysis"),
    cl::init(true));

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
    "amdgpu-simplify-libcall",
    cl::desc("Enable amdgpu library simplifications"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
    "amdgpu-ir-lower-kernel-arguments",
    cl::desc("Lower kernel argument loads in IR pass"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableRegReassign(
    "amdgpu-reassign-regs",
    cl::desc("Enable register reassign optimizations on gfx10+"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

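// Illustrative usage (hypothetical command line): the scan strategy can be
// forced from the command line, e.g.
//   llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=DPP foo.ll
// with "None" disabling the atomic optimizer entirely.
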
// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
    "amdgpu-mode-register",
    cl::desc("Enable mode register pass"),
    cl::init(true),
    cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent dead-coding of inspected patterns.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
    "amdgpu-scalar-ir-passes",
    cl::desc("Enable scalar IR passes"),
    cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
                     cl::desc("Enable lowering of lds to global memory pass "
                              "and asan instrument resulting IR."),
                     cl::init(true), cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<std::string>
    AMDGPUSchedStrategy("amdgpu-sched-strategy",
                        cl::desc("Select custom AMDGPU scheduling strategy."),
                        cl::Hidden, cl::init(""));

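// Illustrative usage (hypothetical): the strategy can be set globally with
//   llc -mtriple=amdgcn -amdgpu-sched-strategy=max-ilp foo.ll
// or per function via the "amdgpu-sched-strategy" function attribute, which
// GCNTargetMachine::createMachineScheduler below checks first.
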
503 "amdgpu-enable-rewrite-partial-reg-uses",
504 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
505 cl::Hidden);
506
508 "amdgpu-enable-hipstdpar",
509 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
510 cl::Hidden);
511
512static cl::opt<bool>
513 EnableAMDGPUAttributor("amdgpu-attributor-enable",
514 cl::desc("Enable AMDGPUAttributorPass"),
515 cl::init(true), cl::Hidden);
516
518 "new-reg-bank-select",
519 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
520 "regbankselect"),
521 cl::init(false), cl::Hidden);
522
524 "amdgpu-link-time-closed-world",
525 cl::desc("Whether has closed-world assumption at link time"),
526 cl::init(false), cl::Hidden);
527
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACY_MAX_OCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  auto *DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto *DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

690SISchedRegistry("si", "Run SI's custom scheduler",
692
695 "Run GCN scheduler to maximize occupancy",
697
699 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
701
703 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
705
707 "gcn-iterative-max-occupancy-experimental",
708 "Run GCN scheduler to maximize occupancy (experimental)",
710
712 "gcn-iterative-minreg",
713 "Run GCN iterative scheduler for minimal register usage (experimental)",
715
717 "gcn-iterative-ilp",
718 "Run GCN iterative scheduler for ILP scheduling (experimental)",
720
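// Illustrative usage (hypothetical command line): any of the registered
// schedulers can be selected explicitly via the generic -misched=<name>
// option, e.g.
//   llc -mtriple=amdgcn -misched=gcn-max-ilp foo.ll
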
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
         "v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
         "v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
}

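// A minimal sketch (illustrative, not part of this file) of how the string
// above surfaces through the DataLayout API; "p5:32:32" makes private
// (scratch) pointers 4 bytes wide while flat pointers (addrspace 0) stay
// 8 bytes:
//
//   const DataLayout &DL = M.getDataLayout();
//   unsigned FlatBytes    = DL.getPointerSize(/*AS=*/0); // 8
//   unsigned ScratchBytes = DL.getPointerSize(/*AS=*/5); // 4
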
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.isAMDGCN())
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : CodeGenTargetMachineImpl(
          T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
          getEffectiveRelocModel(RM),
          getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.isAMDGCN()) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;

  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}

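// Illustrative new-PM usage (hypothetical): the parser above accepts an
// optional "strategy=" prefix, so both spellings work:
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' foo.ll
//   opt -passes='amdgpu-atomic-optimizer<iterative>' foo.ll
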
static Expected<AMDGPUAttributorOptions>
parseAMDGPUAttributorPassOptions(StringRef Params) {
  AMDGPUAttributorOptions Result;
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
              .str(),
          inconvertibleErrorCode());
    }
  }
  return Result;
}

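// Illustrative new-PM usage (hypothetical): the only recognized parameter is
// "closed-world", e.g.
//   opt -passes='amdgpu-attributor<closed-world>' foo.ll
// Anything else produces the "invalid AMDGPUAttributor pass parameter" error
// above.
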
void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerScalarOptimizerLateEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(InferAddressSpacesPass());
      });

  PB.registerVectorizerEndEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(InferAddressSpacesPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level,
         ThinOrFullLTOPhase Phase) {
        if (!isLTOPreLink(Phase)) {
          // When we are not using -fgpu-rdc, we can run accelerator code
          // selection relatively early, but still after linking to prevent
          // eager removal of potentially reachable symbols.
          if (EnableHipStdPar) {
            PM.addPass(HipStdParMathFixupPass());
            PM.addPass(HipStdParAcceleratorCodeSelectionPass());
          }
          PM.addPass(AMDGPUPrintfRuntimeBindingPass());
        }

        if (Level == OptimizationLevel::O0)
          return;

        // We don't want to run internalization at per-module stage.
        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
                                            OptimizationLevel Level,
                                            ThinOrFullLTOPhase Phase) {
    if (Level != OptimizationLevel::O0) {
      if (!isLTOPreLink(Phase)) {
        AMDGPUAttributorOptions Opts;
        MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
      }
    }
  });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // When we are using -fgpu-rdc, we can only run accelerator code
        // selection after linking; otherwise we end up removing potentially
        // reachable symbols that were exported as external in other modules.
        if (EnableHipStdPar) {
          PM.addPass(HipStdParMathFixupPass());
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
        }
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableSwLowerLDS)
          PM.addPass(AMDGPUSwLowerLDSPass(*this));
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
        if (Level != OptimizationLevel::O0) {
          // We only want to run this with O2 or higher since inliner and SROA
          // don't run in O1.
          if (Level != OptimizationLevel::O1) {
            PM.addPass(
                createModuleToFunctionPassAdaptor(InferAddressSpacesPass()));
          }
          // Do we really need internalization in LTO?
          if (InternalizeSymbols) {
            PM.addPass(InternalizePass(mustPreserveGV));
            PM.addPass(GlobalDCEPass());
          }
          if (EnableAMDGPUAttributor) {
            AMDGPUAttributorOptions Opt;
            if (HasClosedWorldAssumption)
              Opt.IsClosedWorld = true;
            PM.addPass(AMDGPUAttributorPass(
                *this, Opt, ThinOrFullLTOPhase::FullLTOPostLink));
          }
        }
        if (!NoKernelInfoEndLTO) {
          FunctionPassManager FPM;
          FPM.addPass(KernelInfoPrinter(this));
          PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        }
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        if (FilterName == "wwm")
          return onlyAllocateWWMRegs;
        return nullptr;
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

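// For reference (illustrative, not from this file): in IR terms the two
// functions above mean that, e.g.,
//   addrspacecast ptr addrspace(1) %p to ptr   ; global -> flat: a no-op
// is free, while the null pointer in the local/private/region address spaces
// is encoded as -1 rather than 0.
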
unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  if (auto *Arg = dyn_cast<Argument>(V);
      Arg &&
      AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
      !Arg->hasByRefAttr())
    return AMDGPUAS::GLOBAL_ADDRESS;

  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD) // TODO: Handle invariant load like constant.
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

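// Illustrative IR (hypothetical) for the constant-memory case above: %gp is a
// flat pointer loaded from addrspace(4), so it is assumed to point to global
// memory:
//
//   %gp = load ptr, ptr addrspace(4) %kernarg
//   %v  = load i32, ptr %gp   ; may be treated as an addrspace(1) access
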
std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}

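// Illustrative IR (hypothetical) matching the predicate above:
//
//   %is.s = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %is.p = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %ns   = xor i1 %is.s, true
//   %np   = xor i1 %is.p, true
//   %glob = and i1 %ns, %np   ; where %glob holds, %p may be treated as global
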
unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
}

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

ScheduleDAGInstrs *
GCNTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  Attribute SchedStrategyAttr =
      C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
  StringRef SchedStrategy = SchedStrategyAttr.isValid()
                                ? SchedStrategyAttr.getValueAsString()
                                : AMDGPUSchedStrategy;

  if (SchedStrategy == "max-ilp")
    return createGCNMaxILPMachineScheduler(C);

  if (SchedStrategy == "max-memory-clause")
    return createGCNMaxMemoryClauseMachineScheduler(C);

  if (SchedStrategy == "iterative-ilp")
    return createIterativeILPMachineScheduler(C);

  if (SchedStrategy == "iterative-minreg")
    return createMinRegScheduler(C);

  if (SchedStrategy == "iterative-maxocc")
    return createIterativeGCNMaxOccupancyMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

ScheduleDAGInstrs *
GCNTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
  ScheduleDAGMI *DAG =
      new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
                                   /*RemoveKillFlags=*/true);
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(
      createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
  if ((EnableVOPD.getNumOccurrences() ||
       getOptLevel() >= CodeGenOptLevel::Less) &&
      EnableVOPD)
    DAG->addMutation(createVOPDPairingMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

//===----------------------------------------------------------------------===//
// AMDGPU Legacy Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createWWMRegAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&PatchableFunctionID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Make enqueued block runtime handles externally visible.
  addPass(createAMDGPUExportKernelRuntimeHandlesLegacyPass());

  // Lower LDS accesses to global memory pass if address sanitizer is enabled.
  if (EnableSwLowerLDS)
    addPass(createAMDGPUSwLowerLDSLegacyPass(&TM));

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().isAMDGCN()) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().isAMDGCN()) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().isAMDGCN() &&
      TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().isAMDGCN()) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can
  // cause unexpected behavior for subsequent passes. Placing it
  // here seems better, as these blocks would get cleaned up by
  // UnreachableBlockElim inserted next in the pass flow.
  addPass(createLowerSwitchPass());
}


bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// GCN Legacy Pass Setup
//===----------------------------------------------------------------------===//

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPrepareLegacyPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  addPass(createFixIrreduciblePass());
  addPass(createUnifyLoopExitsPass());
  addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions

  addPass(createAMDGPUAnnotateUniformValuesLegacy());

  addPass(createSIAnnotateControlFlowLegacyPass());
  // TODO: Move this right after structurizeCFG to avoid extra divergence
  // analysis. This depends on stopping SIAnnotateControlFlow from making
  // control flow modifications.
  addPass(createAMDGPURewriteUndefForPHILegacyPass());

  // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
  // with -new-reg-bank-select and without any of the fallback options.
  if (!getCGPassBuilderOption().EnableGlobalISelOption ||
      !isGlobalISelAbortEnabled() || !NewRegBankSelect)
    addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisLegacyID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsLegacyID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineLegacyID);
  addPass(&SILoadStoreOptimizerLegacyID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWALegacyID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSELegacyID);
    addPass(&SIFoldOperandsLegacyID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsLegacyPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterLegacyID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesLegacyID);
  addPass(createSILowerI1CopiesLegacyPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  if (NewRegBankSelect) {
    addPass(createAMDGPURegBankSelectPass());
    addPass(createAMDGPURegBankLegalizePass());
  } else {
    addPass(new RegBankSelect());
  }
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(&AMDGPUPrepareAGPRAllocLegacyID);
}

void GCNPassConfig::addOptimizedRegAlloc() {
  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowLegacyID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &GCNPreRAOptimizationsID);

  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);

  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
                  initializeDefaultWWMRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyWWMRegisterAllocator();

  return createFastWWMRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
    "and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(createSIPreAllocateWWMRegsLegacyPass());

  // For allocating other wwm register operands.
  addPass(createWWMRegAllocPass(false));

  addPass(&SILowerWWMCopiesLegacyID);
  addPass(&AMDGPUReserveWWMRegsLegacyID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(false));

  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // At this point, the sgpr-regalloc has been done and it is good to have the
  // stack slot coloring to try to optimize the SGPR spill stack indices before
  // attempting the custom SGPR spill lowering.
  addPass(&StackSlotColoringID);

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsLegacyID);

  // To allocate wwm registers used in whole quad mode operations (for shaders).
  addPass(createSIPreAllocateWWMRegsLegacyPass());

  // For allocating other whole wave mode registers.
  addPass(createWWMRegAllocPass(true));
  addPass(&SILowerWWMCopiesLegacyID);
  addPass(createVirtRegRewriter(false));
  addPass(&AMDGPUReserveWWMRegsLegacyID);

  // For allocating per-thread VGPRs.
  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingLegacyID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsLegacyPass());
  addPass(&SIPostRABundlerLegacyID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  addPass(&AMDGPUWaitSGPRHazardsLegacyID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

void GCNPassConfig::addPostBBSections() {
  // We run this later to avoid passes like livedebugvalues and BBSections
  // having to deal with the apparent multi-entry functions we may generate.
  addPass(createAMDGPUPreloadKernArgPrologLegacyPass());
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         {}, {});
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  for (const auto &[_, Info] : PFS.VRegInfosNamed) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }
  for (const auto &[_, Info] : PFS.VRegInfos) {
    MFI->setFlag(Info->VReg, Info->Flags);
  }

  for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
    Register ParsedReg;
    if (parseRegister(YamlRegStr, ParsedReg))
      return true;
    MFI->SpillPhysVGPRs.push_back(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  if (YamlMFI.HasInitWholeWave)
    MFI->setInitWholeWave();

  return false;
}

//===----------------------------------------------------------------------===//
// AMDGPU CodeGen Pass Builder interface.
//===----------------------------------------------------------------------===//

AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
    GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC)
    : CodeGenPassBuilder(TM, Opts, PIC) {
  Opt.MISchedPostRA = true;
  Opt.RequiresCodeGenSCCOrder = true;
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  // Garbage collection is not supported.
  disablePass<StackMapLivenessPass, FuncletLayoutPass,
              ShadowStackGCLoweringPass>();
}

void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
  if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
    addPass(AMDGPURemoveIncompatibleFunctionsPass(TM));

  addPass(AMDGPUPrintfRuntimeBindingPass());
  if (LowerCtorDtor)
    addPass(AMDGPUCtorDtorLoweringPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(AMDGPUImageIntrinsicOptimizerPass(TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  addPass(AMDGPUAlwaysInlinePass());
  addPass(AlwaysInlinerPass());

  addPass(AMDGPUExportKernelRuntimeHandlesPass());

  if (EnableSwLowerLDS)
    addPass(AMDGPUSwLowerLDSPass(TM));

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS)
    addPass(AMDGPULowerModuleLDSPass(TM));

  // Run atomic optimizer before Atomic Expand
  if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None))
    addPass(AMDGPUAtomicOptimizerPass(TM, AMDGPUAtomicOptimizerStrategy));

  addPass(AtomicExpandPass(&TM));

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(AMDGPUPromoteAllocaPass(TM));
    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses(addPass);

    // TODO: Handle EnableAMDGPUAliasAnalysis

    // TODO: May want to move later or split into an early and late one.
    addPass(AMDGPUCodeGenPreparePass(TM));

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less) {
      addPass(createFunctionToLoopPassAdaptor(LICMPass(LICMOptions()),
                                              /*UseMemorySSA=*/true));
    }
  }

  Base::addIRPasses(addPass);

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass(addPass);
}

2136void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
2137 if (TM.getOptLevel() > CodeGenOptLevel::None)
2139
2141 addPass(AMDGPULowerKernelArgumentsPass(TM));
2142
2143 // This lowering has been placed after codegenprepare to take advantage of
2144 // address mode matching (which is why it isn't put with the LDS lowerings).
2145 // It could be placed anywhere before uniformity annotations (an analysis
2146 // that it changes by splitting up fat pointers into their components)
2147 // but has been put before switch lowering and CFG flattening so that those
2148 // passes can run on the more optimized control flow this pass creates in
2149 // many cases.
2150 //
2151 // FIXME: This should ideally be put after the LoadStoreVectorizer.
2152 // However, due to some annoying facts about ResourceUsageAnalysis,
2153 // (especially as exercised in the resource-usage-dead-function test),
2154 // we need all the function passes from codegenprepare all the way through
2155 // said resource usage analysis to run on the call graph produced
2156 // before codegenprepare runs (because codegenprepare will knock some
2157 // nodes out of the graph, which leads to function-level passes not
2158 // being run on them, which causes crashes in the resource usage analysis).
2159 addPass(AMDGPULowerBufferFatPointersPass(TM));
2160 addPass.requireCGSCCOrder();
2161
2162 addPass(AMDGPULowerIntrinsicsPass(TM));
2163
2164 Base::addCodeGenPrepare(addPass);
2165
2166 if (isPassEnabled(EnableLoadStoreVectorizer))
2167 addPass(LoadStoreVectorizerPass());
2168
2169 // The LowerSwitch pass may introduce unreachable blocks that can cause
2170 // unexpected behavior for subsequent passes. Placing it here means these
2171 // blocks get cleaned up by the UnreachableBlockElim pass inserted next in
2172 // the pass flow.
2173 addPass(LowerSwitchPass());
2174}
2175
2176void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2177
2178 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2179 addPass(FlattenCFGPass());
2180 addPass(SinkingPass());
2181 addPass(AMDGPULateCodeGenPreparePass(TM));
2182 }
2183
2184 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2185 // regions formed by them.
2186
2187 addPass(AMDGPUUnifyDivergentExitNodesPass());
2188 addPass(FixIrreduciblePass());
2189 addPass(UnifyLoopExitsPass());
2190 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2191
2192 addPass(AMDGPUAnnotateUniformValuesPass());
2193
2194 addPass(SIAnnotateControlFlowPass(TM));
2195
2196 // TODO: Move this right after structurizeCFG to avoid extra divergence
2197 // analysis. This depends on stopping SIAnnotateControlFlow from making
2198 // control flow modifications.
2199 addPass(AMDGPURewriteUndefForPHIPass());
2200
2202 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2203 addPass(LCSSAPass());
2204
2205 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2206 addPass(AMDGPUPerfHintAnalysisPass(TM));
2207
2208 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2209 // isn't this in addInstSelector?
2210 addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(),
2211 /*Force=*/true);
2212}
2213
2214void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2215 if (EnableEarlyIfConversion)
2216 addPass(EarlyIfConverterPass());
2217
2218 Base::addILPOpts(addPass);
2219}
2220
2221void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2222 CreateMCStreamer) const {
2223 // TODO: Add AsmPrinter.
2224}
2225
2226Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2227 addPass(AMDGPUISelDAGToDAGPass(TM));
2228 addPass(SIFixSGPRCopiesPass());
2229 addPass(SILowerI1CopiesPass());
2230 return Error::success();
2231}
2232
2233void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass) const {
2234 if (EnableRegReassign) {
2235 addPass(GCNNSAReassignPass());
2236 }
2237}
2238
2239void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2240 AddMachinePass &addPass) const {
2241 Base::addMachineSSAOptimization(addPass);
2242
2243 addPass(SIFoldOperandsPass());
2244 if (EnableDPPCombine) {
2245 addPass(GCNDPPCombinePass());
2246 }
2247 addPass(SILoadStoreOptimizerPass());
2248 if (isPassEnabled(EnableSDWAPeephole)) {
2249 addPass(SIPeepholeSDWAPass());
2250 addPass(EarlyMachineLICMPass());
2251 addPass(MachineCSEPass());
2252 addPass(SIFoldOperandsPass());
2253 }
2254 addPass(DeadMachineInstructionElimPass());
2255 addPass(SIShrinkInstructionsPass());
2256}
2257
2258void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2259 AddMachinePass &addPass) const {
2260 if (EnableDCEInRA)
2261 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2262
2263 // FIXME: When an instruction has a killed operand and the instruction is
2264 // inside a bundle, it seems that only the BUNDLE instruction appears as the
2265 // kill of the register in LiveVariables. This triggers a failure in the
2266 // verifier; we should fix it and enable the verifier.
2267 if (OptVGPRLiveRange)
2268 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2269 SIOptimizeVGPRLiveRangePass());
2270
2271 // This must be run immediately after phi elimination and before
2272 // TwoAddressInstructions, otherwise the processing of the tied operand of
2273 // SI_ELSE will introduce a copy of the tied operand source after the else.
2274 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2275
2277 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2278
2279 if (isPassEnabled(EnablePreRAOptimizations))
2280 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2281
2282 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2283 // instructions that cause scheduling barriers.
2284 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2285
2286 if (OptExecMaskPreRA)
2287 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2288
2289 // This is not an essential optimization and it has a noticeable impact on
2290 // compilation time, so we only enable it from O2.
2291 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2292 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2293
2294 Base::addOptimizedRegAlloc(addPass);
2295}
2296
2297void AMDGPUCodeGenPassBuilder::addPreRegAlloc(AddMachinePass &addPass) const {
2298 if (getOptLevel() != CodeGenOptLevel::None)
2299 addPass(AMDGPUPrepareAGPRAllocPass());
2300}
2301
2302Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2303 AddMachinePass &addPass) const {
2304 // TODO: Check --regalloc-npm option
2305
2306 addPass(GCNPreRALongBranchRegPass());
2307
2308 addPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}));
2309
2310 // Commit allocated register changes. This is mostly necessary because too
2311 // many things rely on the use lists of the physical registers, such as the
2312 // verifier. This is only necessary with allocators which use LiveIntervals,
2313 // since FastRegAlloc does the replacements itself.
2314 addPass(VirtRegRewriterPass(false));
2315
2316 // At this point, the sgpr-regalloc has been done and it is good to have the
2317 // stack slot coloring to try to optimize the SGPR spill stack indices before
2318 // attempting the custom SGPR spill lowering.
2319 addPass(StackSlotColoringPass());
2320
2321 // Equivalent of PEI for SGPRs.
2322 addPass(SILowerSGPRSpillsPass());
2323
2324 // To allocate wwm registers used in whole quad mode operations (for shaders).
2325 addPass(SIPreAllocateWWMRegsPass());
2326
2327 // For allocating other wwm register operands.
2328 addPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}));
2329 addPass(SILowerWWMCopiesPass());
2330 addPass(VirtRegRewriterPass(false));
2331 addPass(AMDGPUReserveWWMRegsPass());
2332
2333 // For allocating per-thread VGPRs.
2334 addPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}));
2335
2336
2337 addPreRewrite(addPass);
2338 addPass(VirtRegRewriterPass(true));
2339
2340 addPass(AMDGPUMarkLastScratchLoadPass());
2341 return Error::success();
2342}
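
// Taken together, addRegAssignmentOptimized performs a three-stage greedy
// allocation, each RAGreedyPass invocation restricted by a register-class
// filter:
//   1. "sgpr": scalar registers, then StackSlotColoring and the custom SGPR
//      spill lowering (SILowerSGPRSpills);
//   2. "wwm": whole-wave-mode registers, then SILowerWWMCopies;
//   3. "vgpr": the remaining per-thread VGPRs, then the final rewrite.
// Each intermediate VirtRegRewriterPass(false) commits assignments without
// clearing virtual registers, so later stages still see earlier results.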
2343
2344void AMDGPUCodeGenPassBuilder::addPostRegAlloc(AddMachinePass &addPass) const {
2345 addPass(SIFixVGPRCopiesPass());
2346 if (TM.getOptLevel() > CodeGenOptLevel::None)
2347 addPass(SIOptimizeExecMaskingPass());
2348 Base::addPostRegAlloc(addPass);
2349}
2350
2351void AMDGPUCodeGenPassBuilder::addPreSched2(AddMachinePass &addPass) const {
2352 if (TM.getOptLevel() > CodeGenOptLevel::None)
2353 addPass(SIShrinkInstructionsPass());
2354 addPass(SIPostRABundlerPass());
2355}
2356
2357void AMDGPUCodeGenPassBuilder::addPreEmitPass(AddMachinePass &addPass) const {
2358 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2359 addPass(GCNCreateVOPDPass());
2360 }
2361
2362 addPass(SIMemoryLegalizerPass());
2363 addPass(SIInsertWaitcntsPass());
2364
2365 // TODO: addPass(SIModeRegisterPass());
2366
2367 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2368 // TODO: addPass(SIInsertHardClausesPass());
2369 }
2370
2371 addPass(SILateBranchLoweringPass());
2372
2373 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2374 addPass(AMDGPUSetWavePriorityPass());
2375
2376 if (TM.getOptLevel() > CodeGenOptLevel::None)
2377 addPass(SIPreEmitPeepholePass());
2378
2379 // The hazard recognizer that runs as part of the post-ra scheduler does not
2380 // guarantee to be able to handle all hazards correctly. This is because if there
2381 // are multiple scheduling regions in a basic block, the regions are scheduled
2382 // bottom up, so when we begin to schedule a region we don't know what
2383 // instructions were emitted directly before it.
2384 //
2385 // Here we add a stand-alone hazard recognizer pass which can handle all
2386 // cases.
2387 addPass(PostRAHazardRecognizerPass());
2388 addPass(AMDGPUWaitSGPRHazardsPass());
2389
2390 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2391 addPass(AMDGPUInsertDelayAluPass());
2392 }
2393
2394 addPass(BranchRelaxationPass());
2395}
2396
2397bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2398 CodeGenOptLevel Level) const {
2399 if (Opt.getNumOccurrences())
2400 return Opt;
2401 if (TM.getOptLevel() < Level)
2402 return false;
2403 return Opt;
2404}
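
// A worked example of the rule above: addPreEmitPass gates VOPD formation
// with isPassEnabled(EnableVOPD, CodeGenOptLevel::Less). An explicit
// -amdgpu-enable-vopd=0|1 on the command line always wins; without it the
// query returns false below -O1 and otherwise falls back to the option's
// default (true).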
2405
2406void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
2407 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2408 addPass(GVNPass());
2409 else
2410 addPass(EarlyCSEPass());
2411}
2412
2413void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2414 AddIRPass &addPass) const {
2415 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2416 addPass(LoopDataPrefetchPass());
2417
2418 addPass(SeparateConstOffsetFromGEPPass());
2419
2420 // ReassociateGEPs exposes more opportunities for SLSR. See
2421 // the example in reassociate-geps-and-slsr.ll.
2422 addPass(StraightLineStrengthReducePass());
2423
2424 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2425 // EarlyCSE can reuse.
2426 addEarlyCSEOrGVNPass(addPass);
2427
2428 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2429 addPass(NaryReassociatePass());
2430
2431 // NaryReassociate on GEPs creates redundant common expressions, so run
2432 // EarlyCSE after it.
2433 addPass(EarlyCSEPass());
2434}
unsigned const MachineRegisterInfo * MRI
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
Analyzes how many registers and other resources are used by functions.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry("gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause", createGCNMaxMemoryClauseMachineScheduler)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSwLowerLDS("amdgpu-enable-sw-lower-lds", cl::desc("Enable lowering of lds to global memory pass " "and asan instrument resulting IR."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > HasClosedWorldAssumption("amdgpu-link-time-closed-world", cl::desc("Whether has closed-world assumption at link time"), cl::init(false), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< std::string > AMDGPUSchedStrategy("amdgpu-sched-strategy", cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden, cl::init(""))
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfoImplBase conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
Interfaces for producing common pass manager configurations.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:687
#define LLVM_READNONE
Definition: Compiler.h:315
#define LLVM_ABI
Definition: Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:132
This file provides the interface for a simple, fast CSE pass.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
This file provides the interface for LLVM's Loop Data Prefetching Pass.
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Register const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
static cl::opt< cl::boolOrDefault > EnableGlobalISelOption("global-isel", cl::Hidden, cl::desc("Enable the \"global\" instruction selector"))
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
A private abstract base class describing the concept of an individual alias analysis implementation.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Definition: AlwaysInliner.h:33
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:255
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:400
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:223
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:67
This class provides access to building LLVM's passes.
void addPreRewrite(AddMachinePass &) const
addPreRewrite - Add passes to the optimized register allocation pipeline after register allocation is...
void addPostRegAlloc(AddMachinePass &) const
This method may be implemented by targets that want to run passes after register allocation pass pipe...
void addILPOpts(AddMachinePass &) const
Add passes that optimize instruction level parallelism for out-of-order targets.
void addPreSched2(AddMachinePass &) const
This method may be implemented by targets that want to run passes after prolog-epilog insertion and b...
void addPreRegAlloc(AddMachinePass &) const
This method may be implemented by targets that want to run passes immediately before register allocat...
void addOptimizedRegAlloc(AddMachinePass &) const
addOptimizedRegAlloc - Add passes related to register allocation.
void addMachineSSAOptimization(AddMachinePass &) const
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
Error addRegAssignmentOptimized(AddMachinePass &) const
void addCodeGenPrepare(AddIRPass &) const
Add pass to prepare the LLVM IR for code generation.
Error addInstSelector(AddMachinePass &) const
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
void addPreEmitPass(AddMachinePass &) const
This pass may be implemented by targets that want to run passes immediately before machine code is em...
void addIRPasses(AddIRPass &) const
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addPreISel(AddIRPass &) const
{{@ For GlobalISel
void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const
implements a set of functionality in the TargetMachine class for targets that make use of the indepen...
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:739
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:159
static ErrorSuccess success()
Create a success value.
Definition: Error.h:336
Tagged union holding either a T or a Error.
Definition: Error.h:485
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:314
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:320
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override
Similar to createMachineScheduler but used when postRA machine scheduling is enabled.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition: GVN.h:126
Pass to remove unused function declarations.
Definition: GlobalDCE.h:38
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:37
Converts loops into loop-closed SSA form.
Definition: LCSSA.h:38
Performs Loop Invariant Code Motion Pass.
Definition: LICM.h:66
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:52
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:77
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
static LLVM_ABI const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static LLVM_ABI const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:110
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:503
LLVM_ABI void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel, ThinOrFullLTOPhase)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:523
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:417
void registerScalarOptimizerLateEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:462
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:613
LLVM_ABI void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:542
void registerVectorizerEndEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:484
LLVM_ABI void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:196
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
Definition: PassRegistry.h:38
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:99
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
Definition: RegBankSelect.h:91
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:282
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:584
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:585
Move instructions into successor blocks when possible.
Definition: Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:133
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:126
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:710
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:645
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:68
R Default(T Value)
Definition: StringSwitch.h:177
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:87
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:83
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isAMDGCN() const
Tests whether the target is AMDGCN.
Definition: Triple.h:901
LLVM Value Representation.
Definition: Value.h:75
bool use_empty() const
Definition: Value.h:346
int getNumOccurrences() const
Definition: CommandLine.h:400
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:435
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI llvm::cl::opt< bool > NoKernelInfoEndLTO
This file defines the TargetMachine class.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_READNONE constexpr bool isModuleEntryFunctionCC(CallingConv::ID CC)
LLVM_READNONE constexpr bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:980
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:712
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:464
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
LLVM_ABI FunctionPass * createFlattenCFGPass()
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
LLVM_ABI char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
LLVM_ABI char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
LLVM_ABI Pass * createLCSSAPass()
Definition: LCSSA.cpp:525
void initializeAMDGPUMarkLastScratchLoadLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluLegacyPass(PassRegistry &)
void initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry &)
char & GCNPreRAOptimizationsID
LLVM_ABI char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeSIInsertHardClausesLegacyPass(PassRegistry &)
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
void initializeGCNPreRAOptimizationsLegacyPass(PassRegistry &)
void initializeSILowerWWMCopiesLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:877
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeGCNRewritePartialRegUsesLegacyPass(llvm::PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
char & GCNRewritePartialRegUsesID
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
LLVM_ABI std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
char & AMDGPUWaitSGPRHazardsLegacyID
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
LLVM_ABI Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
LLVM_ABI FunctionPass * createNaryReassociatePass()
char & AMDGPUReserveWWMRegsLegacyID
void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &)
LLVM_ABI char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & SIOptimizeExecMaskingLegacyID
LLVM_ABI char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUExportKernelRuntimeHandlesLegacyPass()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeAMDGPUAsmPrinterPass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
LLVM_ABI std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:89
Target & getTheR600Target()
The target for R600 GPUs.
LLVM_ABI char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
LLVM_ABI Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
LLVM_ABI char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
LLVM_ABI Pass * createLICMPass()
Definition: LICM.cpp:384
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
LLVM_ABI char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition: Pass.h:77
@ FullLTOPostLink
Full LTO postlink (backend compile) phase.
char & AMDGPUUnifyDivergentExitNodesID
void initializeAMDGPUPrepareAGPRAllocLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
LLVM_ABI char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
void initializeSIModeRegisterLegacyPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
void initializeAMDGPUPreloadKernelArgumentsLegacyPass(PassRegistry &)
char & SILateBranchLoweringPassID
LLVM_ABI char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
LLVM_ABI FunctionPass * createSinkingPass()
Definition: Sink.cpp:275
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeSIMemoryLegalizerLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerIntrinsicsLegacyPass()
void initializeR600MachineCFGStructurizerPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:111
char & GCNDPPCombineLegacyID
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUTargetMach...
LLVM_ABI char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
void initializeGCNPreRALongBranchRegLegacyPass(PassRegistry &)
char & SILowerWWMCopiesLegacyID
LLVM_ABI FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
LLVM_ABI FunctionPass * createFixIrreduciblePass()
void initializeR600EmitClauseMarkersPass(PassRegistry &)
LLVM_ABI char & FuncletLayoutID
This pass lays out funclets contiguously.
LLVM_ABI char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
void initializeAMDGPUExportKernelRuntimeHandlesLegacyPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:82
void initializeSIInsertWaitcntsLegacyPass(PassRegistry &)
ModulePass * createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *)
ModulePass * createAMDGPUPrintfRuntimeBinding()
LLVM_ABI char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
LLVM_ABI Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringLegacyPass(PassRegistry &)
void initializeSILowerControlFlowLegacyPass(PassRegistry &)
void initializeSIFormMemoryClausesLegacyPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
void initializeAMDGPUReserveWWMRegsLegacyPass(PassRegistry &)
char & SIPreEmitPeepholeID
char & SIPostRABundlerLegacyID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
LLVM_ABI FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
LLVM_ABI void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
char & SILowerControlFlowLegacyID
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeGCNCreateVOPDLegacyPass(PassRegistry &)
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
void initializeSIFixVGPRCopiesLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
void initializeAMDGPULowerIntrinsicsLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition: GVN.cpp:3448
void initializeAMDGPURewriteAGPRCopyMFMALegacyPass(PassRegistry &)
void initializeSIPostRABundlerLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankLegalizePass()
LLVM_ABI char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:163
char & SIWholeQuadModeID
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void initializeSIOptimizeExecMaskingLegacyPass(PassRegistry &)
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:86
FunctionPass * createSILowerI1CopiesLegacyPass()
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
char & SIPeepholeSDWALegacyID
LLVM_ABI char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:258
char & SIFixVGPRCopiesID
char & SIFoldOperandsLegacyID
void initializeGCNNSAReassignLegacyPass(PassRegistry &)
char & AMDGPUPrepareAGPRAllocLegacyID
LLVM_ABI FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:782
FunctionToLoopPassAdaptor createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA=false, bool UseBlockFrequencyInfo=false, bool UseBranchProbabilityInfo=false)
A function to deduce a loop pass type and wrap it in the templated adaptor.
void initializeR600VectorRegMergerPass(PassRegistry &)
char & AMDGPURewriteAGPRCopyMFMALegacyID
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
LLVM_ABI char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
LLVM_ABI FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
LLVM_ABI FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1946
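A sketch of the usual legacy-pass-manager usage, assuming PM is an llvm::legacy::PassManagerBase; requesting MemorySSA enables the stronger memory-aware form:
// Plain, fast EarlyCSE.
PM.add(llvm::createEarlyCSEPass());
// EarlyCSE backed by MemorySSA, which can also remove redundant loads.
PM.add(llvm::createEarlyCSEPass(/*UseMemorySSA=*/true));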
void initializeSIWholeQuadModeLegacyPass(PassRegistry &)
LLVM_ABI char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3638
void initializeAMDGPUResourceUsageAnalysisWrapperPassPass(PassRegistry &)
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
LLVM_ABI char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes and splits them into separate virtual registers.
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
LLVM_ABI char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
void initializeSIPreEmitPeepholeLegacyPass(PassRegistry &)
char & AMDGPUPerfHintAnalysisLegacyID
LLVM_ABI ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperPass.
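The callback lets a target splice its own alias analysis into the shared AAResults. A sketch of the AMDGPU-style wiring, assuming PM is a legacy pass manager (the exact code in this file may differ slightly):
PM.add(createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
  // If AMDGPU's AA wrapper already ran, merge its result into AAR.
  if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
}));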
char & GCNPreRALongBranchRegID
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition: Error.cpp:180
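For instance, rejecting a bad user-supplied configuration; validateOptions() is a hypothetical helper:
void checkOptions() {
  if (llvm::Error Err = validateOptions())        // hypothetical validation
    llvm::reportFatalUsageError(std::move(Err));  // user error, not an LLVM bug
}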
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
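These factories come from the backend-internal AMDGPUArgumentUsageInfo.h; a sketch, with the concrete register, offset, and mask chosen purely for illustration:
// Argument preloaded into a scalar register.
ArgDescriptor InReg = ArgDescriptor::createRegister(AMDGPU::SGPR4);
// Argument passed on the stack at byte offset 16.
ArgDescriptor OnStack = ArgDescriptor::createStack(/*Offset=*/16);
// Same location as InReg, but restricted to a lane mask.
ArgDescriptor Masked = ArgDescriptor::createArg(InReg, /*Mask=*/0xFFFF);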
DenormalModeKind Input
Denormal treatment kind for floating-point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating-point instruction results in the default floating-point environment.
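Output and Input combine into a DenormalMode value; a sketch that flushes result denormals (keeping the sign of zero) while leaving input denormals intact:
llvm::DenormalMode Mode(/*Output=*/llvm::DenormalMode::PreserveSign,
                        /*Input=*/llvm::DenormalMode::IEEE);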
A simple and fast domtree-based CSE pass.
Definition: EarlyCSE.h:31
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-specific information for each MachineFunction.
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instantiate a scheduler.
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass control to the registered callbacks.
StringMap< VRegInfo * > VRegInfosNamed
Definition: MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition: MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation, for use in the target machine initialization function.
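The AMDGPU backend registers its target machines this way from its LLVMInitialize entry point; sketched here with only the GCN registration and an illustrative function name:
extern "C" void LLVMInitializeAMDGPUTargetSketch() { // illustrative name
  llvm::RegisterTargetMachine<llvm::GCNTargetMachine> Y(
      llvm::getTheGCNTarget());
}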
A utility pass template to force an analysis result to be available.
Definition: PassManager.h:903
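For example, forcing the dominator tree to be computed and cached before later passes in the pipeline query it (the analysis choice is illustrative):
llvm::FunctionPassManager FPM;
FPM.addPass(llvm::RequireAnalysisPass<llvm::DominatorTreeAnalysis,
                                      llvm::Function>());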
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise, pass NaN through.
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions.
bool IEEE
Floating-point opcodes that support exception-flag gathering quiet and propagate signaling NaN inputs per IEEE 754-2008.
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
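Taken together, these fields describe the AMDGPU mode-register defaults; a sketch of configuring them by hand (the particular settings are illustrative):
SIModeRegisterDefaults Mode;
Mode.IEEE = true;       // IEEE-compliant NaN quieting/propagation
Mode.DX10Clamp = true;  // clamp NaN to 0.0 in clamped vector ALU ops
Mode.FP32Denormals = llvm::DenormalMode::getPreserveSign();  // flush f32
Mode.FP64FP16Denormals = llvm::DenormalMode::getIEEE();      // keep f64/f16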
The llvm::once_flag structure.
Definition: Threading.h:67
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.