LLVM 22.0.0git
AArch64MachineFunctionInfo.h
Go to the documentation of this file.
//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
15
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <limits>
#include <optional>
30
31namespace llvm {
32
33namespace yaml {
35} // end namespace yaml
36
38class MachineInstr;
39
/// Holds the frame index and use count of the TPIDR2 block used for SME lazy
/// saves (struct header reconstructed; the extraction dropped the declaration
/// line).
struct TPIDR2Object {
  // Sentinel max() means "not allocated yet".
  int FrameIndex = std::numeric_limits<int>::max();
  // Number of uses of the TPIDR2 block within the function.
  unsigned Uses = 0;
};
44
45/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
46/// contains private AArch64-specific information for each MachineFunction.
48 /// Number of bytes of arguments this function has on the stack. If the callee
49 /// is expected to restore the argument stack this should be a multiple of 16,
50 /// all usable during a tail call.
51 ///
52 /// The alternative would forbid tail call optimisation in some cases: if we
53 /// want to transfer control from a function with 8-bytes of stack-argument
54 /// space to a function with 16-bytes then misalignment of this value would
55 /// make a stack adjustment necessary, which could not be undone by the
56 /// callee.
57 unsigned BytesInStackArgArea = 0;
58
59 /// The number of bytes to restore to deallocate space for incoming
60 /// arguments. Canonically 0 in the C calling convention, but non-zero when
61 /// callee is expected to pop the args.
62 unsigned ArgumentStackToRestore = 0;
63
64 /// Space just below incoming stack pointer reserved for arguments being
65 /// passed on the stack during a tail call. This will be the difference
66 /// between the largest tail call argument space needed in this function and
67 /// what's already available by reusing space of incoming arguments.
68 unsigned TailCallReservedStack = 0;
69
70 /// HasStackFrame - True if this function has a stack frame. Set by
71 /// determineCalleeSaves().
72 bool HasStackFrame = false;
73
74 /// Amount of stack frame size, not including callee-saved registers.
75 uint64_t LocalStackSize = 0;
76
77 /// Amount of stack frame size used for saving callee-saved registers.
78 unsigned CalleeSavedStackSize = 0;
79 unsigned ZPRCalleeSavedStackSize = 0;
80 unsigned PPRCalleeSavedStackSize = 0;
81 bool HasCalleeSavedStackSize = false;
82 bool HasSVECalleeSavedStackSize = false;
83
84 /// Number of TLS accesses using the special (combinable)
85 /// _TLS_MODULE_BASE_ symbol.
86 unsigned NumLocalDynamicTLSAccesses = 0;
87
88 /// FrameIndex for start of varargs area for arguments passed on the
89 /// stack.
90 int VarArgsStackIndex = 0;
91
92 /// Offset of start of varargs area for arguments passed on the stack.
93 unsigned VarArgsStackOffset = 0;
94
95 /// FrameIndex for start of varargs area for arguments passed in
96 /// general purpose registers.
97 int VarArgsGPRIndex = 0;
98
99 /// Size of the varargs area for arguments passed in general purpose
100 /// registers.
101 unsigned VarArgsGPRSize = 0;
102
103 /// FrameIndex for start of varargs area for arguments passed in
104 /// floating-point registers.
105 int VarArgsFPRIndex = 0;
106
107 /// Size of the varargs area for arguments passed in floating-point
108 /// registers.
109 unsigned VarArgsFPRSize = 0;
110
111 /// The stack slots used to add space between FPR and GPR accesses when using
112 /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
113 /// StackHazardSlotIndex is added between (sorted) stack objects.
114 int StackHazardSlotIndex = std::numeric_limits<int>::max();
115 int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
116
117 /// True if this function has a subset of CSRs that is handled explicitly via
118 /// copies.
119 bool IsSplitCSR = false;
120
121 /// True when the stack gets realigned dynamically because the size of stack
122 /// frame is unknown at compile time. e.g., in case of VLAs.
123 bool StackRealigned = false;
124
125 /// True when the callee-save stack area has unused gaps that may be used for
126 /// other stack allocations.
127 bool CalleeSaveStackHasFreeSpace = false;
128
129 /// SRetReturnReg - sret lowering includes returning the value of the
130 /// returned struct in a register. This field holds the virtual register into
131 /// which the sret argument is passed.
132 Register SRetReturnReg;
133
134 /// SVE stack size (for predicates and data vectors) are maintained here
135 /// rather than in FrameInfo, as the placement and Stack IDs are target
136 /// specific.
137 uint64_t StackSizeZPR = 0;
138 uint64_t StackSizePPR = 0;
139
140 /// Are SVE objects (vectors and predicates) split into separate regions on
141 /// the stack.
142 bool SplitSVEObjects = false;
143
144 /// HasCalculatedStackSizeSVE indicates whether StackSizeZPR/PPR is valid.
145 bool HasCalculatedStackSizeSVE = false;
146
147 /// Has a value when it is known whether or not the function uses a
148 /// redzone, and no value otherwise.
149 /// Initialized during frame lowering, unless the function has the noredzone
150 /// attribute, in which case it is set to false at construction.
151 std::optional<bool> HasRedZone;
152
153 /// ForwardedMustTailRegParms - A list of virtual and physical registers
154 /// that must be forwarded to every musttail call.
155 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
156
157 /// FrameIndex for the tagged base pointer.
158 std::optional<int> TaggedBasePointerIndex;
159
160 /// Offset from SP-at-entry to the tagged base pointer.
161 /// Tagged base pointer is set up to point to the first (lowest address)
162 /// tagged stack slot.
163 unsigned TaggedBasePointerOffset;
164
165 /// OutliningStyle denotes, if a function was outined, how it was outlined,
166 /// e.g. Tail Call, Thunk, or Function if none apply.
167 std::optional<std::string> OutliningStyle;
168
169 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
170 // CalleeSavedStackSize) to the address of the frame record.
171 int CalleeSaveBaseToFrameRecordOffset = 0;
172
173 /// SignReturnAddress is true if PAC-RET is enabled for the function with
174 /// defaults being sign non-leaf functions only, with the B key.
175 bool SignReturnAddress = false;
176
177 /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
178 /// functions as well.
179 bool SignReturnAddressAll = false;
180
181 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
182 bool SignWithBKey = false;
183
184 /// HasELFSignedGOT is true if the target binary format is ELF and the IR
185 /// module containing the corresponding function has "ptrauth-elf-got" flag
186 /// set to 1.
187 bool HasELFSignedGOT = false;
188
189 /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
190 /// within the prologue, so it can be re-used for authentication in the
191 /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
192 MCSymbol *SignInstrLabel = nullptr;
193
194 /// BranchTargetEnforcement enables placing BTI instructions at potential
195 /// indirect branch destinations.
196 bool BranchTargetEnforcement = false;
197
198 /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
199 /// This is set by -mbranch-protection and will emit NOP instructions unless
200 /// the subtarget feature +pauthlr is also used (in which case non-NOP
201 /// instructions are emitted).
202 bool BranchProtectionPAuthLR = false;
203
204 /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
205 /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
206 /// extended record.
207 bool HasSwiftAsyncContext = false;
208
209 /// The stack slot where the Swift asynchronous context is stored.
210 int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
211
212 bool IsMTETagged = false;
213
214 /// The function has Scalable Vector or Scalable Predicate register argument
215 /// or return type
216 bool IsSVECC = false;
217
218 /// Whether this function changes streaming mode within the function.
219 bool HasStreamingModeChanges = false;
220
221 /// True if the function need unwind information.
222 mutable std::optional<bool> NeedsDwarfUnwindInfo;
223
224 /// True if the function need asynchronous unwind information.
225 mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
226
227 int64_t StackProbeSize = 0;
228
229 // Holds a register containing pstate.sm. This is set
230 // on function entry to record the initial pstate of a function.
231 Register PStateSMReg = MCRegister::NoRegister;
232
233 // Has the PNReg used to build PTRUE instruction.
234 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
235 unsigned PredicateRegForFillSpill = 0;
236
237 // Holds the SME function attributes (streaming mode, ZA/ZT0 state).
238 SMEAttrs SMEFnAttrs;
239
240 // Holds the TPIDR2 block if allocated early (for Windows/stack probes
241 // support).
242 Register EarlyAllocSMESaveBuffer = AArch64::NoRegister;
243
244 // Holds the spill slot for ZT0.
245 int ZT0SpillSlotIndex = std::numeric_limits<int>::max();
246
247 // Note: The following properties are only used for the old SME ABI lowering:
248 /// The frame-index for the TPIDR2 object used for lazy saves.
249 TPIDR2Object TPIDR2;
250 // Holds a pointer to a buffer that is large enough to represent
251 // all SME ZA state and any additional state required by the
252 // __arm_sme_save/restore support routines.
253 Register SMESaveBufferAddr = MCRegister::NoRegister;
254 // true if SMESaveBufferAddr is used.
255 bool SMESaveBufferUsed = false;
256
257public:
259
263 const override;
264
266 EarlyAllocSMESaveBuffer = Ptr;
267 }
268
270 return EarlyAllocSMESaveBuffer;
271 }
272
273 void setZT0SpillSlotIndex(int FI) { ZT0SpillSlotIndex = FI; }
275 assert(hasZT0SpillSlotIndex() && "ZT0 spill slot index not set!");
276 return ZT0SpillSlotIndex;
277 }
278 bool hasZT0SpillSlotIndex() const {
279 return ZT0SpillSlotIndex != std::numeric_limits<int>::max();
280 }
281
282 // Old SME ABI lowering state getters/setters:
283 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
284 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
285 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
286 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
287 TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
288
290 PredicateRegForFillSpill = Reg;
291 }
293 return PredicateRegForFillSpill;
294 }
295
296 Register getPStateSMReg() const { return PStateSMReg; };
297 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
298
299 bool isSVECC() const { return IsSVECC; };
300 void setIsSVECC(bool s) { IsSVECC = s; };
301
303
304 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
305 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
306
307 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
308 void setArgumentStackToRestore(unsigned bytes) {
309 ArgumentStackToRestore = bytes;
310 }
311
312 unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
313 void setTailCallReservedStack(unsigned bytes) {
314 TailCallReservedStack = bytes;
315 }
316
318 StackSizeZPR = ZPR;
319 StackSizePPR = PPR;
320 HasCalculatedStackSizeSVE = true;
321 }
322
325 return StackSizeZPR;
326 }
329 return StackSizePPR;
330 }
331
332 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
333
334 bool hasSVEStackSize() const {
335 return getStackSizeZPR() > 0 || getStackSizePPR() > 0;
336 }
337
338 bool hasStackFrame() const { return HasStackFrame; }
339 void setHasStackFrame(bool s) { HasStackFrame = s; }
340
341 bool isStackRealigned() const { return StackRealigned; }
342 void setStackRealigned(bool s) { StackRealigned = s; }
344 return CalleeSaveStackHasFreeSpace;
345 }
347 CalleeSaveStackHasFreeSpace = s;
348 }
349 bool isSplitCSR() const { return IsSplitCSR; }
350 void setIsSplitCSR(bool s) { IsSplitCSR = s; }
351
352 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
353 uint64_t getLocalStackSize() const { return LocalStackSize; }
354
355 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
356 std::optional<std::string> getOutliningStyle() const {
357 return OutliningStyle;
358 }
359
361 CalleeSavedStackSize = Size;
362 HasCalleeSavedStackSize = true;
363 }
364
365 // When CalleeSavedStackSize has not been set (for example when
366 // some MachineIR pass is run in isolation), then recalculate
367 // the CalleeSavedStackSize directly from the CalleeSavedInfo.
368 // Note: This information can only be recalculated after PEI
369 // has assigned offsets to the callee save objects.
370 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
371 bool ValidateCalleeSavedStackSize = false;
372
373#ifndef NDEBUG
374 // Make sure the calculated size derived from the CalleeSavedInfo
375 // equals the cached size that was calculated elsewhere (e.g. in
376 // determineCalleeSaves).
377 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
378#endif
379
380 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
381 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
382 if (MFI.getCalleeSavedInfo().empty())
383 return 0;
384
385 int64_t MinOffset = std::numeric_limits<int64_t>::max();
386 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
387 for (const auto &Info : MFI.getCalleeSavedInfo()) {
388 int FrameIdx = Info.getFrameIdx();
389 if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
390 continue;
391 int64_t Offset = MFI.getObjectOffset(FrameIdx);
392 int64_t ObjSize = MFI.getObjectSize(FrameIdx);
393 MinOffset = std::min<int64_t>(Offset, MinOffset);
394 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
395 }
396
397 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
399 int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
400 MinOffset = std::min<int64_t>(Offset, MinOffset);
401 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
402 }
403
404 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
405 int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
406 int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
407 MinOffset = std::min<int64_t>(Offset, MinOffset);
408 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
409 }
410
411 unsigned Size = alignTo(MaxOffset - MinOffset, 16);
412 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
413 "Invalid size calculated for callee saves");
414 return Size;
415 }
416
418 }
419
420 unsigned getCalleeSavedStackSize() const {
421 assert(HasCalleeSavedStackSize &&
422 "CalleeSavedStackSize has not been calculated");
423 return CalleeSavedStackSize;
424 }
425
426 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
427 void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) {
428 ZPRCalleeSavedStackSize = ZPR;
429 PPRCalleeSavedStackSize = PPR;
430 HasSVECalleeSavedStackSize = true;
431 }
432 unsigned getZPRCalleeSavedStackSize() const {
433 assert(HasSVECalleeSavedStackSize &&
434 "ZPRCalleeSavedStackSize has not been calculated");
435 return ZPRCalleeSavedStackSize;
436 }
437 unsigned getPPRCalleeSavedStackSize() const {
438 assert(HasSVECalleeSavedStackSize &&
439 "PPRCalleeSavedStackSize has not been calculated");
440 return PPRCalleeSavedStackSize;
441 }
442
443 unsigned getSVECalleeSavedStackSize() const {
445 "ZPRs and PPRs are split. Use get[ZPR|PPR]CalleeSavedStackSize()");
447 }
448
449 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
451 return NumLocalDynamicTLSAccesses;
452 }
453
457
458 std::optional<bool> hasRedZone() const { return HasRedZone; }
459 void setHasRedZone(bool s) { HasRedZone = s; }
460
461 int getVarArgsStackIndex() const { return VarArgsStackIndex; }
462 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
463
464 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
465 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
466
467 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
468 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
469
470 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
471 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
472
473 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
474 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
475
476 unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
477 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
478
480 return StackHazardSlotIndex != std::numeric_limits<int>::max();
481 }
482 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
483 void setStackHazardSlotIndex(int Index) {
484 assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
485 StackHazardSlotIndex = Index;
486 }
487 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
489 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
490 StackHazardCSRSlotIndex = Index;
491 }
492
493 bool hasSplitSVEObjects() const { return SplitSVEObjects; }
494 void setSplitSVEObjects(bool s) { SplitSVEObjects = s; }
495
496 bool hasSVE_AAPCS(const MachineFunction &MF) const {
497 return hasSplitSVEObjects() || isSVECC() ||
500 }
501
502 SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
503
504 unsigned getSRetReturnReg() const { return SRetReturnReg; }
505 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
506
507 unsigned getJumpTableEntrySize(int Idx) const {
508 return JumpTableEntryInfo[Idx].first;
509 }
511 return JumpTableEntryInfo[Idx].second;
512 }
513 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
514 if ((unsigned)Idx >= JumpTableEntryInfo.size())
515 JumpTableEntryInfo.resize(Idx+1);
516 JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
517 }
518
520
521 const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
522
523 // Shortcuts for LOH related types.
525 MCLOHType Kind;
526
527 /// Arguments of this directive. Order matters.
529
530 public:
532
534 : Kind(Kind), Args(Args.begin(), Args.end()) {
535 assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
536 }
537
538 MCLOHType getKind() const { return Kind; }
539 LOHArgs getArgs() const { return Args; }
540 };
541
544
545 const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
546
547 /// Add a LOH directive of this @p Kind and this @p Args.
549 LOHContainerSet.push_back(MILOHDirective(Kind, Args));
550 LOHRelated.insert_range(Args);
551 }
552
553 size_t
555 size_t InitialSize = LOHContainerSet.size();
556 erase_if(LOHContainerSet, [&](const auto &D) {
557 return any_of(D.getArgs(), [&](auto *Arg) { return MIs.contains(Arg); });
558 });
559 // In theory there could be an LOH with one label in MIs and another label
560 // outside MIs, however we don't know if the label outside MIs is used in
561 // any other LOHs, so we can't remove them from LOHRelated. In that case, we
562 // might produce a few extra labels, but it won't break anything.
563 LOHRelated.remove_if([&](auto *MI) { return MIs.contains(MI); });
564 return InitialSize - LOHContainerSet.size();
565 };
566
568 return ForwardedMustTailRegParms;
569 }
570
571 std::optional<int> getTaggedBasePointerIndex() const {
572 return TaggedBasePointerIndex;
573 }
574 void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
575
576 unsigned getTaggedBasePointerOffset() const {
577 return TaggedBasePointerOffset;
578 }
580 TaggedBasePointerOffset = Offset;
581 }
582
584 return CalleeSaveBaseToFrameRecordOffset;
585 }
587 CalleeSaveBaseToFrameRecordOffset = Offset;
588 }
589
590 bool shouldSignReturnAddress(const MachineFunction &MF) const;
591 bool shouldSignReturnAddress(bool SpillsLR) const;
592
594
595 bool shouldSignWithBKey() const { return SignWithBKey; }
596
597 bool hasELFSignedGOT() const { return HasELFSignedGOT; }
598
599 MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
600 void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
601
602 bool isMTETagged() const { return IsMTETagged; }
603
604 bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
605
606 bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
607
608 void setHasSwiftAsyncContext(bool HasContext) {
609 HasSwiftAsyncContext = HasContext;
610 }
611 bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
612
614 SwiftAsyncContextFrameIdx = FI;
615 }
616 int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
617
618 bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
619 bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
620
621 bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
622 void setHasStreamingModeChanges(bool HasChanges) {
623 HasStreamingModeChanges = HasChanges;
624 }
625
626 bool hasStackProbing() const { return StackProbeSize != 0; }
627
628 int64_t getStackProbeSize() const { return StackProbeSize; }
629
630private:
631 // Hold the lists of LOHs.
632 MILOHContainer LOHContainerSet;
633 SetOfInstructions LOHRelated;
634
635 SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
636};
637
638namespace yaml {
640 std::optional<bool> HasRedZone;
641 std::optional<uint64_t> StackSizeZPR;
642 std::optional<uint64_t> StackSizePPR;
643 std::optional<bool> HasStackFrame;
644
647
648 void mappingImpl(yaml::IO &YamlIO) override;
650};
651
653 static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
654 YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
655 YamlIO.mapOptional("stackSizeZPR", MFI.StackSizeZPR);
656 YamlIO.mapOptional("stackSizePPR", MFI.StackSizePPR);
657 YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame);
658 }
659};
660
661} // end namespace yaml
662
663} // end namespace llvm
664
665#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:55
Register Reg
Basic Register Allocator
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
void addLOHDirective(MCLOHType Kind, MILOHArgs Args)
Add a LOH directive of this Kind and this Args.
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
void setVarArgsStackOffset(unsigned Offset)
void setTailCallReservedStack(unsigned bytes)
SmallVector< MILOHDirective, 32 > MILOHContainer
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setOutliningStyle(const std::string &Style)
const SetOfInstructions & getLOHRelated() const
void setBytesInStackArgArea(unsigned bytes)
void setSigningInstrLabel(MCSymbol *Label)
void setHasSwiftAsyncContext(bool HasContext)
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR)
std::optional< int > getTaggedBasePointerIndex() const
AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI)
unsigned getJumpTableEntrySize(int Idx) const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
size_t clearLinkerOptimizationHints(const SmallPtrSetImpl< MachineInstr * > &MIs)
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
SmallPtrSet< const MachineInstr *, 16 > SetOfInstructions
void setTaggedBasePointerOffset(unsigned Offset)
std::optional< bool > hasRedZone() const
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR)
std::optional< std::string > getOutliningStyle() const
void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI)
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
MachineFunctionInfo * clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF, const DenseMap< MachineBasicBlock *, MachineBasicBlock * > &Src2DstMBB) const override
Make a functionally equivalent copy of this MachineFunctionInfo in MF.
bool hasSVE_AAPCS(const MachineFunction &MF) const
void setArgumentStackToRestore(unsigned bytes)
void setHasStreamingModeChanges(bool HasChanges)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
static constexpr unsigned NoRegister
Definition MCRegister.h:52
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
void mapOptional(StringRef Key, T &Val)
Definition YAMLTraits.h:799
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
static bool isValidMCLOHType(unsigned Kind)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
MCLOHType
Linker Optimization Hint Type.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2100
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
void mappingImpl(yaml::IO &YamlIO) override
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI)
This class should be specialized by any type that needs to be converted to/from a YAML mapping.
Definition YAMLTraits.h:62