LLVM 22.0.0git
AArch64MachineFunctionInfo.h
Go to the documentation of this file.
1//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares AArch64-specific per-machine-function information.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
15
16#include "AArch64Subtarget.h"
18#include "llvm/ADT/ArrayRef.h"
25#include "llvm/IR/Function.h"
27#include "llvm/MC/MCSymbol.h"
28#include <cassert>
29#include <optional>
30
31namespace llvm {
32
33namespace yaml {
35} // end namespace yaml
36
38class MachineInstr;
39
// NOTE(review): this appears to be the tail of `struct TPIDR2Object` (the SME
// lazy-save state referenced at doxygen line 247 below); its opening line was
// lost in extraction — confirm against the original header.
// Frame index of the TPIDR2 block; numeric_limits<int>::max() is the
// "not allocated" sentinel (same convention as the other FrameIndex members
// in this file).
41 int FrameIndex = std::numeric_limits<int>::max();
// Use count — presumably drives whether lazy-save setup is emitted;
// TODO(review): confirm against the frame-lowering code that reads it.
42 unsigned Uses = 0;
43};
44
45/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
46/// contains private AArch64-specific information for each MachineFunction.
48 /// Number of bytes of arguments this function has on the stack. If the callee
49 /// is expected to restore the argument stack this should be a multiple of 16,
50 /// all usable during a tail call.
51 ///
52 /// The alternative would forbid tail call optimisation in some cases: if we
53 /// want to transfer control from a function with 8-bytes of stack-argument
54 /// space to a function with 16-bytes then misalignment of this value would
55 /// make a stack adjustment necessary, which could not be undone by the
56 /// callee.
57 unsigned BytesInStackArgArea = 0;
58
59 /// The number of bytes to restore to deallocate space for incoming
60 /// arguments. Canonically 0 in the C calling convention, but non-zero when
61 /// callee is expected to pop the args.
62 unsigned ArgumentStackToRestore = 0;
63
64 /// Space just below incoming stack pointer reserved for arguments being
65 /// passed on the stack during a tail call. This will be the difference
66 /// between the largest tail call argument space needed in this function and
67 /// what's already available by reusing space of incoming arguments.
68 unsigned TailCallReservedStack = 0;
69
70 /// HasStackFrame - True if this function has a stack frame. Set by
71 /// determineCalleeSaves().
72 bool HasStackFrame = false;
73
74 /// Amount of stack frame size, not including callee-saved registers.
75 uint64_t LocalStackSize = 0;
76
77 /// The start and end frame indices for the SVE callee saves.
78 int MinSVECSFrameIndex = 0;
79 int MaxSVECSFrameIndex = 0;
80
81 /// Amount of stack frame size used for saving callee-saved registers.
82 unsigned CalleeSavedStackSize = 0;
83 unsigned SVECalleeSavedStackSize = 0;
84 bool HasCalleeSavedStackSize = false;
85 bool HasSVECalleeSavedStackSize = false;
86
87 /// Number of TLS accesses using the special (combinable)
88 /// _TLS_MODULE_BASE_ symbol.
89 unsigned NumLocalDynamicTLSAccesses = 0;
90
91 /// FrameIndex for start of varargs area for arguments passed on the
92 /// stack.
93 int VarArgsStackIndex = 0;
94
95 /// Offset of start of varargs area for arguments passed on the stack.
96 unsigned VarArgsStackOffset = 0;
97
98 /// FrameIndex for start of varargs area for arguments passed in
99 /// general purpose registers.
100 int VarArgsGPRIndex = 0;
101
102 /// Size of the varargs area for arguments passed in general purpose
103 /// registers.
104 unsigned VarArgsGPRSize = 0;
105
106 /// FrameIndex for start of varargs area for arguments passed in
107 /// floating-point registers.
108 int VarArgsFPRIndex = 0;
109
110 /// Size of the varargs area for arguments passed in floating-point
111 /// registers.
112 unsigned VarArgsFPRSize = 0;
113
114 /// The stack slots used to add space between FPR and GPR accesses when using
115 /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
116 /// StackHazardSlotIndex is added between (sorted) stack objects.
117 int StackHazardSlotIndex = std::numeric_limits<int>::max();
118 int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
119
120 /// True if this function has a subset of CSRs that is handled explicitly via
121 /// copies.
122 bool IsSplitCSR = false;
123
124 /// True when the stack gets realigned dynamically because the size of stack
125 /// frame is unknown at compile time. e.g., in case of VLAs.
126 bool StackRealigned = false;
127
128 /// True when the callee-save stack area has unused gaps that may be used for
129 /// other stack allocations.
130 bool CalleeSaveStackHasFreeSpace = false;
131
132 /// SRetReturnReg - sret lowering includes returning the value of the
133 /// returned struct in a register. This field holds the virtual register into
134 /// which the sret argument is passed.
135 Register SRetReturnReg;
136
137 /// SVE stack size (for predicates and data vectors) are maintained here
138 /// rather than in FrameInfo, as the placement and Stack IDs are target
139 /// specific.
140 uint64_t StackSizeSVE = 0;
141
142 /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
143 bool HasCalculatedStackSizeSVE = false;
144
145 /// Has a value when it is known whether or not the function uses a
146 /// redzone, and no value otherwise.
147 /// Initialized during frame lowering, unless the function has the noredzone
148 /// attribute, in which case it is set to false at construction.
149 std::optional<bool> HasRedZone;
150
151 /// ForwardedMustTailRegParms - A list of virtual and physical registers
152 /// that must be forwarded to every musttail call.
153 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
154
155 /// FrameIndex for the tagged base pointer.
156 std::optional<int> TaggedBasePointerIndex;
157
158 /// Offset from SP-at-entry to the tagged base pointer.
159 /// Tagged base pointer is set up to point to the first (lowest address)
160 /// tagged stack slot.
161 unsigned TaggedBasePointerOffset;
162
163 /// OutliningStyle denotes, if a function was outined, how it was outlined,
164 /// e.g. Tail Call, Thunk, or Function if none apply.
165 std::optional<std::string> OutliningStyle;
166
167 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
168 // CalleeSavedStackSize) to the address of the frame record.
169 int CalleeSaveBaseToFrameRecordOffset = 0;
170
171 /// SignReturnAddress is true if PAC-RET is enabled for the function with
172 /// defaults being sign non-leaf functions only, with the B key.
173 bool SignReturnAddress = false;
174
175 /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
176 /// functions as well.
177 bool SignReturnAddressAll = false;
178
179 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
180 bool SignWithBKey = false;
181
182 /// HasELFSignedGOT is true if the target binary format is ELF and the IR
183 /// module containing the corresponding function has "ptrauth-elf-got" flag
184 /// set to 1.
185 bool HasELFSignedGOT = false;
186
187 /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
188 /// within the prologue, so it can be re-used for authentication in the
189 /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
190 MCSymbol *SignInstrLabel = nullptr;
191
192 /// BranchTargetEnforcement enables placing BTI instructions at potential
193 /// indirect branch destinations.
194 bool BranchTargetEnforcement = false;
195
196 /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
197 /// This is set by -mbranch-protection and will emit NOP instructions unless
198 /// the subtarget feature +pauthlr is also used (in which case non-NOP
199 /// instructions are emitted).
200 bool BranchProtectionPAuthLR = false;
201
202 /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
203 /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
204 /// extended record.
205 bool HasSwiftAsyncContext = false;
206
207 /// The stack slot where the Swift asynchronous context is stored.
208 int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
209
/// Presumably set when MTE stack tagging is enabled for this function —
/// TODO(review): confirm where this is assigned; only the getter
/// isMTETagged() is visible here.
210 bool IsMTETagged = false;
211
212 /// The function has Scalable Vector or Scalable Predicate register argument
213 /// or return type
214 bool IsSVECC = false;
215
216 /// Whether this function changes streaming mode within the function.
217 bool HasStreamingModeChanges = false;
218
219 /// True if the function needs unwind information.
220 mutable std::optional<bool> NeedsDwarfUnwindInfo;
221
222 /// True if the function needs asynchronous unwind information.
223 mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
224
/// Stack-probe interval in bytes; 0 disables stack probing (see
/// hasStackProbing(), which tests StackProbeSize != 0).
225 int64_t StackProbeSize = 0;
226
227 // Holds a register containing pstate.sm. This is set
228 // on function entry to record the initial pstate of a function.
229 Register PStateSMReg = MCRegister::NoRegister;
230
231 // Has the PNReg used to build PTRUE instruction.
232 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
233 unsigned PredicateRegForFillSpill = 0;
234
235 // Holds the SME function attributes (streaming mode, ZA/ZT0 state).
236 SMEAttrs SMEFnAttrs;
237
238 // Holds the TPIDR2 block if allocated early (for Windows/stack probes
239 // support).
240 Register EarlyAllocSMESaveBuffer = AArch64::NoRegister;
241
242 // Holds the spill slot for ZT0.
243 int ZT0SpillSlotIndex = std::numeric_limits<int>::max();
244
245 // Note: The following properties are only used for the old SME ABI lowering:
246 /// The frame-index for the TPIDR2 object used for lazy saves.
247 TPIDR2Object TPIDR2;
248 // Holds a pointer to a buffer that is large enough to represent
249 // all SME ZA state and any additional state required by the
250 // __arm_sme_save/restore support routines.
251 Register SMESaveBufferAddr = MCRegister::NoRegister;
252 // true if SMESaveBufferAddr is used.
253 bool SMESaveBufferUsed = false;
254
255public:
257
261 const override;
262
264 EarlyAllocSMESaveBuffer = Ptr;
265 }
266
268 return EarlyAllocSMESaveBuffer;
269 }
270
/// Record the frame index of the spill slot reserved for ZT0.
271 void setZT0SpillSlotIndex(int FI) { ZT0SpillSlotIndex = FI; }
// NOTE(review): the getter's signature line (doxygen line 272, presumably
// `int getZT0SpillSlotIndex() const {`) was lost in extraction — the assert
// and return below are its body; confirm against the original header.
273 assert(hasZT0SpillSlotIndex() && "ZT0 spill slot index not set!");
274 return ZT0SpillSlotIndex;
275 }
/// True once a ZT0 spill slot has been assigned; numeric_limits<int>::max()
/// is the "unset" sentinel (matches ZT0SpillSlotIndex's initializer).
276 bool hasZT0SpillSlotIndex() const {
277 return ZT0SpillSlotIndex != std::numeric_limits<int>::max();
278 }
279
280 // Old SME ABI lowering state getters/setters:
281 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
282 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
283 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
284 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
285 TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
286
288 PredicateRegForFillSpill = Reg;
289 }
291 return PredicateRegForFillSpill;
292 }
293
294 Register getPStateSMReg() const { return PStateSMReg; };
295 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
296
297 bool isSVECC() const { return IsSVECC; };
298 void setIsSVECC(bool s) { IsSVECC = s; };
299
301
302 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
303 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
304
305 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
306 void setArgumentStackToRestore(unsigned bytes) {
307 ArgumentStackToRestore = bytes;
308 }
309
310 unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
311 void setTailCallReservedStack(unsigned bytes) {
312 TailCallReservedStack = bytes;
313 }
314
/// Whether StackSizeSVE below holds a valid value.
315 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
316
// NOTE(review): the setter's signature line (doxygen line 317, presumably
// `void setStackSizeSVE(uint64_t S) {`) was lost in extraction — the two
// assignments below are its body; confirm against the original header.
318 HasCalculatedStackSizeSVE = true;
319 StackSizeSVE = S;
320 }
321
// NOTE(review): the getter's signature (doxygen lines 322-323, presumably
// `uint64_t getStackSizeSVE() const {` plus an assert on
// HasCalculatedStackSizeSVE) was lost in extraction — confirm.
324 return StackSizeSVE;
325 }
326
327 bool hasStackFrame() const { return HasStackFrame; }
328 void setHasStackFrame(bool s) { HasStackFrame = s; }
329
330 bool isStackRealigned() const { return StackRealigned; }
331 void setStackRealigned(bool s) { StackRealigned = s; }
332
334 return CalleeSaveStackHasFreeSpace;
335 }
337 CalleeSaveStackHasFreeSpace = s;
338 }
339 bool isSplitCSR() const { return IsSplitCSR; }
340 void setIsSplitCSR(bool s) { IsSplitCSR = s; }
341
342 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
343 uint64_t getLocalStackSize() const { return LocalStackSize; }
344
345 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
346 std::optional<std::string> getOutliningStyle() const {
347 return OutliningStyle;
348 }
349
351 CalleeSavedStackSize = Size;
352 HasCalleeSavedStackSize = true;
353 }
354
355 // When CalleeSavedStackSize has not been set (for example when
356 // some MachineIR pass is run in isolation), then recalculate
357 // the CalleeSavedStackSize directly from the CalleeSavedInfo.
358 // Note: This information can only be recalculated after PEI
359 // has assigned offsets to the callee save objects.
360 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
361 bool ValidateCalleeSavedStackSize = false;
362
363#ifndef NDEBUG
364 // Make sure the calculated size derived from the CalleeSavedInfo
365 // equals the cached size that was calculated elsewhere (e.g. in
366 // determineCalleeSaves).
367 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
368#endif
369
// In release builds this recomputes only when no cached size exists; in
// asserts builds it also recomputes to cross-check the cached value.
370 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
371 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
372 if (MFI.getCalleeSavedInfo().empty())
373 return 0;
374
// Compute the extent [MinOffset, MaxOffset) covered by the default-stack
// CSR objects (SVE-stack-ID objects are skipped here; they are accounted
// separately via SVECalleeSavedStackSize).
375 int64_t MinOffset = std::numeric_limits<int64_t>::max();
376 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
377 for (const auto &Info : MFI.getCalleeSavedInfo()) {
378 int FrameIdx = Info.getFrameIdx();
379 if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
380 continue;
381 int64_t Offset = MFI.getObjectOffset(FrameIdx);
382 int64_t ObjSize = MFI.getObjectSize(FrameIdx);
383 MinOffset = std::min<int64_t>(Offset, MinOffset);
384 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
385 }
386
// The Swift async context slot lives in the CSR area but is not in
// CalleeSavedInfo, so widen the extent to include it.
387 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
// NOTE(review): a line was lost in extraction here (doxygen line 388) —
// the use of `Offset` just below implies it declared something like
// `int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());`.
// Confirm against the original header.
389 int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
390 MinOffset = std::min<int64_t>(Offset, MinOffset);
391 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
392 }
393
// Likewise include the hazard-padding slot placed between GPR and FPR CSRs.
394 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
395 int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
396 int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
397 MinOffset = std::min<int64_t>(Offset, MinOffset);
398 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
399 }
400
// CSR area is kept 16-byte aligned, matching AArch64 stack alignment.
401 unsigned Size = alignTo(MaxOffset - MinOffset, 16);
402 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
403 "Invalid size calculated for callee saves");
404 return Size;
405 }
406
// NOTE(review): the fallthrough return (doxygen line 407) was lost in
// extraction — presumably `return getCalleeSavedStackSize();`, i.e. the
// cached value below. Confirm against the original header.
408 }
409
/// Return the cached CSR stack size; only valid once
/// setCalleeSavedStackSize() has recorded it (asserts otherwise).
410 unsigned getCalleeSavedStackSize() const {
411 assert(HasCalleeSavedStackSize &&
412 "CalleeSavedStackSize has not been calculated")
413 return CalleeSavedStackSize;
414 }
415
416 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
418 SVECalleeSavedStackSize = Size;
419 HasSVECalleeSavedStackSize = true;
420 }
421 unsigned getSVECalleeSavedStackSize() const {
422 assert(HasSVECalleeSavedStackSize &&
423 "SVECalleeSavedStackSize has not been calculated");
424 return SVECalleeSavedStackSize;
425 }
426
427 void setMinMaxSVECSFrameIndex(int Min, int Max) {
428 MinSVECSFrameIndex = Min;
429 MaxSVECSFrameIndex = Max;
430 }
431
432 int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
433 int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
434
435 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
437 return NumLocalDynamicTLSAccesses;
438 }
439
440 std::optional<bool> hasRedZone() const { return HasRedZone; }
441 void setHasRedZone(bool s) { HasRedZone = s; }
442
443 int getVarArgsStackIndex() const { return VarArgsStackIndex; }
444 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
445
446 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
447 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
448
449 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
450 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
451
452 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
453 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
454
455 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
456 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
457
458 unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
459 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
460
// NOTE(review): the signature line (doxygen line 461, presumably
// `bool hasStackHazardSlotIndex() const {`) was lost in extraction; the
// sentinel test below is its body. max() is the "unset" sentinel, matching
// the StackHazardSlotIndex initializer.
462 return StackHazardSlotIndex != std::numeric_limits<int>::max();
463 }
464 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
/// Assign the hazard-padding slot placed between (sorted) stack objects.
/// May only be set once (asserts the "unset" sentinel).
465 void setStackHazardSlotIndex(int Index) {
466 assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
467 StackHazardSlotIndex = Index;
468 }
469 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
// NOTE(review): the setter's signature line (doxygen line 470, presumably
// `void setStackHazardCSRSlotIndex(int Index) {`) was lost in extraction —
// the assert and assignment below are its body; also set-once semantics.
471 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
472 StackHazardCSRSlotIndex = Index;
473 }
474
475 SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
476
477 unsigned getSRetReturnReg() const { return SRetReturnReg; }
478 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
479
/// Size in bytes of the entries of jump table Idx. No bounds check: callers
/// must have populated the entry via setJumpTableEntryInfo() first.
480 unsigned getJumpTableEntrySize(int Idx) const {
481 return JumpTableEntryInfo[Idx].first;
482 }
// NOTE(review): the signature line (doxygen line 483, presumably
// `MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {`) was lost in
// extraction — the return below is its body; confirm against the original.
484 return JumpTableEntryInfo[Idx].second;
485 }
/// Record (entry size, PC-relative anchor symbol) for jump table Idx,
/// growing the table as needed.
486 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
487 if ((unsigned)Idx >= JumpTableEntryInfo.size())
488 JumpTableEntryInfo.resize(Idx+1);
489 JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
490 }
491
493
494 const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
495
496 // Shortcuts for LOH related types.
498 MCLOHType Kind;
499
500 /// Arguments of this directive. Order matters.
502
503 public:
505
507 : Kind(Kind), Args(Args.begin(), Args.end()) {
508 assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
509 }
510
511 MCLOHType getKind() const { return Kind; }
512 LOHArgs getArgs() const { return Args; }
513 };
514
517
518 const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
519
520 /// Add a LOH directive of this @p Kind and this @p Args.
522 LOHContainerSet.push_back(MILOHDirective(Kind, Args));
523 LOHRelated.insert_range(Args);
524 }
525
526 size_t
528 size_t InitialSize = LOHContainerSet.size();
529 erase_if(LOHContainerSet, [&](const auto &D) {
530 return any_of(D.getArgs(), [&](auto *Arg) { return MIs.contains(Arg); });
531 });
532 // In theory there could be an LOH with one label in MIs and another label
533 // outside MIs, however we don't know if the label outside MIs is used in
534 // any other LOHs, so we can't remove them from LOHRelated. In that case, we
535 // might produce a few extra labels, but it won't break anything.
536 LOHRelated.remove_if([&](auto *MI) { return MIs.contains(MI); });
537 return InitialSize - LOHContainerSet.size();
538 };
539
541 return ForwardedMustTailRegParms;
542 }
543
544 std::optional<int> getTaggedBasePointerIndex() const {
545 return TaggedBasePointerIndex;
546 }
547 void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
548
549 unsigned getTaggedBasePointerOffset() const {
550 return TaggedBasePointerOffset;
551 }
553 TaggedBasePointerOffset = Offset;
554 }
555
557 return CalleeSaveBaseToFrameRecordOffset;
558 }
560 CalleeSaveBaseToFrameRecordOffset = Offset;
561 }
562
563 bool shouldSignReturnAddress(const MachineFunction &MF) const;
564 bool shouldSignReturnAddress(bool SpillsLR) const;
565
567
568 bool shouldSignWithBKey() const { return SignWithBKey; }
569
570 bool hasELFSignedGOT() const { return HasELFSignedGOT; }
571
572 MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
573 void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
574
575 bool isMTETagged() const { return IsMTETagged; }
576
577 bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
578
579 bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
580
581 void setHasSwiftAsyncContext(bool HasContext) {
582 HasSwiftAsyncContext = HasContext;
583 }
584 bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
585
587 SwiftAsyncContextFrameIdx = FI;
588 }
589 int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
590
591 bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
592 bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
593
594 bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
595 void setHasStreamingModeChanges(bool HasChanges) {
596 HasStreamingModeChanges = HasChanges;
597 }
598
599 bool hasStackProbing() const { return StackProbeSize != 0; }
600
601 int64_t getStackProbeSize() const { return StackProbeSize; }
602
603private:
604 // Hold the lists of LOHs.
605 MILOHContainer LOHContainerSet;
606 SetOfInstructions LOHRelated;
607
608 SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
609};
610
611namespace yaml {
// NOTE(review): the struct header (doxygen line 612, presumably
// `struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {`)
// was lost in extraction — the fields below are its members; confirm.
// MIR-serializable mirror of the subset of AArch64FunctionInfo state that
// round-trips through YAML (see initializeBaseYamlFields / mapping below).
613 std::optional<bool> HasRedZone;
614 std::optional<uint64_t> StackSizeSVE;
615 std::optional<bool> HasStackFrame;
616
// NOTE(review): constructor declarations (doxygen lines 617-618) appear to
// have been lost in extraction here — confirm against the original header.
619
620 void mappingImpl(yaml::IO &YamlIO) override;
622};
623
// NOTE(review): the trait header (doxygen line 624, presumably
// `template <> struct MappingTraits<AArch64FunctionInfo> {`) was lost in
// extraction — `mapping` below is its member; confirm.
// Maps each optional field to its MIR YAML key; absent keys leave the
// std::optional unset.
625 static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
626 YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
627 YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE);
628 YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame);
629 }
630};
631
632} // end namespace yaml
633
634} // end namespace llvm
635
636#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:55
Register Reg
Basic Register Allocator
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
void addLOHDirective(MCLOHType Kind, MILOHArgs Args)
Add a LOH directive of this Kind and this Args.
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
void setVarArgsStackOffset(unsigned Offset)
void setTailCallReservedStack(unsigned bytes)
SmallVector< MILOHDirective, 32 > MILOHContainer
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setOutliningStyle(const std::string &Style)
const SetOfInstructions & getLOHRelated() const
void setBytesInStackArgArea(unsigned bytes)
void setSigningInstrLabel(MCSymbol *Label)
void setHasSwiftAsyncContext(bool HasContext)
std::optional< int > getTaggedBasePointerIndex() const
AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI)
unsigned getJumpTableEntrySize(int Idx) const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
size_t clearLinkerOptimizationHints(const SmallPtrSetImpl< MachineInstr * > &MIs)
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
SmallPtrSet< const MachineInstr *, 16 > SetOfInstructions
void setTaggedBasePointerOffset(unsigned Offset)
std::optional< bool > hasRedZone() const
void setSVECalleeSavedStackSize(unsigned Size)
std::optional< std::string > getOutliningStyle() const
void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI)
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
MachineFunctionInfo * clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF, const DenseMap< MachineBasicBlock *, MachineBasicBlock * > &Src2DstMBB) const override
Make a functionally equivalent copy of this MachineFunctionInfo in MF.
void setArgumentStackToRestore(unsigned bytes)
void setMinMaxSVECSFrameIndex(int Min, int Max)
void setHasStreamingModeChanges(bool HasChanges)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
static constexpr unsigned NoRegister
Definition MCRegister.h:52
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Representation of each machine instruction.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
void mapOptional(const char *Key, T &Val)
Definition YAMLTraits.h:800
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
static bool isValidMCLOHType(unsigned Kind)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
BumpPtrAllocatorImpl BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
MCLOHType
Linker Optimization Hint Type.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:155
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2100
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
void mappingImpl(yaml::IO &YamlIO) override
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI)
This class should be specialized by any type that needs to be converted to/from a YAML mapping.
Definition YAMLTraits.h:62