23 AArch64::X3, AArch64::X4, AArch64::X5,
24 AArch64::X6, AArch64::X7};
26 AArch64::H3, AArch64::H4, AArch64::H5,
27 AArch64::H6, AArch64::H7};
29 AArch64::S3, AArch64::S4, AArch64::S5,
30 AArch64::S6, AArch64::S7};
32 AArch64::D3, AArch64::D4, AArch64::D5,
33 AArch64::D6, AArch64::D7};
35 AArch64::Q3, AArch64::Q4, AArch64::Q5,
36 AArch64::Q6, AArch64::Q7};
38 AArch64::Z3, AArch64::Z4, AArch64::Z5,
39 AArch64::Z6, AArch64::Z7};
48 State.getMachineFunction().getSubtarget());
63 bool ZRegsAllocated[8];
64 for (
int I = 0;
I < 8;
I++) {
65 ZRegsAllocated[
I] = State.isAllocated(
ZRegList[
I]);
69 bool PRegsAllocated[4];
70 for (
int I = 0;
I < 4;
I++) {
71 PRegsAllocated[
I] = State.isAllocated(
PRegList[
I]);
75 auto &It = PendingMembers[0];
81 ArgFlags, OrigTy, State))
90 for (
int I = 0;
I < 8;
I++)
91 if (!ZRegsAllocated[
I])
93 for (
int I = 0;
I < 4;
I++)
94 if (!PRegsAllocated[
I])
98 PendingMembers.
clear();
103 for (
auto &It : PendingMembers) {
104 It.convertToMem(State.AllocateStack(
Size, SlotAlign));
106 SlotAlign =
Align(1);
110 PendingMembers.
clear();
139 State.getMachineFunction().getSubtarget());
145 if (LocVT.
SimpleTy == MVT::i64 || (IsDarwinILP32 && LocVT.
SimpleTy == MVT::i32))
157 if (LocVT == MVT::nxv1i1 || LocVT == MVT::nxv2i1 || LocVT == MVT::nxv4i1 ||
158 LocVT == MVT::nxv8i1 || LocVT == MVT::nxv16i1 ||
159 LocVT == MVT::aarch64svcount)
180 unsigned EltsPerReg = (IsDarwinILP32 && LocVT.
SimpleTy == MVT::i32) ? 2 : 1;
182 RegList,
alignTo(PendingMembers.
size(), EltsPerReg) / EltsPerReg);
183 if (!RegResult.
empty() && EltsPerReg == 1) {
184 for (
const auto &[It,
Reg] :
zip(PendingMembers, RegResult)) {
185 It.convertToReg(
Reg);
188 PendingMembers.
clear();
190 }
else if (!RegResult.
empty()) {
191 assert(EltsPerReg == 2 &&
"unexpected ABI");
192 bool UseHigh =
false;
195 for (
auto &It : PendingMembers) {
198 RegResult[RegIdx], MVT::i64,
Info));
203 PendingMembers.clear();
209 for (
auto Reg : RegList)
210 State.AllocateReg(
Reg);
214 State.getMachineFunction().getDataLayout().getStackAlignment();
215 assert(StackAlign &&
"data layout string is missing stack alignment");
217 Align SlotAlign = std::min(MemAlign, *StackAlign);
219 SlotAlign = std::max(SlotAlign,
Align(8));
226#include "AArch64GenCallingConv.inc"
static bool finishStackBlock(SmallVectorImpl< CCValAssign > &PendingMembers, MVT LocVT, ISD::ArgFlagsTy &ArgFlags, CCState &State, Align SlotAlign)
static const MCPhysReg XRegList[]
static const MCPhysReg SRegList[]
static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Given an [N x Ty] block, it should be passed in a consecutive sequence of registers.
static const MCPhysReg ZRegList[]
static const MCPhysReg DRegList[]
static const MCPhysReg HRegList[]
static const MCPhysReg QRegList[]
static const MCPhysReg PRegList[]
static bool CC_AArch64_Custom_Stack_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
The Darwin variadic PCS places anonymous arguments in 8-byte stack slots.
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
Analysis containing CSE Info
bool isTargetDarwin() const
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
bool empty() const
empty - Check if the array is empty.
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
bool is32BitVector() const
Return true if this is a 32-bit vector type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool is64BitVector() const
Return true if this is a 64-bit vector type.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' parameter.
void push_back(const T &Elt)
The instances of the Type class are immutable: once they are created, they are never changed.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
This struct is a compact representation of a valid (non-zero power of two) alignment.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
void setInConsecutiveRegs(bool Flag=true)
bool isInConsecutiveRegsLast() const
Align getNonZeroMemAlign() const
void setInConsecutiveRegsLast(bool Flag=true)
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.