//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
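
// For illustration: with `void f(%struct.S* sret(%struct.S) %p)`, the handler
// above records the sret pointer as a custom mem location at offset 0; the
// 32-bit lowering below then materializes it in the reserved struct-return
// slot (%sp+64 in the caller, read back from [%fp+64] in the callee).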

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign the whole thing to the stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}
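
// For illustration: an f64 passed under this rule may be split as %i0+%i1,
// ..., %i4+%i5, as %i5 plus one stack word, or go entirely on the stack once
// the six integer argument registers are exhausted.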

static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
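
// Note: returning false here makes the generated return CC fail, which in
// turn makes CanLowerReturn() reject the return; the value is then demoted
// and returned through an sret pointer instead.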

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment =
      (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
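
// Worked example (illustrative): for a prototype `f(i64, double, float)` the
// slots are allocated at offsets 0, 8 and 16, giving %i0 for the i64, the
// second double register (LLVM's D1) for the double, and F1 + 16/4 = F5 for
// the float, i.e. the odd single-precision register overlapping slot 2.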

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
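
// For illustration: passing `{i32, float}` by value uses two 4-byte subslots
// of one 8-byte slot; the i32 at offset 0 lands in the high half of %i0 (the
// custom case above) while the float at offset 4 goes to %f1.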

static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
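
// For example, toCallerWindow(SP::I0) == SP::O0: a value the callee sees in
// %i0 must be placed in %o0 on the caller's side of the `save`.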

bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
    const Type *RetTy) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type).

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(0, DL,
                                      getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(1, DL,
                                      getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain; // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
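
// Note (V8 struct returns, illustrative): a normal return is
// `jmpl %i7+8, %g0`, skipping the call and its delay slot. When the caller
// follows the delay slot with an UNIMP word encoding the struct size, the
// callee must return to %i7+12 instead, which is why RetAddrOffset is 12.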

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
              CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset+4, true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (const CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
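
// For illustration: for `int f(int x, ...)` on V9, CC_Sparc64 allocates one
// 8-byte slot for `x`, so the va_start offset becomes 8 + 128 + 2047 (one
// slot, the register save area, and the stack bias) relative to %fp.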

// Check whether any of the argument registers are reserved.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
                                const MachineFunction &MF) {
  // The register window design means that outgoing parameters at O*
  // will appear in the callee as I*.
  // Be conservative and check both sides of the register names.
  bool Outgoing =
      llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  bool Incoming =
      llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  return Outgoing || Incoming;
}

static void emitReservedArgRegCallError(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("SPARC doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // allocating some space even when all the parameters fit inside registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}
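
// Note: the 64-bit limit of 48 bytes corresponds to the six argument words
// that CC_Sparc64 always allocates; a call whose arguments all fit in
// registers still reports a stack size of up to 6 * 8 bytes.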

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // Store the SRet argument in %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret is only allowed on the first argument.
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from the float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(0, dl,
                                      getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(1, dl,
                                      getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  // If we're directly referencing register names
  // (e.g. in the GCC C extension `register int r asm("g1");`),
  // make sure that said register is in the reserve list.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!TRI->isReservedReg(MF, Reg))
    Reg = Register();

  return Reg;
}

// Fix up floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (CCValAssign &VA : ArgLocs) {
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (!Outs[VA.getValNo()].Flags.isVarArg())
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}
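
// For illustration: in `printf("%f", x)` with x a varargs double, x is first
// assigned to a float register by CC_Sparc64, then reassigned here to the
// integer register at the same argument offset (%i1 in the callee's window,
// i.e. %o1 at the call site).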

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (const auto &[Reg, N] : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (const auto &[Reg, N] : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  if (CLI.IsTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
  }
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32}', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // previous CopyFromReg node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32-bit sparc, we define a double-register 32-bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store.
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }
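
  // Note (illustrative): custom lowering of i64 LOAD/STORE above lets a
  // 64-bit access on 32-bit SPARC be selected as a single ldd/std on an
  // IntPair (v2i32) register instead of two independent 32-bit operations.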
1632
1633 // Turn FP extload into load/fpextend
1634 for (MVT VT : MVT::fp_valuetypes()) {
1635 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1636 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1637 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1638 }
1639
1640 // Sparc doesn't have i1 sign extending load
1641 for (MVT VT : MVT::integer_valuetypes())
1642 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1643
1644 // Turn FP truncstore into trunc + store.
1645 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1646 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1647 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1648 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1649 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1650 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1651
1652 // Custom legalize GlobalAddress nodes into LO/HI parts.
1653 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1654 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1655 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1656 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1657
1658 // Sparc doesn't have sext_inreg, replace them with shl/sra
1659 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1660 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1661 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1662
1663 // Sparc has no REM or DIVREM operations.
1664 setOperationAction(ISD::UREM, MVT::i32, Expand);
1665 setOperationAction(ISD::SREM, MVT::i32, Expand);
1666 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1667 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1668
1669 // ... nor does SparcV9.
1670 if (Subtarget->is64Bit()) {
1671 setOperationAction(ISD::UREM, MVT::i64, Expand);
1672 setOperationAction(ISD::SREM, MVT::i64, Expand);
1673 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1674 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1675 }
1676
1677 // Custom expand fp<->sint
1678 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1679 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1680 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1681 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1682
1683 // Custom expand fp<->uint
1684 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1685 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1686 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1687 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1688
1689 // Lower f16 conversion operations into library calls
1690 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1691 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1692 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1693 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1694 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1695 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1696
1697 setOperationAction(ISD::BITCAST, MVT::f32,
1698 Subtarget->isVIS3() ? Legal : Expand);
1699 setOperationAction(ISD::BITCAST, MVT::i32,
1700 Subtarget->isVIS3() ? Legal : Expand);
1701
1702 // Sparc has no select or setcc: expand to SELECT_CC.
1703 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1704 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1705 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1706 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1707
1708 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1709 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1710 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1711 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1712
1713 // Sparc doesn't have BRCOND either, it has BR_CC.
1714 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1715 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1716 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1717 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1718 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1719 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1720 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1721
1722 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1723 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1724 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1725 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1726
1731
1732 if (Subtarget->isVIS3()) {
1735 }
1736
1737 if (Subtarget->is64Bit()) {
1738 setOperationAction(ISD::BITCAST, MVT::f64,
1739 Subtarget->isVIS3() ? Legal : Expand);
1740 setOperationAction(ISD::BITCAST, MVT::i64,
1741 Subtarget->isVIS3() ? Legal : Expand);
1742 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1743 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1744 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1745 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1746
1747 setOperationAction(ISD::CTPOP, MVT::i64,
1748 Subtarget->usePopc() ? Legal : Expand);
1750 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1751 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1752 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1753 }
1754
1755 // ATOMICs.
1756 // Atomics are supported on SparcV9. 32-bit atomics are also
1757 // supported by some Leon SparcV8 variants. Otherwise, atomics
1758 // are unsupported.
1759 if (Subtarget->isV9()) {
1760 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1761 // but it hasn't been implemented in the backend yet.
1762 if (Subtarget->is64Bit())
1763 setMaxAtomicSizeInBitsSupported(64);
1764 else
1765 setMaxAtomicSizeInBitsSupported(32);
1766 } else if (Subtarget->hasLeonCasa())
1767 setMaxAtomicSizeInBitsSupported(32);
1768 else
1769 setMaxAtomicSizeInBitsSupported(0);
1770
1771 setMinCmpXchgSizeInBits(32);
1772
1773 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1774
1775 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1776
1777 // Custom Lower Atomic LOAD/STORE
1778 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1779 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1780
1781 if (Subtarget->is64Bit()) {
1782 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1783 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1784 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1785 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1786 }
1787
1788 if (!Subtarget->isV9()) {
1789 // SparcV8 does not have FNEGD and FABSD.
1790 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1791 setOperationAction(ISD::FABS, MVT::f64, Custom);
1792 }
1793
1794 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1795 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1796 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1797 setOperationAction(ISD::FREM , MVT::f128, Expand);
1798 setOperationAction(ISD::FMA , MVT::f128, Expand);
1799 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1800 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1801 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1802 setOperationAction(ISD::FREM , MVT::f64, Expand);
1803 setOperationAction(ISD::FMA, MVT::f64,
1804 Subtarget->isUA2007() ? Legal : Expand);
1805 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1806 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1807 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1808 setOperationAction(ISD::FREM , MVT::f32, Expand);
1809 setOperationAction(ISD::FMA, MVT::f32,
1810 Subtarget->isUA2007() ? Legal : Expand);
1811 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1812 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1813 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1814 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1815 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1816 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1817 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1818 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1819 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1820
1821 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1822 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1823 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1824
1825 // Expands to [SU]MUL_LOHI.
1826 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1827 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1828 setOperationAction(ISD::MUL, MVT::i32, Expand);
1829
1830 if (Subtarget->useSoftMulDiv()) {
1831 // .umul works for both signed and unsigned
1836 }
1837
1838 if (Subtarget->is64Bit()) {
1839 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1840 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1841 setOperationAction(ISD::MULHU, MVT::i64,
1842 Subtarget->isVIS3() ? Legal : Expand);
1843 setOperationAction(ISD::MULHS, MVT::i64,
1844 Subtarget->isVIS3() ? Legal : Expand);
1845
1849 }
1850
1851 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1852 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1853 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1854 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1855
1856 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1857 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1858
1859 // Use the default implementation.
1860 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1861 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1862 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1863 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1864 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1865
1866 setStackPointerRegisterToSaveRestore(SP::O6);
1867
1868 setOperationAction(ISD::CTPOP, MVT::i32,
1869 Subtarget->usePopc() ? Legal : Expand);
1870
1871 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1872 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1873 setOperationAction(ISD::STORE, MVT::f128, Legal);
1874 } else {
1875 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1876 setOperationAction(ISD::STORE, MVT::f128, Custom);
1877 }
1878
1879 if (Subtarget->hasHardQuad()) {
1880 setOperationAction(ISD::FADD, MVT::f128, Legal);
1881 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1882 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1883 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1884 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1885 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1886 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1887 if (Subtarget->isV9()) {
1888 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1889 setOperationAction(ISD::FABS, MVT::f128, Legal);
1890 } else {
1891 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1892 setOperationAction(ISD::FABS, MVT::f128, Custom);
1893 }
1894 } else {
1895 // Custom legalize f128 operations.
1896
1897 setOperationAction(ISD::FADD, MVT::f128, Custom);
1898 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1899 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1900 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1901 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1902 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1903 setOperationAction(ISD::FABS, MVT::f128, Custom);
1904
1905 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1906 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1907 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1908
1909 // Setup Runtime library names.
1910 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1911 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Qp_add);
1912 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Qp_sub);
1913 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Qp_mul);
1914 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Qp_div);
1915 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Qp_sqrt);
1916 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Qp_qtoi);
1917 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Qp_qtoui);
1918 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Qp_itoq);
1919 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Qp_uitoq);
1920 setLibcallImpl(RTLIB::FPTOSINT_F128_I64, RTLIB::impl__Qp_qtox);
1921 setLibcallImpl(RTLIB::FPTOUINT_F128_I64, RTLIB::impl__Qp_qtoux);
1922 setLibcallImpl(RTLIB::SINTTOFP_I64_F128, RTLIB::impl__Qp_xtoq);
1923 setLibcallImpl(RTLIB::UINTTOFP_I64_F128, RTLIB::impl__Qp_uxtoq);
1924 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Qp_stoq);
1925 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Qp_dtoq);
1926 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Qp_qtos);
1927 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Qp_qtod);
1928 } else if (!Subtarget->useSoftFloat()) {
1929 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Q_add);
1930 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Q_sub);
1931 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Q_mul);
1932 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Q_div);
1933 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Q_sqrt);
1934 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Q_qtoi);
1935 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Q_qtou);
1936 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Q_itoq);
1937 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Q_utoq);
1938 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Q_stoq);
1939 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Q_dtoq);
1940 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Q_qtos);
1941 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Q_qtod);
1942 }
1943 }
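// Added example (not from the original source): with the libcall
// implementations registered above, an f128 add becomes a call into the
// soft-quad-float runtime. On 64-bit targets the result is written through
// an explicit pointer argument, roughly:
//   long double a, b, r;
//   _Qp_add(&r, &a, &b);   // V9 ABI: result via first pointer argument
// The 32-bit _Q_add variant instead returns the value through a hidden
// sret pointer (see the IsSRet handling in LowerF128Op below).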
1944
1945 if (Subtarget->fixAllFDIVSQRT()) {
1946 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1947 // the former instructions generate errata on LEON processors.
1948 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1949 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1950 }
1951
1952 if (Subtarget->hasNoFMULS()) {
1953 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1954 }
1955
1956 // Custom combine bitcast between f64 and v2i32
1957 if (!Subtarget->is64Bit())
1958 setTargetDAGCombine(ISD::BITCAST);
1959
1960 if (Subtarget->hasLeonCycleCounter())
1961 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1962
1963 if (Subtarget->isVIS3()) {
1968
1969 setOperationAction(ISD::CTTZ, MVT::i32,
1970 Subtarget->is64Bit() ? Promote : Expand);
1973 Subtarget->is64Bit() ? Promote : Expand);
1975 } else if (Subtarget->usePopc()) {
1980
1985 } else {
1989 Subtarget->is64Bit() ? Promote : LibCall);
1991
1992 // FIXME here we don't have any ISA extensions that could help us, so to
1993 // prevent large expansions those should be made into LibCalls.
1998 }
1999
2000 setMinFunctionAlignment(Align(4));
2001
2003
2004 computeRegisterProperties(Subtarget->getRegisterInfo());
2005}
2006
2007 bool SparcTargetLowering::useSoftFloat() const {
2008 return Subtarget->useSoftFloat();
2009}
2010
2011 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2012 EVT VT) const {
2013 if (!VT.isVector())
2014 return MVT::i32;
2015 return VT.changeVectorElementTypeToInteger();
2016 }
2017
2018/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2019/// be zero. Op is expected to be a target specific node. Used by DAG
2020 /// combiner.
2021 void SparcTargetLowering::computeKnownBitsForTargetNode
2022 (const SDValue Op,
2023 KnownBits &Known,
2024 const APInt &DemandedElts,
2025 const SelectionDAG &DAG,
2026 unsigned Depth) const {
2027 KnownBits Known2;
2028 Known.resetAll();
2029
2030 switch (Op.getOpcode()) {
2031 default: break;
2032 case SPISD::SELECT_ICC:
2033 case SPISD::SELECT_XCC:
2034 case SPISD::SELECT_FCC:
2035 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2036 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2037
2038 // Only known if known in both the LHS and RHS.
2039 Known = Known.intersectWith(Known2);
2040 break;
2041 }
2042}
2043
2044 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
2045 // set LHS/RHS to the operands of the setcc and SPCC to its condition.
2046 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2047 ISD::CondCode CC, unsigned &SPCC) {
2048 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2049 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2050 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2051 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2052 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2053 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2054 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2055 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2056 SDValue CMPCC = LHS.getOperand(3);
2057 SPCC = LHS.getConstantOperandVal(2);
2058 LHS = CMPCC.getOperand(0);
2059 RHS = CMPCC.getOperand(1);
2060 }
2061}
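// Added example (not from the original source): this matches a boolean
// produced by an earlier lowered setcc that is immediately re-compared
// against zero, e.g.
//   (select_icc 1, 0, cc, (cmpicc %a, %b)) setne 0
// and rewrites LHS/RHS back to %a/%b so only one compare is emitted.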
2062
2063 // Convert to a target node and set target flags.
2064 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2065 SelectionDAG &DAG) const {
2066 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2067 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2068 SDLoc(GA),
2069 GA->getValueType(0),
2070 GA->getOffset(), TF);
2071
2073 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2074 CP->getAlign(), CP->getOffset(), TF);
2075
2077 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2078 Op.getValueType(),
2079 0,
2080 TF);
2081
2083 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2084 ES->getValueType(0), TF);
2085
2086 llvm_unreachable("Unhandled address SDNode");
2087}
2088
2089// Split Op into high and low parts according to HiTF and LoTF.
2090 // Return an ADD node combining the parts.
2091 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2092 unsigned HiTF, unsigned LoTF,
2093 SelectionDAG &DAG) const {
2094 SDLoc DL(Op);
2095 EVT VT = Op.getValueType();
2096 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2097 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2098 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2099}
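// Added note (illustrative, not from the original source): with the
// R_SPARC_HI22/R_SPARC_LO10 flags, the Hi/Lo pair built here is what lets
// the assembler emit the classic absolute-address idiom, roughly:
//   sethi %hi(sym), %o0
//   or    %o0, %lo(sym), %o0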
2100
2101// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2102 // or ExternalSymbol SDNode.
2103 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2104 SDLoc DL(Op);
2105 EVT VT = getPointerTy(DAG.getDataLayout());
2106
2107 // Handle PIC mode first. SPARC needs a GOT load for every variable.
2108 if (isPositionIndependent()) {
2109 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2110 PICLevel::Level picLevel = M->getPICLevel();
2111 SDValue Idx;
2112
2113 if (picLevel == PICLevel::SmallPIC) {
2114 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2115 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2116 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2117 } else {
2118 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2119 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2120 }
2121
2122 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2123 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2124 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2125 // function has calls.
2126 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2127 MFI.setHasCalls(true);
2128 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2129 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2130 }
2131
2132 // This is one of the absolute code models.
2133 switch(getTargetMachine().getCodeModel()) {
2134 default:
2135 llvm_unreachable("Unsupported absolute code model");
2136 case CodeModel::Small:
2137 // abs32.
2138 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2139 case CodeModel::Medium: {
2140 // abs44.
2141 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2142 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2143 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2144 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2145 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2146 }
2147 case CodeModel::Large: {
2148 // abs64.
2149 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2150 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2151 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2152 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2153 }
2154 }
2155}
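// Added sketch (not from the original source): for CodeModel::Medium the
// nodes built above correspond to the abs44 materialization sequence,
// here assuming %o0 as the scratch register:
//   sethi %h44(sym), %o0
//   or    %o0, %m44(sym), %o0
//   sllx  %o0, 12, %o0
//   add   %o0, %l44(sym), %o0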
2156
2157 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2158 SelectionDAG &DAG) const {
2159 return makeAddress(Op, DAG);
2160 }
2161
2162 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2163 SelectionDAG &DAG) const {
2164 return makeAddress(Op, DAG);
2165 }
2166
2167 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2168 SelectionDAG &DAG) const {
2169 return makeAddress(Op, DAG);
2170 }
2171
2172 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2173 SelectionDAG &DAG) const {
2174
2175 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2176 if (DAG.getTarget().useEmulatedTLS())
2177 return LowerToTLSEmulatedModel(GA, DAG);
2178
2179 SDLoc DL(GA);
2180 const GlobalValue *GV = GA->getGlobal();
2181 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2182
2183 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2184
2185 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2186 unsigned HiTF =
2187 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2188 : ELF::R_SPARC_TLS_LDM_HI22);
2189 unsigned LoTF =
2190 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2191 : ELF::R_SPARC_TLS_LDM_LO10);
2192 unsigned addTF =
2193 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2194 : ELF::R_SPARC_TLS_LDM_ADD);
2195 unsigned callTF =
2196 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2197 : ELF::R_SPARC_TLS_LDM_CALL);
2198
2199 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2200 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2201 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2202 withTargetFlags(Op, addTF, DAG));
2203
2204 SDValue Chain = DAG.getEntryNode();
2205 SDValue InGlue;
2206
2207 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2208 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2209 InGlue = Chain.getValue(1);
2210 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2211 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2212
2213 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2214 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2215 DAG.getMachineFunction(), CallingConv::C);
2216 assert(Mask && "Missing call preserved mask for calling convention");
2217 SDValue Ops[] = {Chain,
2218 Callee,
2219 Symbol,
2220 DAG.getRegister(SP::O0, PtrVT),
2221 DAG.getRegisterMask(Mask),
2222 InGlue};
2223 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2224 InGlue = Chain.getValue(1);
2225 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2226 InGlue = Chain.getValue(1);
2227 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2228
2229 if (model != TLSModel::LocalDynamic)
2230 return Ret;
2231
2232 SDValue Hi =
2233 DAG.getNode(SPISD::Hi, DL, PtrVT,
2234 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2235 SDValue Lo =
2236 DAG.getNode(SPISD::Lo, DL, PtrVT,
2237 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2238 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2239 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2240 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2241 }
2242
2243 if (model == TLSModel::InitialExec) {
2244 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2245 : ELF::R_SPARC_TLS_IE_LD);
2246
2247 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2248
2249 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2250 // function has calls.
2251 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2252 MFI.setHasCalls(true);
2253
2254 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2255 ELF::R_SPARC_TLS_IE_LO10, DAG);
2256 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2257 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2258 DL, PtrVT, Ptr,
2259 withTargetFlags(Op, ldTF, DAG));
2260 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2261 DAG.getRegister(SP::G7, PtrVT), Offset,
2262 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2263 }
2264
2265 assert(model == TLSModel::LocalExec);
2266 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2267 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2268 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2269 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2270 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2271
2272 return DAG.getNode(ISD::ADD, DL, PtrVT,
2273 DAG.getRegister(SP::G7, PtrVT), Offset);
2274}
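// Added sketch (not from the original source): the Local Exec path above
// corresponds to the standard SPARC TLS LE sequence, roughly:
//   sethi %tle_hix22(sym), %o0
//   xor   %o0, %tle_lox10(sym), %o0
//   add   %g7, %o0, %o0        ; %g7 holds the thread pointer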
2275
2276 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2277 ArgListTy &Args, SDValue Arg,
2278 const SDLoc &DL,
2279 SelectionDAG &DAG) const {
2280 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2281 EVT ArgVT = Arg.getValueType();
2282 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2283
2284 if (ArgTy->isFP128Ty()) {
2285 // Create a stack object and pass the pointer to the library function.
2286 int FI = MFI.CreateStackObject(16, Align(8), false);
2287 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2288 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2289 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2290 } else {
2291 Args.emplace_back(Arg, ArgTy);
2292 }
2293 return Chain;
2294}
2295
2296 SDValue
2297 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2298 const char *LibFuncName,
2299 unsigned numArgs) const {
2300
2301 ArgListTy Args;
2302
2303 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2304 auto PtrVT = getPointerTy(DAG.getDataLayout());
2305
2306 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2307 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2308 Type *RetTyABI = RetTy;
2309 SDValue Chain = DAG.getEntryNode();
2310 SDValue RetPtr;
2311
2312 if (RetTy->isFP128Ty()) {
2313 // Create a Stack Object to receive the return value of type f128.
2314 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2315 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2316 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2317 if (!Subtarget->is64Bit()) {
2318 Entry.IsSRet = true;
2319 Entry.IndirectType = RetTy;
2320 }
2321 Entry.IsReturned = false;
2322 Args.push_back(Entry);
2323 RetTyABI = Type::getVoidTy(*DAG.getContext());
2324 }
2325
2326 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2327 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2328 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2329 }
2330 TargetLowering::CallLoweringInfo CLI(DAG);
2331 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2332 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2333
2334 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2335
2336 // chain is in second result.
2337 if (RetTyABI == RetTy)
2338 return CallInfo.first;
2339
2340 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2341
2342 Chain = CallInfo.second;
2343
2344 // Load RetPtr to get the return value.
2345 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2346 MachinePointerInfo());
2347 }
2348
2349 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2350 unsigned &SPCC, const SDLoc &DL,
2351 SelectionDAG &DAG) const {
2352
2353 const char *LibCall = nullptr;
2354 bool is64Bit = Subtarget->is64Bit();
2355 switch(SPCC) {
2356 default: llvm_unreachable("Unhandled conditional code!");
2357 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2358 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2359 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2360 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2361 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2362 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2363 case SPCC::FCC_UL :
2364 case SPCC::FCC_ULE:
2365 case SPCC::FCC_UG :
2366 case SPCC::FCC_UGE:
2367 case SPCC::FCC_U :
2368 case SPCC::FCC_O :
2369 case SPCC::FCC_LG :
2370 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2371 }
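// Added note (assumption, not stated in the original source): the masks and
// constants below appear to rely on the comparison helpers returning a small
// code -- 0 (equal), 1 (less), 2 (greater), 3 (unordered) -- so that each
// unordered condition can be rebuilt with a single integer compare,
// optionally after masking the result.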
2372
2373 auto PtrVT = getPointerTy(DAG.getDataLayout());
2374 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2375 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2376 ArgListTy Args;
2377 SDValue Chain = DAG.getEntryNode();
2378 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2379 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2380
2381 TargetLowering::CallLoweringInfo CLI(DAG);
2382 CLI.setDebugLoc(DL).setChain(Chain)
2383 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2384
2385 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2386
2387 // result is in first, and chain is in second result.
2388 SDValue Result = CallInfo.first;
2389
2390 switch(SPCC) {
2391 default: {
2392 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2393 SPCC = SPCC::ICC_NE;
2394 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2395 }
2396 case SPCC::FCC_UL : {
2397 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2398 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2399 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2400 SPCC = SPCC::ICC_NE;
2401 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2402 }
2403 case SPCC::FCC_ULE: {
2404 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2405 SPCC = SPCC::ICC_NE;
2406 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2407 }
2408 case SPCC::FCC_UG : {
2409 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2410 SPCC = SPCC::ICC_G;
2411 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2412 }
2413 case SPCC::FCC_UGE: {
2414 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2415 SPCC = SPCC::ICC_NE;
2416 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2417 }
2418
2419 case SPCC::FCC_U : {
2420 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2421 SPCC = SPCC::ICC_E;
2422 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2423 }
2424 case SPCC::FCC_O : {
2425 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2426 SPCC = SPCC::ICC_NE;
2427 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2428 }
2429 case SPCC::FCC_LG : {
2430 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2431 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2432 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2433 SPCC = SPCC::ICC_NE;
2434 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2435 }
2436 case SPCC::FCC_UE : {
2437 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2438 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2439 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2440 SPCC = SPCC::ICC_E;
2441 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2442 }
2443 }
2444}
2445
2446static SDValue
2448 const SparcTargetLowering &TLI) {
2449
2450 if (Op.getOperand(0).getValueType() == MVT::f64)
2451 return TLI.LowerF128Op(Op, DAG,
2452 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2453
2454 if (Op.getOperand(0).getValueType() == MVT::f32)
2455 return TLI.LowerF128Op(Op, DAG,
2456 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2457
2458 llvm_unreachable("fpextend with non-float operand!");
2459 return SDValue();
2460}
2461
2462static SDValue
2464 const SparcTargetLowering &TLI) {
2465 // FP_ROUND on f64 and f32 are legal.
2466 if (Op.getOperand(0).getValueType() != MVT::f128)
2467 return Op;
2468
2469 if (Op.getValueType() == MVT::f64)
2470 return TLI.LowerF128Op(Op, DAG,
2471 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2472 if (Op.getValueType() == MVT::f32)
2473 return TLI.LowerF128Op(Op, DAG,
2474 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2475
2476 llvm_unreachable("fpround to non-float!");
2477 return SDValue();
2478}
2479
2480 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2481 const SparcTargetLowering &TLI,
2482 bool hasHardQuad) {
2483 SDLoc dl(Op);
2484 EVT VT = Op.getValueType();
2485 assert(VT == MVT::i32 || VT == MVT::i64);
2486
2487 // Expand f128 operations to fp128 abi calls.
2488 if (Op.getOperand(0).getValueType() == MVT::f128
2489 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2490 const char *libName = TLI.getLibcallName(VT == MVT::i32
2491 ? RTLIB::FPTOSINT_F128_I32
2492 : RTLIB::FPTOSINT_F128_I64);
2493 return TLI.LowerF128Op(Op, DAG, libName, 1);
2494 }
2495
2496 // Expand if the resulting type is illegal.
2497 if (!TLI.isTypeLegal(VT))
2498 return SDValue();
2499
2500 // Otherwise, convert the FP value to an integer in an FP register.
2501 if (VT == MVT::i32)
2502 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2503 else
2504 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2505
2506 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2507}
2508
2509 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2510 const SparcTargetLowering &TLI,
2511 bool hasHardQuad) {
2512 SDLoc dl(Op);
2513 EVT OpVT = Op.getOperand(0).getValueType();
2514 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2515
2516 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2517
2518 // Expand f128 operations to fp128 ABI calls.
2519 if (Op.getValueType() == MVT::f128
2520 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2521 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2522 ? RTLIB::SINTTOFP_I32_F128
2523 : RTLIB::SINTTOFP_I64_F128);
2524 return TLI.LowerF128Op(Op, DAG, libName, 1);
2525 }
2526
2527 // Expand if the operand type is illegal.
2528 if (!TLI.isTypeLegal(OpVT))
2529 return SDValue();
2530
2531 // Otherwise, convert the int value to FP in an FP register.
2532 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2533 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2534 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2535}
2536
2537 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2538 const SparcTargetLowering &TLI,
2539 bool hasHardQuad) {
2540 EVT VT = Op.getValueType();
2541
2542 // Expand if it does not involve f128 or the target has support for
2543 // quad floating point instructions and the resulting type is legal.
2544 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2545 (hasHardQuad && TLI.isTypeLegal(VT)))
2546 return SDValue();
2547
2548 assert(VT == MVT::i32 || VT == MVT::i64);
2549
2550 return TLI.LowerF128Op(Op, DAG,
2551 TLI.getLibcallName(VT == MVT::i32
2552 ? RTLIB::FPTOUINT_F128_I32
2553 : RTLIB::FPTOUINT_F128_I64),
2554 1);
2555}
2556
2557 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2558 const SparcTargetLowering &TLI,
2559 bool hasHardQuad) {
2560 EVT OpVT = Op.getOperand(0).getValueType();
2561 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2562
2563 // Expand if it does not involve f128 or the target has support for
2564 // quad floating point instructions and the operand type is legal.
2565 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2566 return SDValue();
2567
2568 return TLI.LowerF128Op(Op, DAG,
2569 TLI.getLibcallName(OpVT == MVT::i32
2570 ? RTLIB::UINTTOFP_I32_F128
2571 : RTLIB::UINTTOFP_I64_F128),
2572 1);
2573}
2574
2575 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2576 const SparcTargetLowering &TLI, bool hasHardQuad,
2577 bool isV9, bool is64Bit) {
2578 SDValue Chain = Op.getOperand(0);
2579 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2580 SDValue LHS = Op.getOperand(2);
2581 SDValue RHS = Op.getOperand(3);
2582 SDValue Dest = Op.getOperand(4);
2583 SDLoc dl(Op);
2584 unsigned Opc, SPCC = ~0U;
2585
2586 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2587 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2588 LookThroughSetCC(LHS, RHS, CC, SPCC);
2589 assert(LHS.getValueType() == RHS.getValueType());
2590
2591 // Get the condition flag.
2592 SDValue CompareFlag;
2593 if (LHS.getValueType().isInteger()) {
2594 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2595 // and the RHS is zero we might be able to use a specialized branch.
2596 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2597 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
2598 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2599 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2600 LHS);
2601
2602 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2603 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2604 if (isV9)
2605 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2606 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2607 else
2608 // Non-v9 targets don't have xcc.
2609 Opc = SPISD::BRICC;
2610 } else {
2611 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2612 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2613 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2614 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2615 } else {
2616 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2617 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2618 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2619 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2620 }
2621 }
2622 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2623 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2624}
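// Added example (not from the original source): for a 32-bit integer
// br_cc with a seteq condition, the nodes built above normally select to
//   cmp %o0, %o1
//   be  .LBB0_1        ; be %icc, .LBB0_1 when using the V9 BPICC form
//   nop
// with the comparison glued to the conditional branch.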
2625
2626 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2627 const SparcTargetLowering &TLI, bool hasHardQuad,
2628 bool isV9, bool is64Bit) {
2629 SDValue LHS = Op.getOperand(0);
2630 SDValue RHS = Op.getOperand(1);
2631 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2632 SDValue TrueVal = Op.getOperand(2);
2633 SDValue FalseVal = Op.getOperand(3);
2634 SDLoc dl(Op);
2635 unsigned Opc, SPCC = ~0U;
2636
2637 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2638 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2639 LookThroughSetCC(LHS, RHS, CC, SPCC);
2640 assert(LHS.getValueType() == RHS.getValueType());
2641
2642 SDValue CompareFlag;
2643 if (LHS.getValueType().isInteger()) {
2644 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2645 // and the RHS is zero we might be able to use a specialized select.
2646 // All SELECT_CC between any two scalar integer types are eligible for
2647 // lowering to specialized instructions. Additionally, f32 and f64 types
2648 // are also eligible, but for f128 we can only use the specialized
2649 // instruction when we have hardquad.
2650 EVT ValType = TrueVal.getValueType();
2651 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2652 ValType == MVT::f64 ||
2653 (ValType == MVT::f128 && hasHardQuad);
2654 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2655 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2656 return DAG.getNode(
2657 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2658 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2659
2660 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2661 Opc = LHS.getValueType() == MVT::i32 ?
2662 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2663 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2664 } else {
2665 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2666 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2667 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2668 Opc = SPISD::SELECT_ICC;
2669 } else {
2670 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2671 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2672 Opc = SPISD::SELECT_FCC;
2673 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2674 }
2675 }
2676 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2677 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2678}
2679
2680 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2681 const SparcTargetLowering &TLI) {
2682 MachineFunction &MF = DAG.getMachineFunction();
2683 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2684 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2685
2686 // Need frame address to find the address of VarArgsFrameIndex.
2688
2689 // vastart just stores the address of the VarArgsFrameIndex slot into the
2690 // memory location argument.
2691 SDLoc DL(Op);
2692 SDValue Offset =
2693 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2694 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2695 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2696 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2697 MachinePointerInfo(SV));
2698}
2699
2701 SDNode *Node = Op.getNode();
2702 EVT VT = Node->getValueType(0);
2703 SDValue InChain = Node->getOperand(0);
2704 SDValue VAListPtr = Node->getOperand(1);
2705 EVT PtrVT = VAListPtr.getValueType();
2706 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2707 SDLoc DL(Node);
2708 SDValue VAList =
2709 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2710 // Increment the pointer, VAList, to the next vaarg.
2711 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2713 DL));
2714 // Store the incremented VAList to the legalized pointer.
2715 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2716 MachinePointerInfo(SV));
2717 // Load the actual argument out of the pointer VAList.
2718 // We can't count on greater alignment than the word size.
2719 return DAG.getLoad(
2720 VT, DL, InChain, VAList, MachinePointerInfo(),
2721 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2722}
2723
2724 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2725 const SparcSubtarget *Subtarget) {
2726 SDValue Chain = Op.getOperand(0);
2727 SDValue Size = Op.getOperand(1);
2728 SDValue Alignment = Op.getOperand(2);
2729 MaybeAlign MaybeAlignment =
2730 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2731 EVT VT = Size->getValueType(0);
2732 SDLoc dl(Op);
2733
2734 unsigned SPReg = SP::O6;
2735 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2736
2737 // The resultant pointer needs to be above the register spill area
2738 // at the bottom of the stack.
2739 unsigned regSpillArea;
2740 if (Subtarget->is64Bit()) {
2741 regSpillArea = 128;
2742 } else {
2743 // On Sparc32, the size of the spill area is 92. Unfortunately,
2744 // that's only 4-byte aligned, not 8-byte aligned (the stack
2745 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2746 // aligned dynamic allocation, we actually need to add 96 to the
2747 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2748
2749 // That also means adding 4 to the size of the allocation --
2750 // before applying the 8-byte rounding. Unfortunately, the value
2751 // we get here has already had rounding applied. So, we need
2752 // to add 8, instead, wasting a bit more memory.
2753
2754 // Further, this only actually needs to be done if the required
2755 // alignment is > 4, but, we've lost that info by this point, too,
2756 // so we always apply it.
2757
2758 // (An alternative approach would be to always reserve 96 bytes
2759 // instead of the required 92, but then we'd waste 4 extra bytes
2760 // in every frame, not just those with dynamic stack allocations)
2761
2762 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2763
2764 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2765 DAG.getConstant(8, dl, VT));
2766 regSpillArea = 96;
2767 }
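// Added worked example (illustrative only): a 40-byte alloca on Sparc32
// becomes Size = 48 after the +8 adjustment above, and the returned
// pointer ends up regSpillArea = 96 bytes above the updated %sp, which
// keeps an 8-byte-aligned request 8-byte aligned.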
2768
2769 int64_t Bias = Subtarget->getStackPointerBias();
2770
2771 // Debias and increment SP past the reserved spill area.
2772 // We need the SP to point to the first usable region before calculating
2773 // anything to prevent any of the pointers from becoming out of alignment when
2774 // we rebias the SP later on.
2775 SDValue StartOfUsableStack = DAG.getNode(
2776 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2777 SDValue AllocatedPtr =
2778 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2779
2780 bool IsOveraligned = MaybeAlignment.has_value();
2781 SDValue AlignedPtr =
2782 IsOveraligned
2783 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2784 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2785 : AllocatedPtr;
2786
2787 // Now that we are done, restore the bias and reserved spill area.
2788 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2789 DAG.getConstant(regSpillArea + Bias, dl, VT));
2790 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2791 SDValue Ops[2] = {AlignedPtr, Chain};
2792 return DAG.getMergeValues(Ops, dl);
2793}
2794
2795
2796 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2797 SDLoc dl(Op);
2798 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2799 dl, MVT::Other, DAG.getEntryNode());
2800 return Chain;
2801}
2802
2803 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2804 const SparcSubtarget *Subtarget,
2805 bool AlwaysFlush = false) {
2806 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2807 MFI.setFrameAddressIsTaken(true);
2808
2809 EVT VT = Op.getValueType();
2810 SDLoc dl(Op);
2811 unsigned FrameReg = SP::I6;
2812 unsigned stackBias = Subtarget->getStackPointerBias();
2813
2814 SDValue FrameAddr;
2815 SDValue Chain;
2816
2817 // Flush first to make sure the windowed registers' values are on the stack.
2818 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2819
2820 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2821
2822 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2823
2824 while (depth--) {
2825 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2826 DAG.getIntPtrConstant(Offset, dl));
2827 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2828 }
2829 if (Subtarget->is64Bit())
2830 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2831 DAG.getIntPtrConstant(stackBias, dl));
2832 return FrameAddr;
2833}
2834
2835
2836 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2837 const SparcSubtarget *Subtarget) {
2838
2839 uint64_t depth = Op.getConstantOperandVal(0);
2840
2841 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2842
2843}
2844
2845 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2846 const SparcTargetLowering &TLI,
2847 const SparcSubtarget *Subtarget) {
2848 MachineFunction &MF = DAG.getMachineFunction();
2849 MachineFrameInfo &MFI = MF.getFrameInfo();
2850 MFI.setReturnAddressIsTaken(true);
2851
2852 EVT VT = Op.getValueType();
2853 SDLoc dl(Op);
2854 uint64_t depth = Op.getConstantOperandVal(0);
2855
2856 SDValue RetAddr;
2857 if (depth == 0) {
2858 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2859 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2860 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2861 return RetAddr;
2862 }
2863
2864 // Need frame address to find return address of the caller.
2865 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2866
2867 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2868 SDValue Ptr = DAG.getNode(ISD::ADD,
2869 dl, VT,
2870 FrameAddr,
2871 DAG.getIntPtrConstant(Offset, dl));
2872 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2873
2874 return RetAddr;
2875}
2876
2877static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2878 unsigned opcode) {
2879 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2880 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2881
2882 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2883 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2884 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2885
2886 // Note: in little-endian, the floating-point value is stored in the
2887 // registers in the opposite order, so the subreg with the sign
2888 // bit is the highest-numbered (odd), rather than the
2889 // lowest-numbered (even).
2890
2891 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2892 SrcReg64);
2893 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2894 SrcReg64);
2895
2896 if (DAG.getDataLayout().isLittleEndian())
2897 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2898 else
2899 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2900
2901 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2902 dl, MVT::f64), 0);
2903 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2904 DstReg64, Hi32);
2905 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2906 DstReg64, Lo32);
2907 return DstReg64;
2908}
2909
2910// Lower a f128 load into two f64 loads.
2911 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2912 {
2913 SDLoc dl(Op);
2914 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2915 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2916
2917 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2918
2919 SDValue Hi64 =
2920 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2921 LdNode->getPointerInfo(), Alignment);
2922 EVT addrVT = LdNode->getBasePtr().getValueType();
2923 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2924 LdNode->getBasePtr(),
2925 DAG.getConstant(8, dl, addrVT));
2926 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2927 LdNode->getPointerInfo().getWithOffset(8),
2928 Alignment);
2929
2930 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2931 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2932
2933 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2934 dl, MVT::f128);
2935 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2936 MVT::f128,
2937 SDValue(InFP128, 0),
2938 Hi64,
2939 SubRegEven);
2940 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2941 MVT::f128,
2942 SDValue(InFP128, 0),
2943 Lo64,
2944 SubRegOdd);
2945 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2946 SDValue(Lo64.getNode(), 1) };
2947 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2948 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2949 return DAG.getMergeValues(Ops, dl);
2950}
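// Added sketch (not from the original source): the two f64 halves loaded
// above typically materialize as a pair of 8-byte loads, roughly:
//   ldd [%o0], %f0      ; high half -> sub_even64
//   ldd [%o0+8], %f2    ; low half  -> sub_odd64
// with both load chains merged through the TokenFactor.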
2951
2952 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2953 {
2954 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2955
2956 EVT MemVT = LdNode->getMemoryVT();
2957 if (MemVT == MVT::f128)
2958 return LowerF128Load(Op, DAG);
2959
2960 return Op;
2961}
2962
2963// Lower a f128 store into two f64 stores.
2964 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2965 SDLoc dl(Op);
2966 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2967 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2968
2969 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2970 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2971
2972 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2973 dl,
2974 MVT::f64,
2975 StNode->getValue(),
2976 SubRegEven);
2977 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2978 dl,
2979 MVT::f64,
2980 StNode->getValue(),
2981 SubRegOdd);
2982
2983 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
2984
2985 SDValue OutChains[2];
2986 OutChains[0] =
2987 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2988 StNode->getBasePtr(), StNode->getPointerInfo(),
2989 Alignment);
2990 EVT addrVT = StNode->getBasePtr().getValueType();
2991 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2992 StNode->getBasePtr(),
2993 DAG.getConstant(8, dl, addrVT));
2994 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2995 StNode->getPointerInfo().getWithOffset(8),
2996 Alignment);
2997 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2998}
2999
3000 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3001 {
3002 SDLoc dl(Op);
3003 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3004
3005 EVT MemVT = St->getMemoryVT();
3006 if (MemVT == MVT::f128)
3007 return LowerF128Store(Op, DAG);
3008
3009 if (MemVT == MVT::i64) {
3010 // Custom handling for i64 stores: turn it into a bitcast and a
3011 // v2i32 store.
3012 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3013 SDValue Chain = DAG.getStore(
3014 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3015 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3016 return Chain;
3017 }
3018
3019 return SDValue();
3020}
3021
3022 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3023 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3024 && "invalid opcode");
3025
3026 SDLoc dl(Op);
3027
3028 if (Op.getValueType() == MVT::f64)
3029 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3030 if (Op.getValueType() != MVT::f128)
3031 return Op;
3032
3033 // Lower fabs/fneg on f128 to fabs/fneg on f64
3034 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3035 // (As with LowerF64Op, on little-endian, we need to negate the odd
3036 // subreg)
3037
3038 SDValue SrcReg128 = Op.getOperand(0);
3039 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3040 SrcReg128);
3041 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3042 SrcReg128);
3043
3044 if (DAG.getDataLayout().isLittleEndian()) {
3045 if (isV9)
3046 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3047 else
3048 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3049 } else {
3050 if (isV9)
3051 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3052 else
3053 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3054 }
3055
3056 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3057 dl, MVT::f128), 0);
3058 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3059 DstReg128, Hi64);
3060 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3061 DstReg128, Lo64);
3062 return DstReg128;
3063}
3064
3065 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3066 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3067 // Expand with a fence.
3068 return SDValue();
3069 }
3070
3071 // Monotonic load/stores are legal.
3072 return Op;
3073}
3074
3075 SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3076 SelectionDAG &DAG) const {
3077 unsigned IntNo = Op.getConstantOperandVal(0);
3078 switch (IntNo) {
3079 default: return SDValue(); // Don't custom lower most intrinsics.
3080 case Intrinsic::thread_pointer: {
3081 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3082 return DAG.getRegister(SP::G7, PtrVT);
3083 }
3084 }
3085}
3086
3087 SDValue SparcTargetLowering::
3088 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3089
3090 bool hasHardQuad = Subtarget->hasHardQuad();
3091 bool isV9 = Subtarget->isV9();
3092 bool is64Bit = Subtarget->is64Bit();
3093
3094 switch (Op.getOpcode()) {
3095 default: llvm_unreachable("Should not custom lower this!");
3096
3097 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3098 Subtarget);
3099 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3100 Subtarget);
3101 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3102 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3103 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3104 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3105 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3106 hasHardQuad);
3107 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3108 hasHardQuad);
3109 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3110 hasHardQuad);
3111 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3112 hasHardQuad);
3113 case ISD::BR_CC:
3114 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3115 case ISD::SELECT_CC:
3116 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3117 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3118 case ISD::VAARG: return LowerVAARG(Op, DAG);
3119 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3120 Subtarget);
3121
3122 case ISD::LOAD: return LowerLOAD(Op, DAG);
3123 case ISD::STORE: return LowerSTORE(Op, DAG);
3124 case ISD::FADD: return LowerF128Op(Op, DAG,
3125 getLibcallName(RTLIB::ADD_F128), 2);
3126 case ISD::FSUB: return LowerF128Op(Op, DAG,
3127 getLibcallName(RTLIB::SUB_F128), 2);
3128 case ISD::FMUL: return LowerF128Op(Op, DAG,
3129 getLibcallName(RTLIB::MUL_F128), 2);
3130 case ISD::FDIV: return LowerF128Op(Op, DAG,
3131 getLibcallName(RTLIB::DIV_F128), 2);
3132 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3133 getLibcallName(RTLIB::SQRT_F128),1);
3134 case ISD::FABS:
3135 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3136 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3137 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3138 case ISD::ATOMIC_LOAD:
3139 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3140 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3141 }
3142}
3143
3144 SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3145 const SDLoc &DL,
3146 SelectionDAG &DAG) const {
3147 APInt V = C->getValueAPF().bitcastToAPInt();
3148 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3149 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3150 if (DAG.getDataLayout().isLittleEndian())
3151 std::swap(Lo, Hi);
3152 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3153}
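// Added worked example (illustrative only): f64 1.0 has bit pattern
// 0x3FF0000000000000, so this builds the v2i32 vector
// { 0x3FF00000, 0x00000000 }, with the two halves swapped first on
// little-endian targets.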
3154
3155 SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3156 DAGCombinerInfo &DCI) const {
3157 SDLoc dl(N);
3158 SDValue Src = N->getOperand(0);
3159
3160 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3161 Src.getSimpleValueType() == MVT::f64)
3162 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3163
3164 return SDValue();
3165}
3166
3167 SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3168 DAGCombinerInfo &DCI) const {
3169 switch (N->getOpcode()) {
3170 default:
3171 break;
3172 case ISD::BITCAST:
3173 return PerformBITCASTCombine(N, DCI);
3174 }
3175 return SDValue();
3176}
3177
3178 MachineBasicBlock *
3179 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3180 MachineBasicBlock *BB) const {
3181 switch (MI.getOpcode()) {
3182 default: llvm_unreachable("Unknown SELECT_CC!");
3183 case SP::SELECT_CC_Int_ICC:
3184 case SP::SELECT_CC_FP_ICC:
3185 case SP::SELECT_CC_DFP_ICC:
3186 case SP::SELECT_CC_QFP_ICC:
3187 if (Subtarget->isV9())
3188 return expandSelectCC(MI, BB, SP::BPICC);
3189 return expandSelectCC(MI, BB, SP::BCOND);
3190 case SP::SELECT_CC_Int_XCC:
3191 case SP::SELECT_CC_FP_XCC:
3192 case SP::SELECT_CC_DFP_XCC:
3193 case SP::SELECT_CC_QFP_XCC:
3194 return expandSelectCC(MI, BB, SP::BPXCC);
3195 case SP::SELECT_CC_Int_FCC:
3196 case SP::SELECT_CC_FP_FCC:
3197 case SP::SELECT_CC_DFP_FCC:
3198 case SP::SELECT_CC_QFP_FCC:
3199 if (Subtarget->isV9())
3200 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3201 return expandSelectCC(MI, BB, SP::FBCOND);
3202 }
3203}
3204
3205 MachineBasicBlock *
3206 SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3207 unsigned BROpcode) const {
3208 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3209 DebugLoc dl = MI.getDebugLoc();
3210 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3211
3212 // To "insert" a SELECT_CC instruction, we actually have to insert the
3213 // triangle control-flow pattern. The incoming instruction knows the
3214 // destination vreg to set, the condition code register to branch on, the
3215 // true/false values to select between, and the condition code for the branch.
3216 //
3217 // We produce the following control flow:
3218 // ThisMBB
3219 // | \
3220 // | IfFalseMBB
3221 // | /
3222 // SinkMBB
3223 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3224 MachineFunction::iterator It = ++BB->getIterator();
3225
3226 MachineBasicBlock *ThisMBB = BB;
3227 MachineFunction *F = BB->getParent();
3228 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3229 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3230 F->insert(It, IfFalseMBB);
3231 F->insert(It, SinkMBB);
3232
3233 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3234 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3235 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3236 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3237
3238 // Set the new successors for ThisMBB.
3239 ThisMBB->addSuccessor(IfFalseMBB);
3240 ThisMBB->addSuccessor(SinkMBB);
3241
3242 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3243 .addMBB(SinkMBB)
3244 .addImm(CC);
3245
3246 // IfFalseMBB just falls through to SinkMBB.
3247 IfFalseMBB->addSuccessor(SinkMBB);
3248
3249 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3250 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3251 MI.getOperand(0).getReg())
3252 .addReg(MI.getOperand(1).getReg())
3253 .addMBB(ThisMBB)
3254 .addReg(MI.getOperand(2).getReg())
3255 .addMBB(IfFalseMBB);
3256
3257 MI.eraseFromParent(); // The pseudo instruction is gone now.
3258 return SinkMBB;
3259}
3260
3261//===----------------------------------------------------------------------===//
3262// Sparc Inline Assembly Support
3263//===----------------------------------------------------------------------===//
3264
3265/// getConstraintType - Given a constraint letter, return the type of
3266/// constraint it is for this target.
3267 SparcTargetLowering::ConstraintType
3268 SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3269 if (Constraint.size() == 1) {
3270 switch (Constraint[0]) {
3271 default: break;
3272 case 'r':
3273 case 'f':
3274 case 'e':
3275 return C_RegisterClass;
3276 case 'I': // SIMM13
3277 return C_Immediate;
3278 }
3279 }
3280
3281 return TargetLowering::getConstraintType(Constraint);
3282}
3283
3284 TargetLowering::ConstraintWeight
3285 SparcTargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &info,
3286 const char *constraint) const {
3287 ConstraintWeight weight = CW_Invalid;
3288 Value *CallOperandVal = info.CallOperandVal;
3289 // If we don't have a value, we can't do a match,
3290 // but allow it at the lowest weight.
3291 if (!CallOperandVal)
3292 return CW_Default;
3293
3294 // Look at the constraint type.
3295 switch (*constraint) {
3296 default:
3297 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3298 break;
3299 case 'I': // SIMM13
3300 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3301 if (isInt<13>(C->getSExtValue()))
3302 weight = CW_Constant;
3303 }
3304 break;
3305 }
3306 return weight;
3307}
3308
3309/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3310/// vector. If it is invalid, don't add anything to Ops.
3312 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3313 SelectionDAG &DAG) const {
3314 SDValue Result;
3315
3316 // Only support length 1 constraints for now.
3317 if (Constraint.size() > 1)
3318 return;
3319
3320 char ConstraintLetter = Constraint[0];
3321 switch (ConstraintLetter) {
3322 default: break;
3323 case 'I':
3324 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3325 if (isInt<13>(C->getSExtValue())) {
3326 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3327 Op.getValueType());
3328 break;
3329 }
3330 return;
3331 }
3332 }
3333
3334 if (Result.getNode()) {
3335 Ops.push_back(Result);
3336 return;
3337 }
3338 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3339 }
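// Added usage example (not from the original source): the 'I' constraint
// accepts any constant that fits a signed 13-bit immediate, e.g.
//   asm("add %1, %2, %0" : "=r"(res) : "r"(a), "I"(42));
// while a value such as 5000 (> 4095) would be materialized into a
// register instead.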
3340
3341std::pair<unsigned, const TargetRegisterClass *>
3343 StringRef Constraint,
3344 MVT VT) const {
3345 if (Constraint.empty())
3346 return std::make_pair(0U, nullptr);
3347
3348 if (Constraint.size() == 1) {
3349 switch (Constraint[0]) {
3350 case 'r':
3351 if (VT == MVT::v2i32)
3352 return std::make_pair(0U, &SP::IntPairRegClass);
3353 else if (Subtarget->is64Bit())
3354 return std::make_pair(0U, &SP::I64RegsRegClass);
3355 else
3356 return std::make_pair(0U, &SP::IntRegsRegClass);
3357 case 'f':
3358 if (VT == MVT::f32 || VT == MVT::i32)
3359 return std::make_pair(0U, &SP::FPRegsRegClass);
3360 else if (VT == MVT::f64 || VT == MVT::i64)
3361 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3362 else if (VT == MVT::f128)
3363 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3364 // This will generate an error message
3365 return std::make_pair(0U, nullptr);
3366 case 'e':
3367 if (VT == MVT::f32 || VT == MVT::i32)
3368 return std::make_pair(0U, &SP::FPRegsRegClass);
3369 else if (VT == MVT::f64 || VT == MVT::i64 )
3370 return std::make_pair(0U, &SP::DFPRegsRegClass);
3371 else if (VT == MVT::f128)
3372 return std::make_pair(0U, &SP::QFPRegsRegClass);
3373 // This will generate an error message
3374 return std::make_pair(0U, nullptr);
3375 }
3376 }
3377
3378 if (Constraint.front() != '{')
3379 return std::make_pair(0U, nullptr);
3380
3381 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3382 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3383 if (RegName.empty())
3384 return std::make_pair(0U, nullptr);
3385
3386 unsigned long long RegNo;
3387 // Handle numbered register aliases.
3388 if (RegName[0] == 'r' &&
3389 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3390 // r0-r7 -> g0-g7
3391 // r8-r15 -> o0-o7
3392 // r16-r23 -> l0-l7
3393 // r24-r31 -> i0-i7
3394 if (RegNo > 31)
3395 return std::make_pair(0U, nullptr);
3396 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3397 char RegType = RegTypes[RegNo / 8];
3398 char RegIndex = '0' + (RegNo % 8);
3399 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3400 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3401 }
3402
3403 // Rewrite the fN constraint according to the value type if needed.
3404 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3405 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3406 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3407 return getRegForInlineAsmConstraint(
3408 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3409 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3410 return getRegForInlineAsmConstraint(
3411 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3412 } else {
3413 return std::make_pair(0U, nullptr);
3414 }
3415 }
3416
3417 auto ResultPair =
3418 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3419 if (!ResultPair.second)
3420 return std::make_pair(0U, nullptr);
3421
3422 // Force the use of I64Regs over IntRegs for 64-bit values.
3423 if (Subtarget->is64Bit() && VT == MVT::i64) {
3424 assert(ResultPair.second == &SP::IntRegsRegClass &&
3425 "Unexpected register class");
3426 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3427 }
3428
3429 return ResultPair;
3430}
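// Worked examples (illustrative) of the rewrites above:
//   "{r10}"              -> "{o2}"  (r8-r15 are the %o registers)
//   "{f2}" with VT==f64  -> "{d1}"  (even f-registers alias a d-register)
//   "{f4}" with VT==f128 -> "{q1}"  (f-registers divisible by 4 alias a q-register)
//   "{f3}" with VT==f64  -> rejected; an odd f-register cannot hold a double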
3431
3432bool
3433SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3434 // The Sparc target isn't yet aware of offsets.
3435 return false;
3436}
3437
3438void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3439 SmallVectorImpl<SDValue> &Results,
3440 SelectionDAG &DAG) const {
3441
3442 SDLoc dl(N);
3443
3444 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3445
3446 switch (N->getOpcode()) {
3447 default:
3448 llvm_unreachable("Do not know how to custom type legalize this operation!");
3449
3450 case ISD::FP_TO_SINT:
3451 case ISD::FP_TO_UINT:
3452 // Custom lower only if it involves f128 or i64.
3453 if (N->getOperand(0).getValueType() != MVT::f128
3454 || N->getValueType(0) != MVT::i64)
3455 return;
3456 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3457 ? RTLIB::FPTOSINT_F128_I64
3458 : RTLIB::FPTOUINT_F128_I64);
3459
3460 Results.push_back(LowerF128Op(SDValue(N, 0),
3461 DAG,
3462 getLibcallName(libCall),
3463 1));
3464 return;
3465 case ISD::READCYCLECOUNTER: {
3466 assert(Subtarget->hasLeonCycleCounter());
3467 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3468 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3469 SDValue Ops[] = { Lo, Hi };
3470 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3471 Results.push_back(Pair);
3472 Results.push_back(N->getOperand(0));
3473 return;
3474 }
3475 case ISD::SINT_TO_FP:
3476 case ISD::UINT_TO_FP:
3477 // Custom lower only if it involves f128 or i64.
3478 if (N->getValueType(0) != MVT::f128
3479 || N->getOperand(0).getValueType() != MVT::i64)
3480 return;
3481
3482 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3483 ? RTLIB::SINTTOFP_I64_F128
3484 : RTLIB::UINTTOFP_I64_F128);
3485
3486 Results.push_back(LowerF128Op(SDValue(N, 0),
3487 DAG,
3488 getLibcallName(libCall),
3489 1));
3490 return;
3491 case ISD::LOAD: {
3492 LoadSDNode *Ld = cast<LoadSDNode>(N);
3493 // Custom handling only for i64: turn i64 load into a v2i32 load,
3494 // and a bitcast.
3495 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3496 return;
3497
3498 SDLoc dl(N);
3499 SDValue LoadRes = DAG.getExtLoad(
3500 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3501 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3502 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3503
3504 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3505 Results.push_back(Res);
3506 Results.push_back(LoadRes.getValue(1));
3507 return;
3508 }
3509 }
3510}
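// Illustrative sketch of the ISD::LOAD case above: on 32-bit SPARC,
//   %v = load i64, ptr %p
// is re-expressed as one v2i32 load of the same memory plus a bitcast to i64,
// rather than being split into two independent i32 loads.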
3511
3512// Override to enable LOAD_STACK_GUARD lowering on Linux.
3513bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
3514 if (!Subtarget->getTargetTriple().isOSLinux())
3515 return TargetLowering::useLoadStackGuardNode(M);
3516 return true;
3517}
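// Sketch of the effect: returning true makes SelectionDAGBuilder emit the
// LOAD_STACK_GUARD pseudo instead of an ordinary load of __stack_chk_guard,
// so the target controls how the guard value is fetched.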
3518
3519bool SparcTargetLowering::isFNegFree(EVT VT) const {
3520 if (Subtarget->isVIS3())
3521 return VT == MVT::f32 || VT == MVT::f64;
3522 return false;
3523}
3524
3525bool SparcTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3526 bool ForCodeSize) const {
3527 if (VT != MVT::f32 && VT != MVT::f64)
3528 return false;
3529 if (Subtarget->isVIS() && Imm.isZero())
3530 return true;
3531 if (Subtarget->isVIS3())
3532 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3533 Imm.getExactLog2Abs() == -1;
3534 return false;
3535}
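// Worked examples (illustrative) for f32/f64 immediates under this hook:
//   +0.0 / -0.0      -> legal with VIS (materialized directly in FP registers)
//   +0.5 / -0.5      -> legal with VIS3
//   anything else    -> loaded from the constant pool instead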
3536
3537bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3538
3539bool SparcTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3540 // We lack a native cttz; however, on 64-bit targets it is cheap to
3541 // implement it in terms of popc.
3542 if (Subtarget->is64Bit() && Subtarget->usePopc())
3543 return true;
3544 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3545 return isCheapToSpeculateCtlz(Ty);
3546}
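// Sketch of the popc-based expansion this relies on:
//   cttz(x) = popc(~x & (x - 1))
// x-1 turns the trailing zeros into ones and clears the lowest set bit, and
// ~x masks off every bit at or above that position, so the population count
// equals the number of trailing zeros (and the bit width when x == 0).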
3547
3548bool SparcTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3549 EVT VT) const {
3550 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3551}
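// Illustrative sketch: with UA2007 hard float, a fused call such as
//   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// can select to a single fused multiply-add (fmadds) instead of an
// fmuls/fadds pair.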
3552
3553void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3554 SDNode *Node) const {
3555 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3556 // If the result is dead, replace it with %g0.
3557 if (!Node->hasAnyUseOfValue(0))
3558 MI.getOperand(0).setReg(SP::G0);
3559}