//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment =
      (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

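  // Each 8-byte slot maps onto one argument register, so the slot offset
  // selects the register: e.g. the argument at Offset 16 is promoted to %i2
  // (i64), %d2 (f64), or %f5 (f32).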
  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
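    // (SPARC is big-endian, so an 8-byte-aligned slot is the high half of
    // the 64-bit register.)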
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
    const Type *RetTy) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());
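  // RetOps[1] is a placeholder; it is overwritten below once we know whether
  // the call site is followed by an extra UNIMP word (struct returns).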

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

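  // V8 frame layout: 64 bytes of register save area, the hidden sret slot at
  // [%fp+64], then six 4-byte argument words, so overflow arguments start at
  // [%fp+92].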
  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
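        // A 64-bit value arrives as two 32-bit halves: the first is always in
        // a register; the second may be in a register or on the stack if the
        // argument straddles the register/stack boundary.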

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64 bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;
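  // (16 in/local registers saved at 8 bytes each.)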

  for (const CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
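    // E.g. a 4-byte value occupies bytes 4-7 of its 8-byte slot.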
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

// Check whether any of the argument registers are reserved
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
                                const MachineFunction &MF) {
  // The register window design means that outgoing parameters at O*
  // will appear in the callee as I*.
  // Be conservative and check both sides of the register names.
  bool Outgoing =
      llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  bool Incoming =
      llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  return Outgoing || Incoming;
}

static void emitReservedArgRegCallError(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("SPARC doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // to allocate some space even when all the parameters fit inside registers.
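  // (48 bytes covers the six 8-byte argument slots every frame reserves.)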
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    }
    else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
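      // A v2i32 result is returned in two consecutive i32 registers;
      // reassemble the vector element by element.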
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  // If we're directly referencing register names
  // (e.g. in GCC C extension `register int r asm("g1");`),
  // make sure that said register is in the reserve list.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!TRI->isReservedReg(MF, Reg))
    Reg = Register();

  return Reg;
}

// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (CCValAssign &VA : ArgLocs) {
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (!Outs[VA.getValNo()].Flags.isVarArg())
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (const auto &[Reg, N] : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (const auto &[Reg, N] : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  if (CLI.IsTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
  }
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}
1528/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1529/// condition.
1531 switch (CC) {
1532 default: llvm_unreachable("Unknown integer condition code!");
1533 case ISD::SETEQ: return SPCC::ICC_E;
1534 case ISD::SETNE: return SPCC::ICC_NE;
1535 case ISD::SETLT: return SPCC::ICC_L;
1536 case ISD::SETGT: return SPCC::ICC_G;
1537 case ISD::SETLE: return SPCC::ICC_LE;
1538 case ISD::SETGE: return SPCC::ICC_GE;
1539 case ISD::SETULT: return SPCC::ICC_CS;
1540 case ISD::SETULE: return SPCC::ICC_LEU;
1541 case ISD::SETUGT: return SPCC::ICC_GU;
1542 case ISD::SETUGE: return SPCC::ICC_CC;
1543 }
1544}
1545
1546/// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1547/// FCC condition.
1549 switch (CC) {
1550 default: llvm_unreachable("Unknown fp condition code!");
1551 case ISD::SETEQ:
1552 case ISD::SETOEQ: return SPCC::FCC_E;
1553 case ISD::SETNE:
1554 case ISD::SETUNE: return SPCC::FCC_NE;
1555 case ISD::SETLT:
1556 case ISD::SETOLT: return SPCC::FCC_L;
1557 case ISD::SETGT:
1558 case ISD::SETOGT: return SPCC::FCC_G;
1559 case ISD::SETLE:
1560 case ISD::SETOLE: return SPCC::FCC_LE;
1561 case ISD::SETGE:
1562 case ISD::SETOGE: return SPCC::FCC_GE;
1563 case ISD::SETULT: return SPCC::FCC_UL;
1564 case ISD::SETULE: return SPCC::FCC_ULE;
1565 case ISD::SETUGT: return SPCC::FCC_UG;
1566 case ISD::SETUGE: return SPCC::FCC_UGE;
1567 case ISD::SETUO: return SPCC::FCC_U;
1568 case ISD::SETO: return SPCC::FCC_O;
1569 case ISD::SETONE: return SPCC::FCC_LG;
1570 case ISD::SETUEQ: return SPCC::FCC_UE;
1571 }
1572}

SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1653 // Custom legalize GlobalAddress nodes into LO/HI parts.
1658
1659   // Sparc doesn't have sext_inreg; replace it with shl/sra pairs.
1660   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1661   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1662   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1663
1664   // Sparc has no REM or DIVREM operations.
1665   setOperationAction(ISD::UREM, MVT::i32, Expand);
1666   setOperationAction(ISD::SREM, MVT::i32, Expand);
1667   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1668   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1669
1670 // ... nor does SparcV9.
1671   if (Subtarget->is64Bit()) {
1672     setOperationAction(ISD::UREM, MVT::i64, Expand);
1673     setOperationAction(ISD::SREM, MVT::i64, Expand);
1674     setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1675     setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1676   }
1677
1678   // Custom expand fp<->sint.
1679   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1680   setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1681   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1682   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1683
1684   // Custom expand fp<->uint.
1685   setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1686   setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1687   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1688   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1689
1690 // Lower f16 conversion operations into library calls
1691 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1692 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1693 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1694 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1695 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1696 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1697
1698 setOperationAction(ISD::BITCAST, MVT::f32,
1699 Subtarget->isVIS3() ? Legal : Expand);
1700 setOperationAction(ISD::BITCAST, MVT::i32,
1701 Subtarget->isVIS3() ? Legal : Expand);
1702
1703   // Sparc has no select or setcc: expand to SELECT_CC.
1704   setOperationAction(ISD::SELECT, MVT::i32, Expand);
1705   setOperationAction(ISD::SELECT, MVT::f32, Expand);
1706   setOperationAction(ISD::SELECT, MVT::f64, Expand);
1707   setOperationAction(ISD::SELECT, MVT::f128, Expand);
1708
1709   setOperationAction(ISD::SETCC, MVT::i32, Expand);
1710   setOperationAction(ISD::SETCC, MVT::f32, Expand);
1711   setOperationAction(ISD::SETCC, MVT::f64, Expand);
1712   setOperationAction(ISD::SETCC, MVT::f128, Expand);
1713
1714 // Sparc doesn't have BRCOND either, it has BR_CC.
1715 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1716 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1717 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1718 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1719 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1720 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1721 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1722
1723   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1724   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1725   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1726   setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1727
1732
1733 if (Subtarget->isVIS3()) {
1736 }
1737
1738 if (Subtarget->is64Bit()) {
1739 setOperationAction(ISD::BITCAST, MVT::f64,
1740 Subtarget->isVIS3() ? Legal : Expand);
1741 setOperationAction(ISD::BITCAST, MVT::i64,
1742 Subtarget->isVIS3() ? Legal : Expand);
1743     setOperationAction(ISD::SELECT, MVT::i64, Expand);
1744     setOperationAction(ISD::SETCC, MVT::i64, Expand);
1745     setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1746     setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1747
1748     setOperationAction(ISD::CTPOP, MVT::i64,
1749                        Subtarget->usePopc() ? Legal : Expand);
1751 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1752 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1753 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1754 }
1755
1756 // ATOMICs.
1757 // Atomics are supported on SparcV9. 32-bit atomics are also
1758 // supported by some Leon SparcV8 variants. Otherwise, atomics
1759 // are unsupported.
1760 if (Subtarget->isV9()) {
1761 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1762 // but it hasn't been implemented in the backend yet.
1763     if (Subtarget->is64Bit())
1764       setMaxAtomicSizeInBitsSupported(64);
1765     else
1766       setMaxAtomicSizeInBitsSupported(32);
1767   } else if (Subtarget->hasLeonCasa())
1768     setMaxAtomicSizeInBitsSupported(32);
1769   else
1770     setMaxAtomicSizeInBitsSupported(0);
1771
1772   setMinCmpXchgSizeInBits(32);
1773
1774 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1775
1776 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1777
1778 // Custom Lower Atomic LOAD/STORE
1779 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1780 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1781
1782 if (Subtarget->is64Bit()) {
1783 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1784 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1785 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1786 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1787 }
1788
1789 if (!Subtarget->isV9()) {
1790 // SparcV8 does not have FNEGD and FABSD.
1791 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1792 setOperationAction(ISD::FABS, MVT::f64, Custom);
1793 }
1794
1795 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1796 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1797 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1798 setOperationAction(ISD::FREM , MVT::f128, Expand);
1799 setOperationAction(ISD::FMA , MVT::f128, Expand);
1800 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1801 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1802 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1803 setOperationAction(ISD::FREM , MVT::f64, Expand);
1804 setOperationAction(ISD::FMA, MVT::f64,
1805 Subtarget->isUA2007() ? Legal : Expand);
1806 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1807 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1808 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1809 setOperationAction(ISD::FREM , MVT::f32, Expand);
1810 setOperationAction(ISD::FMA, MVT::f32,
1811 Subtarget->isUA2007() ? Legal : Expand);
1812 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1813   setOperationAction(ISD::ROTR , MVT::i32, Expand);
1814   setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1815   setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1816   setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1817   setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1818   setOperationAction(ISD::FPOW , MVT::f128, Expand);
1819 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1820 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1821
1822   setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1823   setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1824   setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1825
1826   // Expands to [SU]MUL_LOHI.
1827   setOperationAction(ISD::MULHU, MVT::i32, Expand);
1828   setOperationAction(ISD::MULHS, MVT::i32, Expand);
1829   setOperationAction(ISD::MUL, MVT::i32, Expand);
1830
1831 if (Subtarget->useSoftMulDiv()) {
1832 // .umul works for both signed and unsigned
1837 }
1838
1839 if (Subtarget->is64Bit()) {
1843 Subtarget->isVIS3() ? Legal : Expand);
1845 Subtarget->isVIS3() ? Legal : Expand);
1846
1850 }
1851
1852 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1853 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1854 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1855 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1856
1857 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1858 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1859
1860 // Use the default implementation.
1861 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1862 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1863 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1864 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1865 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1866
1867   setStackPointerRegisterToSaveRestore(SP::O6);
1868
1869   setOperationAction(ISD::CTPOP, MVT::i32,
1870                      Subtarget->usePopc() ? Legal : Expand);
1871
1872 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1873 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1874 setOperationAction(ISD::STORE, MVT::f128, Legal);
1875 } else {
1876 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1877 setOperationAction(ISD::STORE, MVT::f128, Custom);
1878 }
1879
1880 if (Subtarget->hasHardQuad()) {
1881 setOperationAction(ISD::FADD, MVT::f128, Legal);
1882 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1883 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1884 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1885 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1886     setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1887     setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1888 if (Subtarget->isV9()) {
1889 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1890 setOperationAction(ISD::FABS, MVT::f128, Legal);
1891 } else {
1892 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1893 setOperationAction(ISD::FABS, MVT::f128, Custom);
1894 }
1895 } else {
1896 // Custom legalize f128 operations.
1897
1898 setOperationAction(ISD::FADD, MVT::f128, Custom);
1899 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1900 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1901 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1902 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1903 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1904 setOperationAction(ISD::FABS, MVT::f128, Custom);
1905
1906 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1907     setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1908     setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1909
1910 // Setup Runtime library names.
1911 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1912 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Qp_add);
1913 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Qp_sub);
1914 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Qp_mul);
1915 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Qp_div);
1916 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Qp_sqrt);
1917 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Qp_qtoi);
1918 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Qp_qtoui);
1919 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Qp_itoq);
1920 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Qp_uitoq);
1921 setLibcallImpl(RTLIB::FPTOSINT_F128_I64, RTLIB::impl__Qp_qtox);
1922 setLibcallImpl(RTLIB::FPTOUINT_F128_I64, RTLIB::impl__Qp_qtoux);
1923 setLibcallImpl(RTLIB::SINTTOFP_I64_F128, RTLIB::impl__Qp_xtoq);
1924 setLibcallImpl(RTLIB::UINTTOFP_I64_F128, RTLIB::impl__Qp_uxtoq);
1925 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Qp_stoq);
1926 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Qp_dtoq);
1927 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Qp_qtos);
1928 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Qp_qtod);
1929 } else if (!Subtarget->useSoftFloat()) {
1930 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Q_add);
1931 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Q_sub);
1932 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Q_mul);
1933 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Q_div);
1934 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Q_sqrt);
1935 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Q_qtoi);
1936 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Q_qtou);
1937 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Q_itoq);
1938 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Q_utoq);
1939 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Q_stoq);
1940 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Q_dtoq);
1941 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Q_qtos);
1942 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Q_qtod);
1943 }
1944 }
1945
1946 if (Subtarget->fixAllFDIVSQRT()) {
1947 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1948 // the former instructions generate errata on LEON processors.
1949     setOperationAction(ISD::FDIV, MVT::f32, Promote);
1950     setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1951 }
1952
1953   if (Subtarget->hasNoFMULS()) {
1954     setOperationAction(ISD::FMUL, MVT::f32, Promote);
1955   }
1956
1957 // Custom combine bitcast between f64 and v2i32
1958 if (!Subtarget->is64Bit())
1959 setTargetDAGCombine(ISD::BITCAST);
1960
1961 if (Subtarget->hasLeonCycleCounter())
1962 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1963
1964 if (Subtarget->isVIS3()) {
1969
1970 setOperationAction(ISD::CTTZ, MVT::i32,
1971 Subtarget->is64Bit() ? Promote : Expand);
1974 Subtarget->is64Bit() ? Promote : Expand);
1976 } else if (Subtarget->usePopc()) {
1981
1986 } else {
1990 Subtarget->is64Bit() ? Promote : LibCall);
1992
1993     // FIXME: we don't have any ISA extensions here that could help us, so
1994     // to prevent large expansions these should be made into libcalls.
1999 }
2000
2002
2004
2005 computeRegisterProperties(Subtarget->getRegisterInfo());
2006}
2007
2008 bool SparcTargetLowering::useSoftFloat() const {
2009   return Subtarget->useSoftFloat();
2010}
2011
2012 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2013                                             EVT VT) const {
2014 if (!VT.isVector())
2015 return MVT::i32;
2016   return VT.changeVectorElementTypeToInteger();
2017 }
2018
2019 /// computeKnownBitsForTargetNode - Determine which bits of Op are known to
2020 /// be zero or one. Op is expected to be a target-specific node. Used by the
2021 /// DAG combiner.
2022 void SparcTargetLowering::computeKnownBitsForTargetNode
2023     (const SDValue Op,
2024 KnownBits &Known,
2025 const APInt &DemandedElts,
2026 const SelectionDAG &DAG,
2027 unsigned Depth) const {
2028 KnownBits Known2;
2029 Known.resetAll();
2030
2031 switch (Op.getOpcode()) {
2032 default: break;
2033 case SPISD::SELECT_ICC:
2034 case SPISD::SELECT_XCC:
2035 case SPISD::SELECT_FCC:
2036 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2037 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2038
2039 // Only known if known in both the LHS and RHS.
2040 Known = Known.intersectWith(Known2);
2041 break;
2042 }
2043}
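// An illustration of the intersectWith step above (ours, not from this
// file): if the true operand is known to end in ...10 and the false operand
// in ...110, only the low two bits -- where both agree -- stay known in the
// select's result; every other bit becomes unknown.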
2044
2045// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2046 // set LHS/RHS to the operands of the setcc and SPCC to its condition code.
2047 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2048                              ISD::CondCode CC, unsigned &SPCC) {
2049 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2050 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2051 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2052 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2053 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2054 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2055 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2056 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2057 SDValue CMPCC = LHS.getOperand(3);
2058 SPCC = LHS.getConstantOperandVal(2);
2059 LHS = CMPCC.getOperand(0);
2060 RHS = CMPCC.getOperand(1);
2061 }
2062}
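// The shape being peeled off above, spelled out (illustrative):
//   setne (select_icc 1, 0, spcc, (cmpicc a, b)), 0
// i.e. a boolean materialized from a compare and then tested against zero;
// it collapses back to branching/selecting directly on (a, b) under spcc.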
2063
2064// Convert to a target node and set target flags.
2065 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2066                                              SelectionDAG &DAG) const {
2067   if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2068     return DAG.getTargetGlobalAddress(GA->getGlobal(),
2069 SDLoc(GA),
2070 GA->getValueType(0),
2071 GA->getOffset(), TF);
2072
2073   if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2074     return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2075 CP->getAlign(), CP->getOffset(), TF);
2076
2077   if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2078     return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2079 Op.getValueType(),
2080 0,
2081 TF);
2082
2083   if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2084     return DAG.getTargetExternalSymbol(ES->getSymbol(),
2085 ES->getValueType(0), TF);
2086
2087 llvm_unreachable("Unhandled address SDNode");
2088}
2089
2090// Split Op into high and low parts according to HiTF and LoTF.
2091// Return an ADD node combining the parts.
2092 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2093                                           unsigned HiTF, unsigned LoTF,
2094 SelectionDAG &DAG) const {
2095 SDLoc DL(Op);
2096 EVT VT = Op.getValueType();
2097 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2098 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2099 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2100}
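// A minimal numeric sketch of the Hi/Lo recombination built above, under the
// usual SPARC sethi/or split (the helper is ours, not part of this file):
static unsigned recombineHiLo(unsigned Addr) {
  unsigned Hi22 = Addr >> 10;   // %hi(sym), materialized by sethi
  unsigned Lo10 = Addr & 0x3ff; // %lo(sym), folded into the add
  return (Hi22 << 10) + Lo10;   // the ISD::ADD node; equals Addr
}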
2101
2102// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2103// or ExternalSymbol SDNode.
2104 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2105   SDLoc DL(Op);
2106 EVT VT = getPointerTy(DAG.getDataLayout());
2107
2108   // Handle PIC mode first. SPARC needs a GOT load for every variable!
2109 if (isPositionIndependent()) {
2110 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2111 PICLevel::Level picLevel = M->getPICLevel();
2112 SDValue Idx;
2113
2114 if (picLevel == PICLevel::SmallPIC) {
2115       // This is the pic13 code model; the GOT is known to be smaller than 8KiB.
2116 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2117 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2118 } else {
2119       // This is the pic32 code model; the GOT is known to be smaller than 4GB.
2120 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2121 }
2122
2123 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2124 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2125     // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2126     // function has calls.
2127     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2128     MFI.setHasCalls(true);
2129 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2130                        MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2131   }
2132
2133 // This is one of the absolute code models.
2134 switch(getTargetMachine().getCodeModel()) {
2135 default:
2136 llvm_unreachable("Unsupported absolute code model");
2137 case CodeModel::Small:
2138 // abs32.
2139 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2140 case CodeModel::Medium: {
2141 // abs44.
2142 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2143 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2144 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2145 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2146 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2147 }
2148 case CodeModel::Large: {
2149 // abs64.
2150 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2151 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2152 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2153 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2154 }
2155 }
2156}
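// Rough arithmetic behind the abs44 (CodeModel::Medium) case above; the
// helper is our sketch, assuming the standard H44/M44/L44 relocation split:
static unsigned long long rebuildAbs44(unsigned long long Addr) {
  unsigned long long HM = ((Addr >> 22) << 10)      // %h44: bits 43..22
                          | ((Addr >> 12) & 0x3ff); // %m44: bits 21..12
  // Equal to Addr whenever Addr fits in 44 bits.
  return (HM << 12) + (Addr & 0xfff);               // shl 12, add %l44
}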
2157
2158 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2159                                                 SelectionDAG &DAG) const {
2160   return makeAddress(Op, DAG);
2161 }
2162
2163 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2164                                                SelectionDAG &DAG) const {
2165   return makeAddress(Op, DAG);
2166 }
2167
2168 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2169                                                SelectionDAG &DAG) const {
2170   return makeAddress(Op, DAG);
2171 }
2172
2173 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2174                                                    SelectionDAG &DAG) const {
2175
2176   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2177   if (DAG.getTarget().useEmulatedTLS())
2178 return LowerToTLSEmulatedModel(GA, DAG);
2179
2180 SDLoc DL(GA);
2181 const GlobalValue *GV = GA->getGlobal();
2182 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2183
2184   TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2185
2186 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2187 unsigned HiTF =
2188 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2189 : ELF::R_SPARC_TLS_LDM_HI22);
2190 unsigned LoTF =
2191 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2192 : ELF::R_SPARC_TLS_LDM_LO10);
2193 unsigned addTF =
2194 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2195 : ELF::R_SPARC_TLS_LDM_ADD);
2196 unsigned callTF =
2197 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2198 : ELF::R_SPARC_TLS_LDM_CALL);
2199
2200 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2201 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2202 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2203 withTargetFlags(Op, addTF, DAG));
2204
2205 SDValue Chain = DAG.getEntryNode();
2206 SDValue InGlue;
2207
2208 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2209 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2210 InGlue = Chain.getValue(1);
2211 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2212 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2213
2214 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2215 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2216         DAG.getMachineFunction(), CallingConv::C);
2217     assert(Mask && "Missing call preserved mask for calling convention");
2218 SDValue Ops[] = {Chain,
2219 Callee,
2220 Symbol,
2221 DAG.getRegister(SP::O0, PtrVT),
2222 DAG.getRegisterMask(Mask),
2223 InGlue};
2224 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2225 InGlue = Chain.getValue(1);
2226 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2227 InGlue = Chain.getValue(1);
2228 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2229
2230 if (model != TLSModel::LocalDynamic)
2231 return Ret;
2232
2233 SDValue Hi =
2234 DAG.getNode(SPISD::Hi, DL, PtrVT,
2235 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2236 SDValue Lo =
2237 DAG.getNode(SPISD::Lo, DL, PtrVT,
2238 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2239 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2240 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2241 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2242 }
2243
2244 if (model == TLSModel::InitialExec) {
2245 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2246 : ELF::R_SPARC_TLS_IE_LD);
2247
2248 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2249
2250 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2251 // function has calls.
2252     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2253     MFI.setHasCalls(true);
2254
2255 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2256 ELF::R_SPARC_TLS_IE_LO10, DAG);
2257 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2258 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2259 DL, PtrVT, Ptr,
2260 withTargetFlags(Op, ldTF, DAG));
2261 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2262 DAG.getRegister(SP::G7, PtrVT), Offset,
2263 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2264 }
2265
2266 assert(model == TLSModel::LocalExec);
2267 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2268 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2269 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2270 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2271 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2272
2273 return DAG.getNode(ISD::ADD, DL, PtrVT,
2274 DAG.getRegister(SP::G7, PtrVT), Offset);
2275}
2276
2277 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2278                                                   ArgListTy &Args, SDValue Arg,
2279 const SDLoc &DL,
2280 SelectionDAG &DAG) const {
2281   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2282   EVT ArgVT = Arg.getValueType();
2283 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2284
2285 if (ArgTy->isFP128Ty()) {
2286 // Create a stack object and pass the pointer to the library function.
2287 int FI = MFI.CreateStackObject(16, Align(8), false);
2288 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2289 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2290 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2291 } else {
2292 Args.emplace_back(Arg, ArgTy);
2293 }
2294 return Chain;
2295}
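// The indirection above matches the SPARC quad-float ABI as assumed here:
// every f128 argument travels by address, and (on 32-bit targets) the f128
// result comes back through a hidden sret pointer, roughly as if the
// libcall were declared:
//   void _Q_add(long double *sret, const long double *a, const long double *b);
// (Prototype shown for illustration only; it is not declared in this file.)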
2296
2297SDValue
2298 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2299                                  const char *LibFuncName,
2300 unsigned numArgs) const {
2301
2302 ArgListTy Args;
2303
2304   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2305   auto PtrVT = getPointerTy(DAG.getDataLayout());
2306
2307 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2308 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2309 Type *RetTyABI = RetTy;
2310 SDValue Chain = DAG.getEntryNode();
2311 SDValue RetPtr;
2312
2313 if (RetTy->isFP128Ty()) {
2314 // Create a Stack Object to receive the return value of type f128.
2315 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2316 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2317 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2318 if (!Subtarget->is64Bit()) {
2319 Entry.IsSRet = true;
2320 Entry.IndirectType = RetTy;
2321 }
2322 Entry.IsReturned = false;
2323 Args.push_back(Entry);
2324 RetTyABI = Type::getVoidTy(*DAG.getContext());
2325 }
2326
2327 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2328 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2329 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2330 }
2331   TargetLowering::CallLoweringInfo CLI(DAG);
2332   CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2333 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2334
2335 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2336
2337   // The chain is in the second result.
2338 if (RetTyABI == RetTy)
2339 return CallInfo.first;
2340
2341 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2342
2343 Chain = CallInfo.second;
2344
2345 // Load RetPtr to get the return value.
2346 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2347                      MachinePointerInfo());
2348 }
2349
2350 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2351                                               unsigned &SPCC, const SDLoc &DL,
2352 SelectionDAG &DAG) const {
2353
2354 const char *LibCall = nullptr;
2355 bool is64Bit = Subtarget->is64Bit();
2356 switch(SPCC) {
2357 default: llvm_unreachable("Unhandled conditional code!");
2358 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2359 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2360 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2361 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2362 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2363 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2364 case SPCC::FCC_UL :
2365 case SPCC::FCC_ULE:
2366 case SPCC::FCC_UG :
2367 case SPCC::FCC_UGE:
2368 case SPCC::FCC_U :
2369 case SPCC::FCC_O :
2370 case SPCC::FCC_LG :
2371 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2372 }
2373
2374 auto PtrVT = getPointerTy(DAG.getDataLayout());
2375 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2376 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2377 ArgListTy Args;
2378 SDValue Chain = DAG.getEntryNode();
2379 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2380 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2381
2382   TargetLowering::CallLoweringInfo CLI(DAG);
2383   CLI.setDebugLoc(DL).setChain(Chain)
2384 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2385
2386 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2387
2388   // The comparison result is in the first result, the chain in the second.
2389 SDValue Result = CallInfo.first;
2390
2391 switch(SPCC) {
2392 default: {
2393 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2394     SPCC = SPCC::ICC_NE;
2395     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2396 }
2397 case SPCC::FCC_UL : {
2398 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2399 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2400 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2401     SPCC = SPCC::ICC_NE;
2402     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2403 }
2404 case SPCC::FCC_ULE: {
2405 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2406     SPCC = SPCC::ICC_NE;
2407     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2408 }
2409 case SPCC::FCC_UG : {
2410 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2411 SPCC = SPCC::ICC_G;
2412 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2413 }
2414 case SPCC::FCC_UGE: {
2415 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2416     SPCC = SPCC::ICC_NE;
2417     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2418 }
2419
2420 case SPCC::FCC_U : {
2421 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2422 SPCC = SPCC::ICC_E;
2423 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2424 }
2425 case SPCC::FCC_O : {
2426 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2427     SPCC = SPCC::ICC_NE;
2428     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2429 }
2430 case SPCC::FCC_LG : {
2431 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2432 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2433 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2434     SPCC = SPCC::ICC_NE;
2435     return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2436 }
2437 case SPCC::FCC_UE : {
2438 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2439 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2440 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2441 SPCC = SPCC::ICC_E;
2442 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2443 }
2444 }
2445}
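// Key to the constants used above, assuming the classic _Q_cmp/_Qp_cmp
// result encoding implied by the FCC_U/FCC_O arms (0 = equal, 1 = less,
// 2 = greater, 3 = unordered). For example, "unordered or less" is exactly
// the odd results, hence the AND-with-1 in the FCC_UL arm:
static bool isUnorderedOrLess(int QCmpResult) {
  return (QCmpResult & 1) != 0; // matches 1 (less) and 3 (unordered)
}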
2446
2447static SDValue
2448 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2449                    const SparcTargetLowering &TLI) {
2450
2451 if (Op.getOperand(0).getValueType() == MVT::f64)
2452 return TLI.LowerF128Op(Op, DAG,
2453 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2454
2455 if (Op.getOperand(0).getValueType() == MVT::f32)
2456 return TLI.LowerF128Op(Op, DAG,
2457 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2458
2459 llvm_unreachable("fpextend with non-float operand!");
2460 return SDValue();
2461}
2462
2463static SDValue
2464 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2465                   const SparcTargetLowering &TLI) {
2466 // FP_ROUND on f64 and f32 are legal.
2467 if (Op.getOperand(0).getValueType() != MVT::f128)
2468 return Op;
2469
2470 if (Op.getValueType() == MVT::f64)
2471 return TLI.LowerF128Op(Op, DAG,
2472 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2473 if (Op.getValueType() == MVT::f32)
2474 return TLI.LowerF128Op(Op, DAG,
2475 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2476
2477 llvm_unreachable("fpround to non-float!");
2478 return SDValue();
2479}
2480
2481 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2482                                const SparcTargetLowering &TLI,
2483 bool hasHardQuad) {
2484 SDLoc dl(Op);
2485 EVT VT = Op.getValueType();
2486 assert(VT == MVT::i32 || VT == MVT::i64);
2487
2488   // Expand f128 operations to fp128 ABI calls.
2489 if (Op.getOperand(0).getValueType() == MVT::f128
2490 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2491 const char *libName = TLI.getLibcallName(VT == MVT::i32
2492 ? RTLIB::FPTOSINT_F128_I32
2493 : RTLIB::FPTOSINT_F128_I64);
2494 return TLI.LowerF128Op(Op, DAG, libName, 1);
2495 }
2496
2497 // Expand if the resulting type is illegal.
2498 if (!TLI.isTypeLegal(VT))
2499 return SDValue();
2500
2501   // Otherwise, convert the FP value to an integer in an FP register.
2502 if (VT == MVT::i32)
2503 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2504 else
2505 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2506
2507 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2508}
2509
2510 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2511                                const SparcTargetLowering &TLI,
2512 bool hasHardQuad) {
2513 SDLoc dl(Op);
2514 EVT OpVT = Op.getOperand(0).getValueType();
2515 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2516
2517 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2518
2519 // Expand f128 operations to fp128 ABI calls.
2520 if (Op.getValueType() == MVT::f128
2521 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2522 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2523 ? RTLIB::SINTTOFP_I32_F128
2524 : RTLIB::SINTTOFP_I64_F128);
2525 return TLI.LowerF128Op(Op, DAG, libName, 1);
2526 }
2527
2528 // Expand if the operand type is illegal.
2529 if (!TLI.isTypeLegal(OpVT))
2530 return SDValue();
2531
2532   // Otherwise, convert the integer value to FP in an FP register.
2533 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2534 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2535 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2536}
2537
2538 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2539                                const SparcTargetLowering &TLI,
2540 bool hasHardQuad) {
2541 EVT VT = Op.getValueType();
2542
2543 // Expand if it does not involve f128 or the target has support for
2544 // quad floating point instructions and the resulting type is legal.
2545 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2546 (hasHardQuad && TLI.isTypeLegal(VT)))
2547 return SDValue();
2548
2549 assert(VT == MVT::i32 || VT == MVT::i64);
2550
2551 return TLI.LowerF128Op(Op, DAG,
2552 TLI.getLibcallName(VT == MVT::i32
2553 ? RTLIB::FPTOUINT_F128_I32
2554 : RTLIB::FPTOUINT_F128_I64),
2555 1);
2556}
2557
2558 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2559                                const SparcTargetLowering &TLI,
2560 bool hasHardQuad) {
2561 EVT OpVT = Op.getOperand(0).getValueType();
2562 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2563
2564 // Expand if it does not involve f128 or the target has support for
2565 // quad floating point instructions and the operand type is legal.
2566 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2567 return SDValue();
2568
2569 return TLI.LowerF128Op(Op, DAG,
2570 TLI.getLibcallName(OpVT == MVT::i32
2571 ? RTLIB::UINTTOFP_I32_F128
2572 : RTLIB::UINTTOFP_I64_F128),
2573 1);
2574}
2575
2576 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2577                           const SparcTargetLowering &TLI, bool hasHardQuad,
2578 bool isV9, bool is64Bit) {
2579 SDValue Chain = Op.getOperand(0);
2580 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2581 SDValue LHS = Op.getOperand(2);
2582 SDValue RHS = Op.getOperand(3);
2583 SDValue Dest = Op.getOperand(4);
2584 SDLoc dl(Op);
2585 unsigned Opc, SPCC = ~0U;
2586
2587 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2588   // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2589   LookThroughSetCC(LHS, RHS, CC, SPCC);
2590 assert(LHS.getValueType() == RHS.getValueType());
2591
2592 // Get the condition flag.
2593 SDValue CompareFlag;
2594 if (LHS.getValueType().isInteger()) {
2595 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2596 // and the RHS is zero we might be able to use a specialized branch.
2597 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2598         isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
2599       return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2600 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2601 LHS);
2602
2603 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2604 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2605 if (isV9)
2606 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2607 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2608 else
2609 // Non-v9 targets don't have xcc.
2610 Opc = SPISD::BRICC;
2611 } else {
2612 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2613 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2614 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2615 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2616 } else {
2617 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2618 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2619 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2620 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2621 }
2622 }
2623 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2624 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2625}
2626
2627 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2628                               const SparcTargetLowering &TLI, bool hasHardQuad,
2629 bool isV9, bool is64Bit) {
2630 SDValue LHS = Op.getOperand(0);
2631 SDValue RHS = Op.getOperand(1);
2632 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2633 SDValue TrueVal = Op.getOperand(2);
2634 SDValue FalseVal = Op.getOperand(3);
2635 SDLoc dl(Op);
2636 unsigned Opc, SPCC = ~0U;
2637
2638 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2639   // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2640   LookThroughSetCC(LHS, RHS, CC, SPCC);
2641 assert(LHS.getValueType() == RHS.getValueType());
2642
2643 SDValue CompareFlag;
2644 if (LHS.getValueType().isInteger()) {
2645 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2646 // and the RHS is zero we might be able to use a specialized select.
2647 // All SELECT_CC between any two scalar integer types are eligible for
2648 // lowering to specialized instructions. Additionally, f32 and f64 types
2649 // are also eligible, but for f128 we can only use the specialized
2650 // instruction when we have hardquad.
2651 EVT ValType = TrueVal.getValueType();
2652 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2653 ValType == MVT::f64 ||
2654 (ValType == MVT::f128 && hasHardQuad);
2655 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2656 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2657 return DAG.getNode(
2658 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2659 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2660
2661 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2662 Opc = LHS.getValueType() == MVT::i32 ?
2663 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2664 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2665 } else {
2666 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2667 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2668 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2669 Opc = SPISD::SELECT_ICC;
2670 } else {
2671 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2672 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2673 Opc = SPISD::SELECT_FCC;
2674 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2675 }
2676 }
2677 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2678 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2679}
2680
2681 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2682                             const SparcTargetLowering &TLI) {
2683   MachineFunction &MF = DAG.getMachineFunction();
2684   SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2685   auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2686
2687 // Need frame address to find the address of VarArgsFrameIndex.
2688   MF.getFrameInfo().setFrameAddressIsTaken(true);
2689
2690 // vastart just stores the address of the VarArgsFrameIndex slot into the
2691 // memory location argument.
2692 SDLoc DL(Op);
2693 SDValue Offset =
2694 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2695 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2696 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2697 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2698 MachinePointerInfo(SV));
2699}
2700
2701 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2702   SDNode *Node = Op.getNode();
2703 EVT VT = Node->getValueType(0);
2704 SDValue InChain = Node->getOperand(0);
2705 SDValue VAListPtr = Node->getOperand(1);
2706 EVT PtrVT = VAListPtr.getValueType();
2707 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2708 SDLoc DL(Node);
2709 SDValue VAList =
2710 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2711 // Increment the pointer, VAList, to the next vaarg.
2712 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2713                                 DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2714                                                       DL));
2715 // Store the incremented VAList to the legalized pointer.
2716 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2717 MachinePointerInfo(SV));
2718 // Load the actual argument out of the pointer VAList.
2719 // We can't count on greater alignment than the word size.
2720 return DAG.getLoad(
2721 VT, DL, InChain, VAList, MachinePointerInfo(),
2722 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2723}
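// A plain-C model of the va_arg sequence above (helper name and shape are
// ours, for illustration only):
static void *vaArgSlot(char **AP, unsigned Size) {
  char *P = *AP;  // load the current VAList pointer
  *AP = P + Size; // store the incremented pointer back
  return P;       // the argument itself is then loaded from P
}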
2724
2725 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2726                                        const SparcSubtarget *Subtarget) {
2727 SDValue Chain = Op.getOperand(0);
2728 SDValue Size = Op.getOperand(1);
2729 SDValue Alignment = Op.getOperand(2);
2730 MaybeAlign MaybeAlignment =
2731 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2732 EVT VT = Size->getValueType(0);
2733 SDLoc dl(Op);
2734
2735 unsigned SPReg = SP::O6;
2736 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2737
2738 // The resultant pointer needs to be above the register spill area
2739 // at the bottom of the stack.
2740 unsigned regSpillArea;
2741 if (Subtarget->is64Bit()) {
2742 regSpillArea = 128;
2743 } else {
2744     // On Sparc32, the size of the spill area is 92 bytes. Unfortunately,
2745     // that's only 4-byte aligned, not 8-byte aligned (the stack
2746 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2747 // aligned dynamic allocation, we actually need to add 96 to the
2748 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2749
2750     // That also means adding 4 to the size of the allocation --
2751     // before applying the 8-byte rounding. Unfortunately, the
2752     // value we get here has already had rounding applied. So, we need
2753     // to add 8 instead, wasting a bit more memory.
2754
2755 // Further, this only actually needs to be done if the required
2756 // alignment is > 4, but, we've lost that info by this point, too,
2757 // so we always apply it.
2758
2759 // (An alternative approach would be to always reserve 96 bytes
2760 // instead of the required 92, but then we'd waste 4 extra bytes
2761 // in every frame, not just those with dynamic stack allocations)
2762
2763 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2764
2765 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2766 DAG.getConstant(8, dl, VT));
2767 regSpillArea = 96;
2768 }
2769
2770 int64_t Bias = Subtarget->getStackPointerBias();
2771
2772 // Debias and increment SP past the reserved spill area.
2773 // We need the SP to point to the first usable region before calculating
2774 // anything to prevent any of the pointers from becoming out of alignment when
2775 // we rebias the SP later on.
2776 SDValue StartOfUsableStack = DAG.getNode(
2777 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2778 SDValue AllocatedPtr =
2779 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2780
2781 bool IsOveraligned = MaybeAlignment.has_value();
2782 SDValue AlignedPtr =
2783 IsOveraligned
2784 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2785 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2786 : AllocatedPtr;
2787
2788 // Now that we are done, restore the bias and reserved spill area.
2789 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2790 DAG.getConstant(regSpillArea + Bias, dl, VT));
2791 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2792 SDValue Ops[2] = {AlignedPtr, Chain};
2793 return DAG.getMergeValues(Ops, dl);
2794}
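// A compact model of the pointer arithmetic above, under the 32-bit,
// zero-bias assumptions spelled out in the comment (a sketch, not the DAG
// code itself):
static unsigned allocaModel(unsigned SP, unsigned Size, unsigned AlignMask) {
  const unsigned SpillArea = 96;              // reserved register-spill bytes
  unsigned Start = SP + SpillArea;            // first usable byte
  unsigned Ptr = (Start - Size) & ~AlignMask; // place the block, align down
  // Ptr is what the program receives; the new %sp re-reserves the spill
  // area below it.
  return Ptr - SpillArea;
}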
2795
2796
2797 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2798   SDLoc dl(Op);
2799 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2800 dl, MVT::Other, DAG.getEntryNode());
2801 return Chain;
2802}
2803
2804 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2805                             const SparcSubtarget *Subtarget,
2806 bool AlwaysFlush = false) {
2807   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2808   MFI.setFrameAddressIsTaken(true);
2809
2810 EVT VT = Op.getValueType();
2811 SDLoc dl(Op);
2812 unsigned FrameReg = SP::I6;
2813 unsigned stackBias = Subtarget->getStackPointerBias();
2814
2815 SDValue FrameAddr;
2816 SDValue Chain;
2817
2818   // Flush first to make sure the windowed registers' values are on the stack.
2819 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2820
2821 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2822
2823 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2824
2825 while (depth--) {
2826 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2827 DAG.getIntPtrConstant(Offset, dl));
2828 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2829 }
2830 if (Subtarget->is64Bit())
2831 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2832 DAG.getIntPtrConstant(stackBias, dl));
2833 return FrameAddr;
2834}
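// Note on the loop above: each SPARC frame saves its caller's frame pointer
// in the register window save area -- at FP+56 on 32-bit targets and at
// FP+bias+112 on 64-bit ones -- so a depth of N is simply N chained loads
// through that slot.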
2835
2836
2837 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2838                               const SparcSubtarget *Subtarget) {
2839
2840 uint64_t depth = Op.getConstantOperandVal(0);
2841
2842 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2843
2844}
2845
2846 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2847                                const SparcTargetLowering &TLI,
2848 const SparcSubtarget *Subtarget) {
2849   MachineFunction &MF = DAG.getMachineFunction();
2850   MachineFrameInfo &MFI = MF.getFrameInfo();
2851 MFI.setReturnAddressIsTaken(true);
2852
2853 EVT VT = Op.getValueType();
2854 SDLoc dl(Op);
2855 uint64_t depth = Op.getConstantOperandVal(0);
2856
2857 SDValue RetAddr;
2858 if (depth == 0) {
2859 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2860 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2861 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2862 return RetAddr;
2863 }
2864
2865 // Need frame address to find return address of the caller.
2866 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2867
2868 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2869   SDValue Ptr = DAG.getNode(ISD::ADD,
2870                             dl, VT,
2871 FrameAddr,
2872 DAG.getIntPtrConstant(Offset, dl));
2873 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2874
2875 return RetAddr;
2876}
2877
2878static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2879 unsigned opcode) {
2880 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2881 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2882
2883 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2884 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2885 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2886
2887   // Note: in little-endian, the halves of the floating-point value are
2888   // stored in the registers in the opposite order, so the subreg with the
2889   // sign bit is the highest-numbered (odd), rather than the
2890   // lowest-numbered (even).
2891
2892 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2893 SrcReg64);
2894 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2895 SrcReg64);
2896
2897 if (DAG.getDataLayout().isLittleEndian())
2898 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2899 else
2900 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2901
2902 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2903 dl, MVT::f64), 0);
2904 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2905 DstReg64, Hi32);
2906 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2907 DstReg64, Lo32);
2908 return DstReg64;
2909}
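// Why a single f32 op suffices: the sign bit of an IEEE double is the MSB
// of the word holding the high half, so fneg/fabs on f64 touches only one
// 32-bit register of the pair. A bit-level model (ours, illustrative):
static unsigned long long fnegF64Bits(unsigned long long Bits) {
  return Bits ^ (1ULL << 63); // flip only the sign bit
}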
2910
2911// Lower a f128 load into two f64 loads.
2912 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2913 {
2914 SDLoc dl(Op);
2915 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2916 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2917
2918 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2919
2920 SDValue Hi64 =
2921 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2922 LdNode->getPointerInfo(), Alignment);
2923 EVT addrVT = LdNode->getBasePtr().getValueType();
2924 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2925 LdNode->getBasePtr(),
2926 DAG.getConstant(8, dl, addrVT));
2927 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2928 LdNode->getPointerInfo().getWithOffset(8),
2929 Alignment);
2930
2931 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2932 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2933
2934 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2935 dl, MVT::f128);
2936 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2937 MVT::f128,
2938 SDValue(InFP128, 0),
2939 Hi64,
2940 SubRegEven);
2941 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2942 MVT::f128,
2943 SDValue(InFP128, 0),
2944 Lo64,
2945 SubRegOdd);
2946 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2947 SDValue(Lo64.getNode(), 1) };
2948 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2949 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2950 return DAG.getMergeValues(Ops, dl);
2951}
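// Shape of the split above (a sketch; the struct and helper are ours): the
// f128 at P becomes two f64 loads, at P and P+8, which land in the even/odd
// 64-bit sub-registers of the final f128 value.
#include <cstring>
struct F128Halves { double Hi, Lo; };
static F128Halves loadF128Halves(const char *P) {
  F128Halves R;
  std::memcpy(&R.Hi, P, 8);     // sub_even64 half
  std::memcpy(&R.Lo, P + 8, 8); // sub_odd64 half
  return R;
}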
2952
2953 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2954 {
2955 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2956
2957 EVT MemVT = LdNode->getMemoryVT();
2958 if (MemVT == MVT::f128)
2959 return LowerF128Load(Op, DAG);
2960
2961 return Op;
2962}
2963
2964// Lower a f128 store into two f64 stores.
2965 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2966   SDLoc dl(Op);
2967 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2968 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2969
2970 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2971 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2972
2973 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2974 dl,
2975 MVT::f64,
2976 StNode->getValue(),
2977 SubRegEven);
2978 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2979 dl,
2980 MVT::f64,
2981 StNode->getValue(),
2982 SubRegOdd);
2983
2984 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
2985
2986 SDValue OutChains[2];
2987 OutChains[0] =
2988 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2989 StNode->getBasePtr(), StNode->getPointerInfo(),
2990 Alignment);
2991 EVT addrVT = StNode->getBasePtr().getValueType();
2992 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2993 StNode->getBasePtr(),
2994 DAG.getConstant(8, dl, addrVT));
2995 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2996 StNode->getPointerInfo().getWithOffset(8),
2997 Alignment);
2998 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2999}
3000
3001 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3002 {
3003 SDLoc dl(Op);
3004 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3005
3006 EVT MemVT = St->getMemoryVT();
3007 if (MemVT == MVT::f128)
3008 return LowerF128Store(Op, DAG);
3009
3010 if (MemVT == MVT::i64) {
3011     // Custom handling for i64 stores: turn the store into a bitcast and a
3012     // v2i32 store.
3013 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3014 SDValue Chain = DAG.getStore(
3015 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3016 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3017 return Chain;
3018 }
3019
3020 return SDValue();
3021}
3022
3023 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3024   assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3025 && "invalid opcode");
3026
3027 SDLoc dl(Op);
3028
3029 if (Op.getValueType() == MVT::f64)
3030 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3031 if (Op.getValueType() != MVT::f128)
3032 return Op;
3033
3034 // Lower fabs/fneg on f128 to fabs/fneg on f64
3035 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3036 // (As with LowerF64Op, on little-endian, we need to negate the odd
3037 // subreg)
3038
3039 SDValue SrcReg128 = Op.getOperand(0);
3040 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3041 SrcReg128);
3042 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3043 SrcReg128);
3044
3045 if (DAG.getDataLayout().isLittleEndian()) {
3046 if (isV9)
3047 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3048 else
3049 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3050 } else {
3051 if (isV9)
3052 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3053 else
3054 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3055 }
3056
3057 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3058 dl, MVT::f128), 0);
3059 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3060 DstReg128, Hi64);
3061 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3062 DstReg128, Lo64);
3063 return DstReg128;
3064}
3065
3066 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3067   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3068 // Expand with a fence.
3069 return SDValue();
3070 }
3071
3072 // Monotonic load/stores are legal.
3073 return Op;
3074}
3075
3076 SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3077                                                      SelectionDAG &DAG) const {
3078 unsigned IntNo = Op.getConstantOperandVal(0);
3079 switch (IntNo) {
3080 default: return SDValue(); // Don't custom lower most intrinsics.
3081 case Intrinsic::thread_pointer: {
3082 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3083 return DAG.getRegister(SP::G7, PtrVT);
3084 }
3085 }
3086}
3087
3088 SDValue SparcTargetLowering::
3089 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3090
3091 bool hasHardQuad = Subtarget->hasHardQuad();
3092 bool isV9 = Subtarget->isV9();
3093 bool is64Bit = Subtarget->is64Bit();
3094
3095 switch (Op.getOpcode()) {
3096 default: llvm_unreachable("Should not custom lower this!");
3097
3098 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3099 Subtarget);
3100 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3101 Subtarget);
3102   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
3103   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
3104 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3105 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3106 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3107 hasHardQuad);
3108 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3109 hasHardQuad);
3110 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3111 hasHardQuad);
3112 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3113 hasHardQuad);
3114 case ISD::BR_CC:
3115 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3116 case ISD::SELECT_CC:
3117 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3118 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3119 case ISD::VAARG: return LowerVAARG(Op, DAG);
3120 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3121 Subtarget);
3122
3123 case ISD::LOAD: return LowerLOAD(Op, DAG);
3124 case ISD::STORE: return LowerSTORE(Op, DAG);
3125 case ISD::FADD: return LowerF128Op(Op, DAG,
3126 getLibcallName(RTLIB::ADD_F128), 2);
3127 case ISD::FSUB: return LowerF128Op(Op, DAG,
3128 getLibcallName(RTLIB::SUB_F128), 2);
3129 case ISD::FMUL: return LowerF128Op(Op, DAG,
3130 getLibcallName(RTLIB::MUL_F128), 2);
3131 case ISD::FDIV: return LowerF128Op(Op, DAG,
3132 getLibcallName(RTLIB::DIV_F128), 2);
3133 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3134 getLibcallName(RTLIB::SQRT_F128),1);
3135 case ISD::FABS:
3136 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3137 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3138 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3139 case ISD::ATOMIC_LOAD:
3140 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3141   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3142   }
3143}
3144
3145 SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3146                                                     const SDLoc &DL,
3147 SelectionDAG &DAG) const {
3148 APInt V = C->getValueAPF().bitcastToAPInt();
3149 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3150 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3151 if (DAG.getDataLayout().isLittleEndian())
3152 std::swap(Lo, Hi);
3153 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3154}
3155
3156 SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3157                                                    DAGCombinerInfo &DCI) const {
3158 SDLoc dl(N);
3159 SDValue Src = N->getOperand(0);
3160
3161 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3162 Src.getSimpleValueType() == MVT::f64)
3163     return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3164
3165 return SDValue();
3166}
3167
3168 SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3169                                                DAGCombinerInfo &DCI) const {
3170 switch (N->getOpcode()) {
3171 default:
3172 break;
3173 case ISD::BITCAST:
3174 return PerformBITCASTCombine(N, DCI);
3175 }
3176 return SDValue();
3177}
3178
3179 MachineBasicBlock *
3180 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3181                                                  MachineBasicBlock *BB) const {
3182 switch (MI.getOpcode()) {
3183 default: llvm_unreachable("Unknown SELECT_CC!");
3184 case SP::SELECT_CC_Int_ICC:
3185 case SP::SELECT_CC_FP_ICC:
3186 case SP::SELECT_CC_DFP_ICC:
3187 case SP::SELECT_CC_QFP_ICC:
3188 if (Subtarget->isV9())
3189 return expandSelectCC(MI, BB, SP::BPICC);
3190 return expandSelectCC(MI, BB, SP::BCOND);
3191 case SP::SELECT_CC_Int_XCC:
3192 case SP::SELECT_CC_FP_XCC:
3193 case SP::SELECT_CC_DFP_XCC:
3194 case SP::SELECT_CC_QFP_XCC:
3195 return expandSelectCC(MI, BB, SP::BPXCC);
3196 case SP::SELECT_CC_Int_FCC:
3197 case SP::SELECT_CC_FP_FCC:
3198 case SP::SELECT_CC_DFP_FCC:
3199 case SP::SELECT_CC_QFP_FCC:
3200 if (Subtarget->isV9())
3201 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3202 return expandSelectCC(MI, BB, SP::FBCOND);
3203 }
3204}
3205
3206 MachineBasicBlock *
3207 SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3208                                     unsigned BROpcode) const {
3209 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3210 DebugLoc dl = MI.getDebugLoc();
3211 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3212
3213 // To "insert" a SELECT_CC instruction, we actually have to insert the
3214 // triangle control-flow pattern. The incoming instruction knows the
3215 // destination vreg to set, the condition code register to branch on, the
3216 // true/false values to select between, and the condition code for the branch.
3217 //
3218 // We produce the following control flow:
3219 // ThisMBB
3220 // | \
3221 // | IfFalseMBB
3222 // | /
3223 // SinkMBB
3224 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3225   MachineFunction::iterator It = ++BB->getIterator();
3226
3227 MachineBasicBlock *ThisMBB = BB;
3228 MachineFunction *F = BB->getParent();
3229 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3230 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3231 F->insert(It, IfFalseMBB);
3232 F->insert(It, SinkMBB);
3233
3234 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3235 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3236 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3237 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3238
3239 // Set the new successors for ThisMBB.
3240 ThisMBB->addSuccessor(IfFalseMBB);
3241 ThisMBB->addSuccessor(SinkMBB);
3242
3243 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3244 .addMBB(SinkMBB)
3245 .addImm(CC);
3246
3247 // IfFalseMBB just falls through to SinkMBB.
3248 IfFalseMBB->addSuccessor(SinkMBB);
3249
3250 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3251 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3252 MI.getOperand(0).getReg())
3253 .addReg(MI.getOperand(1).getReg())
3254 .addMBB(ThisMBB)
3255 .addReg(MI.getOperand(2).getReg())
3256 .addMBB(IfFalseMBB);
3257
3258 MI.eraseFromParent(); // The pseudo instruction is gone now.
3259 return SinkMBB;
3260}
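// Rough shape of the emitted triangle for a V9 integer select (hand-drawn
// illustration, not compiler output):
//   ThisMBB:    ...
//               bp<cond> %icc, SinkMBB   ! branch if the condition holds
//   IfFalseMBB: ! empty, falls through
//   SinkMBB:    %dst = PHI [%true, ThisMBB], [%false, IfFalseMBB]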
3261
3262//===----------------------------------------------------------------------===//
3263// Sparc Inline Assembly Support
3264//===----------------------------------------------------------------------===//
3265
3266/// getConstraintType - Given a constraint letter, return the type of
3267/// constraint it is for this target.
3268 SparcTargetLowering::ConstraintType
3269 SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3270   if (Constraint.size() == 1) {
3271 switch (Constraint[0]) {
3272 default: break;
3273 case 'r':
3274 case 'f':
3275 case 'e':
3276 return C_RegisterClass;
3277 case 'I': // SIMM13
3278 return C_Immediate;
3279 }
3280 }
3281
3282 return TargetLowering::getConstraintType(Constraint);
3283}
3284
3285 TargetLowering::ConstraintWeight SparcTargetLowering::
3286 getSingleConstraintMatchWeight(AsmOperandInfo &info,
3287                                const char *constraint) const {
3288   ConstraintWeight weight = CW_Invalid;
3289   Value *CallOperandVal = info.CallOperandVal;
3290 // If we don't have a value, we can't do a match,
3291 // but allow it at the lowest weight.
3292 if (!CallOperandVal)
3293 return CW_Default;
3294
3295 // Look at the constraint type.
3296 switch (*constraint) {
3297 default:
3298     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3299     break;
3300 case 'I': // SIMM13
3301 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3302 if (isInt<13>(C->getSExtValue()))
3303 weight = CW_Constant;
3304 }
3305 break;
3306 }
3307 return weight;
3308}
3309
3310/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3311/// vector. If it is invalid, don't add anything to Ops.
3312 void SparcTargetLowering::LowerAsmOperandForConstraint(
3313     SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3314 SelectionDAG &DAG) const {
3315 SDValue Result;
3316
3317 // Only support length 1 constraints for now.
3318 if (Constraint.size() > 1)
3319 return;
3320
3321 char ConstraintLetter = Constraint[0];
3322 switch (ConstraintLetter) {
3323 default: break;
3324 case 'I':
3325     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3326       if (isInt<13>(C->getSExtValue())) {
3327 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3328 Op.getValueType());
3329 break;
3330 }
3331 return;
3332 }
3333 }
3334
3335 if (Result.getNode()) {
3336 Ops.push_back(Result);
3337 return;
3338 }
3339   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3340 }
3341
3342std::pair<unsigned, const TargetRegisterClass *>
3343 SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3344                                                   StringRef Constraint,
3345 MVT VT) const {
3346 if (Constraint.empty())
3347 return std::make_pair(0U, nullptr);
3348
3349 if (Constraint.size() == 1) {
3350 switch (Constraint[0]) {
3351 case 'r':
3352 if (VT == MVT::v2i32)
3353 return std::make_pair(0U, &SP::IntPairRegClass);
3354 else if (Subtarget->is64Bit())
3355 return std::make_pair(0U, &SP::I64RegsRegClass);
3356 else
3357 return std::make_pair(0U, &SP::IntRegsRegClass);
3358 case 'f':
3359 if (VT == MVT::f32 || VT == MVT::i32)
3360 return std::make_pair(0U, &SP::FPRegsRegClass);
3361 else if (VT == MVT::f64 || VT == MVT::i64)
3362 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3363 else if (VT == MVT::f128)
3364 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3365 // This will generate an error message
3366 return std::make_pair(0U, nullptr);
3367 case 'e':
3368 if (VT == MVT::f32 || VT == MVT::i32)
3369 return std::make_pair(0U, &SP::FPRegsRegClass);
3370 else if (VT == MVT::f64 || VT == MVT::i64 )
3371 return std::make_pair(0U, &SP::DFPRegsRegClass);
3372 else if (VT == MVT::f128)
3373 return std::make_pair(0U, &SP::QFPRegsRegClass);
3374 // This will generate an error message
3375 return std::make_pair(0U, nullptr);
3376 }
3377 }
3378
3379 if (Constraint.front() != '{')
3380 return std::make_pair(0U, nullptr);
3381
3382 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3383 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3384 if (RegName.empty())
3385 return std::make_pair(0U, nullptr);
3386
3387 unsigned long long RegNo;
3388 // Handle numbered register aliases.
3389 if (RegName[0] == 'r' &&
3390 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3391 // r0-r7 -> g0-g7
3392 // r8-r15 -> o0-o7
3393 // r16-r23 -> l0-l7
3394 // r24-r31 -> i0-i7
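    // e.g. "{r26}" is rewritten to "{i2}" and resolved recursively below.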
3395 if (RegNo > 31)
3396 return std::make_pair(0U, nullptr);
3397 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3398 char RegType = RegTypes[RegNo / 8];
3399 char RegIndex = '0' + (RegNo % 8);
3400 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3401 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3402 }
3403
3404 // Rewrite the fN constraint according to the value type if needed.
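  // e.g. an f64 value constrained to "{f2}" resolves to "{d1}", and an f128
  // value in "{f4}" resolves to "{q1}"; a misaligned register such as "{f3}"
  // for f64 is rejected.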
3405 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3406 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3407 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3408      return getRegForInlineAsmConstraint(
3409 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3410 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3411      return getRegForInlineAsmConstraint(
3412 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3413 } else {
3414 return std::make_pair(0U, nullptr);
3415 }
3416 }
3417
3418 auto ResultPair =
3419      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3420 if (!ResultPair.second)
3421 return std::make_pair(0U, nullptr);
3422
3423 // Force the use of I64Regs over IntRegs for 64-bit values.
3424 if (Subtarget->is64Bit() && VT == MVT::i64) {
3425 assert(ResultPair.second == &SP::IntRegsRegClass &&
3426 "Unexpected register class");
3427 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3428 }
3429
3430 return ResultPair;
3431}
3432
3433bool
3434SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3435 // The Sparc target isn't yet aware of offsets.
3436 return false;
3437}
3438
3439void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3440                                             SmallVectorImpl<SDValue>& Results,
3441 SelectionDAG &DAG) const {
3442
3443 SDLoc dl(N);
3444
3445 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3446
3447 switch (N->getOpcode()) {
3448 default:
3449 llvm_unreachable("Do not know how to custom type legalize this operation!");
3450
3451 case ISD::FP_TO_SINT:
3452 case ISD::FP_TO_UINT:
3453    // Custom lower only the f128 -> i64 case.
3454 if (N->getOperand(0).getValueType() != MVT::f128
3455 || N->getValueType(0) != MVT::i64)
3456 return;
3457 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3458 ? RTLIB::FPTOSINT_F128_I64
3459 : RTLIB::FPTOUINT_F128_I64);
3460
3461 Results.push_back(LowerF128Op(SDValue(N, 0),
3462 DAG,
3463 getLibcallName(libCall),
3464 1));
3465 return;
3466 case ISD::READCYCLECOUNTER: {
3467 assert(Subtarget->hasLeonCycleCounter());
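    // %asr23 holds the 32-bit LEON cycle counter; the high word of the i64
    // result is read from %g0, which is hardwired to zero.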
3468 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3469 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3470 SDValue Ops[] = { Lo, Hi };
3471 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3472 Results.push_back(Pair);
3473 Results.push_back(N->getOperand(0));
3474 return;
3475 }
3476 case ISD::SINT_TO_FP:
3477 case ISD::UINT_TO_FP:
3478    // Custom lower only the i64 -> f128 case.
3479 if (N->getValueType(0) != MVT::f128
3480 || N->getOperand(0).getValueType() != MVT::i64)
3481 return;
3482
3483 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3484 ? RTLIB::SINTTOFP_I64_F128
3485 : RTLIB::UINTTOFP_I64_F128);
3486
3487 Results.push_back(LowerF128Op(SDValue(N, 0),
3488 DAG,
3489 getLibcallName(libCall),
3490 1));
3491 return;
3492 case ISD::LOAD: {
3493    LoadSDNode *Ld = cast<LoadSDNode>(N);
3494    // Custom handling only for i64: turn an i64 load into a v2i32 load
3495    // and a bitcast.
3496 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3497 return;
3498
3499 SDLoc dl(N);
3500 SDValue LoadRes = DAG.getExtLoad(
3501 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3502 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3503 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3504
3505 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3506 Results.push_back(Res);
3507 Results.push_back(LoadRes.getValue(1));
3508 return;
3509 }
3510 }
3511}
3512
3513// Override to enable LOAD_STACK_GUARD lowering on Linux.
3514bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
3515 if (!Subtarget->getTargetTriple().isOSLinux())
3516    return TargetLowering::useLoadStackGuardNode(M);
3517 return true;
3518}
3519
3520bool SparcTargetLowering::isFNegFree(EVT VT) const {
3521 if (Subtarget->isVIS3())
3522 return VT == MVT::f32 || VT == MVT::f64;
3523 return false;
3524}
3525
3526bool SparcTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3527 bool ForCodeSize) const {
3528 if (VT != MVT::f32 && VT != MVT::f64)
3529 return false;
3530 if (Subtarget->isVIS() && Imm.isZero())
3531 return true;
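  // (VIS1 provides fzero/fzeros, which materialize zero directly in an FP
  // register.)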
3532 if (Subtarget->isVIS3())
3533 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3534 Imm.getExactLog2Abs() == -1;
3535 return false;
3536}
3537
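// VIS3 provides a leading-zero-count instruction, so ctlz is a single
// operation there.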
3538bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3539
3540bool SparcTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3541  // We lack a native cttz instruction; however, on 64-bit targets it is
3542  // cheap to implement it in terms of popc.
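  // (One popc-based formulation: cttz(x) == popc(~x & (x - 1)).)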
3543 if (Subtarget->is64Bit() && Subtarget->usePopc())
3544 return true;
3545 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3546 return isCheapToSpeculateCtlz(Ty);
3547}
3548
3549bool SparcTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3550 EVT VT) const {
3551 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3552}
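// (UA2007 introduced the fused multiply-add/subtract instructions (FMAf)
// that make a single FMA cheaper than separate fmul and fadd.)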
3553
3554void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3555 SDNode *Node) const {
3556 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3557 // If the result is dead, replace it with %g0.
3558 if (!Node->hasAnyUseOfValue(0))
3559 MI.getOperand(0).setReg(SP::G0);
3560}
3561
3562Instruction *SparcTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
3563 Instruction *Inst,
3564 AtomicOrdering Ord) const {
3565 bool HasStoreSemantics =
3566      isa<AtomicCmpXchgInst, AtomicRMWInst, StoreInst>(Inst);
3567 if (HasStoreSemantics && isReleaseOrStronger(Ord))
3568 return Builder.CreateFence(AtomicOrdering::Release);
3569 return nullptr;
3570}
3571
3572Instruction *SparcTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
3573 Instruction *Inst,
3574 AtomicOrdering Ord) const {
3575  // V8 loads already come with an implicit acquire barrier, so there's no
3576  // need to emit it again.
3577 bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
3578 if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
3579 return Builder.CreateFence(AtomicOrdering::Acquire);
3580
3581  // Sequentially consistent (SC) plain stores need a trailing full barrier.
3582  if (isa<StoreInst>(Inst) && isStrongerThanMonotonic(Ord))
3583 return Builder.CreateFence(Ord);
3584 return nullptr;
3585}
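// Taken together with emitLeadingFence above, a seq_cst plain store ends up
// bracketed by a leading release fence and a trailing seq_cst fence.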