// SparcISelLowering.cpp — from LLVM 22.0.0git (doxygen export header removed).
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
using namespace llvm;


//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

46static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
47 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
48 ISD::ArgFlagsTy &ArgFlags, CCState &State)
49{
50 assert (ArgFlags.isSRet());
51
52 // Assign SRet argument.
53 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
54 0,
55 LocVT, LocInfo));
56 return true;
57}
58
59static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
60 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
61 ISD::ArgFlagsTy &ArgFlags, CCState &State)
62{
63 static const MCPhysReg RegList[] = {
64 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
65 };
66 // Try to get first reg.
67 if (Register Reg = State.AllocateReg(RegList)) {
68 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
69 } else {
70 // Assign whole thing in stack.
72 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
73 return true;
74 }
75
76 // Try to get second reg.
77 if (Register Reg = State.AllocateReg(RegList))
78 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
79 else
81 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
82 return true;
83}
84
85static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
86 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
87 ISD::ArgFlagsTy &ArgFlags, CCState &State)
88{
89 static const MCPhysReg RegList[] = {
90 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
91 };
92
93 // Try to get first reg.
94 if (Register Reg = State.AllocateReg(RegList))
95 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
96 else
97 return false;
98
99 // Try to get second reg.
100 if (Register Reg = State.AllocateReg(RegList))
101 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
102 else
103 return false;
104
105 return true;
106}
107
108// Allocate a full-sized argument for the 64-bit ABI.
109static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
110 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
111 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
112 assert((LocVT == MVT::f32 || LocVT == MVT::f128
113 || LocVT.getSizeInBits() == 64) &&
114 "Can't handle non-64 bits locations");
115
116 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
117 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
118 Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
119 unsigned Offset = State.AllocateStack(size, alignment);
120 unsigned Reg = 0;
121
122 if (LocVT == MVT::i64 && Offset < 6*8)
123 // Promote integers to %i0-%i5.
124 Reg = SP::I0 + Offset/8;
125 else if (LocVT == MVT::f64 && Offset < 16*8)
126 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
127 Reg = SP::D0 + Offset/8;
128 else if (LocVT == MVT::f32 && Offset < 16*8)
129 // Promote floats to %f1, %f3, ...
130 Reg = SP::F1 + Offset/4;
131 else if (LocVT == MVT::f128 && Offset < 16*8)
132 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
133 Reg = SP::Q0 + Offset/16;
134
135 // Promote to register when possible, otherwise use the stack slot.
136 if (Reg) {
137 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
138 return true;
139 }
140
141 // Bail out if this is a return CC and we run out of registers to place
142 // values into.
143 if (IsReturn)
144 return false;
145
146 // This argument goes on the stack in an 8-byte slot.
147 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
148 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
149 if (LocVT == MVT::f32)
150 Offset += 4;
151
152 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
153 return true;
154}
155
156// Allocate a half-sized argument for the 64-bit ABI.
157//
158// This is used when passing { float, int } structs by value in registers.
159static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
160 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
161 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
162 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
163 unsigned Offset = State.AllocateStack(4, Align(4));
164
165 if (LocVT == MVT::f32 && Offset < 16*8) {
166 // Promote floats to %f0-%f31.
167 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
168 LocVT, LocInfo));
169 return true;
170 }
171
172 if (LocVT == MVT::i32 && Offset < 6*8) {
173 // Promote integers to %i0-%i5, using half the register.
174 unsigned Reg = SP::I0 + Offset/8;
175 LocVT = MVT::i64;
176 LocInfo = CCValAssign::AExt;
177
178 // Set the Custom bit if this i32 goes in the high bits of a register.
179 if (Offset % 8 == 0)
180 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
181 LocVT, LocInfo));
182 else
183 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
184 return true;
185 }
186
187 // Bail out if this is a return CC and we run out of registers to place
188 // values into.
189 if (IsReturn)
190 return false;
191
192 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
193 return true;
194}
195
196static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
197 CCValAssign::LocInfo &LocInfo,
198 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
199 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
200 State);
201}
202
203static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
204 CCValAssign::LocInfo &LocInfo,
205 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
206 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
207 State);
208}
209
210static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
211 CCValAssign::LocInfo &LocInfo,
212 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
213 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
214 State);
215}
216
217static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
218 CCValAssign::LocInfo &LocInfo,
219 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
220 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
221 State);
222}

#include "SparcGenCallingConv.inc"

226// The calling conventions in SparcCallingConv.td are described in terms of the
227// callee's register window. This function translates registers to the
228// corresponding caller window %o register.
229static unsigned toCallerWindow(unsigned Reg) {
230 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
231 "Unexpected enum");
232 if (Reg >= SP::I0 && Reg <= SP::I7)
233 return Reg - SP::I0 + SP::O0;
234 return Reg;
235}
236
238 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
239 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
240 const Type *RetTy) const {
242 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
243 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
244 : RetCC_Sparc32);
245}
246
249 bool IsVarArg,
251 const SmallVectorImpl<SDValue> &OutVals,
252 const SDLoc &DL, SelectionDAG &DAG) const {
253 if (Subtarget->is64Bit())
254 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
255 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
256}
257
260 bool IsVarArg,
262 const SmallVectorImpl<SDValue> &OutVals,
263 const SDLoc &DL, SelectionDAG &DAG) const {
265
266 // CCValAssign - represent the assignment of the return value to locations.
268
269 // CCState - Info about the registers and stack slot.
270 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
271 *DAG.getContext());
272
273 // Analyze return values.
274 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
275
276 SDValue Glue;
277 SmallVector<SDValue, 4> RetOps(1, Chain);
278 // Make room for the return address offset.
279 RetOps.push_back(SDValue());
280
281 // Copy the result values into the output registers.
282 for (unsigned i = 0, realRVLocIdx = 0;
283 i != RVLocs.size();
284 ++i, ++realRVLocIdx) {
285 CCValAssign &VA = RVLocs[i];
286 assert(VA.isRegLoc() && "Can only return in registers!");
287
288 SDValue Arg = OutVals[realRVLocIdx];
289
290 if (VA.needsCustom()) {
291 assert(VA.getLocVT() == MVT::v2i32);
292 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
293 // happen by default if this wasn't a legal type)
294
295 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
296 Arg,
298 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
299 Arg,
301
302 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
303 Glue = Chain.getValue(1);
304 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
305 VA = RVLocs[++i]; // skip ahead to next loc
306 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
307 Glue);
308 } else
309 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
310
311 // Guarantee that all emitted copies are stuck together with flags.
312 Glue = Chain.getValue(1);
313 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
314 }
315
316 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
317 // If the function returns a struct, copy the SRetReturnReg to I0
318 if (MF.getFunction().hasStructRetAttr()) {
320 Register Reg = SFI->getSRetReturnReg();
321 if (!Reg)
322 llvm_unreachable("sret virtual register not created in the entry block");
323 auto PtrVT = getPointerTy(DAG.getDataLayout());
324 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
325 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
326 Glue = Chain.getValue(1);
327 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
328 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
329 }
330
331 RetOps[0] = Chain; // Update chain.
332 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
333
334 // Add the glue if we have it.
335 if (Glue.getNode())
336 RetOps.push_back(Glue);
337
338 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
339}
340
341// Lower return values for the 64-bit ABI.
342// Return values are passed the exactly the same way as function arguments.
345 bool IsVarArg,
347 const SmallVectorImpl<SDValue> &OutVals,
348 const SDLoc &DL, SelectionDAG &DAG) const {
349 // CCValAssign - represent the assignment of the return value to locations.
351
352 // CCState - Info about the registers and stack slot.
353 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
354 *DAG.getContext());
355
356 // Analyze return values.
357 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
358
359 SDValue Glue;
360 SmallVector<SDValue, 4> RetOps(1, Chain);
361
362 // The second operand on the return instruction is the return address offset.
363 // The return address is always %i7+8 with the 64-bit ABI.
364 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
365
366 // Copy the result values into the output registers.
367 for (unsigned i = 0; i != RVLocs.size(); ++i) {
368 CCValAssign &VA = RVLocs[i];
369 assert(VA.isRegLoc() && "Can only return in registers!");
370 SDValue OutVal = OutVals[i];
371
372 // Integer return values must be sign or zero extended by the callee.
373 switch (VA.getLocInfo()) {
374 case CCValAssign::Full: break;
376 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
377 break;
379 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
380 break;
382 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
383 break;
384 default:
385 llvm_unreachable("Unknown loc info!");
386 }
387
388 // The custom bit on an i32 return value indicates that it should be passed
389 // in the high bits of the register.
390 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
391 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
392 DAG.getConstant(32, DL, MVT::i32));
393
394 // The next value may go in the low bits of the same register.
395 // Handle both at once.
396 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
397 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
398 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
399 // Skip the next value, it's already done.
400 ++i;
401 }
402 }
403
404 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
405
406 // Guarantee that all emitted copies are stuck together with flags.
407 Glue = Chain.getValue(1);
408 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
409 }
410
411 RetOps[0] = Chain; // Update chain.
412
413 // Add the flag if we have it.
414 if (Glue.getNode())
415 RetOps.push_back(Glue);
416
417 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
418}
419
421 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
422 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
423 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
424 if (Subtarget->is64Bit())
425 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
426 DL, DAG, InVals);
427 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
428 DL, DAG, InVals);
429}
430
431/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
432/// passed in either one or two GPRs, including FP values. TODO: we should
433/// pass FP values in FP registers for fastcc functions.
435 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
436 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
437 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
439 MachineRegisterInfo &RegInfo = MF.getRegInfo();
441
442 // Assign locations to all of the incoming arguments.
444 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
445 *DAG.getContext());
446 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
447
448 const unsigned StackOffset = 92;
449 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
450
451 unsigned InIdx = 0;
452 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
453 CCValAssign &VA = ArgLocs[i];
454
455 if (Ins[InIdx].Flags.isSRet()) {
456 if (InIdx != 0)
457 report_fatal_error("sparc only supports sret on the first parameter");
458 // Get SRet from [%fp+64].
459 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
460 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
461 SDValue Arg =
462 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
463 InVals.push_back(Arg);
464 continue;
465 }
466
467 if (VA.isRegLoc()) {
468 if (VA.needsCustom()) {
469 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
470
471 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
472 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
473 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
474
475 assert(i+1 < e);
476 CCValAssign &NextVA = ArgLocs[++i];
477
478 SDValue LoVal;
479 if (NextVA.isMemLoc()) {
480 int FrameIdx = MF.getFrameInfo().
481 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
482 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
483 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
484 } else {
485 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
486 &SP::IntRegsRegClass);
487 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
488 }
489
490 if (IsLittleEndian)
491 std::swap(LoVal, HiVal);
492
493 SDValue WholeValue =
494 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
495 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
496 InVals.push_back(WholeValue);
497 continue;
498 }
499 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
500 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
501 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
502 if (VA.getLocVT() == MVT::f32)
503 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
504 else if (VA.getLocVT() != MVT::i32) {
505 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
506 DAG.getValueType(VA.getLocVT()));
507 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
508 }
509 InVals.push_back(Arg);
510 continue;
511 }
512
513 assert(VA.isMemLoc());
514
515 unsigned Offset = VA.getLocMemOffset()+StackOffset;
516 auto PtrVT = getPointerTy(DAG.getDataLayout());
517
518 if (VA.needsCustom()) {
519 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
520 // If it is double-word aligned, just load.
521 if (Offset % 8 == 0) {
522 int FI = MF.getFrameInfo().CreateFixedObject(8,
523 Offset,
524 true);
525 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
526 SDValue Load =
527 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
528 InVals.push_back(Load);
529 continue;
530 }
531
532 int FI = MF.getFrameInfo().CreateFixedObject(4,
533 Offset,
534 true);
535 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
536 SDValue HiVal =
537 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
538 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
539 Offset+4,
540 true);
541 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
542
543 SDValue LoVal =
544 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
545
546 if (IsLittleEndian)
547 std::swap(LoVal, HiVal);
548
549 SDValue WholeValue =
550 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
551 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
552 InVals.push_back(WholeValue);
553 continue;
554 }
555
556 int FI = MF.getFrameInfo().CreateFixedObject(4,
557 Offset,
558 true);
559 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
560 SDValue Load ;
561 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
562 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
563 } else if (VA.getValVT() == MVT::f128) {
564 report_fatal_error("SPARCv8 does not handle f128 in calls; "
565 "pass indirectly");
566 } else {
567 // We shouldn't see any other value types here.
568 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
569 }
570 InVals.push_back(Load);
571 }
572
573 if (MF.getFunction().hasStructRetAttr()) {
574 // Copy the SRet Argument to SRetReturnReg.
576 Register Reg = SFI->getSRetReturnReg();
577 if (!Reg) {
578 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
579 SFI->setSRetReturnReg(Reg);
580 }
581 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
582 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
583 }
584
585 // Store remaining ArgRegs to the stack if this is a varargs function.
586 if (isVarArg) {
587 static const MCPhysReg ArgRegs[] = {
588 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
589 };
590 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
591 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
592 unsigned ArgOffset = CCInfo.getStackSize();
593 if (NumAllocated == 6)
594 ArgOffset += StackOffset;
595 else {
596 assert(!ArgOffset);
597 ArgOffset = 68+4*NumAllocated;
598 }
599
600 // Remember the vararg offset for the va_start implementation.
601 FuncInfo->setVarArgsFrameOffset(ArgOffset);
602
603 std::vector<SDValue> OutChains;
604
605 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
606 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
607 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
608 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
609
610 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
611 true);
612 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
613
614 OutChains.push_back(
615 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
616 ArgOffset += 4;
617 }
618
619 if (!OutChains.empty()) {
620 OutChains.push_back(Chain);
621 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
622 }
623 }
624
625 return Chain;
626}
627
628// Lower formal arguments for the 64 bit ABI.
630 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
631 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
632 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
634
635 // Analyze arguments according to CC_Sparc64.
637 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
638 *DAG.getContext());
639 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
640
641 // The argument array begins at %fp+BIAS+128, after the register save area.
642 const unsigned ArgArea = 128;
643
644 for (const CCValAssign &VA : ArgLocs) {
645 if (VA.isRegLoc()) {
646 // This argument is passed in a register.
647 // All integer register arguments are promoted by the caller to i64.
648
649 // Create a virtual register for the promoted live-in value.
650 Register VReg = MF.addLiveIn(VA.getLocReg(),
651 getRegClassFor(VA.getLocVT()));
652 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
653
654 // Get the high bits for i32 struct elements.
655 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
656 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
657 DAG.getConstant(32, DL, MVT::i32));
658
659 // The caller promoted the argument, so insert an Assert?ext SDNode so we
660 // won't promote the value again in this function.
661 switch (VA.getLocInfo()) {
663 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
664 DAG.getValueType(VA.getValVT()));
665 break;
667 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
668 DAG.getValueType(VA.getValVT()));
669 break;
670 default:
671 break;
672 }
673
674 // Truncate the register down to the argument type.
675 if (VA.isExtInLoc())
676 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
677
678 InVals.push_back(Arg);
679 continue;
680 }
681
682 // The registers are exhausted. This argument was passed on the stack.
683 assert(VA.isMemLoc());
684 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
685 // beginning of the arguments area at %fp+BIAS+128.
686 unsigned Offset = VA.getLocMemOffset() + ArgArea;
687 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
688 // Adjust offset for extended arguments, SPARC is big-endian.
689 // The caller will have written the full slot with extended bytes, but we
690 // prefer our own extending loads.
691 if (VA.isExtInLoc())
692 Offset += 8 - ValSize;
693 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
694 InVals.push_back(
695 DAG.getLoad(VA.getValVT(), DL, Chain,
698 }
699
700 if (!IsVarArg)
701 return Chain;
702
703 // This function takes variable arguments, some of which may have been passed
704 // in registers %i0-%i5. Variable floating point arguments are never passed
705 // in floating point registers. They go on %i0-%i5 or on the stack like
706 // integer arguments.
707 //
708 // The va_start intrinsic needs to know the offset to the first variable
709 // argument.
710 unsigned ArgOffset = CCInfo.getStackSize();
712 // Skip the 128 bytes of register save area.
713 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
714 Subtarget->getStackPointerBias());
715
716 // Save the variable arguments that were passed in registers.
717 // The caller is required to reserve stack space for 6 arguments regardless
718 // of how many arguments were actually passed.
719 SmallVector<SDValue, 8> OutChains;
720 for (; ArgOffset < 6*8; ArgOffset += 8) {
721 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
722 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
723 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
724 auto PtrVT = getPointerTy(MF.getDataLayout());
725 OutChains.push_back(
726 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
728 }
729
730 if (!OutChains.empty())
731 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
732
733 return Chain;
734}
735
736// Check whether any of the argument registers are reserved
738 const MachineFunction &MF) {
739 // The register window design means that outgoing parameters at O*
740 // will appear in the callee as I*.
741 // Be conservative and check both sides of the register names.
742 bool Outgoing =
743 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
744 return TRI->isReservedReg(MF, r);
745 });
746 bool Incoming =
747 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
748 return TRI->isReservedReg(MF, r);
749 });
750 return Outgoing || Incoming;
751}
752
754 const Function &F = MF.getFunction();
755 F.getContext().diagnose(DiagnosticInfoUnsupported{
756 F, ("SPARC doesn't support"
757 " function calls if any of the argument registers is reserved.")});
758}
759
762 SmallVectorImpl<SDValue> &InVals) const {
763 if (Subtarget->is64Bit())
764 return LowerCall_64(CLI, InVals);
765 return LowerCall_32(CLI, InVals);
766}
767
768static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
769 const CallBase *Call) {
770 if (Call)
771 return Call->hasFnAttr(Attribute::ReturnsTwice);
772
773 const Function *CalleeFn = nullptr;
774 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
775 CalleeFn = dyn_cast<Function>(G->getGlobal());
776 } else if (ExternalSymbolSDNode *E =
777 dyn_cast<ExternalSymbolSDNode>(Callee)) {
778 const Function &Fn = DAG.getMachineFunction().getFunction();
779 const Module *M = Fn.getParent();
780 const char *CalleeName = E->getSymbol();
781 CalleeFn = M->getFunction(CalleeName);
782 }
783
784 if (!CalleeFn)
785 return false;
786 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
787}
788
789/// IsEligibleForTailCallOptimization - Check whether the call is eligible
790/// for tail call optimization.
792 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
793
794 auto &Outs = CLI.Outs;
795 auto &Caller = MF.getFunction();
796
797 // Do not tail call opt functions with "disable-tail-calls" attribute.
798 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
799 return false;
800
801 // Do not tail call opt if the stack is used to pass parameters.
802 // 64-bit targets have a slightly higher limit since the ABI requires
803 // to allocate some space even when all the parameters fit inside registers.
804 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
805 if (CCInfo.getStackSize() > StackSizeLimit)
806 return false;
807
808 // Do not tail call opt if either the callee or caller returns
809 // a struct and the other does not.
810 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
811 return false;
812
813 // Byval parameters hand the function a pointer directly into the stack area
814 // we want to reuse during a tail call.
815 for (auto &Arg : Outs)
816 if (Arg.Flags.isByVal())
817 return false;
818
819 return true;
820}
821
822// Lower a call for the 32-bit ABI.
825 SmallVectorImpl<SDValue> &InVals) const {
826 SelectionDAG &DAG = CLI.DAG;
827 SDLoc &dl = CLI.DL;
829 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
831 SDValue Chain = CLI.Chain;
832 SDValue Callee = CLI.Callee;
833 bool &isTailCall = CLI.IsTailCall;
834 CallingConv::ID CallConv = CLI.CallConv;
835 bool isVarArg = CLI.IsVarArg;
837
838 // Analyze operands of the call, assigning locations to each operand.
840 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
841 *DAG.getContext());
842 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
843
844 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
845 CCInfo, CLI, DAG.getMachineFunction());
846
847 // Get the size of the outgoing arguments stack space requirement.
848 unsigned ArgsSize = CCInfo.getStackSize();
849
850 // Keep stack frames 8-byte aligned.
851 ArgsSize = (ArgsSize+7) & ~7;
852
854
855 // Create local copies for byval args.
856 SmallVector<SDValue, 8> ByValArgs;
857 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
858 ISD::ArgFlagsTy Flags = Outs[i].Flags;
859 if (!Flags.isByVal())
860 continue;
861
862 SDValue Arg = OutVals[i];
863 unsigned Size = Flags.getByValSize();
864 Align Alignment = Flags.getNonZeroByValAlign();
865
866 if (Size > 0U) {
867 int FI = MFI.CreateStackObject(Size, Alignment, false);
868 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
869 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
870
871 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
872 false, // isVolatile,
873 (Size <= 32), // AlwaysInline if size <= 32,
874 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
876 ByValArgs.push_back(FIPtr);
877 }
878 else {
879 SDValue nullVal;
880 ByValArgs.push_back(nullVal);
881 }
882 }
883
884 assert(!isTailCall || ArgsSize == 0);
885
886 if (!isTailCall)
887 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
888
890 SmallVector<SDValue, 8> MemOpChains;
891
892 const unsigned StackOffset = 92;
893 bool hasStructRetAttr = false;
894 unsigned SRetArgSize = 0;
895 // Walk the register/memloc assignments, inserting copies/loads.
896 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
897 i != e;
898 ++i, ++realArgIdx) {
899 CCValAssign &VA = ArgLocs[i];
900 SDValue Arg = OutVals[realArgIdx];
901
902 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
903
904 // Use local copy if it is a byval arg.
905 if (Flags.isByVal()) {
906 Arg = ByValArgs[byvalArgIdx++];
907 if (!Arg) {
908 continue;
909 }
910 }
911
912 // Promote the value if needed.
913 switch (VA.getLocInfo()) {
914 default: llvm_unreachable("Unknown loc info!");
915 case CCValAssign::Full: break;
917 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
918 break;
920 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
921 break;
923 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
924 break;
926 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
927 break;
928 }
929
930 if (Flags.isSRet()) {
931 assert(VA.needsCustom());
932
933 if (isTailCall)
934 continue;
935
936 // store SRet argument in %sp+64
937 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
938 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
939 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
940 MemOpChains.push_back(
941 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
942 hasStructRetAttr = true;
943 // sret only allowed on first argument
944 assert(Outs[realArgIdx].OrigArgIndex == 0);
945 SRetArgSize =
946 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
947 continue;
948 }
949
950 if (VA.needsCustom()) {
951 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
952
953 if (VA.isMemLoc()) {
954 unsigned Offset = VA.getLocMemOffset() + StackOffset;
955 // if it is double-word aligned, just store.
956 if (Offset % 8 == 0) {
957 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
958 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
959 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
960 MemOpChains.push_back(
961 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
962 continue;
963 }
964 }
965
966 if (VA.getLocVT() == MVT::f64) {
967 // Move from the float value from float registers into the
968 // integer registers.
969 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
970 Arg = bitcastConstantFPToInt(C, dl, DAG);
971 else
972 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
973 }
974
975 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
976 Arg,
977 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
978 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
979 Arg,
980 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
981
982 if (VA.isRegLoc()) {
983 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
984 assert(i+1 != e);
985 CCValAssign &NextVA = ArgLocs[++i];
986 if (NextVA.isRegLoc()) {
987 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
988 } else {
989 // Store the second part in stack.
990 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
991 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
992 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
993 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
994 MemOpChains.push_back(
995 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
996 }
997 } else {
998 unsigned Offset = VA.getLocMemOffset() + StackOffset;
999 // Store the first part.
1000 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1001 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1002 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1003 MemOpChains.push_back(
1004 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1005 // Store the second part.
1006 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1007 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1008 MemOpChains.push_back(
1009 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1010 }
1011 continue;
1012 }
1013
1014 // Arguments that can be passed on register must be kept at
1015 // RegsToPass vector
1016 if (VA.isRegLoc()) {
1017 if (VA.getLocVT() != MVT::f32) {
1018 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1019 continue;
1020 }
1021 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1022 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1023 continue;
1024 }
1025
1026 assert(VA.isMemLoc());
1027
1028 // Create a store off the stack pointer for this argument.
1029 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1031 dl);
1032 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1033 MemOpChains.push_back(
1034 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1035 }
1036
1037
1038 // Emit all stores, make sure the occur before any copies into physregs.
1039 if (!MemOpChains.empty())
1040 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1041
1042 // Build a sequence of copy-to-reg nodes chained together with token
1043 // chain and flag operands which copy the outgoing args into registers.
1044 // The InGlue in necessary since all emitted instructions must be
1045 // stuck together.
1046 SDValue InGlue;
1047 for (const auto &[OrigReg, N] : RegsToPass) {
1048 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1049 Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
1050 InGlue = Chain.getValue(1);
1051 }
1052
1053 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1054
1055 // If the callee is a GlobalAddress node (quite common, every direct call is)
1056 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1057 // Likewise ExternalSymbol -> TargetExternalSymbol.
1058 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1059 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
1060 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1061 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1062
1063 // Returns a chain & a flag for retval copy to use
1064 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1066 Ops.push_back(Chain);
1067 Ops.push_back(Callee);
1068 if (hasStructRetAttr)
1069 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1070 for (const auto &[OrigReg, N] : RegsToPass) {
1071 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1072 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1073 }
1074
1075 // Add a register mask operand representing the call-preserved registers.
1076 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1077 const uint32_t *Mask =
1078 ((hasReturnsTwice)
1079 ? TRI->getRTCallPreservedMask(CallConv)
1080 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1081
1082 if (isAnyArgRegReserved(TRI, MF))
1084
1085 assert(Mask && "Missing call preserved mask for calling convention");
1086 Ops.push_back(DAG.getRegisterMask(Mask));
1087
1088 if (InGlue.getNode())
1089 Ops.push_back(InGlue);
1090
1091 if (isTailCall) {
1093 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1094 }
1095
1096 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1097 InGlue = Chain.getValue(1);
1098
1099 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1100 InGlue = Chain.getValue(1);
1101
1102 // Assign locations to each value returned by this call.
1104 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1105 *DAG.getContext());
1106
1107 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1108
1109 // Copy all of the result registers out of their specified physreg.
1110 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1111 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1112 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1113 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1115 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1116 Chain = Lo.getValue(1);
1117 InGlue = Lo.getValue(2);
1118 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1119 DAG.getConstant(0, dl, MVT::i32));
1121 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1122 Chain = Hi.getValue(1);
1123 InGlue = Hi.getValue(2);
1124 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1125 DAG.getConstant(1, dl, MVT::i32));
1126 InVals.push_back(Vec);
1127 } else {
1128 Chain =
1129 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1130 RVLocs[i].getValVT(), InGlue)
1131 .getValue(1);
1132 InGlue = Chain.getValue(2);
1133 InVals.push_back(Chain.getValue(0));
1134 }
1135 }
1136
1137 return Chain;
1138}
1139
1140// FIXME? Maybe this could be a TableGen attribute on some registers and
1141// this table could be generated automatically from RegInfo.
// Resolve a textual register name (as used by GCC's explicit register
// variable extension) to the corresponding SPARC physical register.
// Only the windowed integer registers %i0-%i7, %o0-%o7, %l0-%l7 and the
// globals %g0-%g7 are recognized; any other name maps to the null register.
1143 const MachineFunction &MF) const {
1145 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1146 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1147 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1148 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1149 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1150 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1151 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1152 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1153 .Default(0);
1154
1155 // If we're directly referencing register names
1156 // (e.g in GCC C extension `register int r asm("g1");`),
1157 // make sure that said register is in the reserve list.
1158 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
// A name that matched the table but is not reserved for this function is
// rejected by returning the null register.
1159 if (!TRI->isReservedReg(MF, Reg))
1160 Reg = Register();
1161
1162 return Reg;
1163}
1164
1165// Fixup floating point arguments in the ... part of a varargs call.
1166//
1167// The SPARC v9 ABI requires that floating point arguments are treated the same
1168// as integers when calling a varargs function. This does not apply to the
1169// fixed arguments that are part of the function's prototype.
1170//
1171// This function post-processes a CCValAssign array created by
1172// AnalyzeCallOperands().
1175 for (CCValAssign &VA : ArgLocs) {
1176 MVT ValTy = VA.getLocVT();
1177 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1178 // varargs functions.
// Only f64/f128 values that were assigned to FP registers need fixing.
1179 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1180 continue;
1181 // The fixed arguments to a varargs function still go in FP registers.
1182 if (!Outs[VA.getValNo()].Flags.isVarArg())
1183 continue;
1184
1185 // This floating point argument should be reassigned.
1186 // Determine the offset into the argument array.
// An f64 occupies one 8-byte argument slot, an f128 occupies two; the
// offset is computed from the FP register's distance from D0/Q0.
1187 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1188 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1189 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1190 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1191
// The first six 8-byte slots are passed in integer registers; anything
// beyond that goes to the stack argument area.
1192 if (Offset < 6*8) {
1193 // This argument should go in %i0-%i5.
1194 unsigned IReg = SP::I0 + Offset/8;
1195 if (ValTy == MVT::f64)
1196 // Full register, just bitconvert into i64.
1197 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1199 else {
1200 assert(ValTy == MVT::f128 && "Unexpected type!");
1201 // Full register, just bitconvert into i128 -- We will lower this into
1202 // two i64s in LowerCall_64.
1203 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1204 MVT::i128, CCValAssign::BCvt);
1205 }
1206 } else {
1207 // This needs to go to memory, we're out of integer registers.
1208 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1209 VA.getLocVT(), VA.getLocInfo());
1210 }
1211 }
1212}
1213
1214// Lower a call for the 64-bit ABI.
1215SDValue
1217 SmallVectorImpl<SDValue> &InVals) const {
// CLI describes the call (callee, operands, calling convention, tail-call
// flag); lowered return values are appended to InVals. Returns the updated
// token chain, or the TAIL_CALL node for tail calls.
1218 SelectionDAG &DAG = CLI.DAG;
1219 SDLoc DL = CLI.DL;
1220 SDValue Chain = CLI.Chain;
1221 auto PtrVT = getPointerTy(DAG.getDataLayout());
1223
1224 // Analyze operands of the call, assigning locations to each operand.
1226 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1227 *DAG.getContext());
1228 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1229
1231 CCInfo, CLI, DAG.getMachineFunction());
1232
1233 // Get the size of the outgoing arguments stack space requirement.
1234 // The stack offset computed by CC_Sparc64 includes all arguments.
1235 // Called functions expect 6 argument words to exist in the stack frame, used
1236 // or not.
1237 unsigned StackReserved = 6 * 8u;
1238 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1239
1240 // Keep stack frames 16-byte aligned.
1241 ArgsSize = alignTo(ArgsSize, 16);
1242
1243 // Varargs calls require special treatment.
1244 if (CLI.IsVarArg)
1245 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1246
// Tail calls must not need any extra stack space beyond the reserved area.
1247 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1248
1249 // Adjust the stack pointer to make room for the arguments.
1250 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1251 // with more than 6 arguments.
1252 if (!CLI.IsTailCall)
1253 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1254
1255 // Collect the set of registers to pass to the function and their values.
1256 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1257 // instruction.
1259
1260 // Collect chains from all the memory opeations that copy arguments to the
1261 // stack. They must follow the stack pointer adjustment above and precede the
1262 // call instruction itself.
1263 SmallVector<SDValue, 8> MemOpChains;
1264
1265 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1266 const CCValAssign &VA = ArgLocs[i];
1267 SDValue Arg = CLI.OutVals[i];
1268
1269 // Promote the value if needed.
1270 switch (VA.getLocInfo()) {
1271 default:
1272 llvm_unreachable("Unknown location info!");
1273 case CCValAssign::Full:
1274 break;
1275 case CCValAssign::SExt:
1276 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1277 break;
1278 case CCValAssign::ZExt:
1279 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1280 break;
1281 case CCValAssign::AExt:
1282 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1283 break;
1284 case CCValAssign::BCvt:
1285 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1286 // SPARC does not support i128 natively. Lower it into two i64, see below.
1287 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1288 || VA.getLocVT() != MVT::i128)
1289 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1290 break;
1291 }
1292
1293 if (VA.isRegLoc()) {
1294 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1295 && VA.getLocVT() == MVT::i128) {
1296 // Store and reload into the integer register reg and reg+1.
1297 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1298 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1299 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1300 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1301 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1302 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1303 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1304
1305 // Store to %sp+BIAS+128+Offset
1306 SDValue Store =
1307 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1308 // Load into Reg and Reg+1
1309 SDValue Hi64 =
1310 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1311 SDValue Lo64 =
1312 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1313
1314 Register HiReg = VA.getLocReg();
1315 Register LoReg = VA.getLocReg() + 1;
// toCallerWindow remaps the assigned registers for ordinary calls; tail
// calls keep the registers exactly as assigned.
1316 if (!CLI.IsTailCall) {
1317 HiReg = toCallerWindow(HiReg);
1318 LoReg = toCallerWindow(LoReg);
1319 }
1320
1321 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1322 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1323 continue;
1324 }
1325
1326 // The custom bit on an i32 return value indicates that it should be
1327 // passed in the high bits of the register.
1328 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1329 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1330 DAG.getConstant(32, DL, MVT::i32));
1331
1332 // The next value may go in the low bits of the same register.
1333 // Handle both at once.
1334 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1335 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1336 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1337 CLI.OutVals[i+1]);
1338 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1339 // Skip the next value, it's already done.
1340 ++i;
1341 }
1342 }
1343
1344 Register Reg = VA.getLocReg();
1345 if (!CLI.IsTailCall)
1346 Reg = toCallerWindow(Reg);
1347 RegsToPass.push_back(std::make_pair(Reg, Arg));
1348 continue;
1349 }
1350
1351 assert(VA.isMemLoc());
1352
1353 // Create a store off the stack pointer for this argument.
1354 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1355 // The argument area starts at %fp+BIAS+128 in the callee frame,
1356 // %sp+BIAS+128 in ours.
1357 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1358 Subtarget->getStackPointerBias() +
1359 128, DL);
1360 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1361 MemOpChains.push_back(
1362 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1363 }
1364
1365 // Emit all stores, make sure they occur before the call.
1366 if (!MemOpChains.empty())
1367 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1368
1369 // Build a sequence of CopyToReg nodes glued together with token chain and
1370 // glue operands which copy the outgoing args into registers. The InGlue is
1371 // necessary since all emitted instructions must be stuck together in order
1372 // to pass the live physical registers.
1373 SDValue InGlue;
1374 for (const auto &[Reg, N] : RegsToPass) {
1375 Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
1376 InGlue = Chain.getValue(1);
1377 }
1378
1379 // If the callee is a GlobalAddress node (quite common, every direct call is)
1380 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1381 // Likewise ExternalSymbol -> TargetExternalSymbol.
1382 SDValue Callee = CLI.Callee;
1383 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1384 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1385 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
1386 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1387 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1388
1389 // Build the operands for the call instruction itself.
1391 Ops.push_back(Chain);
1392 Ops.push_back(Callee);
1393 for (const auto &[Reg, N] : RegsToPass)
1394 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1395
1396 // Add a register mask operand representing the call-preserved registers.
// returns_twice callees (e.g. setjmp-like) use a more conservative mask.
1397 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1398 const uint32_t *Mask =
1399 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1400 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1401 CLI.CallConv));
1402
1403 if (isAnyArgRegReserved(TRI, MF))
1405
1406 assert(Mask && "Missing call preserved mask for calling convention");
1407 Ops.push_back(DAG.getRegisterMask(Mask));
1408
1409 // Make sure the CopyToReg nodes are glued to the call instruction which
1410 // consumes the registers.
1411 if (InGlue.getNode())
1412 Ops.push_back(InGlue);
1413
1414 // Now the call itself.
1415 if (CLI.IsTailCall) {
1417 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1418 }
1419 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1420 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1421 InGlue = Chain.getValue(1);
1422
1423 // Revert the stack pointer immediately after the call.
1424 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1425 InGlue = Chain.getValue(1);
1426
1427 // Now extract the return values. This is more or less the same as
1428 // LowerFormalArguments_64.
1429
1430 // Assign locations to each value returned by this call.
1432 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1433 *DAG.getContext());
1434
1435 // Set inreg flag manually for codegen generated library calls that
1436 // return float.
1437 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1438 CLI.Ins[0].Flags.setInReg();
1439
1440 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1441
1442 // Copy all of the result registers out of their specified physreg.
1443 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1444 CCValAssign &VA = RVLocs[i];
1445 assert(VA.isRegLoc() && "Can only return in registers!");
1446 unsigned Reg = toCallerWindow(VA.getLocReg());
1447
1448 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1449 // reside in the same register in the high and low bits. Reuse the
1450 // CopyFromReg previous node to avoid duplicate copies.
1451 SDValue RV;
1452 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1453 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1454 RV = Chain.getValue(0);
1455
1456 // But usually we'll create a new CopyFromReg for a different register.
1457 if (!RV.getNode()) {
1458 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1459 Chain = RV.getValue(1);
1460 InGlue = Chain.getValue(2);
1461 }
1462
1463 // Get the high bits for i32 struct elements.
1464 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1465 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1466 DAG.getConstant(32, DL, MVT::i32));
1467
1468 // The callee promoted the return value, so insert an Assert?ext SDNode so
1469 // we won't promote the value again in this function.
1470 switch (VA.getLocInfo()) {
1471 case CCValAssign::SExt:
1472 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1473 DAG.getValueType(VA.getValVT()));
1474 break;
1475 case CCValAssign::ZExt:
1476 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1477 DAG.getValueType(VA.getValVT()));
1478 break;
1479 default:
1480 break;
1481 }
1482
1483 // Truncate the register down to the return value type.
1484 if (VA.isExtInLoc())
1485 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1486
1487 InVals.push_back(RV);
1488 }
1489
1490 return Chain;
1491}
1492
1493//===----------------------------------------------------------------------===//
1494// TargetLowering Implementation
1495//===----------------------------------------------------------------------===//
1496
// Decide how an atomicrmw instruction should be expanded by the
// AtomicExpand pass. A 32-bit exchange needs no expansion because it can be
// selected directly; everything else falls through to the default below
// (NOTE(review): the default-return line is elided in this listing —
// confirm against the full source).
1498 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1499 AI->getType()->getPrimitiveSizeInBits() == 32)
1500 return AtomicExpansionKind::None; // Uses xchg instruction
1501
1503}
1504
1505/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1506/// rcond condition.
/// Only the signed comparison codes are representable as register
/// conditions; unsigned codes hit the llvm_unreachable default.
1508 switch (CC) {
1509 default:
1510 llvm_unreachable("Unknown/unsigned integer condition code!");
1511 case ISD::SETEQ:
1512 return SPCC::REG_Z;
1513 case ISD::SETNE:
1514 return SPCC::REG_NZ;
1515 case ISD::SETLT:
1516 return SPCC::REG_LZ;
1517 case ISD::SETGT:
1518 return SPCC::REG_GZ;
1519 case ISD::SETLE:
1520 return SPCC::REG_LEZ;
1521 case ISD::SETGE:
1522 return SPCC::REG_GEZ;
1523 }
1524}
1525
1526/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1527/// condition.
/// Handles both signed (SETLT -> ICC_L, ...) and unsigned codes; the
/// unsigned comparisons map to the carry-based conditions (e.g. SETULT ->
/// ICC_CS, SETUGE -> ICC_CC).
1529 switch (CC) {
1530 default: llvm_unreachable("Unknown integer condition code!");
1531 case ISD::SETEQ: return SPCC::ICC_E;
1532 case ISD::SETNE: return SPCC::ICC_NE;
1533 case ISD::SETLT: return SPCC::ICC_L;
1534 case ISD::SETGT: return SPCC::ICC_G;
1535 case ISD::SETLE: return SPCC::ICC_LE;
1536 case ISD::SETGE: return SPCC::ICC_GE;
1537 case ISD::SETULT: return SPCC::ICC_CS;
1538 case ISD::SETULE: return SPCC::ICC_LEU;
1539 case ISD::SETUGT: return SPCC::ICC_GU;
1540 case ISD::SETUGE: return SPCC::ICC_CC;
1541 }
1542}
1543
1544/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1545/// FCC condition.
/// Ordered and "don't care" codes share the same FCC condition (e.g. SETEQ
/// and SETOEQ both map to FCC_E); the explicitly-unordered codes map to the
/// U-qualified conditions.
1547 switch (CC) {
1548 default: llvm_unreachable("Unknown fp condition code!");
1549 case ISD::SETEQ:
1550 case ISD::SETOEQ: return SPCC::FCC_E;
1551 case ISD::SETNE:
1552 case ISD::SETUNE: return SPCC::FCC_NE;
1553 case ISD::SETLT:
1554 case ISD::SETOLT: return SPCC::FCC_L;
1555 case ISD::SETGT:
1556 case ISD::SETOGT: return SPCC::FCC_G;
1557 case ISD::SETLE:
1558 case ISD::SETOLE: return SPCC::FCC_LE;
1559 case ISD::SETGE:
1560 case ISD::SETOGE: return SPCC::FCC_GE;
1561 case ISD::SETULT: return SPCC::FCC_UL;
1562 case ISD::SETULE: return SPCC::FCC_ULE;
1563 case ISD::SETUGT: return SPCC::FCC_UG;
1564 case ISD::SETUGE: return SPCC::FCC_UGE;
1565 case ISD::SETUO: return SPCC::FCC_U;
1566 case ISD::SETO: return SPCC::FCC_O;
1567 case ISD::SETONE: return SPCC::FCC_LG;
1568 case ISD::SETUEQ: return SPCC::FCC_UE;
1569 }
1570}
1571
1573 const SparcSubtarget &STI)
1574 : TargetLowering(TM), Subtarget(&STI) {
// Constructor: registers legal types, per-operation legalization actions,
// and f128 runtime-library entry points, keyed off subtarget features
// (V9, 64-bit, VIS3, hard quad, LEON variants, soft float).
1575 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1576
1577 // Instructions which use registers as conditionals examine all the
1578 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1579 // matters much whether it's ZeroOrOneBooleanContent, or
1580 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1581 // former.
1584
1585 // Set up the register classes.
1586 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1587 if (!Subtarget->useSoftFloat()) {
1588 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1589 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1590 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1591 }
1592 if (Subtarget->is64Bit()) {
1593 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1594 } else {
1595 // On 32bit sparc, we define a double-register 32bit register
1596 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1597 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1598
1599 // ...but almost all operations must be expanded, so set that as
1600 // the default.
1601 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1602 setOperationAction(Op, MVT::v2i32, Expand);
1603 }
1604 // Truncating/extending stores/loads are also not supported.
1606 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1607 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1608 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1609
1610 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1611 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1612 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1613
1614 setTruncStoreAction(VT, MVT::v2i32, Expand);
1615 setTruncStoreAction(MVT::v2i32, VT, Expand);
1616 }
1617 // However, load and store *are* legal.
1618 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1619 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1622
1623 // And we need to promote i64 loads/stores into vector load/store
1626
1627 // Sadly, this doesn't work:
1628 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1629 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1630 }
1631
1632 // Turn FP extload into load/fpextend
1633 for (MVT VT : MVT::fp_valuetypes()) {
1634 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1635 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1636 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1637 }
1638
1639 // Sparc doesn't have i1 sign extending load
1640 for (MVT VT : MVT::integer_valuetypes())
1641 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1642
1643 // Turn FP truncstore into trunc + store.
1644 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1645 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1646 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1647 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1648 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1649 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1650
1651 // Custom legalize GlobalAddress nodes into LO/HI parts.
1656
1657 // Sparc doesn't have sext_inreg, replace them with shl/sra
1661
1662 // Sparc has no REM or DIVREM operations.
1667
1668 // ... nor does SparcV9.
1669 if (Subtarget->is64Bit()) {
1674 }
1675
1676 // Custom expand fp<->sint
1681
1682 // Custom Expand fp<->uint
1687
1688 // Lower f16 conversion operations into library calls
1695
1697 Subtarget->isVIS3() ? Legal : Expand);
1699 Subtarget->isVIS3() ? Legal : Expand);
1700
1701 // Sparc has no select or setcc: expand to SELECT_CC.
1706
1711
1712 // Sparc doesn't have BRCOND either, it has BR_CC.
1714 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1715 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1720
1725
1730
1731 if (Subtarget->isVIS3()) {
1734 }
1735
1736 if (Subtarget->is64Bit()) {
1738 Subtarget->isVIS3() ? Legal : Expand);
1740 Subtarget->isVIS3() ? Legal : Expand);
1745
1747 Subtarget->usePopc() ? Legal : Expand);
1749 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1750 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1752 }
1753
1754 // ATOMICs.
1755 // Atomics are supported on SparcV9. 32-bit atomics are also
1756 // supported by some Leon SparcV8 variants. Otherwise, atomics
1757 // are unsupported.
1758 if (Subtarget->isV9()) {
1759 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1760 // but it hasn't been implemented in the backend yet.
1761 if (Subtarget->is64Bit())
1763 else
1765 } else if (Subtarget->hasLeonCasa())
1767 else
1769
1771
1773
1775
1776 // Custom Lower Atomic LOAD/STORE
1779
1780 if (Subtarget->is64Bit()) {
1785 }
1786
1787 if (!Subtarget->isV9()) {
1788 // SparcV8 does not have FNEGD and FABSD.
1791 }
1792
// Trig/rem/pow and (on pre-UA2007 subtargets) FMA have no hardware
// support; expand them for every FP type.
1793 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1794 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1796 setOperationAction(ISD::FREM , MVT::f128, Expand);
1797 setOperationAction(ISD::FMA , MVT::f128, Expand);
1798 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1799 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1801 setOperationAction(ISD::FREM , MVT::f64, Expand);
1802 setOperationAction(ISD::FMA, MVT::f64,
1803 Subtarget->isUA2007() ? Legal : Expand);
1804 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1805 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1807 setOperationAction(ISD::FREM , MVT::f32, Expand);
1808 setOperationAction(ISD::FMA, MVT::f32,
1809 Subtarget->isUA2007() ? Legal : Expand);
1810 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1811 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1816 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1817 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1818 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1819
1823
1824 // Expands to [SU]MUL_LOHI.
1828
1829 if (Subtarget->useSoftMulDiv()) {
1830 // .umul works for both signed and unsigned
1835 }
1836
1837 if (Subtarget->is64Bit()) {
1841 Subtarget->isVIS3() ? Legal : Expand);
1843 Subtarget->isVIS3() ? Legal : Expand);
1844
1848 }
1849
1850 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1851 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1852 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1853 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1854
1855 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1857
1858 // Use the default implementation.
1859 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1860 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1864
1866
1868 Subtarget->usePopc() ? Legal : Expand);
1869
// f128 loads/stores are only directly supported with V9 + hard quad.
1870 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1871 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1872 setOperationAction(ISD::STORE, MVT::f128, Legal);
1873 } else {
1874 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1876 }
1877
1878 if (Subtarget->hasHardQuad()) {
1879 setOperationAction(ISD::FADD, MVT::f128, Legal);
1880 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1881 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1882 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1883 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1886 if (Subtarget->isV9()) {
1887 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1888 setOperationAction(ISD::FABS, MVT::f128, Legal);
1889 } else {
1890 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1891 setOperationAction(ISD::FABS, MVT::f128, Custom);
1892 }
1893 } else {
1894 // Custom legalize f128 operations.
1895
1896 setOperationAction(ISD::FADD, MVT::f128, Custom);
1897 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1898 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1899 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1901 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1902 setOperationAction(ISD::FABS, MVT::f128, Custom);
1903
1907
1908 // Setup Runtime library names.
// V9 uses the _Qp_* soft-quad entry points; V8 uses the _Q_* ones.
1909 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1910 setLibcallImpl(RTLIB::ADD_F128, RTLIB::_Qp_add);
1911 setLibcallImpl(RTLIB::SUB_F128, RTLIB::_Qp_sub);
1912 setLibcallImpl(RTLIB::MUL_F128, RTLIB::_Qp_mul);
1913 setLibcallImpl(RTLIB::DIV_F128, RTLIB::_Qp_div);
1914 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::_Qp_sqrt);
1915 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::_Qp_qtoi);
1916 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::_Qp_qtoui);
1917 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::_Qp_itoq);
1918 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::_Qp_uitoq);
1919 setLibcallImpl(RTLIB::FPTOSINT_F128_I64, RTLIB::_Qp_qtox);
1920 setLibcallImpl(RTLIB::FPTOUINT_F128_I64, RTLIB::_Qp_qtoux);
1921 setLibcallImpl(RTLIB::SINTTOFP_I64_F128, RTLIB::_Qp_xtoq);
1922 setLibcallImpl(RTLIB::UINTTOFP_I64_F128, RTLIB::_Qp_uxtoq);
1923 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::_Qp_stoq);
1924 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::_Qp_dtoq);
1925 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::_Qp_qtos);
1926 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::_Qp_qtod);
1927 } else if (!Subtarget->useSoftFloat()) {
1928 setLibcallImpl(RTLIB::ADD_F128, RTLIB::_Q_add);
1929 setLibcallImpl(RTLIB::SUB_F128, RTLIB::_Q_sub);
1930 setLibcallImpl(RTLIB::MUL_F128, RTLIB::_Q_mul);
1931 setLibcallImpl(RTLIB::DIV_F128, RTLIB::_Q_div);
1932 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::_Q_sqrt);
1933 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::_Q_qtoi);
1934 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::_Q_qtou);
1935 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::_Q_itoq);
1936 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::_Q_utoq);
1937 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::_Q_stoq);
1938 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::_Q_dtoq);
1939 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::_Q_qtos);
1940 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::_Q_qtod);
1941 }
1942 }
1943
1944 if (Subtarget->fixAllFDIVSQRT()) {
1945 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1946 // the former instructions generate errata on LEON processors.
1949 }
1950
1951 if (Subtarget->hasNoFMULS()) {
1953 }
1954
1955 // Custom combine bitcast between f64 and v2i32
1956 if (!Subtarget->is64Bit())
1958
1959 if (Subtarget->hasLeonCycleCounter())
1961
1962 if (Subtarget->isVIS3()) {
1967
1968 setOperationAction(ISD::CTTZ, MVT::i32,
1969 Subtarget->is64Bit() ? Promote : Expand);
1972 Subtarget->is64Bit() ? Promote : Expand);
1974 } else if (Subtarget->usePopc()) {
1979
1984 } else {
1988 Subtarget->is64Bit() ? Promote : LibCall);
1990
1991 // FIXME here we don't have any ISA extensions that could help us, so to
1992 // prevent large expansions those should be made into LibCalls.
1997 }
1998
2000
2002
2004}
2005
// Reports whether this target lowers FP in software; forwards the
// subtarget's soft-float setting.
// NOTE(review): the signature line (2006) is elided in this view.
2007 return Subtarget->useSoftFloat();
2008}
2009
2011 EVT VT) const {
// Scalar comparisons on Sparc produce their boolean result as an i32.
// NOTE(review): the vector-typed fallthrough (line 2014) is elided in this
// view — confirm against the full source.
2012 if (!VT.isVector())
2013 return MVT::i32;
2015}
2016
2017/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2018/// be zero. Op is expected to be a target specific node. Used by DAG
2019/// combiner.
2021 (const SDValue Op,
2022 KnownBits &Known,
2023 const APInt &DemandedElts,
2024 const SelectionDAG &DAG,
2025 unsigned Depth) const {
// Nothing is known by default; only the SPISD select nodes refine Known,
// to the bits common to both values the select can produce.
2026 KnownBits Known2;
2027 Known.resetAll();
2028
2029 switch (Op.getOpcode()) {
2030 default: break;
2031 case SPISD::SELECT_ICC:
2032 case SPISD::SELECT_XCC:
2033 case SPISD::SELECT_FCC:
2034 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2035 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2036
2037 // Only known if known in both the LHS and RHS.
2038 Known = Known.intersectWith(Known2);
2039 break;
2040 }
2041}
2042
2043// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2044// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2045static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2046 ISD::CondCode CC, unsigned &SPCC) {
2047 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2048 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2049 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2050 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2051 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2052 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2053 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2054 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2055 SDValue CMPCC = LHS.getOperand(3);
2056 SPCC = LHS.getConstantOperandVal(2);
2057 LHS = CMPCC.getOperand(0);
2058 RHS = CMPCC.getOperand(1);
2059 }
2060}
2061
2062// Convert to a target node and set target flags.
2064 SelectionDAG &DAG) const {
// Rebuild the address operand as its Target* counterpart carrying the
// relocation/target flag TF; any other node kind is a lowering bug.
2065 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2066 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2067 SDLoc(GA),
2068 GA->getValueType(0),
2069 GA->getOffset(), TF);
2070
2071 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2072 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2073 CP->getAlign(), CP->getOffset(), TF);
2074
2075 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2076 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2077 Op.getValueType(),
2078 0,
2079 TF);
2080
2081 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2082 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2083 ES->getValueType(0), TF);
2084
2085 llvm_unreachable("Unhandled address SDNode");
2086}
2087
2088// Split Op into high and low parts according to HiTF and LoTF.
2089// Return an ADD node combining the parts.
2091 unsigned HiTF, unsigned LoTF,
2092 SelectionDAG &DAG) const {
// SPISD::Hi/Lo select %hi()/%lo()-style relocations via the given target
// flags; adding the two pieces reconstructs the full value.
2093 SDLoc DL(Op);
2094 EVT VT = Op.getValueType();
2095 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2096 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2097 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2098}
2099
2100// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2101// or ExternalSymbol SDNode.
// In PIC mode the address is loaded from the GOT relative to
// GLOBAL_BASE_REG; otherwise the code model selects an abs32/abs44/abs64
// materialization pattern.
2103 SDLoc DL(Op);
2104 EVT VT = getPointerTy(DAG.getDataLayout());
2105
2106 // Handle PIC mode first. SPARC needs a got load for every variable!
2107 if (isPositionIndependent()) {
2108 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2109 PICLevel::Level picLevel = M->getPICLevel();
2110 SDValue Idx;
2111
2112 if (picLevel == PICLevel::SmallPIC) {
2113 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2114 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2115 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2116 } else {
2117 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2118 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2119 }
2120
2121 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2122 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2123 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2124 // function has calls.
// NOTE(review): the MFI declaration (line 2125) and the load's pointer-info
// argument (line 2128) are elided in this view.
2126 MFI.setHasCalls(true);
2127 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2129 }
2130
2131 // This is one of the absolute code models.
2132 switch(getTargetMachine().getCodeModel()) {
2133 default:
2134 llvm_unreachable("Unsupported absolute code model");
2135 case CodeModel::Small:
2136 // abs32.
2137 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2138 case CodeModel::Medium: {
2139 // abs44.
2140 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2141 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2142 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2143 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2144 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2145 }
2146 case CodeModel::Large: {
2147 // abs64.
2148 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2149 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2150 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2151 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2152 }
2153 }
2154}
2155
2157 SelectionDAG &DAG) const {
// Thin wrapper deferring to the shared makeAddress() helper.
// NOTE(review): the method name (line 2156) is elided in this view —
// presumably LowerGlobalAddress; confirm against the full source.
2158 return makeAddress(Op, DAG);
2159}
2160
2162 SelectionDAG &DAG) const {
// Thin wrapper deferring to the shared makeAddress() helper.
// NOTE(review): the method name (line 2161) is elided in this view —
// presumably LowerConstantPool; confirm against the full source.
2163 return makeAddress(Op, DAG);
2164}
2165
2167 SelectionDAG &DAG) const {
// Thin wrapper deferring to the shared makeAddress() helper.
// NOTE(review): the method name (line 2166) is elided in this view —
// presumably LowerBlockAddress; confirm against the full source.
2168 return makeAddress(Op, DAG);
2169}
2170
2172 SelectionDAG &DAG) const {
// Lower the address of a thread-local global per TLS model:
// General/Local Dynamic call __tls_get_addr, Initial Exec loads the thread
// offset through the GOT, and Local Exec adds a link-time constant to the
// thread pointer (%g7).
2173
2174 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2175 if (DAG.getTarget().useEmulatedTLS())
2176 return LowerToTLSEmulatedModel(GA, DAG);
2177
2178 SDLoc DL(GA);
2179 const GlobalValue *GV = GA->getGlobal();
2180 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2181
// NOTE(review): the TLS-model query (line 2182) is elided in this view.
2183
2184 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2185 unsigned HiTF =
2186 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2187 : ELF::R_SPARC_TLS_LDM_HI22);
2188 unsigned LoTF =
2189 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2190 : ELF::R_SPARC_TLS_LDM_LO10);
2191 unsigned addTF =
2192 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2193 : ELF::R_SPARC_TLS_LDM_ADD);
2194 unsigned callTF =
2195 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2196 : ELF::R_SPARC_TLS_LDM_CALL);
2197
2198 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2199 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2200 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2201 withTargetFlags(Op, addTF, DAG));
2202
// Build an explicit call sequence to __tls_get_addr: the argument goes in
// %o0 and the result comes back in %o0.
2203 SDValue Chain = DAG.getEntryNode();
2204 SDValue InGlue;
2205
2206 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2207 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2208 InGlue = Chain.getValue(1);
2209 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2210 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2211
2212 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2213 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2215 assert(Mask && "Missing call preserved mask for calling convention");
2216 SDValue Ops[] = {Chain,
2217 Callee,
2218 Symbol,
2219 DAG.getRegister(SP::O0, PtrVT),
2220 DAG.getRegisterMask(Mask),
2221 InGlue};
2222 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2223 InGlue = Chain.getValue(1);
2224 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2225 InGlue = Chain.getValue(1);
2226 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2227
2228 if (model != TLSModel::LocalDynamic)
2229 return Ret;
2230
// Local Dynamic additionally adds the module-relative LDO offset to the
// module base returned by the call.
2231 SDValue Hi =
2232 DAG.getNode(SPISD::Hi, DL, PtrVT,
2233 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2234 SDValue Lo =
2235 DAG.getNode(SPISD::Lo, DL, PtrVT,
2236 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2237 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2238 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2239 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2240 }
2241
2242 if (model == TLSModel::InitialExec) {
2243 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2244 : ELF::R_SPARC_TLS_IE_LD);
2245
2246 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2247
2248 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2249 // function has calls.
// NOTE(review): the MFI declaration (line 2250) is elided in this view.
2251 MFI.setHasCalls(true);
2252
2253 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2254 ELF::R_SPARC_TLS_IE_LO10, DAG);
2255 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2256 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2257 DL, PtrVT, Ptr,
2258 withTargetFlags(Op, ldTF, DAG));
2259 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2260 DAG.getRegister(SP::G7, PtrVT), Offset,
2261 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2262 }
2263
2264 assert(model == TLSModel::LocalExec);
2265 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2266 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2267 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2268 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2269 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2270
2271 return DAG.getNode(ISD::ADD, DL, PtrVT,
2272 DAG.getRegister(SP::G7, PtrVT), Offset);
2273}
2274
2276 ArgListTy &Args, SDValue Arg,
2277 const SDLoc &DL,
2278 SelectionDAG &DAG) const {
// Append one argument for an f128 libcall: f128 values are passed
// indirectly (spilled to a 16-byte stack slot whose address is appended);
// everything else is appended by value. Returns the updated chain.
// NOTE(review): the MFI declaration (line 2279) is elided in this view.
2280 EVT ArgVT = Arg.getValueType();
2281 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2282
2283 if (ArgTy->isFP128Ty()) {
2284 // Create a stack object and pass the pointer to the library function.
2285 int FI = MFI.CreateStackObject(16, Align(8), false);
2286 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2287 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2288 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2289 } else {
2290 Args.emplace_back(Arg, ArgTy);
2291 }
2292 return Chain;
2293}
2294
2295SDValue
2297 const char *LibFuncName,
2298 unsigned numArgs) const {
// Emit a call to the named f128 runtime helper, taking numArgs operands
// from Op. An f128 result is returned indirectly: a hidden pointer to a
// 16-byte stack slot is prepended (sret on 32-bit) and the value is loaded
// back out of the slot after the call.
2299
2300 ArgListTy Args;
2301
2303 auto PtrVT = getPointerTy(DAG.getDataLayout());
2304
2305 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2306 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2307 Type *RetTyABI = RetTy;
2308 SDValue Chain = DAG.getEntryNode();
2309 SDValue RetPtr;
2310
2311 if (RetTy->isFP128Ty()) {
2312 // Create a Stack Object to receive the return value of type f128.
2313 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2314 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2315 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2316 if (!Subtarget->is64Bit()) {
2317 Entry.IsSRet = true;
2318 Entry.IndirectType = RetTy;
2319 }
2320 Entry.IsReturned = false;
2321 Args.push_back(Entry);
2322 RetTyABI = Type::getVoidTy(*DAG.getContext());
2323 }
2324
2325 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2326 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2327 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2328 }
2330 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2331 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2332
2333 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2334
2335 // chain is in second result.
2336 if (RetTyABI == RetTy)
2337 return CallInfo.first;
2338
2339 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2340
2341 Chain = CallInfo.second;
2342
2343 // Load RetPtr to get the return value.
2344 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2346}
2347
2349 unsigned &SPCC, const SDLoc &DL,
2350 SelectionDAG &DAG) const {
// Lower an f128 comparison as a call into the soft-fp comparison helpers,
// then rewrite SPCC to an integer condition over the helper's return value.
2351
2352 const char *LibCall = nullptr;
2353 bool is64Bit = Subtarget->is64Bit();
2354 switch(SPCC) {
2355 default: llvm_unreachable("Unhandled conditional code!");
2356 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2357 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2358 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2359 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2360 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2361 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2362 case SPCC::FCC_UL :
2363 case SPCC::FCC_ULE:
2364 case SPCC::FCC_UG :
2365 case SPCC::FCC_UGE:
2366 case SPCC::FCC_U :
2367 case SPCC::FCC_O :
2368 case SPCC::FCC_LG :
2369 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2370 }
2371
2372 auto PtrVT = getPointerTy(DAG.getDataLayout());
2373 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2375 ArgListTy Args;
2376 SDValue Chain = DAG.getEntryNode();
2377 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2378 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2379
2381 CLI.setDebugLoc(DL).setChain(Chain)
2382 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2383
2384 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2385
2386 // result is in first, and chain is in second result.
2387 SDValue Result = CallInfo.first;
2388
// The generic _Q{p}_cmp helper appears to return a small code
// (0=equal, 1=less, 2=greater, 3=unordered — consistent with the masks and
// constants below; confirm against the SPARC software-FP ABI). Each
// unordered-aware condition is re-expressed as an integer compare on it.
2389 switch(SPCC) {
2390 default: {
2391 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2392 SPCC = SPCC::ICC_NE;
2393 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2394 }
2395 case SPCC::FCC_UL : {
2396 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2397 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2398 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2399 SPCC = SPCC::ICC_NE;
2400 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2401 }
2402 case SPCC::FCC_ULE: {
2403 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2404 SPCC = SPCC::ICC_NE;
2405 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2406 }
2407 case SPCC::FCC_UG : {
2408 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2409 SPCC = SPCC::ICC_G;
2410 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2411 }
2412 case SPCC::FCC_UGE: {
2413 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2414 SPCC = SPCC::ICC_NE;
2415 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2416 }
2417
2418 case SPCC::FCC_U : {
2419 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2420 SPCC = SPCC::ICC_E;
2421 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2422 }
2423 case SPCC::FCC_O : {
2424 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2425 SPCC = SPCC::ICC_NE;
2426 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2427 }
2428 case SPCC::FCC_LG : {
2429 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2430 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2431 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2432 SPCC = SPCC::ICC_NE;
2433 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2434 }
2435 case SPCC::FCC_UE : {
2436 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2437 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2438 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2439 SPCC = SPCC::ICC_E;
2440 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2441 }
2442 }
2443}
2444
2445static SDValue
2447 const SparcTargetLowering &TLI) {
// Lower FP_EXTEND to f128 via the matching extension libcall
// (f64->f128 or f32->f128); any other source type is a lowering bug.
2448
2449 if (Op.getOperand(0).getValueType() == MVT::f64)
2450 return TLI.LowerF128Op(Op, DAG,
2451 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2452
2453 if (Op.getOperand(0).getValueType() == MVT::f32)
2454 return TLI.LowerF128Op(Op, DAG,
2455 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2456
2457 llvm_unreachable("fpextend with non-float operand!");
2458 return SDValue();
2459}
2460
2461static SDValue
2463 const SparcTargetLowering &TLI) {
// Lower FP_ROUND from f128 via the matching truncation libcall;
// rounds between f64/f32 are already legal and pass through unchanged.
2464 // FP_ROUND on f64 and f32 are legal.
2465 if (Op.getOperand(0).getValueType() != MVT::f128)
2466 return Op;
2467
2468 if (Op.getValueType() == MVT::f64)
2469 return TLI.LowerF128Op(Op, DAG,
2470 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2471 if (Op.getValueType() == MVT::f32)
2472 return TLI.LowerF128Op(Op, DAG,
2473 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2474
2475 llvm_unreachable("fpround to non-float!");
2476 return SDValue();
2477}
2478
2480 const SparcTargetLowering &TLI,
2481 bool hasHardQuad) {
// FP_TO_SINT: f128 sources go through libcalls unless hard-quad can handle
// them; otherwise the conversion happens in an FP register and the result
// is bitcast to the requested integer type.
2482 SDLoc dl(Op);
2483 EVT VT = Op.getValueType();
2484 assert(VT == MVT::i32 || VT == MVT::i64);
2485
2486 // Expand f128 operations to fp128 abi calls.
2487 if (Op.getOperand(0).getValueType() == MVT::f128
2488 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2489 const char *libName = TLI.getLibcallName(VT == MVT::i32
2490 ? RTLIB::FPTOSINT_F128_I32
2491 : RTLIB::FPTOSINT_F128_I64);
2492 return TLI.LowerF128Op(Op, DAG, libName, 1);
2493 }
2494
2495 // Expand if the resulting type is illegal.
2496 if (!TLI.isTypeLegal(VT))
2497 return SDValue();
2498
2499 // Otherwise, Convert the fp value to integer in an FP register.
2500 if (VT == MVT::i32)
2501 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2502 else
2503 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2504
2505 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2506}
2507
2508 const SparcTargetLowering &TLI,
2509 bool hasHardQuad) {
// SINT_TO_FP: f128 destinations go through libcalls unless hard-quad can
// handle them; otherwise the integer is bitcast into an FP register and
// converted there (ITOF for i32, XTOF for i64).
2510 SDLoc dl(Op);
2511 EVT OpVT = Op.getOperand(0).getValueType();
2512 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2513
2514 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2515
2516 // Expand f128 operations to fp128 ABI calls.
2517 if (Op.getValueType() == MVT::f128
2518 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2519 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2520 ? RTLIB::SINTTOFP_I32_F128
2521 : RTLIB::SINTTOFP_I64_F128);
2522 return TLI.LowerF128Op(Op, DAG, libName, 1);
2523 }
2524
2525 // Expand if the operand type is illegal.
2526 if (!TLI.isTypeLegal(OpVT))
2527 return SDValue();
2528
2529 // Otherwise, Convert the int value to FP in an FP register.
2530 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2531 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2532 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2533}
2535
2537 const SparcTargetLowering &TLI,
2538 bool hasHardQuad) {
// FP_TO_UINT: only the f128 source case is custom-lowered (to a libcall);
// everything else is left for generic expansion.
2539 EVT VT = Op.getValueType();
2540
2541 // Expand if it does not involve f128 or the target has support for
2542 // quad floating point instructions and the resulting type is legal.
2543 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2544 (hasHardQuad && TLI.isTypeLegal(VT)))
2545 return SDValue();
2546
2547 assert(VT == MVT::i32 || VT == MVT::i64);
2548
2549 return TLI.LowerF128Op(Op, DAG,
2550 TLI.getLibcallName(VT == MVT::i32
2551 ? RTLIB::FPTOUINT_F128_I32
2552 : RTLIB::FPTOUINT_F128_I64),
2553 1);
2554}
2555
2557 const SparcTargetLowering &TLI,
2558 bool hasHardQuad) {
// UINT_TO_FP: only the f128 destination case is custom-lowered (to a
// libcall); everything else is left for generic expansion.
2559 EVT OpVT = Op.getOperand(0).getValueType();
2560 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2561
2562 // Expand if it does not involve f128 or the target has support for
2563 // quad floating point instructions and the operand type is legal.
2564 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2565 return SDValue();
2566
2567 return TLI.LowerF128Op(Op, DAG,
2568 TLI.getLibcallName(OpVT == MVT::i32
2569 ? RTLIB::UINTTOFP_I32_F128
2570 : RTLIB::UINTTOFP_I64_F128),
2571 1);
2572}
2573
2575 const SparcTargetLowering &TLI, bool hasHardQuad,
2576 bool isV9, bool is64Bit) {
// Lower BR_CC to a Sparc compare + conditional branch pair, picking the
// branch opcode by register class and ISA (icc/xcc/fcc, v9 vs pre-v9);
// f128 compares without hard-quad go through the soft-fp helpers.
2577 SDValue Chain = Op.getOperand(0);
2578 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2579 SDValue LHS = Op.getOperand(2);
2580 SDValue RHS = Op.getOperand(3);
2581 SDValue Dest = Op.getOperand(4);
2582 SDLoc dl(Op);
2583 unsigned Opc, SPCC = ~0U;
2584
2585 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2586 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2587 LookThroughSetCC(LHS, RHS, CC, SPCC);
2588 assert(LHS.getValueType() == RHS.getValueType());
2589
2590 // Get the condition flag.
2591 SDValue CompareFlag;
2592 if (LHS.getValueType().isInteger()) {
2593 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2594 // and the RHS is zero we might be able to use a specialized branch.
// NOTE(review): part of this condition (line 2596, presumably the
// null-RHS/signedness test) is elided in this view.
2595 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2597 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2598 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2599 LHS);
2600
2601 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2602 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2603 if (isV9)
2604 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2605 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2606 else
2607 // Non-v9 targets don't have xcc.
2608 Opc = SPISD::BRICC;
2609 } else {
2610 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2611 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2612 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2613 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2614 } else {
2615 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2616 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2617 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2618 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2619 }
2620 }
2621 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2622 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2623}
2624
2626 const SparcTargetLowering &TLI, bool hasHardQuad,
2627 bool isV9, bool is64Bit) {
// Lower SELECT_CC to a Sparc compare + conditional-move pair, using the
// register-based SELECT_REG form where V9/64-bit allows it; f128 compares
// without hard-quad go through the soft-fp helpers.
2628 SDValue LHS = Op.getOperand(0);
2629 SDValue RHS = Op.getOperand(1);
2630 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2631 SDValue TrueVal = Op.getOperand(2);
2632 SDValue FalseVal = Op.getOperand(3);
2633 SDLoc dl(Op);
2634 unsigned Opc, SPCC = ~0U;
2635
2636 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2637 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2638 LookThroughSetCC(LHS, RHS, CC, SPCC);
2639 assert(LHS.getValueType() == RHS.getValueType());
2640
2641 SDValue CompareFlag;
2642 if (LHS.getValueType().isInteger()) {
2643 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2644 // and the RHS is zero we might be able to use a specialized select.
2645 // All SELECT_CC between any two scalar integer types are eligible for
2646 // lowering to specialized instructions. Additionally, f32 and f64 types
2647 // are also eligible, but for f128 we can only use the specialized
2648 // instruction when we have hardquad.
2649 EVT ValType = TrueVal.getValueType();
2650 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2651 ValType == MVT::f64 ||
2652 (ValType == MVT::f128 && hasHardQuad);
2653 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2654 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2655 return DAG.getNode(
2656 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2657 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2658
2659 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2660 Opc = LHS.getValueType() == MVT::i32 ?
2661 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2662 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2663 } else {
2664 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2665 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2666 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2667 Opc = SPISD::SELECT_ICC;
2668 } else {
2669 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2670 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2671 Opc = SPISD::SELECT_FCC;
2672 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2673 }
2674 }
2675 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2676 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2677}
2678
2680 const SparcTargetLowering &TLI) {
// VASTART: compute the varargs area address (%i6 + the function's varargs
// frame offset) and store it into the va_list memory operand.
// NOTE(review): the MachineFunction/FuncInfo declarations (lines 2681-2682)
// and the setFrameAddressIsTaken call (line 2686) are elided in this view.
2683 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2684
2685 // Need frame address to find the address of VarArgsFrameIndex.
2687
2688 // vastart just stores the address of the VarArgsFrameIndex slot into the
2689 // memory location argument.
2690 SDLoc DL(Op);
2691 SDValue Offset =
2692 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2693 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2694 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2695 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2696 MachinePointerInfo(SV));
2697}
2698
2700 SDNode *Node = Op.getNode();
// VAARG: load the current va_list pointer, bump it past one argument,
// store the updated pointer back, then load the argument itself.
2701 EVT VT = Node->getValueType(0);
2702 SDValue InChain = Node->getOperand(0);
2703 SDValue VAListPtr = Node->getOperand(1);
2704 EVT PtrVT = VAListPtr.getValueType();
2705 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2706 SDLoc DL(Node);
2707 SDValue VAList =
2708 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2709 // Increment the pointer, VAList, to the next vaarg.
// NOTE(review): the increment amount (line 2711) is elided in this view.
2710 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2712 DL));
2713 // Store the incremented VAList to the legalized pointer.
2714 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2715 MachinePointerInfo(SV));
2716 // Load the actual argument out of the pointer VAList.
2717 // We can't count on greater alignment than the word size.
2718 return DAG.getLoad(
2719 VT, DL, InChain, VAList, MachinePointerInfo(),
2720 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2721}
2722
2724 const SparcSubtarget *Subtarget) {
// Dynamic alloca: grow the stack downward past the register-window spill
// area, honor any over-alignment request, and return both the aligned
// pointer and the updated chain.
2725 SDValue Chain = Op.getOperand(0);
2726 SDValue Size = Op.getOperand(1);
2727 SDValue Alignment = Op.getOperand(2);
2728 MaybeAlign MaybeAlignment =
2729 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2730 EVT VT = Size->getValueType(0);
2731 SDLoc dl(Op);
2732
2733 unsigned SPReg = SP::O6;
2734 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2735
2736 // The resultant pointer needs to be above the register spill area
2737 // at the bottom of the stack.
2738 unsigned regSpillArea;
2739 if (Subtarget->is64Bit()) {
2740 regSpillArea = 128;
2741 } else {
2742 // On Sparc32, the size of the spill area is 92. Unfortunately,
2743 // that's only 4-byte aligned, not 8-byte aligned (the stack
2744 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2745 // aligned dynamic allocation, we actually need to add 96 to the
2746 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2747
2748 // That also means adding 4 to the size of the allocation --
2749 // before applying the 8-byte rounding. Unfortunately, we the
2750 // value we get here has already had rounding applied. So, we need
2751 // to add 8, instead, wasting a bit more memory.
2752
2753 // Further, this only actually needs to be done if the required
2754 // alignment is > 4, but, we've lost that info by this point, too,
2755 // so we always apply it.
2756
2757 // (An alternative approach would be to always reserve 96 bytes
2758 // instead of the required 92, but then we'd waste 4 extra bytes
2759 // in every frame, not just those with dynamic stack allocations)
2760
2761 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2762
2763 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2764 DAG.getConstant(8, dl, VT));
2765 regSpillArea = 96;
2766 }
2767
2768 int64_t Bias = Subtarget->getStackPointerBias();
2769
2770 // Debias and increment SP past the reserved spill area.
2771 // We need the SP to point to the first usable region before calculating
2772 // anything to prevent any of the pointers from becoming out of alignment when
2773 // we rebias the SP later on.
2774 SDValue StartOfUsableStack = DAG.getNode(
2775 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2776 SDValue AllocatedPtr =
2777 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2778
// Over-aligned requests round the allocation down to the requested
// power-of-two boundary (mask with -alignment).
2779 bool IsOveraligned = MaybeAlignment.has_value();
2780 SDValue AlignedPtr =
2781 IsOveraligned
2782 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2783 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2784 : AllocatedPtr;
2785
2786 // Now that we are done, restore the bias and reserved spill area.
2787 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2788 DAG.getConstant(regSpillArea + Bias, dl, VT));
2789 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2790 SDValue Ops[2] = {AlignedPtr, Chain};
2791 return DAG.getMergeValues(Ops, dl);
2792}
2793
2794
// Emit a FLUSHW node to force all register windows out to their stack
// save areas before the frame is walked.
2796 SDLoc dl(Op);
2797 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2798 dl, MVT::Other, DAG.getEntryNode());
2799 return Chain;
2800}
2801
2803 const SparcSubtarget *Subtarget,
2804 bool AlwaysFlush = false) {
// Compute the frame address `depth` frames up: flush the register windows
// if any walking is needed, then repeatedly load the saved frame pointer
// from each frame's save area. The 64-bit ABI adds the stack bias at the end.
// NOTE(review): the MFI declaration (line 2805) is elided in this view.
2806 MFI.setFrameAddressIsTaken(true);
2807
2808 EVT VT = Op.getValueType();
2809 SDLoc dl(Op);
2810 unsigned FrameReg = SP::I6;
2811 unsigned stackBias = Subtarget->getStackPointerBias();
2812
2813 SDValue FrameAddr;
2814 SDValue Chain;
2815
2816 // flush first to make sure the windowed registers' values are in stack
2817 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2818
2819 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2820
2821 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2822
2823 while (depth--) {
2824 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2825 DAG.getIntPtrConstant(Offset, dl));
2826 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2827 }
2828 if (Subtarget->is64Bit())
2829 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2830 DAG.getIntPtrConstant(stackBias, dl));
2831 return FrameAddr;
2832}
2833
2834
2836 const SparcSubtarget *Subtarget) {
// FRAMEADDR: operand 0 is the constant frame depth; delegate to the
// shared getFRAMEADDR() walker.
2837
2838 uint64_t depth = Op.getConstantOperandVal(0);
2839
2840 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2841
2842}
2843
2845 const SparcTargetLowering &TLI,
2846 const SparcSubtarget *Subtarget) {
// RETURNADDR: depth 0 reads the live-in return-address register (%i7);
// deeper requests walk the frame chain and load the saved return address
// from the caller's register save area.
2848 MachineFrameInfo &MFI = MF.getFrameInfo();
2849 MFI.setReturnAddressIsTaken(true);
2850
2851 EVT VT = Op.getValueType();
2852 SDLoc dl(Op);
2853 uint64_t depth = Op.getConstantOperandVal(0);
2854
2855 SDValue RetAddr;
2856 if (depth == 0) {
2857 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2858 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2859 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2860 return RetAddr;
2861 }
2862
2863 // Need frame address to find return address of the caller.
2864 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2865
// NOTE(review): the start of this address computation (line 2867) is
// elided in this view.
2866 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2868 dl, VT,
2869 FrameAddr,
2870 DAG.getIntPtrConstant(Offset, dl));
2871 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2872
2873 return RetAddr;
2874}
2875
2876static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2877 unsigned opcode) {
2878 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2879 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2880
2881 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2882 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2883 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2884
2885 // Note: in little-endian, the floating-point value is stored in the
2886 // registers are in the opposite order, so the subreg with the sign
2887 // bit is the highest-numbered (odd), rather than the
2888 // lowest-numbered (even).
2889
2890 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2891 SrcReg64);
2892 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2893 SrcReg64);
2894
2895 if (DAG.getDataLayout().isLittleEndian())
2896 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2897 else
2898 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2899
2900 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2901 dl, MVT::f64), 0);
2902 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2903 DstReg64, Hi32);
2904 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2905 DstReg64, Lo32);
2906 return DstReg64;
2907}
2908
2909// Lower a f128 load into two f64 loads.
2911{
2912 SDLoc dl(Op);
2913 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2914 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2915
2916 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2917
2918 SDValue Hi64 =
2919 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2920 LdNode->getPointerInfo(), Alignment);
2921 EVT addrVT = LdNode->getBasePtr().getValueType();
2922 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2923 LdNode->getBasePtr(),
2924 DAG.getConstant(8, dl, addrVT));
2925 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2926 LdNode->getPointerInfo().getWithOffset(8),
2927 Alignment);
2928
2929 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2930 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2931
2932 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2933 dl, MVT::f128);
2934 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2935 MVT::f128,
2936 SDValue(InFP128, 0),
2937 Hi64,
2938 SubRegEven);
2939 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2940 MVT::f128,
2941 SDValue(InFP128, 0),
2942 Lo64,
2943 SubRegOdd);
2944 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2945 SDValue(Lo64.getNode(), 1) };
2946 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2947 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2948 return DAG.getMergeValues(Ops, dl);
2949}
2950
2952{
2953 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2954
2955 EVT MemVT = LdNode->getMemoryVT();
2956 if (MemVT == MVT::f128)
2957 return LowerF128Load(Op, DAG);
2958
2959 return Op;
2960}
2961
2962// Lower a f128 store into two f64 stores.
2964 SDLoc dl(Op);
2965 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2966 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2967
2968 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2969 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2970
2971 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2972 dl,
2973 MVT::f64,
2974 StNode->getValue(),
2975 SubRegEven);
2976 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2977 dl,
2978 MVT::f64,
2979 StNode->getValue(),
2980 SubRegOdd);
2981
2982 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
2983
2984 SDValue OutChains[2];
2985 OutChains[0] =
2986 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2987 StNode->getBasePtr(), StNode->getPointerInfo(),
2988 Alignment);
2989 EVT addrVT = StNode->getBasePtr().getValueType();
2990 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2991 StNode->getBasePtr(),
2992 DAG.getConstant(8, dl, addrVT));
2993 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2994 StNode->getPointerInfo().getWithOffset(8),
2995 Alignment);
2996 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2997}
2998
3000{
3001 SDLoc dl(Op);
3002 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3003
3004 EVT MemVT = St->getMemoryVT();
3005 if (MemVT == MVT::f128)
3006 return LowerF128Store(Op, DAG);
3007
3008 if (MemVT == MVT::i64) {
3009 // Custom handling for i64 stores: turn it into a bitcast and a
3010 // v2i32 store.
3011 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3012 SDValue Chain = DAG.getStore(
3013 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3014 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3015 return Chain;
3016 }
3017
3018 return SDValue();
3019}
3020
3022 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3023 && "invalid opcode");
3024
3025 SDLoc dl(Op);
3026
3027 if (Op.getValueType() == MVT::f64)
3028 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3029 if (Op.getValueType() != MVT::f128)
3030 return Op;
3031
3032 // Lower fabs/fneg on f128 to fabs/fneg on f64
3033 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3034 // (As with LowerF64Op, on little-endian, we need to negate the odd
3035 // subreg)
3036
3037 SDValue SrcReg128 = Op.getOperand(0);
3038 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3039 SrcReg128);
3040 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3041 SrcReg128);
3042
3043 if (DAG.getDataLayout().isLittleEndian()) {
3044 if (isV9)
3045 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3046 else
3047 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3048 } else {
3049 if (isV9)
3050 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3051 else
3052 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3053 }
3054
3055 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3056 dl, MVT::f128), 0);
3057 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3058 DstReg128, Hi64);
3059 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3060 DstReg128, Lo64);
3061 return DstReg128;
3062}
3063
3065 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3066 // Expand with a fence.
3067 return SDValue();
3068 }
3069
3070 // Monotonic load/stores are legal.
3071 return Op;
3072}
3073
3075 SelectionDAG &DAG) const {
3076 unsigned IntNo = Op.getConstantOperandVal(0);
3077 switch (IntNo) {
3078 default: return SDValue(); // Don't custom lower most intrinsics.
3079 case Intrinsic::thread_pointer: {
3080 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3081 return DAG.getRegister(SP::G7, PtrVT);
3082 }
3083 }
3084}
3085
3088
3089 bool hasHardQuad = Subtarget->hasHardQuad();
3090 bool isV9 = Subtarget->isV9();
3091 bool is64Bit = Subtarget->is64Bit();
3092
3093 switch (Op.getOpcode()) {
3094 default: llvm_unreachable("Should not custom lower this!");
3095
3096 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3097 Subtarget);
3098 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3099 Subtarget);
3101 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3102 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3103 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3104 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3105 hasHardQuad);
3106 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3107 hasHardQuad);
3108 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3109 hasHardQuad);
3110 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3111 hasHardQuad);
3112 case ISD::BR_CC:
3113 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3114 case ISD::SELECT_CC:
3115 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3116 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3117 case ISD::VAARG: return LowerVAARG(Op, DAG);
3119 Subtarget);
3120
3121 case ISD::LOAD: return LowerLOAD(Op, DAG);
3122 case ISD::STORE: return LowerSTORE(Op, DAG);
3123 case ISD::FADD: return LowerF128Op(Op, DAG,
3124 getLibcallName(RTLIB::ADD_F128), 2);
3125 case ISD::FSUB: return LowerF128Op(Op, DAG,
3126 getLibcallName(RTLIB::SUB_F128), 2);
3127 case ISD::FMUL: return LowerF128Op(Op, DAG,
3128 getLibcallName(RTLIB::MUL_F128), 2);
3129 case ISD::FDIV: return LowerF128Op(Op, DAG,
3130 getLibcallName(RTLIB::DIV_F128), 2);
3131 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3132 getLibcallName(RTLIB::SQRT_F128),1);
3133 case ISD::FABS:
3134 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3135 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3136 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3137 case ISD::ATOMIC_LOAD:
3138 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3140 }
3141}
3142
3144 const SDLoc &DL,
3145 SelectionDAG &DAG) const {
3146 APInt V = C->getValueAPF().bitcastToAPInt();
3147 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3148 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3149 if (DAG.getDataLayout().isLittleEndian())
3150 std::swap(Lo, Hi);
3151 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3152}
3153
3155 DAGCombinerInfo &DCI) const {
3156 SDLoc dl(N);
3157 SDValue Src = N->getOperand(0);
3158
3159 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3160 Src.getSimpleValueType() == MVT::f64)
3161 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3162
3163 return SDValue();
3164}
3165
3167 DAGCombinerInfo &DCI) const {
3168 switch (N->getOpcode()) {
3169 default:
3170 break;
3171 case ISD::BITCAST:
3172 return PerformBITCASTCombine(N, DCI);
3173 }
3174 return SDValue();
3175}
3176
3179 MachineBasicBlock *BB) const {
3180 switch (MI.getOpcode()) {
3181 default: llvm_unreachable("Unknown SELECT_CC!");
3182 case SP::SELECT_CC_Int_ICC:
3183 case SP::SELECT_CC_FP_ICC:
3184 case SP::SELECT_CC_DFP_ICC:
3185 case SP::SELECT_CC_QFP_ICC:
3186 if (Subtarget->isV9())
3187 return expandSelectCC(MI, BB, SP::BPICC);
3188 return expandSelectCC(MI, BB, SP::BCOND);
3189 case SP::SELECT_CC_Int_XCC:
3190 case SP::SELECT_CC_FP_XCC:
3191 case SP::SELECT_CC_DFP_XCC:
3192 case SP::SELECT_CC_QFP_XCC:
3193 return expandSelectCC(MI, BB, SP::BPXCC);
3194 case SP::SELECT_CC_Int_FCC:
3195 case SP::SELECT_CC_FP_FCC:
3196 case SP::SELECT_CC_DFP_FCC:
3197 case SP::SELECT_CC_QFP_FCC:
3198 if (Subtarget->isV9())
3199 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3200 return expandSelectCC(MI, BB, SP::FBCOND);
3201 }
3202}
3203
3206 unsigned BROpcode) const {
3207 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3208 DebugLoc dl = MI.getDebugLoc();
3209 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3210
3211 // To "insert" a SELECT_CC instruction, we actually have to insert the
3212 // triangle control-flow pattern. The incoming instruction knows the
3213 // destination vreg to set, the condition code register to branch on, the
3214 // true/false values to select between, and the condition code for the branch.
3215 //
3216 // We produce the following control flow:
3217 // ThisMBB
3218 // | \
3219 // | IfFalseMBB
3220 // | /
3221 // SinkMBB
3222 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3224
3225 MachineBasicBlock *ThisMBB = BB;
3226 MachineFunction *F = BB->getParent();
3227 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3228 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3229 F->insert(It, IfFalseMBB);
3230 F->insert(It, SinkMBB);
3231
3232 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3233 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3234 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3235 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3236
3237 // Set the new successors for ThisMBB.
3238 ThisMBB->addSuccessor(IfFalseMBB);
3239 ThisMBB->addSuccessor(SinkMBB);
3240
3241 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3242 .addMBB(SinkMBB)
3243 .addImm(CC);
3244
3245 // IfFalseMBB just falls through to SinkMBB.
3246 IfFalseMBB->addSuccessor(SinkMBB);
3247
3248 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3249 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3250 MI.getOperand(0).getReg())
3251 .addReg(MI.getOperand(1).getReg())
3252 .addMBB(ThisMBB)
3253 .addReg(MI.getOperand(2).getReg())
3254 .addMBB(IfFalseMBB);
3255
3256 MI.eraseFromParent(); // The pseudo instruction is gone now.
3257 return SinkMBB;
3258}
3259
3260//===----------------------------------------------------------------------===//
3261// Sparc Inline Assembly Support
3262//===----------------------------------------------------------------------===//
3263
3264/// getConstraintType - Given a constraint letter, return the type of
3265/// constraint it is for this target.
3268 if (Constraint.size() == 1) {
3269 switch (Constraint[0]) {
3270 default: break;
3271 case 'r':
3272 case 'f':
3273 case 'e':
3274 return C_RegisterClass;
3275 case 'I': // SIMM13
3276 return C_Immediate;
3277 }
3278 }
3279
3280 return TargetLowering::getConstraintType(Constraint);
3281}
3282
3285 const char *constraint) const {
3287 Value *CallOperandVal = info.CallOperandVal;
3288 // If we don't have a value, we can't do a match,
3289 // but allow it at the lowest weight.
3290 if (!CallOperandVal)
3291 return CW_Default;
3292
3293 // Look at the constraint type.
3294 switch (*constraint) {
3295 default:
3297 break;
3298 case 'I': // SIMM13
3299 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3300 if (isInt<13>(C->getSExtValue()))
3301 weight = CW_Constant;
3302 }
3303 break;
3304 }
3305 return weight;
3306}
3307
3308/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3309/// vector. If it is invalid, don't add anything to Ops.
3311 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3312 SelectionDAG &DAG) const {
3313 SDValue Result;
3314
3315 // Only support length 1 constraints for now.
3316 if (Constraint.size() > 1)
3317 return;
3318
3319 char ConstraintLetter = Constraint[0];
3320 switch (ConstraintLetter) {
3321 default: break;
3322 case 'I':
3323 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3324 if (isInt<13>(C->getSExtValue())) {
3325 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3326 Op.getValueType());
3327 break;
3328 }
3329 return;
3330 }
3331 }
3332
3333 if (Result.getNode()) {
3334 Ops.push_back(Result);
3335 return;
3336 }
3337 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3338}
3339
3340std::pair<unsigned, const TargetRegisterClass *>
3342 StringRef Constraint,
3343 MVT VT) const {
3344 if (Constraint.empty())
3345 return std::make_pair(0U, nullptr);
3346
3347 if (Constraint.size() == 1) {
3348 switch (Constraint[0]) {
3349 case 'r':
3350 if (VT == MVT::v2i32)
3351 return std::make_pair(0U, &SP::IntPairRegClass);
3352 else if (Subtarget->is64Bit())
3353 return std::make_pair(0U, &SP::I64RegsRegClass);
3354 else
3355 return std::make_pair(0U, &SP::IntRegsRegClass);
3356 case 'f':
3357 if (VT == MVT::f32 || VT == MVT::i32)
3358 return std::make_pair(0U, &SP::FPRegsRegClass);
3359 else if (VT == MVT::f64 || VT == MVT::i64)
3360 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3361 else if (VT == MVT::f128)
3362 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3363 // This will generate an error message
3364 return std::make_pair(0U, nullptr);
3365 case 'e':
3366 if (VT == MVT::f32 || VT == MVT::i32)
3367 return std::make_pair(0U, &SP::FPRegsRegClass);
3368 else if (VT == MVT::f64 || VT == MVT::i64 )
3369 return std::make_pair(0U, &SP::DFPRegsRegClass);
3370 else if (VT == MVT::f128)
3371 return std::make_pair(0U, &SP::QFPRegsRegClass);
3372 // This will generate an error message
3373 return std::make_pair(0U, nullptr);
3374 }
3375 }
3376
3377 if (Constraint.front() != '{')
3378 return std::make_pair(0U, nullptr);
3379
3380 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3381 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3382 if (RegName.empty())
3383 return std::make_pair(0U, nullptr);
3384
3385 unsigned long long RegNo;
3386 // Handle numbered register aliases.
3387 if (RegName[0] == 'r' &&
3388 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3389 // r0-r7 -> g0-g7
3390 // r8-r15 -> o0-o7
3391 // r16-r23 -> l0-l7
3392 // r24-r31 -> i0-i7
3393 if (RegNo > 31)
3394 return std::make_pair(0U, nullptr);
3395 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3396 char RegType = RegTypes[RegNo / 8];
3397 char RegIndex = '0' + (RegNo % 8);
3398 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3399 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3400 }
3401
3402 // Rewrite the fN constraint according to the value type if needed.
3403 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3404 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3405 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3407 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3408 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3410 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3411 } else {
3412 return std::make_pair(0U, nullptr);
3413 }
3414 }
3415
3416 auto ResultPair =
3418 if (!ResultPair.second)
3419 return std::make_pair(0U, nullptr);
3420
3421 // Force the use of I64Regs over IntRegs for 64-bit values.
3422 if (Subtarget->is64Bit() && VT == MVT::i64) {
3423 assert(ResultPair.second == &SP::IntRegsRegClass &&
3424 "Unexpected register class");
3425 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3426 }
3427
3428 return ResultPair;
3429}
3430
3431bool
3433 // The Sparc target isn't yet aware of offsets.
3434 return false;
3435}
3436
3439 SelectionDAG &DAG) const {
3440
3441 SDLoc dl(N);
3442
3443 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3444
3445 switch (N->getOpcode()) {
3446 default:
3447 llvm_unreachable("Do not know how to custom type legalize this operation!");
3448
3449 case ISD::FP_TO_SINT:
3450 case ISD::FP_TO_UINT:
3451 // Custom lower only if it involves f128 or i64.
3452 if (N->getOperand(0).getValueType() != MVT::f128
3453 || N->getValueType(0) != MVT::i64)
3454 return;
3455 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3456 ? RTLIB::FPTOSINT_F128_I64
3457 : RTLIB::FPTOUINT_F128_I64);
3458
3459 Results.push_back(LowerF128Op(SDValue(N, 0),
3460 DAG,
3461 getLibcallName(libCall),
3462 1));
3463 return;
3464 case ISD::READCYCLECOUNTER: {
3465 assert(Subtarget->hasLeonCycleCounter());
3466 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3467 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3468 SDValue Ops[] = { Lo, Hi };
3469 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3470 Results.push_back(Pair);
3471 Results.push_back(N->getOperand(0));
3472 return;
3473 }
3474 case ISD::SINT_TO_FP:
3475 case ISD::UINT_TO_FP:
3476 // Custom lower only if it involves f128 or i64.
3477 if (N->getValueType(0) != MVT::f128
3478 || N->getOperand(0).getValueType() != MVT::i64)
3479 return;
3480
3481 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3482 ? RTLIB::SINTTOFP_I64_F128
3483 : RTLIB::UINTTOFP_I64_F128);
3484
3485 Results.push_back(LowerF128Op(SDValue(N, 0),
3486 DAG,
3487 getLibcallName(libCall),
3488 1));
3489 return;
3490 case ISD::LOAD: {
3491 LoadSDNode *Ld = cast<LoadSDNode>(N);
3492 // Custom handling only for i64: turn i64 load into a v2i32 load,
3493 // and a bitcast.
3494 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3495 return;
3496
3497 SDLoc dl(N);
3498 SDValue LoadRes = DAG.getExtLoad(
3499 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3500 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3501 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3502
3503 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3504 Results.push_back(Res);
3505 Results.push_back(LoadRes.getValue(1));
3506 return;
3507 }
3508 }
3509}
3510
3511// Override to enable LOAD_STACK_GUARD lowering on Linux.
3513 if (!Subtarget->isTargetLinux())
3515 return true;
3516}
3517
  // NOTE(review): the declaring line of this member function (original line
  // 3518) was lost in extraction.  The body reports an f32/f64 operation as
  // free/cheap only when the VIS3 extension is present — the signature is
  // presumably a TargetLowering hook of the form
  // `bool SparcTargetLowering::<name>(EVT VT) const` (e.g. isFNegFree or
  // isFAbsFree).  TODO: confirm the hook name against SparcISelLowering.h.
  if (Subtarget->isVIS3())
    return VT == MVT::f32 || VT == MVT::f64;
  return false;
}
3523
3525 bool ForCodeSize) const {
3526 if (VT != MVT::f32 && VT != MVT::f64)
3527 return false;
3528 if (Subtarget->isVIS() && Imm.isZero())
3529 return true;
3530 if (Subtarget->isVIS3())
3531 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3532 Imm.getExactLog2Abs() == -1;
3533 return false;
3534}
3535
3536bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3537
3539 // We lack native cttz, however,
3540 // On 64-bit targets it is cheap to implement it in terms of popc.
3541 if (Subtarget->is64Bit() && Subtarget->usePopc())
3542 return true;
3543 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3544 return isCheapToSpeculateCtlz(Ty);
3545}
3546
3548 EVT VT) const {
3549 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3550}
3551
3553 SDNode *Node) const {
3554 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3555 // If the result is dead, replace it with %g0.
3556 if (!Node->hasAnyUseOfValue(0))
3557 MI.getOperand(0).setReg(SP::G0);
3558}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define RegName(no)
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL, SDValue &RHS, SelectionDAG &DAG)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
Register const TargetRegisterInfo * TRI
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
static constexpr MCPhysReg SPReg
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
static unsigned toCallerWindow(unsigned Reg)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC)
intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC rcond condition.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC)
FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC FCC condition.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI, const MachineFunction &MF)
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG)
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, const CallBase *Call)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, unsigned opcode)
static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static void emitReservedArgRegCallError(const MachineFunction &MF)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
This file describes how to lower LLVM code to machine code.
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
BinOp getOperation() const
Definition: Instructions.h:819
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:198
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
A debug info location.
Definition: DebugLoc.h:124
Diagnostic information for unsupported feature in backend.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:687
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:727
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:72
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:720
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:229
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:578
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:813
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:868
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:839
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:498
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:719
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:499
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:707
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:808
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:493
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:511
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:777
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:581
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
const SparcRegisterInfo * getRegisterInfo() const override
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
bool isTargetLinux() const
bool is64Bit() const
const SparcInstrInfo * getInstrInfo() const override
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool useSoftFloat() const override
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool IsEligibleForTailCallOptimization(CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
computeKnownBitsForTargetNode - Determine which of the bits specified in Mask are known to be either ...
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue PerformBITCASTCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC, const SDLoc &DL, SelectionDAG &DAG) const
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:34
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:151
char back() const
back - Get the last character in the string.
Definition: StringRef.h:163
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:154
char front() const
front - Get the first character in the string.
Definition: StringRef.h:157
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:148
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:68
R Default(T Value)
Definition: StringSwitch.h:177
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
void setLibcallImpl(RTLIB::Libcall Call, RTLIB::LibcallImpl Impl)
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:83
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isFP128Ty() const
Return true if this is 'fp128'.
Definition: Type.h:162
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
self_iterator getIterator()
Definition: ilist_node.h:134
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:801
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1236
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1232
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:774
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:270
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:765
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1265
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1351
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:289
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:259
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1141
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:835
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:511
@ GlobalAddress
Definition: ISDOpcodes.h:88
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:862
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:410
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1343
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:275
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:985
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:975
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:249
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1568
@ GlobalTLSAddress
Definition: ISDOpcodes.h:89
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:826
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:773
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1090
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:1002
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1187
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1162
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1166
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:778
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1347
@ UNDEF
UNDEF - An undefined node.
Definition: ISDOpcodes.h:228
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1261
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:225
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:695
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:756
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:563
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:832
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1321
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:793
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1358
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1151
@ ConstantPool
Definition: ISDOpcodes.h:92
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:870
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:960
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:110
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:908
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1292
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:730
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1318
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:200
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:299
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:552
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1372
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:941
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:838
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1256
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1180
@ BlockAddress
Definition: ISDOpcodes.h:94
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:815
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:62
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:521
@ AssertZext
Definition: ISDOpcodes.h:63
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:543
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1685
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
Definition: ISDOpcodes.h:1724
CondCodes
Definition: Sparc.h:41
@ FCC_ULE
Definition: Sparc.h:74
@ FCC_UG
Definition: Sparc.h:64
@ ICC_G
Definition: Sparc.h:46
@ REG_LEZ
Definition: Sparc.h:97
@ REG_GZ
Definition: Sparc.h:100
@ ICC_L
Definition: Sparc.h:49
@ FCC_NE
Definition: Sparc.h:68
@ ICC_CS
Definition: Sparc.h:53
@ FCC_LG
Definition: Sparc.h:67
@ ICC_LEU
Definition: Sparc.h:51
@ FCC_LE
Definition: Sparc.h:73
@ ICC_LE
Definition: Sparc.h:47
@ FCC_U
Definition: Sparc.h:62
@ ICC_GE
Definition: Sparc.h:48
@ FCC_E
Definition: Sparc.h:69
@ REG_LZ
Definition: Sparc.h:98
@ FCC_L
Definition: Sparc.h:65
@ ICC_GU
Definition: Sparc.h:50
@ FCC_O
Definition: Sparc.h:75
@ ICC_NE
Definition: Sparc.h:44
@ FCC_UE
Definition: Sparc.h:70
@ REG_NZ
Definition: Sparc.h:99
@ ICC_E
Definition: Sparc.h:45
@ FCC_GE
Definition: Sparc.h:71
@ FCC_UGE
Definition: Sparc.h:72
@ REG_Z
Definition: Sparc.h:96
@ ICC_CC
Definition: Sparc.h:52
@ REG_GEZ
Definition: Sparc.h:101
@ FCC_G
Definition: Sparc.h:63
@ FCC_UL
Definition: Sparc.h:66
@ GeneralDynamic
Definition: CodeGen.h:46
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1702
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isStrongerThanMonotonic(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
LLVM_ABI bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
Definition: StringRef.cpp:487
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:94
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:376
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:216
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:74
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition: KnownBits.h:304
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})