//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

extern cl::opt<bool> EmitJalrReloc;

static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used depends on the ABI in use.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
    return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                             : MVT::i64;
  return getRegisterType(Context, VT.getVectorElementType());
}
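
// Illustrative example (a sketch, not part of the original source): a v4i32
// MSA argument occupies 128 bits, so under O32 (32-bit GPRs) it is passed in
// four i32 registers, while under N64 (64-bit GPRs) it needs only two i64
// registers:
//
//   getRegisterTypeForCallingConv(Ctx, CC, v4i32)  -> i32 (O32) / i64 (N64)
//   getNumRegistersForCallingConv(Ctx, CC, v4i32)  -> 4   (O32) / 2   (N64)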

unsigned MipsTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  if (VT.isVector()) {
    if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
      return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
    return VT.getVectorNumElements() *
           getNumRegisters(Context, VT.getVectorElementType());
  }
  return MipsTargetLowering::getNumRegisters(Context, VT);
}

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (VT.isPow2VectorType() && VT.getVectorElementType().isRound()) {
    IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
    RegisterVT = IntermediateVT.getSimpleVT();
    NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
    return NumIntermediates;
  }
  IntermediateVT = VT.getVectorElementType();
  NumIntermediates = VT.getVectorNumElements();
  RegisterVT = getRegisterType(Context, IntermediateVT);
  return NumIntermediates * getNumRegisters(Context, IntermediateVT);
}
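
// Fallthrough sketch (illustrative): a non-power-of-two vector such as v3i32
// is not covered by the MSA rule above, so it is broken into
// NumIntermediates = 3 scalar i32 parts, each mapped to a single i32
// register.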

SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DOUBLE_SELECT_I: return "MipsISD::DOUBLE_SELECT_I";
  case MipsISD::DOUBLE_SELECT_I64: return "MipsISD::DOUBLE_SELECT_I64";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}

MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // MIPS does not have an i1 type, so use i32 for
  // setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set LoadExtAction
  // for f32 and f16.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand.
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with an AND/OR with the result;
  // we don't want this, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
  // Mips Custom Operations
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::FABS, MVT::f32, Custom);
  setOperationAction(ISD::FABS, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // Lower fmin/fmax/fclass operations for MIPS R6.
  if (Subtarget.hasMips32r6()) {
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::IS_FPCLASS, MVT::f32, Legal);
    setOperationAction(ISD::IS_FPCLASS, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCANONICALIZE, MVT::f32, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::f64, Custom);
  }

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    if (Subtarget.hasMips64r6()) {
      setOperationAction(ISD::LOAD, MVT::i64, Legal);
      setOperationAction(ISD::STORE, MVT::i64, Legal);
    } else {
      setOperationAction(ISD::LOAD, MVT::i64, Custom);
      setOperationAction(ISD::STORE, MVT::i64, Custom);
    }
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  if (Subtarget.isGP64bit())
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Operations not directly supported by Mips.
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  if (Subtarget.hasCnMips()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // Lower f16 conversion operations into library calls.
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Use the default for now.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (!Subtarget.hasMips64())
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.hasMips64r6()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Legal);
    setTruncStoreAction(MVT::i64, MVT::i32, Legal);
  } else if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::SHL});

  if (Subtarget.isGP64bit())
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}

const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if either of the following is true:
  // We do not generate PIC, the ABI is not O32, or XGOT is being used.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16
                                                : MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // Insert MFLO.
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // Insert MFHI.
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL, HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}

static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO:  return Mips::FCOND_UN;
  case ISD::SETO:   return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}
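
// Illustrative sketch (assuming standard c.cond.fmt usage): the FPU only
// implements the first sixteen conditions (FCOND_F..FCOND_NGT). A condition
// from the second half, e.g. FCOND_ONE, is encoded as its complement
// FCOND_UEQ, and the consumer then tests "condition false" instead:
//
//   setone a, b  ==>  c.ueq.s $f_a, $f_b ; bc1f ...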

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // Must be a SETCC node.
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}

// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}
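
// Illustrative example (register names are hypothetical): lowering
// "select (setcc f32 a, b, setolt), x, y" yields an FPCmp glued to a
// CMovFP_T on FCC0, which emits roughly:
//
//   c.olt.s $f12, $f14        # FCC0 = (a < b), ordered
//   move    $v0, $t1          # start with the false value y
//   movt    $v0, $t0, $fcc0   # overwrite with x when FCC0 is set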

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions, since the
  // result of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

  // 1) (a < x) ? y : y-1
  //      slti $reg1, a, x
  //      addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2) (a < x) ? y-1 : y
  //      slti $reg1, a, x
  //      xor $reg1, $reg1, 1
  //      addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}
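
// Worked example (illustrative): in "return (a < 7) ? 4 : 3" the constants
// differ by 1, so case 1) applies and the select vanishes:
//
//   slti  $t0, $a0, 7   # $t0 = (a < 7) ? 1 : 0
//   addiu $v0, $t0, 3   # 3 + 1 = 4 when true, 3 + 0 = 3 when false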

static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F
                                                       : MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src, pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its
    // size and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src, pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1), if size > 16
    //   => ext $dst, $src, pos, size, pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
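
// Illustrative example (sketch): extracting a 5-bit field that starts at
// bit 2,
//
//   %r = and (srl $a0, 2), 31
//
// matches the first pattern above (Pos = 2, SMSize = 5) and becomes a single
// "ext $v0, $a0, 2, 5".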

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  if ((FirstOperand.getOpcode() == ISD::AND &&
       SecondOperand.getOpcode() == ISD::SHL) ||
      (FirstOperand.getOpcode() == ISD::SHL &&
       SecondOperand.getOpcode() == ISD::AND)) {
    // Pattern match INS.
    //   $dst = or (and $src1, (2**size0 - 1)), (shl $src2, size0)
    //   ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos;
    // Or:
    //   $dst = or (shl $src2, size0), (and $src1, (2**size0 - 1))
    //   ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos;
    SDValue AndOperand0 = FirstOperand.getOpcode() == ISD::AND
                              ? FirstOperand.getOperand(0)
                              : SecondOperand.getOperand(0);
    SDValue ShlOperand0 = FirstOperand.getOpcode() == ISD::AND
                              ? SecondOperand.getOperand(0)
                              : FirstOperand.getOperand(0);
    SDValue AndMask = FirstOperand.getOpcode() == ISD::AND
                          ? FirstOperand.getOperand(1)
                          : SecondOperand.getOperand(1);
    if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos0, SMSize0))
      return SDValue();

    SDValue ShlShift = FirstOperand.getOpcode() == ISD::AND
                           ? SecondOperand.getOperand(1)
                           : FirstOperand.getOperand(1);
    if (!(CN = dyn_cast<ConstantSDNode>(ShlShift)))
      return SDValue();
    uint64_t ShlShiftValue = CN->getZExtValue();

    if (SMPos0 != 0 || SMSize0 != ShlShiftValue)
      return SDValue();

    SDLoc DL(N);
    EVT ValTy = N->getValueType(0);
    SMPos1 = ShlShiftValue;
    assert(SMPos1 < ValTy.getSizeInBits());
    SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1;
    return DAG.getNode(MipsISD::Ins, DL, ValTy, ShlOperand0,
                       DAG.getConstant(SMPos1, DL, MVT::i32),
                       DAG.getConstant(SMSize1, DL, MVT::i32), AndOperand0);
  }

  // See if Op's first operand matches (and $src1 , mask0).
  if (FirstOperand.getOpcode() != ISD::AND)
    return SDValue();

  // Pattern match INS.
  //   $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (SecondOperand.getOpcode() == ISD::AND &&
      SecondOperand.getOperand(0).getOpcode() == ISD::SHL) {

    if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = SecondOperand.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of the mask are
    // not the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       FirstOperand.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) - 1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check if the AND instruction has a constant as its second argument.
      bool isConstCase = SecondOperand.getOpcode() != ISD::AND;
      if (SecondOperand.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if the constant OR operand doesn't fit into the
      // bits cleared by the constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, SecondOperand->getValueType(0),
                           SecondOperand, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          FirstOperand->getOperand(0));
    }
    return SDValue();
  }
}
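
// Illustrative example (sketch): merging a byte into bits 8..15 of a word,
//
//   %r = or (and $a0, 0xFFFF00FF), (and (shl $a1, 8), 0xFF00)
//
// satisfies the INS pattern (SMPos = 8, SMSize = 8, Shamt = 8) and becomes
// "ins $a0, $a1, 8, 8".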

static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of a
  // subtraction, we can't combine into a MipsISD::MSub node, as the
  // instruction definition of msub(u) places the multiplication on the
  // right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
  // arithmetic. E.g.
  // (add (mul a b) c) =>
  //   let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
  //   MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  // or
  //   MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands actually be 32 bit sign
  // extended operands, not true 64 bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                           ? ROOTNode->getOperand(1)
                           : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) =
      CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
  SDValue ACCIn =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, MVT::Untyped, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}
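
// Illustrative example (sketch): with 32-bit sign-extended inputs,
//
//   %m = mul (sext i32 %a to i64), (sext i32 %b to i64)
//   %r = add i64 %m, %acc
//
// the combine splits %acc with MTLOHI, emits "madd %a, %b" into the HI/LO
// accumulator, and reassembles the i64 result from MFLO/MFHI via BUILD_PAIR.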

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // When loading from a jump table, push the Lo node to the position that
  // allows folding it into a load immediate.
  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  // (add (add abs_lo(tjt), v1), v0) => (add (add v0, v1), abs_lo(tjt))
  SDValue InnerAdd = N->getOperand(1);
  SDValue Index = N->getOperand(0);
  if (InnerAdd.getOpcode() != ISD::ADD)
    std::swap(InnerAdd, Index);
  if (InnerAdd.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = InnerAdd.getOperand(0);
  SDValue Other = InnerAdd.getOperand(1);
  if (Lo.getOpcode() != MipsISD::Lo)
    std::swap(Lo, Other);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, Index, Other);
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src, imm), pos
  //   => cins $dst, $src, pos, size

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}
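
// Illustrative example (sketch, cnMIPS only): shifting a masked field,
//
//   %r = shl (and $a0, 31), 3
//
// yields Pos = 3 and a 5-bit mask, so it becomes "cins $v0, $a0, 3, 4"
// (the last operand encodes the position of the field's top bit, hence
// SMSize - 1).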

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
    const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}
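
// Illustrative sketch (hypothetical registers): testing bit 4 of $a0,
//
//   andi  $t0, $a0, 16   # isolate bit 4 (mask = 1 << 4)
//   sltiu $v0, $t0, 1    # $v0 = 1 iff the bit was clear
//
// Positions above 15 would push the mask outside andi's 16-bit immediate,
// which is why only constants <= 15 are accepted.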

bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::LowerOperation(SDValue Op,
                                           SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BRCOND:           return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool:     return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:     return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:        return lowerJumpTable(Op, DAG);
  case ISD::SELECT:           return lowerSELECT(Op, DAG);
  case ISD::SETCC:            return lowerSETCC(Op, DAG);
  case ISD::VASTART:          return lowerVASTART(Op, DAG);
  case ISD::VAARG:            return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN:        return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS:             return lowerFABS(Op, DAG);
  case ISD::FCANONICALIZE:    return lowerFCANONICALIZE(Op, DAG);
  case ISD::FRAMEADDR:        return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:       return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN:        return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE:     return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS:        return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:        return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:        return lowerShiftRightParts(Op, DAG, false);
  case ISD::LOAD:             return lowerLOAD(Op, DAG);
  case ISD::STORE:            return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA:     return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT:       return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC) {
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert instruction "teq $divisor_reg, $zero, 7".
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}
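
// Illustrative sketch: a checked 32-bit division therefore expands to
//
//   div $a0, $a1        # LO = quotient, HI = remainder
//   teq $a1, $zero, 7   # trap code 7 (divide-by-zero) when $a1 == 0
//
// unless -mno-check-zero-division removes the teq.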

MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
}

// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
MachineBasicBlock *
MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
                                     MachineBasicBlock *BB) const {

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic for replacement!");
  }

  Register OldVal = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();
  Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));

  MachineBasicBlock::iterator II(MI);

  // The scratch registers here with the EarlyClobber | Define | Implicit
  // flags are used to persuade the register allocator and the machine
  // verifier to accept the usage of this register. This has to be a real
  // register which has an UNDEF value but is dead after the instruction and
  // is unique among the registers chosen for the instruction.

  // The EarlyClobber flag has the semantic properties that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to coerce the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  // The implicit flag is here due to the interaction between the other flags
  // and the machine verifier.

  // For correctness, a new pseudo is introduced here. We need this new
  // pseudo so that FastRegisterAllocator does not see an ll/sc sequence
  // that is spread over more than one basic block. A register allocator
  // (or any codegen, in fact) that introduces a store can violate the
  // expectations of the hardware.
  //
  // An atomic read-modify-write sequence starts with a linked load
  // instruction and ends with a store conditional instruction. The atomic
  // read-modify-write sequence fails if any of the following conditions
  // occur between the execution of ll and sc:
  //   * A coherent store is completed by another process or coherent I/O
  //     module into the block of synchronizable physical memory containing
  //     the word. The size and alignment of the block is
  //     implementation-dependent.
  //   * A coherent store is executed between an LL and SC sequence on the
  //     same processor to the block of synchronizable physical memory
  //     containing the word.

  Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
  Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);

  MachineInstrBuilder MIB =
      BuildMI(*BB, II, DL, TII->get(AtomicOp))
          .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
          .addReg(PtrCopy)
          .addReg(IncrCopy)
          .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
                               RegState::Implicit | RegState::Dead);
  if (NeedsAdditionalReg) {
    Register Scratch2 =
        RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
    MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
                             RegState::Implicit | RegState::Dead);
  }

  MI.eraseFromParent();

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
    unsigned SrcReg) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  if (Subtarget.hasMips32r2() && Size == 1) {
    BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
    return BB;
  }

  if (Subtarget.hasMips32r2() && Size == 2) {
    BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  Register ScrReg = RegInfo.createVirtualRegister(RC);

  assert(Size < 32);
  int64_t ShiftImm = 32 - (Size * 8);

  BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
  BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);

  return BB;
}
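
// Illustrative sketch: without SEB/SEH (pre-MIPS32r2), an i8 value is
// sign-extended in-register with a shift pair (ShiftImm = 32 - 8 = 24):
//
//   sll $t0, $src, 24
//   sra $dst, $t0, 24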
1726
1727MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1728 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1729 assert((Size == 1 || Size == 2) &&
1730 "Unsupported size for EmitAtomicBinaryPartial.");
1731
1732 MachineFunction *MF = BB->getParent();
1734 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1735 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1736 const TargetRegisterClass *RCp =
1737 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1739 DebugLoc DL = MI.getDebugLoc();
1740
1741 Register Dest = MI.getOperand(0).getReg();
1742 Register Ptr = MI.getOperand(1).getReg();
1743 Register Incr = MI.getOperand(2).getReg();
1744
1745 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1746 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1747 Register Mask = RegInfo.createVirtualRegister(RC);
1748 Register Mask2 = RegInfo.createVirtualRegister(RC);
1749 Register Incr2 = RegInfo.createVirtualRegister(RC);
1750 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1751 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1752 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1753 Register Scratch = RegInfo.createVirtualRegister(RC);
1754 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1755 Register Scratch3 = RegInfo.createVirtualRegister(RC);
1756
1757 unsigned AtomicOp = 0;
1758 bool NeedsAdditionalReg = false;
1759 switch (MI.getOpcode()) {
1760 case Mips::ATOMIC_LOAD_NAND_I8:
1761 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1762 break;
1763 case Mips::ATOMIC_LOAD_NAND_I16:
1764 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1765 break;
1766 case Mips::ATOMIC_SWAP_I8:
1767 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1768 break;
1769 case Mips::ATOMIC_SWAP_I16:
1770 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1771 break;
1772 case Mips::ATOMIC_LOAD_ADD_I8:
1773 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1774 break;
1775 case Mips::ATOMIC_LOAD_ADD_I16:
1776 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1777 break;
1778 case Mips::ATOMIC_LOAD_SUB_I8:
1779 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1780 break;
1781 case Mips::ATOMIC_LOAD_SUB_I16:
1782 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1783 break;
1784 case Mips::ATOMIC_LOAD_AND_I8:
1785 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1786 break;
1787 case Mips::ATOMIC_LOAD_AND_I16:
1788 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1789 break;
1790 case Mips::ATOMIC_LOAD_OR_I8:
1791 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1792 break;
1793 case Mips::ATOMIC_LOAD_OR_I16:
1794 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1795 break;
1796 case Mips::ATOMIC_LOAD_XOR_I8:
1797 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1798 break;
1799 case Mips::ATOMIC_LOAD_XOR_I16:
1800 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1801 break;
1802 case Mips::ATOMIC_LOAD_MIN_I8:
1803 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1804 NeedsAdditionalReg = true;
1805 break;
1806 case Mips::ATOMIC_LOAD_MIN_I16:
1807 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1808 NeedsAdditionalReg = true;
1809 break;
1810 case Mips::ATOMIC_LOAD_MAX_I8:
1811 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1812 NeedsAdditionalReg = true;
1813 break;
1814 case Mips::ATOMIC_LOAD_MAX_I16:
1815 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1816 NeedsAdditionalReg = true;
1817 break;
1818 case Mips::ATOMIC_LOAD_UMIN_I8:
1819 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1820 NeedsAdditionalReg = true;
1821 break;
1822 case Mips::ATOMIC_LOAD_UMIN_I16:
1823 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1824 NeedsAdditionalReg = true;
1825 break;
1826 case Mips::ATOMIC_LOAD_UMAX_I8:
1827 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1828 NeedsAdditionalReg = true;
1829 break;
1830 case Mips::ATOMIC_LOAD_UMAX_I16:
1831 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1832 NeedsAdditionalReg = true;
1833 break;
1834 default:
1835 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
1836 }
1837
1838 // insert new blocks after the current block
1839 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1840 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1841 MachineFunction::iterator It = ++BB->getIterator();
1842 MF->insert(It, exitMBB);
1843
1844 // Transfer the remainder of BB and its successor edges to exitMBB.
1845 exitMBB->splice(exitMBB->begin(), BB,
1846 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1847 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1848
1849 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1850
1851 // thisMBB:
1852 // addiu masklsb2,$0,-4 # 0xfffffffc
1853 // and alignedaddr,ptr,masklsb2
1854 // andi ptrlsb2,ptr,3
1855 // sll shiftamt,ptrlsb2,3
1856 // ori maskupper,$0,255 # 0xff
1857 // sll mask,maskupper,shiftamt
1858 // nor mask2,$0,mask
1859 // sll incr2,incr,shiftamt
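// Worked example (illustrative only, not from the original source): with
// Size == 2 and ptr == 0x1006 on a little-endian target, the sequence above
// yields alignedaddr = 0x1004, ptrlsb2 = 2, shiftamt = 16, mask = 0xffff0000,
// mask2 = 0x0000ffff, and incr2 = incr shifted into bits 31..16, the lane the
// halfword occupies within its aligned word.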
1860
1861 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1862 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1863 .addReg(ABI.GetNullPtr()).addImm(-4);
1864 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1865 .addReg(Ptr).addReg(MaskLSB2);
1866 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1867 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1868 if (Subtarget.isLittle()) {
1869 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1870 } else {
1871 Register Off = RegInfo.createVirtualRegister(RC);
1872 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1873 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1874 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1875 }
1876 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1877 .addReg(Mips::ZERO).addImm(MaskImm);
1878 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1879 .addReg(MaskUpper).addReg(ShiftAmt);
1880 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1881 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
1882
1883
1884 // The purposes of the flags on the scratch registers are explained in
1885 // emitAtomicBinary. In summary, we need a scratch register which is going to
1886 // be undef, that is unique among registers chosen for the instruction.
1887
1888 MachineInstrBuilder MIB =
1889     BuildMI(BB, DL, TII->get(AtomicOp))
1890         .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1891 .addReg(AlignedAddr)
1892 .addReg(Incr2)
1893 .addReg(Mask)
1894 .addReg(Mask2)
1895 .addReg(ShiftAmt)
1896 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1897          RegState::Dead | RegState::Implicit)
1898 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
1899          RegState::Dead | RegState::Implicit)
1900 .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
1901          RegState::Dead | RegState::Implicit);
1902 if (NeedsAdditionalReg) {
1903   Register Scratch4 = RegInfo.createVirtualRegister(RC);
1904   MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
1905              RegState::Dead | RegState::Implicit);
1906 }
1907
1908 MI.eraseFromParent(); // The instruction is gone now.
1909
1910 return exitMBB;
1911}
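// Illustrative sketch (an editor's approximation of the post-RA expansion
// performed in MipsExpandPseudo.cpp, not code from this function): the
// *_POSTRA pseudo built above becomes an ll/sc retry loop on the containing
// aligned word, roughly:
//   loop: ll    scratch, 0(alignedaddr)
//         <op>  scratch2, scratch, incr2    # e.g. addu for ATOMIC_LOAD_ADD
//         and   scratch2, scratch2, mask    # keep only the subword lane
//         and   scratch3, scratch, mask2    # keep the surrounding bytes
//         or    scratch3, scratch3, scratch2
//         sc    scratch3, 0(alignedaddr)
//         beqz  scratch3, loop
// after which the old subword is extracted and sign-extended into Dest.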
1912
1913// Lower atomic compare and swap to a pseudo instruction, taking care to
1914// define a scratch register for the pseudo instruction's expansion. The
1915 // instruction is expanded after the register allocator so as to prevent
1916// the insertion of stores between the linked load and the store conditional.
1917
1918 MachineBasicBlock *
1919MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1920 MachineBasicBlock *BB) const {
1921
1922 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1923 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1924 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1925
1926 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1927
1928 MachineFunction *MF = BB->getParent();
1929 MachineRegisterInfo &MRI = MF->getRegInfo();
1930 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1931 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1932 DebugLoc DL = MI.getDebugLoc();
1933
1934 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1935 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1936 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1937 Register Dest = MI.getOperand(0).getReg();
1938 Register Ptr = MI.getOperand(1).getReg();
1939 Register OldVal = MI.getOperand(2).getReg();
1940 Register NewVal = MI.getOperand(3).getReg();
1941
1942 Register Scratch = MRI.createVirtualRegister(RC);
1943 MachineBasicBlock::iterator II(MI);
1944
1945 // We need to create copies of the various registers and kill them at the
1946 // atomic pseudo. If the copies are not made, when the atomic is expanded
1947 // after fast register allocation, the spills will end up outside of the
1948 // blocks that their values are defined in, causing livein errors.
1949
1950 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1951 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1952 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1953
1954 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1955 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1956 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1957
1958 // The purposes of the flags on the scratch registers are explained in
1959 // emitAtomicBinary. In summary, we need a scratch register which is going to
1960 // be undef, that is unique among registers chosen for the instruction.
1961
1962 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1963     .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1964     .addReg(PtrCopy, RegState::Kill)
1965     .addReg(OldValCopy, RegState::Kill)
1966     .addReg(NewValCopy, RegState::Kill)
1967     .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1968                          RegState::Dead | RegState::Implicit);
1969
1970 MI.eraseFromParent(); // The instruction is gone now.
1971
1972 return BB;
1973}
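// Illustrative sketch (the real expansion lives in MipsExpandPseudo.cpp; this
// is an editor's approximation): the ATOMIC_CMP_SWAP_*_POSTRA pseudo becomes
// the classic ll/sc compare-and-swap loop, with no stores permitted between
// the paired instructions:
//   loop: ll    dest, 0(ptr)
//         bne   dest, oldval, exit
//         move  scratch, newval
//         sc    scratch, 0(ptr)
//         beqz  scratch, loop
//   exit: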
1974
1975MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1976 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1977 assert((Size == 1 || Size == 2) &&
1978 "Unsupported size for EmitAtomicCmpSwapPartial.");
1979
1980 MachineFunction *MF = BB->getParent();
1982 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1983 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1984 const TargetRegisterClass *RCp =
1985 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1987 DebugLoc DL = MI.getDebugLoc();
1988
1989 Register Dest = MI.getOperand(0).getReg();
1990 Register Ptr = MI.getOperand(1).getReg();
1991 Register CmpVal = MI.getOperand(2).getReg();
1992 Register NewVal = MI.getOperand(3).getReg();
1993
1994 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1995 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1996 Register Mask = RegInfo.createVirtualRegister(RC);
1997 Register Mask2 = RegInfo.createVirtualRegister(RC);
1998 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1999 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
2000 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
2001 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
2002 Register MaskUpper = RegInfo.createVirtualRegister(RC);
2003 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
2004 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
2005 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
2006 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
2007 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
2008
2009 // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
2010 // flags are used to coerce the register allocator and the machine verifier to
2011 // accept the usage of these registers.
2012 // The EarlyClobber flag has the semantic property that the operand it is
2013 // attached to is clobbered before the rest of the inputs are read. Hence it
2014 // must be unique among the operands to the instruction.
2015 // The Define flag is needed to convince the machine verifier that an Undef
2016 // value isn't a problem.
2017 // The Dead flag is needed as the value in scratch isn't used by any other
2018 // instruction. Kill isn't used as Dead is more precise.
2019 Register Scratch = RegInfo.createVirtualRegister(RC);
2020 Register Scratch2 = RegInfo.createVirtualRegister(RC);
2021
2022 // insert new blocks after the current block
2023 const BasicBlock *LLVM_BB = BB->getBasicBlock();
2024 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
2025 MachineFunction::iterator It = ++BB->getIterator();
2026 MF->insert(It, exitMBB);
2027
2028 // Transfer the remainder of BB and its successor edges to exitMBB.
2029 exitMBB->splice(exitMBB->begin(), BB,
2030 std::next(MachineBasicBlock::iterator(MI)), BB->end());
2031 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
2032
2033 BB->addSuccessor(exitMBB, BranchProbability::getOne());
2034
2035 // thisMBB:
2036 // addiu masklsb2,$0,-4 # 0xfffffffc
2037 // and alignedaddr,ptr,masklsb2
2038 // andi ptrlsb2,ptr,3
2039 // xori ptrlsb2,ptrlsb2,3 # Only for BE
2040 // sll shiftamt,ptrlsb2,3
2041 // ori maskupper,$0,255 # 0xff
2042 // sll mask,maskupper,shiftamt
2043 // nor mask2,$0,mask
2044 // andi maskedcmpval,cmpval,255
2045 // sll shiftedcmpval,maskedcmpval,shiftamt
2046 // andi maskednewval,newval,255
2047 // sll shiftednewval,maskednewval,shiftamt
2048 int64_t MaskImm = (Size == 1) ? 255 : 65535;
2049 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
2050 .addReg(ABI.GetNullPtr()).addImm(-4);
2051 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
2052 .addReg(Ptr).addReg(MaskLSB2);
2053 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
2054 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
2055 if (Subtarget.isLittle()) {
2056 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
2057 } else {
2058 Register Off = RegInfo.createVirtualRegister(RC);
2059 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
2060 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
2061 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
2062 }
2063 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
2064 .addReg(Mips::ZERO).addImm(MaskImm);
2065 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
2066 .addReg(MaskUpper).addReg(ShiftAmt);
2067 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
2068 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
2069 .addReg(CmpVal).addImm(MaskImm);
2070 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
2071 .addReg(MaskedCmpVal).addReg(ShiftAmt);
2072 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2073 .addReg(NewVal).addImm(MaskImm);
2074 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2075 .addReg(MaskedNewVal).addReg(ShiftAmt);
2076
2077 // The purposes of the flags on the scratch registers are explained in
2078 // emitAtomicBinary. In summary, we need a scratch register which is going to
2079 // be undef, that is unique among the registers chosen for the instruction.
2080
2081 BuildMI(BB, DL, TII->get(AtomicOp))
2082     .addReg(Dest, RegState::Define | RegState::EarlyClobber)
2083     .addReg(AlignedAddr)
2084     .addReg(Mask)
2085     .addReg(ShiftedCmpVal)
2086     .addReg(Mask2)
2087     .addReg(ShiftedNewVal)
2088     .addReg(ShiftAmt)
2089     .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
2090                          RegState::Dead | RegState::Implicit)
2091     .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
2092                           RegState::Dead | RegState::Implicit);
2093
2094 MI.eraseFromParent(); // The instruction is gone now.
2095
2096 return exitMBB;
2097}
2098
2099SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2100 // The first operand is the chain, the second is the condition, the third is
2101 // the block to branch to if the condition is true.
2102 SDValue Chain = Op.getOperand(0);
2103 SDValue Dest = Op.getOperand(2);
2104 SDLoc DL(Op);
2105
2106 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2107 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
2108
2109 // Return if flag is not set by a floating point comparison.
2110 if (CondRes.getOpcode() != MipsISD::FPCmp)
2111 return Op;
2112
2113 SDValue CCNode = CondRes.getOperand(2);
2114 Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
2115 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
2116 SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
2117 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
2118 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2119 FCC0, Dest, CondRes);
2120}
2121
2122SDValue MipsTargetLowering::
2123lowerSELECT(SDValue Op, SelectionDAG &DAG) const
2124{
2125 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2126 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
2127
2128 // Return if flag is not set by a floating point comparison.
2129 if (Cond.getOpcode() != MipsISD::FPCmp)
2130 return Op;
2131
2132 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
2133 SDLoc(Op));
2134}
2135
2136SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2137 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2138 SDValue Cond = createFPCmp(DAG, Op);
2139
2140 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2141 "Floating point operand expected.");
2142
2143 SDLoc DL(Op);
2144 SDValue True = DAG.getConstant(1, DL, MVT::i32);
2145 SDValue False = DAG.getConstant(0, DL, MVT::i32);
2146
2147 return createCMovFP(DAG, Cond, True, False, DL);
2148}
2149
2150SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
2151 SelectionDAG &DAG) const {
2152 EVT Ty = Op.getValueType();
2153 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2154 const GlobalValue *GV = N->getGlobal();
2155
2156 if (GV->hasDLLImportStorageClass()) {
2157     assert(Subtarget.isTargetWindows() &&
2158            "Windows is the only supported COFF target");
2159 return getDllimportVariable(
2160 N, SDLoc(N), Ty, DAG, DAG.getEntryNode(),
2161         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2162 }
2163
2164 if (!isPositionIndependent()) {
2165 const MipsTargetObjectFile *TLOF =
2166 static_cast<const MipsTargetObjectFile *>(
2167         getTargetMachine().getObjFileLowering());
2168 const GlobalObject *GO = GV->getAliaseeObject();
2169 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2170 // %gp_rel relocation
2171 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2172
2173 // %hi/%lo relocation
2174 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2175 // %highest/%higher/%hi/%lo relocation
2176 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2177 }
2178
2179 // Every other architecture would use shouldAssumeDSOLocal in here, but
2180 // mips is special.
2181 // * In PIC code mips requires got loads even for local statics!
2182 // * To save on got entries, for local statics the got entry contains the
2183 // page and an additional add instruction takes care of the low bits.
2184 // * It is legal to access a hidden symbol with a non-hidden undefined
2185 //   reference, so one cannot guarantee that all accesses to a hidden symbol
2186 //   will know it is hidden.
2187 // * Mips linkers don't support creating a page and a full got entry for
2188 // the same symbol.
2189 // * Given all that, we have to use a full got entry for hidden symbols :-(
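// Illustrative sketch of the resulting O32 PIC code (an editor's
// approximation, not assembly emitted by this function directly):
//   local static:  lw    $v0, %got(sym)($gp)   # GOT page entry
//                  addiu $v0, $v0, %lo(sym)    # add the low bits
//   other globals: lw    $v0, %got(sym)($gp)   # full GOT entry
//                  (%got_disp(sym) under N32/N64)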
2190 if (GV->hasLocalLinkage())
2191 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2192
2193 if (Subtarget.useXGOT())
2194 return getAddrGlobalLargeGOT(
2195     N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
2196     DAG.getEntryNode(),
2197 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2198
2199 return getAddrGlobal(
2200 N, SDLoc(N), Ty, DAG,
2201     (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
2202     DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2203}
2204
2205SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2206 SelectionDAG &DAG) const {
2207 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2208 EVT Ty = Op.getValueType();
2209
2210 if (!isPositionIndependent())
2211 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2212 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2213
2214 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2215}
2216
2217SDValue MipsTargetLowering::
2218lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
2219{
2220 // If the relocation model is PIC, use the General Dynamic TLS Model or
2221 // Local Dynamic TLS model, otherwise use the Initial Exec or
2222 // Local Exec TLS Model.
2223
2224 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2225 if (DAG.getTarget().useEmulatedTLS())
2226 return LowerToTLSEmulatedModel(GA, DAG);
2227
2228 SDLoc DL(GA);
2229 const GlobalValue *GV = GA->getGlobal();
2230 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2231
2232 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2233
2234 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2235 // General Dynamic and Local Dynamic TLS Model.
2236 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
2237                                                   : MipsII::MO_TLSGD;
2238
2239 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
2240 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
2241     getGlobalReg(DAG, PtrVT), TGA);
2242 unsigned PtrSize = PtrVT.getSizeInBits();
2243 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
2244
2245 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
2246
2247 ArgListTy Args;
2248 ArgListEntry Entry;
2249 Entry.Node = Argument;
2250 Entry.Ty = PtrTy;
2251 Args.push_back(Entry);
2252
2253 TargetLowering::CallLoweringInfo CLI(DAG);
2254 CLI.setDebugLoc(DL)
2255 .setChain(DAG.getEntryNode())
2256 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2257 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2258
2259 SDValue Ret = CallResult.first;
2260
2261 if (model != TLSModel::LocalDynamic)
2262 return Ret;
2263
2264 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2265                                           MipsII::MO_DTPREL_HI);
2266 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2267 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2268                                           MipsII::MO_DTPREL_LO);
2269 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2270 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
2271 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
2272 }
2273
2274 SDValue Offset;
2275 if (model == TLSModel::InitialExec) {
2276 // Initial Exec TLS Model
2277 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2278                                            MipsII::MO_GOTTPREL);
2279   TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
2280 TGA);
2281 Offset =
2282 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
2283 } else {
2284 // Local Exec TLS Model
2285 assert(model == TLSModel::LocalExec);
2286 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2287                                             MipsII::MO_TPREL_HI);
2288   SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2289                                             MipsII::MO_TPREL_LO);
2290   SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2291 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2292 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2293 }
2294
2295 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2296 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
2297}
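// Illustrative sketch (an editor's approximation for O32 PIC): the
// GeneralDynamic path above amounts to
//   addiu $a0, $gp, %tlsgd(sym)
//   lw    $t9, %call16(__tls_get_addr)($gp)
//   jalr  $t9
// with the address returned in $v0; LocalDynamic additionally adds
// %dtprel_hi/%dtprel_lo of the symbol to the returned module base.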
2298
2299SDValue MipsTargetLowering::
2300lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2301{
2302 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2303 EVT Ty = Op.getValueType();
2304
2305 if (!isPositionIndependent())
2306 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2307 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2308
2309 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2310}
2311
2312SDValue MipsTargetLowering::
2313lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
2314{
2315 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2316 EVT Ty = Op.getValueType();
2317
2318 if (!isPositionIndependent()) {
2319 const MipsTargetObjectFile *TLOF =
2320 static_cast<const MipsTargetObjectFile *>(
2321         getTargetMachine().getObjFileLowering());
2322
2323 if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
2324                                      getTargetMachine()))
2325   // %gp_rel relocation
2326 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2327
2328 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2329 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2330 }
2331
2332 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2333}
2334
2335SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2336 MachineFunction &MF = DAG.getMachineFunction();
2337 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
2338
2339 SDLoc DL(Op);
2340 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2341                                getPointerTy(MF.getDataLayout()));
2342
2343 // vastart just stores the address of the VarArgsFrameIndex slot into the
2344 // memory location argument.
2345 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2346 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2347 MachinePointerInfo(SV));
2348}
2349
2350SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2351 SDNode *Node = Op.getNode();
2352 EVT VT = Node->getValueType(0);
2353 SDValue Chain = Node->getOperand(0);
2354 SDValue VAListPtr = Node->getOperand(1);
2355 const Align Align =
2356 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2357 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2358 SDLoc DL(Node);
2359 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2360
2361 SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
2362 VAListPtr, MachinePointerInfo(SV));
2363 SDValue VAList = VAListLoad;
2364
2365 // Re-align the pointer if necessary.
2366 // It should only ever be necessary for 64-bit types on O32 since the minimum
2367 // argument alignment is the same as the maximum type alignment for N32/N64.
2368 //
2369 // FIXME: We currently align too often. The code generator doesn't notice
2370 // when the pointer is still aligned from the last va_arg (or pair of
2371 // va_args for the i64 on O32 case).
2372 if (Align > getMinStackArgumentAlignment()) {
2373   VAList = DAG.getNode(
2374 ISD::ADD, DL, VAList.getValueType(), VAList,
2375 DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
2376
2377 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
2378 DAG.getSignedConstant(-(int64_t)Align.value(), DL,
2379 VAList.getValueType()));
2380 }
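// Illustrative note (editor's example): for an i64 va_arg on O32, Align is 8
// and the ADD/AND pair above computes VAList = (VAList + 7) & ~7, the usual
// round-up-to-alignment idiom.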
2381
2382 // Increment the pointer, VAList, to the next vaarg.
2383 auto &TD = DAG.getDataLayout();
2384 unsigned ArgSizeInBytes =
2385     TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
2386 SDValue Tmp3 =
2387 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
2388 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
2389 DL, VAList.getValueType()));
2390 // Store the incremented VAList to the legalized pointer
2391 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
2392 MachinePointerInfo(SV));
2393
2394 // In big-endian mode we must adjust the pointer when the load size is smaller
2395 // than the argument slot size. We must also reduce the known alignment to
2396 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
2397 // the correct half of the slot, and reduce the alignment from 8 (slot
2398 // alignment) down to 4 (type alignment).
2399 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2400 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2401 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
2402 DAG.getIntPtrConstant(Adjustment, DL));
2403 }
2404 // Load the actual argument out of the pointer VAList
2405 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2406}
2407
2408static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
2409                                 bool HasExtractInsert) {
2410 EVT TyX = Op.getOperand(0).getValueType();
2411 EVT TyY = Op.getOperand(1).getValueType();
2412 SDLoc DL(Op);
2413 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2414 SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
2415 SDValue Res;
2416
2417 // If the operand is of type f64, extract the upper 32 bits. Otherwise, bitcast it
2418 // to i32.
2419 SDValue X = (TyX == MVT::f32) ?
2420 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2421 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2422 Const1);
2423 SDValue Y = (TyY == MVT::f32) ?
2424 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
2425 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2426 Const1);
2427
2428 if (HasExtractInsert) {
2429 // ext E, Y, 31, 1 ; extract bit31 of Y
2430 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
2431 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2432 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2433 } else {
2434 // sll SllX, X, 1
2435 // srl SrlX, SllX, 1
2436 // srl SrlY, Y, 31
2437 // sll SllY, SrlX, 31
2438 // or Or, SrlX, SllY
2439 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2440 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2441 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
2442 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
2443 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
2444 }
2445
2446 if (TyX == MVT::f32)
2447 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
2448
2449 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2450 Op.getOperand(0),
2451 DAG.getConstant(0, DL, MVT::i32));
2452 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2453}
2454
2455static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
2456                                 bool HasExtractInsert) {
2457 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2458 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2459 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
2460 SDLoc DL(Op);
2461 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2462
2463 // Bitcast to integer nodes.
2464 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
2465 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2466
2467 if (HasExtractInsert) {
2468 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2469 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2470 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2471 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2472
2473 if (WidthX > WidthY)
2474 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2475 else if (WidthY > WidthX)
2476 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2477
2478 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2479 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2480 X);
2481 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
2482 }
2483
2484 // (d)sll SllX, X, 1
2485 // (d)srl SrlX, SllX, 1
2486 // (d)srl SrlY, Y, width(Y)-1
2487 // (d)sll SllY, SrlX, width(Y)-1
2488 // or Or, SrlX, SllY
2489 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2490 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2491 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2492 DAG.getConstant(WidthY - 1, DL, MVT::i32));
2493
2494 if (WidthX > WidthY)
2495 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2496 else if (WidthY > WidthX)
2497 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2498
2499 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2500 DAG.getConstant(WidthX - 1, DL, MVT::i32));
2501 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2502 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
2503}
2504
2505SDValue
2506MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2507 if (Subtarget.isGP64bit())
2508   return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
2509
2510 return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
2511}
2512
2513SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
2514 bool HasExtractInsert) const {
2515 SDLoc DL(Op);
2516 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2517
2518 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2519   return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2520
2521 // If the operand is of type f64, extract the upper 32 bits. Otherwise, bitcast it
2522 // to i32.
2523 SDValue X = (Op.getValueType() == MVT::f32)
2524 ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
2525 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2526 Op.getOperand(0), Const1);
2527
2528 // Clear MSB.
2529 if (HasExtractInsert)
2530 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2531 DAG.getRegister(Mips::ZERO, MVT::i32),
2532 DAG.getConstant(31, DL, MVT::i32), Const1, X);
2533 else {
2534 // TODO: Provide DAG patterns which transform (and x, cst)
2535 // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
2536 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2537 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2538 }
2539
2540 if (Op.getValueType() == MVT::f32)
2541 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
2542
2543 // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
2544 // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
2545 // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
2546 // place.
2547 SDValue LowX =
2548 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2549 DAG.getConstant(0, DL, MVT::i32));
2550 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2551}
2552
2553SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
2554 bool HasExtractInsert) const {
2555 SDLoc DL(Op);
2556 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2557
2558 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2559   return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2560
2561 // Bitcast to integer node.
2562 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2563
2564 // Clear MSB.
2565 if (HasExtractInsert)
2566 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2567 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2568 DAG.getConstant(63, DL, MVT::i32), Const1, X);
2569 else {
2570 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2571 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2572 }
2573
2574 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
2575}
2576
2577SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2578 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2579 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2580
2581 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2582}
2583
2584SDValue MipsTargetLowering::lowerFCANONICALIZE(SDValue Op,
2585 SelectionDAG &DAG) const {
2586 SDLoc DL(Op);
2587 EVT VT = Op.getValueType();
2588 SDValue Operand = Op.getOperand(0);
2589 SDNodeFlags Flags = Op->getFlags();
2590
2591 if (Flags.hasNoNaNs() || DAG.isKnownNeverNaN(Operand))
2592 return Operand;
2593
2594 SDValue Quiet = DAG.getNode(ISD::FADD, DL, VT, Operand, Operand);
2595 return DAG.getSelectCC(DL, Operand, Operand, Quiet, Operand, ISD::SETUO);
2596}
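// Illustrative note (editor's explanation): SETUO is true only when the
// operands compare unordered, i.e. when Operand is a NaN, so the select
// returns the FADD result exactly for NaNs; adding a signaling NaN to itself
// produces a quiet NaN, which is the canonical form. All other values pass
// through unchanged.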
2597
2598SDValue MipsTargetLowering::
2599lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2600 // check the depth
2601 if (Op.getConstantOperandVal(0) != 0) {
2602 DAG.getContext()->emitError(
2603     "frame address can be determined only for current frame");
2604 return SDValue();
2605 }
2606
2607 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2608 MFI.setFrameAddressIsTaken(true);
2609 EVT VT = Op.getValueType();
2610 SDLoc DL(Op);
2611 SDValue FrameAddr = DAG.getCopyFromReg(
2612 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2613 return FrameAddr;
2614}
2615
2616SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
2617 SelectionDAG &DAG) const {
2618 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2619   return SDValue();
2620
2621 // check the depth
2622 if (Op.getConstantOperandVal(0) != 0) {
2623 DAG.getContext()->emitError(
2624 "return address can be determined only for current frame");
2625 return SDValue();
2626 }
2627
2628 MachineFunction &MF = DAG.getMachineFunction();
2629 MachineFrameInfo &MFI = MF.getFrameInfo();
2630 MVT VT = Op.getSimpleValueType();
2631 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2632 MFI.setReturnAddressIsTaken(true);
2633
2634 // Return RA, which contains the return address. Mark it an implicit live-in.
2635 Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2636 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
2637}
2638
2639// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
2640// generated from __builtin_eh_return (offset, handler)
2641// The effect of this is to adjust the stack pointer by "offset"
2642// and then branch to "handler".
2643SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
2644 const {
2645 MachineFunction &MF = DAG.getMachineFunction();
2646 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2647
2648 MipsFI->setCallsEhReturn();
2649 SDValue Chain = Op.getOperand(0);
2650 SDValue Offset = Op.getOperand(1);
2651 SDValue Handler = Op.getOperand(2);
2652 SDLoc DL(Op);
2653 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2654
2655 // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
2656 // EH_RETURN nodes, so that instructions are emitted back-to-back.
2657 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2658 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2659 Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
2660 Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
2661 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2662 DAG.getRegister(OffsetReg, Ty),
2663 DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
2664 Chain.getValue(1));
2665}
2666
2667SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2668 SelectionDAG &DAG) const {
2669 // FIXME: Need pseudo-fence for 'singlethread' fences
2670 // FIXME: Set SType for weaker fences where supported/appropriate.
2671 unsigned SType = 0;
2672 SDLoc DL(Op);
2673 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2674 DAG.getConstant(SType, DL, MVT::i32));
2675}
2676
2677SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2678 SelectionDAG &DAG) const {
2679 SDLoc DL(Op);
2680 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2681
2682 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2683 SDValue Shamt = Op.getOperand(2);
2684 // if shamt < (VT.bits):
2685 // lo = (shl lo, shamt)
2686 // hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
2687 // else:
2688 // lo = 0
2689 // hi = (shl lo, shamt[4:0])
2690 SDValue Not =
2691 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2692 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2693 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2694 DAG.getConstant(1, DL, VT));
2695 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2696 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2697 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2698 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2699 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2700 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2701 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2702 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2703 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2704
2705 SDValue Ops[2] = {Lo, Hi};
2706 return DAG.getMergeValues(Ops, DL);
2707}
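// Illustrative worked example (editor's note): shifting a 64-bit value left
// by 40 on a 32-bit target takes the shamt >= 32 branch (40 & 32 != 0), so
// the selects produce lo = 0 and hi = lo << (40 & 31) = lo << 8, i.e. the
// original low word shifted into the high word by the excess amount.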
2708
2709SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2710 bool IsSRA) const {
2711 SDLoc DL(Op);
2712 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2713 SDValue Shamt = Op.getOperand(2);
2714 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2715
2716 // if shamt < (VT.bits):
2717 // lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
2718 // if isSRA:
2719 // hi = (sra hi, shamt)
2720 // else:
2721 // hi = (srl hi, shamt)
2722 // else:
2723 // if isSRA:
2724 // lo = (sra hi, shamt[4:0])
2725 // hi = (sra hi, 31)
2726 // else:
2727 // lo = (srl hi, shamt[4:0])
2728 // hi = 0
2729 SDValue Not =
2730 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2731 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2732 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
2733 DAG.getConstant(1, DL, VT));
2734 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
2735 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2736 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2737 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
2738 DL, VT, Hi, Shamt);
2739 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2740 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2741 SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
2742 DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
2743
2744 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2745 SDVTList VTList = DAG.getVTList(VT, VT);
2746     return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
2747                                              : Mips::PseudoD_SELECT_I,
2748                        DL, VTList, Cond, ShiftRightHi,
2749 IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
2750 ShiftRightHi);
2751 }
2752
2753 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
2754 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2755 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2756
2757 SDValue Ops[2] = {Lo, Hi};
2758 return DAG.getMergeValues(Ops, DL);
2759}
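// Illustrative worked example (editor's note): an arithmetic shift right by
// 40 takes the shamt >= 32 branch, giving lo = hi >> (40 & 31) = hi >> 8
// (arithmetic) and hi = hi >> 31, which replicates the sign bit across the
// high word.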
2760
2761static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2762 SDValue Chain, SDValue Src, unsigned Offset) {
2763 SDValue Ptr = LD->getBasePtr();
2764 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2765 EVT BasePtrVT = Ptr.getValueType();
2766 SDLoc DL(LD);
2767 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2768
2769 if (Offset)
2770 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2771 DAG.getConstant(Offset, DL, BasePtrVT));
2772
2773 SDValue Ops[] = { Chain, Ptr, Src };
2774 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2775 LD->getMemOperand());
2776}
2777
2778// Expand an unaligned 32 or 64-bit integer load node.
2779SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2780 LoadSDNode *LD = cast<LoadSDNode>(Op);
2781 EVT MemVT = LD->getMemoryVT();
2782
2783 if (Subtarget.systemSupportsUnalignedAccess())
2784   return Op;
2785
2786 // Return if load is aligned or if MemVT is neither i32 nor i64.
2787 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2788 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2789 return SDValue();
2790
2791 bool IsLittle = Subtarget.isLittle();
2792 EVT VT = Op.getValueType();
2793 ISD::LoadExtType ExtType = LD->getExtensionType();
2794 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2795
2796 assert((VT == MVT::i32) || (VT == MVT::i64));
2797
2798 // Expand
2799 // (set dst, (i64 (load baseptr)))
2800 // to
2801 // (set tmp, (ldl (add baseptr, 7), undef))
2802 // (set dst, (ldr baseptr, tmp))
2803 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2804 SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2805 IsLittle ? 7 : 0);
2806 return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
2807 IsLittle ? 0 : 7);
2808 }
2809
2810 SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2811 IsLittle ? 3 : 0);
2812 SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2813 IsLittle ? 0 : 3);
2814
2815 // Expand
2816 // (set dst, (i32 (load baseptr))) or
2817 // (set dst, (i64 (sextload baseptr))) or
2818 // (set dst, (i64 (extload baseptr)))
2819 // to
2820 // (set tmp, (lwl (add baseptr, 3), undef))
2821 // (set dst, (lwr baseptr, tmp))
2822 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2823 (ExtType == ISD::EXTLOAD))
2824 return LWR;
2825
2826 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2827
2828 // Expand
2829 // (set dst, (i64 (zextload baseptr)))
2830 // to
2831 // (set tmp0, (lwl (add baseptr, 3), undef))
2832 // (set tmp1, (lwr baseptr, tmp0))
2833 // (set tmp2, (shl tmp1, 32))
2834 // (set dst, (srl tmp2, 32))
2835 SDLoc DL(LD);
2836 SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
2837 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2838 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2839 SDValue Ops[] = { SRL, LWR.getValue(1) };
2840 return DAG.getMergeValues(Ops, DL);
2841}
2842
2843static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2844 SDValue Chain, unsigned Offset) {
2845 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2846 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2847 SDLoc DL(SD);
2848 SDVTList VTList = DAG.getVTList(MVT::Other);
2849
2850 if (Offset)
2851 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2852 DAG.getConstant(Offset, DL, BasePtrVT));
2853
2854 SDValue Ops[] = { Chain, Value, Ptr };
2855 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2856 SD->getMemOperand());
2857}
2858
2859// Expand an unaligned 32 or 64-bit integer store node.
2860static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2861     bool IsLittle) {
2862 SDValue Value = SD->getValue(), Chain = SD->getChain();
2863 EVT VT = Value.getValueType();
2864
2865 // Expand
2866 // (store val, baseptr) or
2867 // (truncstore val, baseptr)
2868 // to
2869 // (swl val, (add baseptr, 3))
2870 // (swr val, baseptr)
2871 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2872 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2873 IsLittle ? 3 : 0);
2874 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2875 }
2876
2877 assert(VT == MVT::i64);
2878
2879 // Expand
2880 // (store val, baseptr)
2881 // to
2882 // (sdl val, (add baseptr, 7))
2883 // (sdr val, baseptr)
2884 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2885 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2886}
2887
2888// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
2889static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2890     bool SingleFloat) {
2891 SDValue Val = SD->getValue();
2892
2893 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2894 (Val.getValueSizeInBits() > 32 && SingleFloat))
2895 return SDValue();
2896
2897 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2898 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2899 Val.getOperand(0));
2900 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2901 SD->getPointerInfo(), SD->getAlign(),
2902 SD->getMemOperand()->getFlags());
2903}
2904
2905SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2906 StoreSDNode *SD = cast<StoreSDNode>(Op);
2907 EVT MemVT = SD->getMemoryVT();
2908
2909 // Lower unaligned integer stores.
2910 if (!Subtarget.systemSupportsUnalignedAccess() &&
2911     (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2912 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2913 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2914
2915 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2916}
2917
2918SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2919 SelectionDAG &DAG) const {
2920
2921 // Return a fixed StackObject with offset 0 which points to the old stack
2922 // pointer.
2923 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2924 EVT ValTy = Op->getValueType(0);
2925 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2926 return DAG.getFrameIndex(FI, ValTy);
2927}
2928
2929SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2930 SelectionDAG &DAG) const {
2931 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2932 return SDValue();
2933
2934 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2935 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2936 Op.getOperand(0));
2937 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2938}
2939
2940//===----------------------------------------------------------------------===//
2941// Calling Convention Implementation
2942//===----------------------------------------------------------------------===//
2943
2944//===----------------------------------------------------------------------===//
2945// TODO: Implement a generic logic using tblgen that can support this.
2946// Mips O32 ABI rules:
2947// ---
2948// i32 - Passed in A0, A1, A2, A3 and stack
2949// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2950// an argument. Otherwise, passed in A1, A2, A3 and stack.
2951// f64 - Only passed in two aliased f32 registers if no int reg has been used
2952// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2953// not used, it must be shadowed. If only A3 is available, shadow it and
2954// go to stack.
2955// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2956// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2957// with the remainder spilled to the stack.
2958// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2959// spilling the remainder to the stack.
2960//
2961// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2962//===----------------------------------------------------------------------===//
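// Illustrative worked example of the rules above (editor's note): for a call
// to f(int a, float b, double c) under O32/FP32, 'a' is assigned A0; an
// integer register has now been used, so 'b' is passed in A1 rather than F12,
// and 'c' is passed in the A2/A3 pair.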
2963
2964static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2965 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2966 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2967 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2968     State.getMachineFunction().getSubtarget());
2969
2970 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2971
2972 const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
2973
2974 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2975
2976 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2977
2978 // Do not process byval args here.
2979 if (ArgFlags.isByVal())
2980 return true;
2981
2982 // Promote i8 and i16
2983 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2984 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2985 LocVT = MVT::i32;
2986 if (ArgFlags.isSExt())
2987 LocInfo = CCValAssign::SExtUpper;
2988 else if (ArgFlags.isZExt())
2989 LocInfo = CCValAssign::ZExtUpper;
2990 else
2991 LocInfo = CCValAssign::AExtUpper;
2992 }
2993 }
2994
2995 // Promote i8 and i16
2996 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2997 LocVT = MVT::i32;
2998 if (ArgFlags.isSExt())
2999 LocInfo = CCValAssign::SExt;
3000 else if (ArgFlags.isZExt())
3001 LocInfo = CCValAssign::ZExt;
3002 else
3003 LocInfo = CCValAssign::AExt;
3004 }
3005
3006 unsigned Reg;
3007
3008 // f32 and f64 are allocated in A0, A1, A2, A3 when any of the following
3009 // is true: the function is vararg, the argument is the 3rd or higher, or a
3010 // previous argument is not f32 or f64.
3011 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
3012 State.getFirstUnallocated(F32Regs) != ValNo;
3013 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
3014 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
3015 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
3016
3017 // The MIPS vector ABI for floats passes them in a pair of registers
3018 if (ValVT == MVT::i32 && isVectorFloat) {
3019   // This is the start of a vector that was scalarized into an unknown number
3020 // of components. It doesn't matter how many there are. Allocate one of the
3021 // notional 8 byte aligned registers which map onto the argument stack, and
3022 // shadow the register lost to alignment requirements.
3023 if (ArgFlags.isSplit()) {
3024 Reg = State.AllocateReg(FloatVectorIntRegs);
3025 if (Reg == Mips::A2)
3026 State.AllocateReg(Mips::A1);
3027 else if (Reg == 0)
3028 State.AllocateReg(Mips::A3);
3029 } else {
3030 // If we're an intermediate component of the split, we can just attempt to
3031 // allocate a register directly.
3032 Reg = State.AllocateReg(IntRegs);
3033 }
3034 } else if (ValVT == MVT::i32 ||
3035 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
3036 Reg = State.AllocateReg(IntRegs);
3037 // If this is the first part of an i64 arg,
3038 // the allocated register must be either A0 or A2.
3039 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
3040 Reg = State.AllocateReg(IntRegs);
3041 LocVT = MVT::i32;
3042 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
3043 // Allocate int register and shadow next int register. If first
3044 // available register is Mips::A1 or Mips::A3, shadow it too.
3045 Reg = State.AllocateReg(IntRegs);
3046 if (Reg == Mips::A1 || Reg == Mips::A3)
3047 Reg = State.AllocateReg(IntRegs);
3048
3049 if (Reg) {
3050 LocVT = MVT::i32;
3051
3052 State.addLoc(
3053 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
3054 MCRegister HiReg = State.AllocateReg(IntRegs);
3055 assert(HiReg);
3056 State.addLoc(
3057 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
3058 return false;
3059 }
3060 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
3061 // we are guaranteed to find an available float register
3062 if (ValVT == MVT::f32) {
3063 Reg = State.AllocateReg(F32Regs);
3064 // Shadow int register
3065 State.AllocateReg(IntRegs);
3066 } else {
3067 Reg = State.AllocateReg(F64Regs);
3068 // Shadow int registers
3069 MCRegister Reg2 = State.AllocateReg(IntRegs);
3070 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
3071 State.AllocateReg(IntRegs);
3072 State.AllocateReg(IntRegs);
3073 }
3074 } else
3075 llvm_unreachable("Cannot handle this ValVT.");
3076
3077 if (!Reg) {
3078 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
3079 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
3080 } else
3081 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
3082
3083 return false;
3084}
3085
3086static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
3087 MVT LocVT, CCValAssign::LocInfo LocInfo,
3088 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3089 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3090
3091 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3092}
3093
3094static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3095 MVT LocVT, CCValAssign::LocInfo LocInfo,
3096 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3097 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3098
3099 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3100}
3101
3102static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3103 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3104                       CCState &State) LLVM_ATTRIBUTE_UNUSED;
3105
3106#include "MipsGenCallingConv.inc"
3107
3108CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
3109   return CC_Mips_FixedArg;
3110 }
3111
3112CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
3113   return RetCC_Mips;
3114 }
3115//===----------------------------------------------------------------------===//
3116// Call Calling Convention Implementation
3117//===----------------------------------------------------------------------===//
3118
3119SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3120 SDValue Chain, SDValue Arg,
3121 const SDLoc &DL, bool IsTailCall,
3122 SelectionDAG &DAG) const {
3123 if (!IsTailCall) {
3124 SDValue PtrOff =
3125 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3126                 DAG.getIntPtrConstant(Offset, DL));
3127   return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3128 }
3129
3130 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3131 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3132 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3133 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3134                     MachineMemOperand::MOVolatile);
3135}
3136
3137void MipsTargetLowering::getOpndList(
3138    SmallVectorImpl<SDValue> &Ops,
3139     std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3140 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3141 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3142 SDValue Chain) const {
3143 // Insert node "GP copy globalreg" before call to function.
3144 //
3145 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3146 // in PIC mode) allow symbols to be resolved via lazy binding.
3147 // The lazy binding stub requires GP to point to the GOT.
3148 // Note that we don't need GP to point to the GOT for indirect calls
3149 // (when R_MIPS_CALL* is not used for the call) because Mips linker generates
3150 // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs
3151 // used for the function (that is, Mips linker doesn't generate lazy binding
3152 // stub for a function whose address is taken in the program).
3153 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3154 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3155 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3156 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3157 }
3158
3159 // Build a sequence of copy-to-reg nodes chained together with token
3160 // chain and flag operands which copy the outgoing args into registers.
3161 // The InGlue is necessary since all emitted instructions must be
3162 // stuck together.
3163 SDValue InGlue;
3164
3165 for (auto &R : RegsToPass) {
3166 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3167 InGlue = Chain.getValue(1);
3168 }
3169
3170 // Add argument registers to the end of the list so that they are
3171 // known live into the call.
3172 for (auto &R : RegsToPass)
3173 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3174
3175 // Add a register mask operand representing the call-preserved registers.
3176 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3177 const uint32_t *Mask =
3178 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3179 assert(Mask && "Missing call preserved mask for calling convention");
3180 if (Subtarget.inMips16HardFloat()) {
3181   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3182 StringRef Sym = G->getGlobal()->getName();
3183 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3184 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3185       Mask = MipsRegisterInfo::getMips16RetHelperMask();
3186   }
3187 }
3188 }
3189 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3190
3191 if (InGlue.getNode())
3192 Ops.push_back(InGlue);
3193}
3194
3195void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3196     SDNode *Node) const {
3197 switch (MI.getOpcode()) {
3198 default:
3199 return;
3200 case Mips::JALR:
3201 case Mips::JALRPseudo:
3202 case Mips::JALR64:
3203 case Mips::JALR64Pseudo:
3204 case Mips::JALR16_MM:
3205 case Mips::JALRC16_MMR6:
3206 case Mips::TAILCALLREG:
3207 case Mips::TAILCALLREG64:
3208 case Mips::TAILCALLR6REG:
3209 case Mips::TAILCALL64R6REG:
3210 case Mips::TAILCALLREG_MM:
3211 case Mips::TAILCALLREG_MMR6: {
3212 if (!EmitJalrReloc ||
3213       Subtarget.inMips16Mode() ||
3214       !isPositionIndependent() ||
3215       Node->getNumOperands() < 1 ||
3216 Node->getOperand(0).getNumOperands() < 2) {
3217 return;
3218 }
3219 // We are after the callee address, set by LowerCall().
3220 // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
3221 // symbol.
3222 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3223 StringRef Sym;
3224 if (const GlobalAddressSDNode *G =
3225 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3226 // We must not emit the R_MIPS_JALR relocation against data symbols
3227 // since this will cause run-time crashes if the linker replaces the
3228 // call instruction with a relative branch to the data symbol.
3229 if (!isa<Function>(G->getGlobal())) {
3230 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3231 << G->getGlobal()->getName() << "\n");
3232 return;
3233 }
3234 Sym = G->getGlobal()->getName();
3235 }
3236 else if (const ExternalSymbolSDNode *ES =
3237 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3238 Sym = ES->getSymbol();
3239 }
3240
3241 if (Sym.empty())
3242 return;
3243
3244 MachineFunction *MF = MI.getParent()->getParent();
3245 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3246 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3247 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
3248   }
3249 }
3250}
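// Illustrative note (editor's approximation of the downstream effect): the
// MCSymbol operand added above causes the asm printer to emit a directive of
// the form
//   .reloc <label>, R_MIPS_JALR, sym
// on the jalr, which lets the linker relax the indirect call through $t9 into
// a direct branch once the target is known.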
3251
3252/// LowerCall - function arguments are copied from virtual regs to
3253/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3254SDValue
3255MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3256 SmallVectorImpl<SDValue> &InVals) const {
3257 SelectionDAG &DAG = CLI.DAG;
3258 SDLoc DL = CLI.DL;
3259 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3260 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3261 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3262 SDValue Chain = CLI.Chain;
3263 SDValue Callee = CLI.Callee;
3264 bool &IsTailCall = CLI.IsTailCall;
3265 CallingConv::ID CallConv = CLI.CallConv;
3266 bool IsVarArg = CLI.IsVarArg;
3267
3268 MachineFunction &MF = DAG.getMachineFunction();
3269 MachineFrameInfo &MFI = MF.getFrameInfo();
3270 const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
3271 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3272 bool IsPIC = isPositionIndependent();
3273
3274 // Analyze operands of the call, assigning locations to each operand.
3275 SmallVector<CCValAssign, 16> ArgLocs;
3276 MipsCCState CCInfo(
3277 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3278     MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3279
3280 const ExternalSymbolSDNode *ES =
3281 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3282
3283 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3284 // is during the lowering of a call with a byval argument which produces
3285 // a call to memcpy. For the O32 case, this causes the caller to allocate
3286 // stack space for the reserved argument area for the callee, then recursively
3287 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3288 // ABIs mandate that the callee allocates the reserved argument area. We do
3289 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3290 //
3291 // If the callee has a byval argument and memcpy is used, we are mandated
3292 // to already have produced a reserved argument area for the callee for O32.
3293 // Therefore, the reserved argument area can be reused for both calls.
3294 //
3295 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3296 // present, as we have yet to hook that node onto the chain.
3297 //
3298 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3299 // case. GCC does a similar trick, in that wherever possible, it calculates
3300 // the maximum outgoing argument area (including the reserved area), and
3301 // preallocates the stack space on entrance to the caller.
3302 //
3303 // FIXME: We should do the same for efficiency and space.
3304
3305 // Note: The check on the calling convention below must match
3306 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3307 bool MemcpyInByVal = ES && StringRef(ES->getSymbol()) == "memcpy" &&
3308 CallConv != CallingConv::Fast &&
3309 Chain.getOpcode() == ISD::CALLSEQ_START;
3310
3311 // Allocate the reserved argument area. It seems strange to do this from the
3312 // caller side but removing it breaks the frame size calculation.
3313 unsigned ReservedArgArea =
3314 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3315 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3316
3317 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3318 ES ? ES->getSymbol() : nullptr);
3319
3320 // Get a count of how many bytes are to be pushed on the stack.
3321 unsigned StackSize = CCInfo.getStackSize();
3322
3323 // Call site info for function parameters tracking.
3324 MachineFunction::CallSiteInfo CSInfo;
3325
3326 // Check if it's really possible to do a tail call. Restrict it to functions
3327 // that are part of this compilation unit.
3328 bool InternalLinkage = false;
3329 if (IsTailCall) {
3330 IsTailCall = isEligibleForTailCallOptimization(
3331 CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
3332 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3333 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3334 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3335 G->getGlobal()->hasPrivateLinkage() ||
3336 G->getGlobal()->hasHiddenVisibility() ||
3337 G->getGlobal()->hasProtectedVisibility());
3338 }
3339 }
3340 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3341 report_fatal_error("failed to perform tail call elimination on a call "
3342 "site marked musttail");
3343
3344 if (IsTailCall)
3345 ++NumTailCalls;
3346
3347 // Chain is the output chain of the last Load/Store or CopyToReg node.
3348 // ByValChain is the output chain of the last Memcpy node created for copying
3349 // byval arguments to the stack.
3350 unsigned StackAlignment = TFL->getStackAlignment();
3351 StackSize = alignTo(StackSize, StackAlignment);
3352
3353 if (!(IsTailCall || MemcpyInByVal))
3354 Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
3355
3357 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3359
3360 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3361 SmallVector<SDValue, 8> MemOpChains;
3362
3363 CCInfo.rewindByValRegsInfo();
3364
3365 // Walk the register/memloc assignments, inserting copies/loads.
3366 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3367 SDValue Arg = OutVals[OutIdx];
3368 CCValAssign &VA = ArgLocs[i];
3369 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3370 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3371 bool UseUpperBits = false;
3372
3373 // ByVal Arg.
3374 if (Flags.isByVal()) {
3375 unsigned FirstByValReg, LastByValReg;
3376 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3377 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3378
3379 assert(Flags.getByValSize() &&
3380 "ByVal args of size 0 should have been ignored by front-end.");
3381 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3382 assert(!IsTailCall &&
3383 "Do not tail-call optimize if there is a byval argument.");
3384 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3385 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3386 VA);
3387 CCInfo.nextInRegsParam();
3388 continue;
3389 }
3390
3391 // Promote the value if needed.
3392 switch (VA.getLocInfo()) {
3393 default:
3394 llvm_unreachable("Unknown loc info!");
3395 case CCValAssign::Full:
3396 if (VA.isRegLoc()) {
3397 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3398 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3399 (ValVT == MVT::i64 && LocVT == MVT::f64))
3400 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3401 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3402 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3403 Arg, DAG.getConstant(0, DL, MVT::i32));
3404 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3405 Arg, DAG.getConstant(1, DL, MVT::i32));
3406 if (!Subtarget.isLittle())
3407 std::swap(Lo, Hi);
3408
3409 assert(VA.needsCustom());
3410
3411 Register LocRegLo = VA.getLocReg();
3412 Register LocRegHigh = ArgLocs[++i].getLocReg();
3413 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3414 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3415 continue;
3416 }
3417 }
3418 break;
3419 case CCValAssign::BCvt:
3420 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3421 break;
3422 case CCValAssign::SExtUpper:
3423 UseUpperBits = true;
3424 [[fallthrough]];
3425 case CCValAssign::SExt:
3426 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3427 break;
3428 case CCValAssign::ZExtUpper:
3429 UseUpperBits = true;
3430 [[fallthrough]];
3431 case CCValAssign::ZExt:
3432 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3433 break;
3434 case CCValAssign::AExtUpper:
3435 UseUpperBits = true;
3436 [[fallthrough]];
3437 case CCValAssign::AExt:
3438 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3439 break;
3440 }
3441
3442 if (UseUpperBits) {
3443 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3444 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3445 Arg = DAG.getNode(
3446 ISD::SHL, DL, VA.getLocVT(), Arg,
3447 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3448 }
3449
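// For example (illustrative): under N32/N64 an i32 outgoing argument assigned
// CCValAssign::SExtUpper in a 64-bit location is first sign-extended to i64
// and then shifted left by 64 - 32 bits, so the callee sees the value in the
// upper half of the register.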
3450 // Arguments that can be passed in a register must be kept in the
3451 // RegsToPass vector.
3452 if (VA.isRegLoc()) {
3453 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3454
3455 // If the parameter is passed through reg $D, which splits into
3456 // two physical registers, avoid creating call site info.
3457 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3458 continue;
3459
3460 // Collect CSInfo about which register passes which parameter.
3461 const TargetOptions &Options = DAG.getTarget().Options;
3462 if (Options.EmitCallSiteInfo)
3463 CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), i);
3464
3465 continue;
3466 }
3467
3468 // Register can't get to this point...
3469 assert(VA.isMemLoc());
3470
3471 // Emit an ISD::STORE which stores the
3472 // parameter value to a stack location.
3473 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3474 Chain, Arg, DL, IsTailCall, DAG));
3475 }
3476
3477 // Transform all store nodes into one single node because all store
3478 // nodes are independent of each other.
3479 if (!MemOpChains.empty())
3480 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3481
3482 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3483 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3484 // node so that legalize doesn't hack it.
3485
3486 EVT Ty = Callee.getValueType();
3487 bool GlobalOrExternal = false, IsCallReloc = false;
3488
3489 // The long-calls feature is ignored in case of PIC.
3490 // While we do not support -mshared / -mno-shared properly,
3491 // ignore long-calls in case of -mabicalls too.
3492 if (!Subtarget.isABICalls() && !IsPIC) {
3493 // If the function should be called using "long call",
3494 // get its address into a register to prevent
3495 // use of the `jal` instruction for the direct call.
3496 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3497 if (Subtarget.useLongCalls())
3498 Callee = Subtarget.hasSym32()
3499 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3500 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3501 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3502 bool UseLongCalls = Subtarget.useLongCalls();
3503 // If the function has long-call/far/near attribute
3504 // it overrides the command line switch passed to the backend.
3505 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3506 if (F->hasFnAttribute("long-call"))
3507 UseLongCalls = true;
3508 else if (F->hasFnAttribute("short-call"))
3509 UseLongCalls = false;
3510 }
3511 if (UseLongCalls)
3512 Callee = Subtarget.hasSym32()
3513 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3514 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3515 }
3516 }
3517
3518 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3519 if (Subtarget.isTargetCOFF() &&
3520 G->getGlobal()->hasDLLImportStorageClass()) {
3522 "Windows is the only supported COFF target");
3523 auto PtrInfo = MachinePointerInfo();
3524 Callee = DAG.getLoad(Ty, DL, Chain,
3525 getDllimportSymbol(G, SDLoc(G), Ty, DAG), PtrInfo);
3526 } else if (IsPIC) {
3527 const GlobalValue *Val = G->getGlobal();
3528 InternalLinkage = Val->hasInternalLinkage();
3529
3530 if (InternalLinkage)
3531 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3532 else if (Subtarget.useXGOT()) {
3533 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3534 MipsII::MO_CALL_LO16, Chain,
3535 FuncInfo->callPtrInfo(MF, Val));
3536 IsCallReloc = true;
3537 } else {
3538 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3539 FuncInfo->callPtrInfo(MF, Val));
3540 IsCallReloc = true;
3541 }
3542 } else
3543 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3544 getPointerTy(DAG.getDataLayout()), 0,
3545 MipsII::MO_NO_FLAG);
3546 GlobalOrExternal = true;
3547 }
3548 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3549 const char *Sym = S->getSymbol();
3550
3551 if (!IsPIC) // static
3552 Callee = DAG.getTargetExternalSymbol(
3553 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3554 else if (Subtarget.useXGOT()) {
3555 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3556 MipsII::MO_CALL_LO16, Chain,
3557 FuncInfo->callPtrInfo(MF, Sym));
3558 IsCallReloc = true;
3559 } else { // PIC
3560 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3561 FuncInfo->callPtrInfo(MF, Sym));
3562 IsCallReloc = true;
3563 }
3564
3565 GlobalOrExternal = true;
3566 }
3567
3568 SmallVector<SDValue, 8> Ops(1, Chain);
3569 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3570
3571 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3572 IsCallReloc, CLI, Callee, Chain);
3573
3574 if (IsTailCall) {
3575 MF.getFrameInfo().setHasTailCall();
3576 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3577 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3578 return Ret;
3579 }
3580
3581 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3582 SDValue InGlue = Chain.getValue(1);
3583
3584 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3585
3586 // Create the CALLSEQ_END node in the case where it is not a call to
3587 // memcpy.
3588 if (!(MemcpyInByVal)) {
3589 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3590 InGlue = Chain.getValue(1);
3591 }
3592
3593 // Handle result values, copying them out of physregs into vregs that we
3594 // return.
3595 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3596 InVals, CLI);
3597}
3598
3599/// LowerCallResult - Lower the result values of a call into the
3600/// appropriate copies out of appropriate physical registers.
3601SDValue MipsTargetLowering::LowerCallResult(
3602 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3603 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3604 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3605 TargetLowering::CallLoweringInfo &CLI) const {
3606 // Assign locations to each value returned by this call.
3607 SmallVector<CCValAssign, 16> RVLocs;
3608 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3609 *DAG.getContext());
3610
3611 const ExternalSymbolSDNode *ES =
3612 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3613 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3614 ES ? ES->getSymbol() : nullptr);
3615
3616 // Copy all of the result registers out of their specified physreg.
3617 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3618 CCValAssign &VA = RVLocs[i];
3619 assert(VA.isRegLoc() && "Can only return in registers!");
3620
3621 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3622 RVLocs[i].getLocVT(), InGlue);
3623 Chain = Val.getValue(1);
3624 InGlue = Val.getValue(2);
3625
3626 if (VA.isUpperBitsInLoc()) {
3627 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3628 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3629 unsigned Shift =
3630 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3631 Val = DAG.getNode(
3632 Shift, DL, VA.getLocVT(), Val,
3633 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3634 }
3635
3636 switch (VA.getLocInfo()) {
3637 default:
3638 llvm_unreachable("Unknown loc info!");
3639 case CCValAssign::Full:
3640 break;
3641 case CCValAssign::BCvt:
3642 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3643 break;
3644 case CCValAssign::AExt:
3645 case CCValAssign::AExtUpper:
3646 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3647 break;
3648 case CCValAssign::ZExt:
3649 case CCValAssign::ZExtUpper:
3650 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3651 DAG.getValueType(VA.getValVT()));
3652 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3653 break;
3654 case CCValAssign::SExt:
3655 case CCValAssign::SExtUpper:
3656 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3657 DAG.getValueType(VA.getValVT()));
3658 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3659 break;
3660 }
3661
3662 InVals.push_back(Val);
3663 }
3664
3665 return Chain;
3666}
3667
3668static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3669 EVT ArgVT, const SDLoc &DL,
3670 SelectionDAG &DAG) {
3671 MVT LocVT = VA.getLocVT();
3672 EVT ValVT = VA.getValVT();
3673
3674 // Shift into the upper bits if necessary.
3675 switch (VA.getLocInfo()) {
3676 default:
3677 break;
3678 case CCValAssign::AExtUpper:
3679 case CCValAssign::SExtUpper:
3680 case CCValAssign::ZExtUpper: {
3681 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3682 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3683 unsigned Opcode =
3684 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3685 Val = DAG.getNode(
3686 Opcode, DL, VA.getLocVT(), Val,
3687 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3688 break;
3689 }
3690 }
3691
3692 // If this is a value smaller than the argument slot size (32-bit for O32,
3693 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3694 // size. Extract the value and insert any appropriate assertions regarding
3695 // sign/zero extension.
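 // For example (illustrative): an i32 value arriving sign-extended in a
 // 64-bit slot becomes
 //   Val = AssertSext(Val, i32); Val = truncate Val to i32
 // so later DAG combines know the extension bits carried no information.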
3696 switch (VA.getLocInfo()) {
3697 default:
3698 llvm_unreachable("Unknown loc info!");
3699 case CCValAssign::Full:
3700 break;
3701 case CCValAssign::AExtUpper:
3702 case CCValAssign::AExt:
3703 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3704 break;
3705 case CCValAssign::SExtUpper:
3706 case CCValAssign::SExt:
3707 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3708 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3709 break;
3710 case CCValAssign::ZExtUpper:
3711 case CCValAssign::ZExt:
3712 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3713 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3714 break;
3715 case CCValAssign::BCvt:
3716 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3717 break;
3718 }
3719
3720 return Val;
3721}
3722
3723//===----------------------------------------------------------------------===//
3724// Formal Arguments Calling Convention Implementation
3725//===----------------------------------------------------------------------===//
3726/// LowerFormalArguments - transform physical registers into virtual registers
3727/// and generate load operations for arguments placed on the stack.
3728SDValue MipsTargetLowering::LowerFormalArguments(
3729 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3730 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3731 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3732 MachineFunction &MF = DAG.getMachineFunction();
3733 MachineFrameInfo &MFI = MF.getFrameInfo();
3734 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3735
3736 MipsFI->setVarArgsFrameIndex(0);
3737
3738 // Used with varargs to accumulate store chains.
3739 std::vector<SDValue> OutChains;
3740
3741 // Assign locations to all of the incoming arguments.
3742 SmallVector<CCValAssign, 16> ArgLocs;
3743 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3744 *DAG.getContext());
3745 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3746 const Function &Func = DAG.getMachineFunction().getFunction();
3747 Function::const_arg_iterator FuncArg = Func.arg_begin();
3748
3749 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3751 "Functions with the interrupt attribute cannot have arguments!");
3752
3753 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3754 MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
3755 CCInfo.getInRegsParamsCount() > 0);
3756
3757 unsigned CurArgIdx = 0;
3758 CCInfo.rewindByValRegsInfo();
3759
3760 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3761 CCValAssign &VA = ArgLocs[i];
3762 if (Ins[InsIdx].isOrigArg()) {
3763 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3764 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3765 }
3766 EVT ValVT = VA.getValVT();
3767 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3768 bool IsRegLoc = VA.isRegLoc();
3769
3770 if (Flags.isByVal()) {
3771 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3772 unsigned FirstByValReg, LastByValReg;
3773 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3774 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3775
3776 assert(Flags.getByValSize() &&
3777 "ByVal args of size 0 should have been ignored by front-end.");
3778 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3779 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3780 FirstByValReg, LastByValReg, VA, CCInfo);
3781 CCInfo.nextInRegsParam();
3782 continue;
3783 }
3784
3785 // Arguments stored on registers
3786 if (IsRegLoc) {
3787 MVT RegVT = VA.getLocVT();
3788 Register ArgReg = VA.getLocReg();
3789 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3790
3791 // Transform the arguments stored on
3792 // physical registers into virtual ones
3793 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3794 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3795
3796 ArgValue =
3797 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3798
3799 // Handle floating point arguments passed in integer registers and
3800 // long double arguments passed in floating point registers.
3801 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3802 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3803 (RegVT == MVT::f64 && ValVT == MVT::i64))
3804 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3805 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3806 ValVT == MVT::f64) {
3807 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3808 CCValAssign &NextVA = ArgLocs[++i];
3809 unsigned Reg2 =
3810 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3811 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3812 if (!Subtarget.isLittle())
3813 std::swap(ArgValue, ArgValue2);
3814 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3815 ArgValue, ArgValue2);
3816 }
3817
3818 InVals.push_back(ArgValue);
3819 } else { // !VA.isRegLoc()
3820 MVT LocVT = VA.getLocVT();
3821
3822 assert(!VA.needsCustom() && "unexpected custom memory argument");
3823
3824 // Only arguments passed on the stack should make it here.
3825 assert(VA.isMemLoc());
3826
3827 // The stack pointer offset is relative to the caller stack frame.
3828 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3829 VA.getLocMemOffset(), true);
3830
3831 // Create load nodes to retrieve arguments from the stack
3832 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3833 SDValue ArgValue = DAG.getLoad(
3834 LocVT, DL, Chain, FIN,
3835 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3836 OutChains.push_back(ArgValue.getValue(1));
3837
3838 ArgValue =
3839 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3840
3841 InVals.push_back(ArgValue);
3842 }
3843 }
3844
3845 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3846
3847 if (ArgLocs[i].needsCustom()) {
3848 ++i;
3849 continue;
3850 }
3851
3852 // The MIPS ABIs for returning structs by value require that we copy
3853 // the sret argument into $v0 for the return. Save the argument into
3854 // a virtual register so that we can access it from the return points.
3855 if (Ins[InsIdx].Flags.isSRet()) {
3856 unsigned Reg = MipsFI->getSRetReturnReg();
3857 if (!Reg) {
3858 Reg = MF.getRegInfo().createVirtualRegister(
3859 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3860 MipsFI->setSRetReturnReg(Reg);
3861 }
3862 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3863 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3864 break;
3865 }
3866 }
3867
3868 if (IsVarArg)
3869 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3870
3871 // All stores are grouped in one node to allow the matching between
3872 // the size of Ins and InVals. This only happens for vararg functions.
3873 if (!OutChains.empty()) {
3874 OutChains.push_back(Chain);
3875 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3876 }
3877
3878 return Chain;
3879}
3880
3881//===----------------------------------------------------------------------===//
3882// Return Value Calling Convention Implementation
3883//===----------------------------------------------------------------------===//
3884
3885bool
3886MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3887 MachineFunction &MF, bool IsVarArg,
3888 const SmallVectorImpl<ISD::OutputArg> &Outs,
3889 LLVMContext &Context, const Type *RetTy) const {
3890 SmallVector<CCValAssign, 16> RVLocs;
3891 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3892 return CCInfo.CheckCallReturn(Outs, RetCC_Mips, RetTy);
3893}
3894
3895bool MipsTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
3896 bool IsSigned) const {
3897 if ((ABI.IsN32() || ABI.IsN64()) && Ty->isIntegerTy(32))
3898 return true;
3899
3900 return IsSigned;
3901}
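// For example (illustrative): under N32/N64 a 32-bit integer libcall operand
// is sign-extended to 64 bits even when it is nominally unsigned, matching
// the ABI convention that i32 values live sign-extended in 64-bit registers.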
3902
3903SDValue
3904MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3905 const SDLoc &DL,
3906 SelectionDAG &DAG) const {
3907 MachineFunction &MF = DAG.getMachineFunction();
3908 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3909
3910 MipsFI->setISR();
3911
3912 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3913}
3914
3915SDValue
3916MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3917 bool IsVarArg,
3918 const SmallVectorImpl<ISD::OutputArg> &Outs,
3919 const SmallVectorImpl<SDValue> &OutVals,
3920 const SDLoc &DL, SelectionDAG &DAG) const {
3921 // CCValAssign - represent the assignment of
3922 // the return value to a location
3923 SmallVector<CCValAssign, 16> RVLocs;
3924 MachineFunction &MF = DAG.getMachineFunction();
3925
3926 // CCState - Info about the registers and stack slot.
3927 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3928
3929 // Analyze return values.
3930 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3931
3932 SDValue Glue;
3933 SmallVector<SDValue, 4> RetOps(1, Chain);
3934
3935 // Copy the result values into the output registers.
3936 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3937 SDValue Val = OutVals[i];
3938 CCValAssign &VA = RVLocs[i];
3939 assert(VA.isRegLoc() && "Can only return in registers!");
3940 bool UseUpperBits = false;
3941
3942 switch (VA.getLocInfo()) {
3943 default:
3944 llvm_unreachable("Unknown loc info!");
3945 case CCValAssign::Full:
3946 break;
3947 case CCValAssign::BCvt:
3948 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3949 break;
3950 case CCValAssign::AExtUpper:
3951 UseUpperBits = true;
3952 [[fallthrough]];
3953 case CCValAssign::AExt:
3954 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3955 break;
3956 case CCValAssign::ZExtUpper:
3957 UseUpperBits = true;
3958 [[fallthrough]];
3959 case CCValAssign::ZExt:
3960 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3961 break;
3962 case CCValAssign::SExtUpper:
3963 UseUpperBits = true;
3964 [[fallthrough]];
3965 case CCValAssign::SExt:
3966 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3967 break;
3968 }
3969
3970 if (UseUpperBits) {
3971 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3972 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3973 Val = DAG.getNode(
3974 ISD::SHL, DL, VA.getLocVT(), Val,
3975 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3976 }
3977
3978 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3979
3980 // Guarantee that all emitted copies are stuck together with flags.
3981 Glue = Chain.getValue(1);
3982 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3983 }
3984
3985 // The MIPS ABIs for returning structs by value require that we copy
3986 // the sret argument into $v0 for the return. We saved the argument into
3987 // a virtual register in the entry block, so now we copy the value out
3988 // and into $v0.
3989 if (MF.getFunction().hasStructRetAttr()) {
3990 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3991 unsigned Reg = MipsFI->getSRetReturnReg();
3992
3993 if (!Reg)
3994 llvm_unreachable("sret virtual register not created in the entry block");
3995 SDValue Val =
3996 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3997 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3998
3999 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
4000 Glue = Chain.getValue(1);
4001 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
4002 }
4003
4004 RetOps[0] = Chain; // Update chain.
4005
4006 // Add the glue if we have it.
4007 if (Glue.getNode())
4008 RetOps.push_back(Glue);
4009
4010 // ISRs must use "eret".
4011 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
4012 return LowerInterruptReturn(RetOps, DL, DAG);
4013
4014 // Standard return on Mips is a "jr $ra"
4015 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
4016}
4017
4018//===----------------------------------------------------------------------===//
4019// Mips Inline Assembly Support
4020//===----------------------------------------------------------------------===//
4021
4022/// getConstraintType - Given a constraint letter, return the type of
4023/// constraint it is for this target.
4024MipsTargetLowering::ConstraintType
4025MipsTargetLowering::getConstraintType(StringRef Constraint) const {
4026 // Mips specific constraints
4027 // GCC config/mips/constraints.md
4028 //
4029 // 'd' : An address register. Equivalent to r
4030 // unless generating MIPS16 code.
4031 // 'y' : Equivalent to r; retained for
4032 // backwards compatibility.
4033 // 'c' : A register suitable for use in an indirect
4034 // jump. This will always be $25 for -mabicalls.
4035 // 'l' : The lo register. 1 word storage.
4036 // 'x' : The hilo register pair. Double word storage.
4037 if (Constraint.size() == 1) {
4038 switch (Constraint[0]) {
4039 default : break;
4040 case 'd':
4041 case 'y':
4042 case 'f':
4043 case 'c':
4044 case 'l':
4045 case 'x':
4046 return C_RegisterClass;
4047 case 'R':
4048 return C_Memory;
4049 }
4050 }
4051
4052 if (Constraint == "ZC")
4053 return C_Memory;
4054
4055 return TargetLowering::getConstraintType(Constraint);
4056}
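// For example (hypothetical user code, for illustration; v and p are assumed
// local names): the constraints above appear in inline asm such as
//   int v;
//   asm volatile("lw %0, %1" : "=d"(v) : "R"(*p));
// where 'd' requests a GPR and 'R' a memory operand addressable with a
// 16-bit offset.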
4057
4058/// Examine constraint type and operand type and determine a weight value.
4059/// This object must already have been set up with the operand type
4060/// and the current alternative constraint selected.
4061TargetLowering::ConstraintWeight
4062MipsTargetLowering::getSingleConstraintMatchWeight(
4063 AsmOperandInfo &info, const char *constraint) const {
4064 ConstraintWeight weight = CW_Invalid;
4065 Value *CallOperandVal = info.CallOperandVal;
4066 // If we don't have a value, we can't do a match,
4067 // but allow it at the lowest weight.
4068 if (!CallOperandVal)
4069 return CW_Default;
4070 Type *type = CallOperandVal->getType();
4071 // Look at the constraint type.
4072 switch (*constraint) {
4073 default:
4074 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
4075 break;
4076 case 'd':
4077 case 'y':
4078 if (type->isIntegerTy())
4079 weight = CW_Register;
4080 break;
4081 case 'f': // FPU or MSA register
4082 if (Subtarget.hasMSA() && type->isVectorTy() &&
4083 type->getPrimitiveSizeInBits().getFixedValue() == 128)
4084 weight = CW_Register;
4085 else if (type->isFloatTy())
4086 weight = CW_Register;
4087 break;
4088 case 'c': // $25 for indirect jumps
4089 case 'l': // lo register
4090 case 'x': // hilo register pair
4091 if (type->isIntegerTy())
4092 weight = CW_SpecificReg;
4093 break;
4094 case 'I': // signed 16 bit immediate
4095 case 'J': // integer zero
4096 case 'K': // unsigned 16 bit immediate
4097 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4098 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4099 case 'O': // signed 15 bit immediate (+- 16383)
4100 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4101 if (isa<ConstantInt>(CallOperandVal))
4102 weight = CW_Constant;
4103 break;
4104 case 'R':
4105 weight = CW_Memory;
4106 break;
4107 }
4108 return weight;
4109}
4110
4111/// This is a helper function to parse a physical register string and split it
4112/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4113/// that is returned indicates whether parsing was successful. The second flag
4114/// is true if the numeric part exists.
4115static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4116 unsigned long long &Reg) {
4117 if (C.front() != '{' || C.back() != '}')
4118 return std::make_pair(false, false);
4119
4120 // Search for the first numeric character.
4121 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4122 I = std::find_if(B, E, isdigit);
4123
4124 Prefix = StringRef(B, I - B);
4125
4126 // The second flag is set to false if no numeric characters were found.
4127 if (I == E)
4128 return std::make_pair(true, false);
4129
4130 // Parse the numeric characters.
4131 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4132 true);
4133}
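// For example (illustrative): parsePhysicalReg("{$f20}", Prefix, Reg) yields
// {true, true} with Prefix == "$f" and Reg == 20, while parsePhysicalReg("{lo}",
// Prefix, Reg) yields {true, false} because no digits follow the prefix.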
4134
4135EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
4136 ISD::NodeType) const {
4137 bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
4138 EVT MinVT = getRegisterType(Cond ? MVT::i64 : MVT::i32);
4139 return VT.bitsLT(MinVT) ? MinVT : VT;
4140}
4141
4142std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4143parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4144 const TargetRegisterInfo *TRI =
4145 Subtarget.getRegisterInfo();
4146 const TargetRegisterClass *RC;
4147 StringRef Prefix;
4148 unsigned long long Reg;
4149
4150 std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
4151
4152 if (!R.first)
4153 return std::make_pair(0U, nullptr);
4154
4155 if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
4156 // No numeric characters follow "hi" or "lo".
4157 if (R.second)
4158 return std::make_pair(0U, nullptr);
4159
4160 RC = TRI->getRegClass(Prefix == "hi" ?
4161 Mips::HI32RegClassID : Mips::LO32RegClassID);
4162 return std::make_pair(*(RC->begin()), RC);
4163 } else if (Prefix.starts_with("$msa")) {
4164 // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
4165
4166 // No numeric characters follow the name.
4167 if (R.second)
4168 return std::make_pair(0U, nullptr);
4169
4171 .Case("$msair", Mips::MSAIR)
4172 .Case("$msacsr", Mips::MSACSR)
4173 .Case("$msaaccess", Mips::MSAAccess)
4174 .Case("$msasave", Mips::MSASave)
4175 .Case("$msamodify", Mips::MSAModify)
4176 .Case("$msarequest", Mips::MSARequest)
4177 .Case("$msamap", Mips::MSAMap)
4178 .Case("$msaunmap", Mips::MSAUnmap)
4179 .Default(0);
4180
4181 if (!Reg)
4182 return std::make_pair(0U, nullptr);
4183
4184 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
4185 return std::make_pair(Reg, RC);
4186 }
4187
4188 if (!R.second)
4189 return std::make_pair(0U, nullptr);
4190
4191 if (Prefix == "$f") { // Parse $f0-$f31.
4192 // If the size of FP registers is 64-bit or Reg is an even number, select
4193 // the 64-bit register class. Otherwise, select the 32-bit register class.
4194 if (VT == MVT::Other)
4195 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4196
4197 RC = getRegClassFor(VT);
4198
4199 if (RC == &Mips::AFGR64RegClass) {
4200 assert(Reg % 2 == 0);
4201 Reg >>= 1;
4202 }
4203 } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
4204 RC = TRI->getRegClass(Mips::FCCRegClassID);
4205 else if (Prefix == "$w") { // Parse $w0-$w31.
4206 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
4207 } else { // Parse $0-$31.
4208 assert(Prefix == "$");
4209 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
4210 }
4211
4212 assert(Reg < RC->getNumRegs());
4213 return std::make_pair(*(RC->begin() + Reg), RC);
4214}
4215
4216/// Given a register class constraint, like 'r', if this corresponds directly
4217/// to an LLVM register class, return a register of 0 and the register class
4218/// pointer.
4219std::pair<unsigned, const TargetRegisterClass *>
4220MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4221 StringRef Constraint,
4222 MVT VT) const {
4223 if (Constraint.size() == 1) {
4224 switch (Constraint[0]) {
4225 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
4226 case 'y': // Same as 'r'. Exists for compatibility.
4227 case 'r':
4228 if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
4229 VT == MVT::i1) ||
4230 (VT == MVT::f32 && Subtarget.useSoftFloat())) {
4231 if (Subtarget.inMips16Mode())
4232 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
4233 return std::make_pair(0U, &Mips::GPR32RegClass);
4234 }
4235 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
4236 !Subtarget.isGP64bit())
4237 return std::make_pair(0U, &Mips::GPR32RegClass);
4238 if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
4239 Subtarget.isGP64bit())
4240 return std::make_pair(0U, &Mips::GPR64RegClass);
4241 // This will generate an error message
4242 return std::make_pair(0U, nullptr);
4243 case 'f': // FPU or MSA register
4244 if (VT == MVT::v16i8)
4245 return std::make_pair(0U, &Mips::MSA128BRegClass);
4246 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4247 return std::make_pair(0U, &Mips::MSA128HRegClass);
4248 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4249 return std::make_pair(0U, &Mips::MSA128WRegClass);
4250 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4251 return std::make_pair(0U, &Mips::MSA128DRegClass);
4252 else if (VT == MVT::f32)
4253 return std::make_pair(0U, &Mips::FGR32RegClass);
4254 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4255 if (Subtarget.isFP64bit())
4256 return std::make_pair(0U, &Mips::FGR64RegClass);
4257 return std::make_pair(0U, &Mips::AFGR64RegClass);
4258 }
4259 break;
4260 case 'c': // register suitable for indirect jump
4261 if (VT == MVT::i32)
4262 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
4263 if (VT == MVT::i64)
4264 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
4265 // This will generate an error message
4266 return std::make_pair(0U, nullptr);
4267 case 'l': // use the `lo` register to store values
4268 // that are no bigger than a word
4269 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
4270 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
4271 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
4272 case 'x': // use the concatenated `hi` and `lo` registers
4273 // to store doubleword values
4274 // Fixme: Not triggering the use of both hi and low
4275 // This will generate an error message
4276 return std::make_pair(0U, nullptr);
4277 }
4278 }
4279
4280 if (!Constraint.empty()) {
4281 std::pair<unsigned, const TargetRegisterClass *> R;
4282 R = parseRegForInlineAsmConstraint(Constraint, VT);
4283
4284 if (R.second)
4285 return R;
4286 }
4287
4288 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
4289}
4290
4291/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4292/// vector. If it is invalid, don't add anything to Ops.
4293void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4294 StringRef Constraint,
4295 std::vector<SDValue> &Ops,
4296 SelectionDAG &DAG) const {
4297 SDLoc DL(Op);
4298 SDValue Result;
4299
4300 // Only support length 1 constraints for now.
4301 if (Constraint.size() > 1)
4302 return;
4303
4304 char ConstraintLetter = Constraint[0];
4305 switch (ConstraintLetter) {
4306 default: break; // This will fall through to the generic implementation
4307 case 'I': // Signed 16 bit constant
4308 // If this fails, the parent routine will give an error
4309 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4310 EVT Type = Op.getValueType();
4311 int64_t Val = C->getSExtValue();
4312 if (isInt<16>(Val)) {
4313 Result = DAG.getSignedTargetConstant(Val, DL, Type);
4314 break;
4315 }
4316 }
4317 return;
4318 case 'J': // integer zero
4319 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4320 EVT Type = Op.getValueType();
4321 int64_t Val = C->getZExtValue();
4322 if (Val == 0) {
4323 Result = DAG.getTargetConstant(0, DL, Type);
4324 break;
4325 }
4326 }
4327 return;
4328 case 'K': // unsigned 16 bit immediate
4329 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4330 EVT Type = Op.getValueType();
4331 uint64_t Val = (uint64_t)C->getZExtValue();
4332 if (isUInt<16>(Val)) {
4333 Result = DAG.getTargetConstant(Val, DL, Type);
4334 break;
4335 }
4336 }
4337 return;
4338 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4339 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4340 EVT Type = Op.getValueType();
4341 int64_t Val = C->getSExtValue();
4342 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
4343 Result = DAG.getSignedTargetConstant(Val, DL, Type);
4344 break;
4345 }
4346 }
4347 return;
4348 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4349 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4350 EVT Type = Op.getValueType();
4351 int64_t Val = C->getSExtValue();
4352 if ((Val >= -65535) && (Val <= -1)) {
4353 Result = DAG.getSignedTargetConstant(Val, DL, Type);
4354 break;
4355 }
4356 }
4357 return;
4358 case 'O': // signed 15 bit immediate
4359 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4360 EVT Type = Op.getValueType();
4361 int64_t Val = C->getSExtValue();
4362 if ((isInt<15>(Val))) {
4363 Result = DAG.getSignedTargetConstant(Val, DL, Type);
4364 break;
4365 }
4366 }
4367 return;
4368 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4369 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4370 EVT Type = Op.getValueType();
4371 int64_t Val = C->getSExtValue();
4372 if ((Val <= 65535) && (Val >= 1)) {
4373 Result = DAG.getTargetConstant(Val, DL, Type);
4374 break;
4375 }
4376 }
4377 return;
4378 }
4379
4380 if (Result.getNode()) {
4381 Ops.push_back(Result);
4382 return;
4383 }
4384
4385 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4386}
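// For example (hypothetical, for illustration; res and a are assumed local
// names): given
//   asm("addiu %0, %1, %2" : "=r"(res) : "r"(a), "I"(1000));
// the 'I' case above materializes 1000 as a target constant because it fits
// a signed 16-bit immediate; a value such as 100000 falls through and is
// rejected by the generic handler.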
4387
4388bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4389 const AddrMode &AM, Type *Ty,
4390 unsigned AS,
4391 Instruction *I) const {
4392 // No global is ever allowed as a base.
4393 if (AM.BaseGV)
4394 return false;
4395
4396 switch (AM.Scale) {
4397 case 0: // "r+i" or just "i", depending on HasBaseReg.
4398 break;
4399 case 1:
4400 if (!AM.HasBaseReg) // allow "r+i".
4401 break;
4402 return false; // disallow "r+r" or "r+r+i".
4403 default:
4404 return false;
4405 }
4406
4407 return true;
4408}
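// For example (illustrative): MIPS loads and stores support only the
// "base register + 16-bit immediate" form, e.g. "lw $2, 16($4)", so "r+i" is
// accepted above while scaled-index and "r+r" modes are rejected.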
4409
4410bool
4411MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4412 // The Mips target isn't yet aware of offsets.
4413 return false;
4414}
4415
4416EVT MipsTargetLowering::getOptimalMemOpType(
4417 const MemOp &Op, const AttributeList &FuncAttributes) const {
4418 if (Subtarget.hasMips64())
4419 return MVT::i64;
4420
4421 return MVT::i32;
4422}
4423
4424bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4425 bool ForCodeSize) const {
4426 if (VT != MVT::f32 && VT != MVT::f64)
4427 return false;
4428 if (Imm.isNegZero())
4429 return false;
4430 return Imm.isZero();
4431}
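// For example (illustrative): +0.0 is accepted because it can be materialized
// by moving $zero into an FPU register (e.g. mtc1 $zero, $f0); -0.0 would
// need its sign bit set and is therefore rejected.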
4432
4433unsigned MipsTargetLowering::getJumpTableEncoding() const {
4434
4435 // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
4436 if (ABI.IsN64() && isPositionIndependent())
4437 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
4438
4439 return TargetLowering::getJumpTableEncoding();
4440}
4441
4442bool MipsTargetLowering::useSoftFloat() const {
4443 return Subtarget.useSoftFloat();
4444}
4445
4446void MipsTargetLowering::copyByValRegs(
4447 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
4448 SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
4449 SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
4450 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
4451 MipsCCState &State) const {
4452 MachineFunction &MF = DAG.getMachineFunction();
4453 MachineFrameInfo &MFI = MF.getFrameInfo();
4454 unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
4455 unsigned NumRegs = LastReg - FirstReg;
4456 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
4457 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
4458 int FrameObjOffset;
4459 ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
4460
4461 if (RegAreaSize)
4462 FrameObjOffset =
4463 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4464 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
4465 else
4466 FrameObjOffset = VA.getLocMemOffset();
4467
4468 // Create frame object.
4469 EVT PtrTy = getPointerTy(DAG.getDataLayout());
4470 // Make the fixed object stored to mutable so that the load instructions
4471 // referencing it have their memory dependencies added.
4472 // Set the frame object as isAliased which clears the underlying objects
4473 // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
4474 // stores as dependencies for loads referencing this fixed object.
4475 int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
4476 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4477 InVals.push_back(FIN);
4478
4479 if (!NumRegs)
4480 return;
4481
4482 // Copy arg registers.
4483 MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
4484 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4485
4486 for (unsigned I = 0; I < NumRegs; ++I) {
4487 unsigned ArgReg = ByValArgRegs[FirstReg + I];
4488 unsigned VReg = addLiveIn(MF, ArgReg, RC);
4489 unsigned Offset = I * GPRSizeInBytes;
4490 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
4491 DAG.getConstant(Offset, DL, PtrTy));
4492 SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
4493 StorePtr, MachinePointerInfo(FuncArg, Offset));
4494 OutChains.push_back(Store);
4495 }
4496}
4497
4498// Copy byVal arg to registers and stack.
4499void MipsTargetLowering::passByValArg(
4500 SDValue Chain, const SDLoc &DL,
4501 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
4502 SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
4503 MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
4504 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
4505 const CCValAssign &VA) const {
4506 unsigned ByValSizeInBytes = Flags.getByValSize();
4507 unsigned OffsetInBytes = 0; // From beginning of struct
4508 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4509 Align Alignment =
4510 std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
4511 EVT PtrTy = getPointerTy(DAG.getDataLayout()),
4512 RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
4513 unsigned NumRegs = LastReg - FirstReg;
4514
4515 if (NumRegs) {
4516 ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
4517 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
4518 unsigned I = 0;
4519
4520 // Copy words to registers.
4521 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
4522 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4523 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4524 SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
4525 MachinePointerInfo(), Alignment);
4526 MemOpChains.push_back(LoadVal.getValue(1));
4527 unsigned ArgReg = ArgRegs[FirstReg + I];
4528 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
4529 }
4530
4531 // Return if the struct has been fully copied.
4532 if (ByValSizeInBytes == OffsetInBytes)
4533 return;
4534
4535 // Copy the remainder of the byval argument with sub-word loads and shifts.
4536 if (LeftoverBytes) {
4537 SDValue Val;
4538
4539 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
4540 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
4541 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
4542
4543 if (RemainingSizeInBytes < LoadSizeInBytes)
4544 continue;
4545
4546 // Load subword.
4547 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4548 DAG.getConstant(OffsetInBytes, DL,
4549 PtrTy));
4550 SDValue LoadVal = DAG.getExtLoad(
4551 ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
4552 MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
4553 MemOpChains.push_back(LoadVal.getValue(1));
4554
4555 // Shift the loaded value.
4556 unsigned Shamt;
4557
4558 if (isLittle)
4559 Shamt = TotalBytesLoaded * 8;
4560 else
4561 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
4562
4563 SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
4564 DAG.getConstant(Shamt, DL, MVT::i32));
4565
4566 if (Val.getNode())
4567 Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
4568 else
4569 Val = Shift;
4570
4571 OffsetInBytes += LoadSizeInBytes;
4572 TotalBytesLoaded += LoadSizeInBytes;
4573 Alignment = std::min(Alignment, Align(LoadSizeInBytes));
4574 }
4575
4576 unsigned ArgReg = ArgRegs[FirstReg + I];
4577 RegsToPass.push_back(std::make_pair(ArgReg, Val));
4578 return;
4579 }
4580 }
4581
4582 // Copy remainder of byval arg to it with memcpy.
4583 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
4584 SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4585 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4586 SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
4587 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
4588 Chain = DAG.getMemcpy(
4589 Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
4590 Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
4591 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
4592 MemOpChains.push_back(Chain);
4593}
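// For example (illustrative): a 7-byte byval argument on O32 (4-byte GPRs)
// copies one full word into the first register, then builds the remaining 3
// bytes in the next register from a zero-extending halfword load and a byte
// load, each shifted into place and OR'd together.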
4594
4595void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
4596 SDValue Chain, const SDLoc &DL,
4597 SelectionDAG &DAG,
4598 CCState &State) const {
4599 ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
4600 unsigned Idx = State.getFirstUnallocated(ArgRegs);
4601 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4602 MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
4603 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4604 MachineFunction &MF = DAG.getMachineFunction();
4605 MachineFrameInfo &MFI = MF.getFrameInfo();
4606 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
4607
4608 // Offset of the first variable argument from stack pointer.
4609 int VaArgOffset;
4610
4611 if (ArgRegs.size() == Idx)
4612 VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
4613 else {
4614 VaArgOffset =
4615 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4616 (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
4617 }
4618
4619 // Record the frame index of the first variable argument
4620 // which is a value necessary to VASTART.
4621 int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
4622 MipsFI->setVarArgsFrameIndex(FI);
4623
4624 // Copy the integer registers that have not been used for argument passing
4625 // to the argument register save area. For O32, the save area is allocated
4626 // in the caller's stack frame, while for N32/64, it is allocated in the
4627 // callee's stack frame.
4628 for (unsigned I = Idx; I < ArgRegs.size();
4629 ++I, VaArgOffset += RegSizeInBytes) {
4630 unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
4631 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
4632 FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
4633 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4634 SDValue Store =
4635 DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
4636 cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
4637 (Value *)nullptr);
4638 OutChains.push_back(Store);
4639 }
4640}
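// For example (illustrative): in a variadic O32 function whose fixed
// arguments consume only $a0, this loop stores $a1-$a3 into the argument
// save area at offsets 4, 8 and 12, so va_arg can walk every argument in
// memory.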
4641
4642void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
4643 Align Alignment) const {
4644 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
4645
4646 assert(Size && "Byval argument's size shouldn't be 0.");
4647
4648 Alignment = std::min(Alignment, TFL->getStackAlign());
4649
4650 unsigned FirstReg = 0;
4651 unsigned NumRegs = 0;
4652
4653 if (State->getCallingConv() != CallingConv::Fast) {
4654 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4655 ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
4656 // FIXME: The O32 case actually describes no shadow registers.
4657 const MCPhysReg *ShadowRegs =
4658 ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;
4659
4660 // We used to check the size as well but we can't do that anymore since
4661 // CCState::HandleByVal() rounds up the size after calling this function.
4662 assert(
4663 Alignment >= Align(RegSizeInBytes) &&
4664 "Byval argument's alignment should be a multiple of RegSizeInBytes.");
4665
4666 FirstReg = State->getFirstUnallocated(IntArgRegs);
4667
4668 // If Alignment > RegSizeInBytes, the first arg register must be even.
4669 // FIXME: This condition happens to do the right thing but it's not the
4670 // right way to test it. We want to check that the stack frame offset
4671 // of the register is aligned.
4672 if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
4673 State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
4674 ++FirstReg;
4675 }
4676
4677 // Mark the registers allocated.
4678 Size = alignTo(Size, RegSizeInBytes);
4679 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
4680 Size -= RegSizeInBytes, ++I, ++NumRegs)
4681 State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
4682 }
4683
4684 State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
4685}
4686
4687MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
4688 MachineBasicBlock *BB,
4689 bool isFPCmp,
4690 unsigned Opc) const {
4691 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
4692 "Subtarget already supports SELECT nodes with the use of "
4693 "conditional-move instructions.");
4694
4695 const TargetInstrInfo *TII =
4696 Subtarget.getInstrInfo();
4697 DebugLoc DL = MI.getDebugLoc();
4698
4699 // To "insert" a SELECT instruction, we actually have to insert the
4700 // diamond control-flow pattern. The incoming instruction knows the
4701 // destination vreg to set, the condition code register to branch on, the
4702 // true/false values to select between, and a branch opcode to use.
4703 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4704 MachineFunction::iterator It = ++BB->getIterator();
4705
4706 // thisMBB:
4707 // ...
4708 // TrueVal = ...
4709 // setcc r1, r2, r3
4710 // bNE r1, r0, copy1MBB
4711 // fallthrough --> copy0MBB
4712 MachineBasicBlock *thisMBB = BB;
4713 MachineFunction *F = BB->getParent();
4714 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4715 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4716 F->insert(It, copy0MBB);
4717 F->insert(It, sinkMBB);
4718
4719 // Transfer the remainder of BB and its successor edges to sinkMBB.
4720 sinkMBB->splice(sinkMBB->begin(), BB,
4721 std::next(MachineBasicBlock::iterator(MI)), BB->end());
4722 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
4723
4724 // Next, add the true and fallthrough blocks as its successors.
4725 BB->addSuccessor(copy0MBB);
4726 BB->addSuccessor(sinkMBB);
4727
4728 if (isFPCmp) {
4729 // bc1[tf] cc, sinkMBB
4730 BuildMI(BB, DL, TII->get(Opc))
4731 .addReg(MI.getOperand(1).getReg())
4732 .addMBB(sinkMBB);
4733 } else {
4734 // bne rs, $0, sinkMBB
4735 BuildMI(BB, DL, TII->get(Opc))
4736 .addReg(MI.getOperand(1).getReg())
4737 .addReg(Mips::ZERO)
4738 .addMBB(sinkMBB);
4739 }
4740
4741 // copy0MBB:
4742 // %FalseValue = ...
4743 // # fallthrough to sinkMBB
4744 BB = copy0MBB;
4745
4746 // Update machine-CFG edges
4747 BB->addSuccessor(sinkMBB);
4748
4749 // sinkMBB:
4750 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4751 // ...
4752 BB = sinkMBB;
4753
4754 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4755 .addReg(MI.getOperand(2).getReg())
4756 .addMBB(thisMBB)
4757 .addReg(MI.getOperand(3).getReg())
4758 .addMBB(copy0MBB);
4759
4760 MI.eraseFromParent(); // The pseudo instruction is gone now.
4761
4762 return BB;
4763}
4764
4765MachineBasicBlock *
4766MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
4767 MachineBasicBlock *BB) const {
4768 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
4769 "Subtarget already supports SELECT nodes with the use of "
4770 "conditional-move instructions.");
4771
4772 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4773 DebugLoc DL = MI.getDebugLoc();
4774
4775 // D_SELECT substitutes two SELECT nodes that go one after another and
4776 // have the same condition operand. On machines which don't have
4777 // conditional-move instructions, it avoids the unnecessary branches that
4778 // would result from lowering the two SELECT pseudo instructions as two
4779 // separate diamond control-flow patterns.
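 //
 // For example (illustrative): a single i64 select on MIPS32 without
 // conditional moves lowers to one branch and two PHIs:
 //   bne $cond, $zero, sink
 //   ; copy0: false Lo/Hi values fall through
 //   ; sink:
 //   Lo = PHI [LoTrue, this], [LoFalse, copy0]
 //   Hi = PHI [HiTrue, this], [HiFalse, copy0]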
4780 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4781 MachineFunction::iterator It = ++BB->getIterator();
4782
4783 // thisMBB:
4784 // ...
4785 // TrueVal = ...
4786 // setcc r1, r2, r3
4787 // bNE r1, r0, copy1MBB
4788 // fallthrough --> copy0MBB
4789 MachineBasicBlock *thisMBB = BB;
4790 MachineFunction *F = BB->getParent();
4791 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4792 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4793 F->insert(It, copy0MBB);
4794 F->insert(It, sinkMBB);
4795
4796 // Transfer the remainder of BB and its successor edges to sinkMBB.
4797 sinkMBB->splice(sinkMBB->begin(), BB,
4798 std::next(MachineBasicBlock::iterator(MI)), BB->end());
4799 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
4800
4801 // Next, add the true and fallthrough blocks as its successors.
4802 BB->addSuccessor(copy0MBB);
4803 BB->addSuccessor(sinkMBB);
4804
4805 // bne rs, $0, sinkMBB
4806 BuildMI(BB, DL, TII->get(Mips::BNE))
4807 .addReg(MI.getOperand(2).getReg())
4808 .addReg(Mips::ZERO)
4809 .addMBB(sinkMBB);
4810
4811 // copy0MBB:
4812 // %FalseValue = ...
4813 // # fallthrough to sinkMBB
4814 BB = copy0MBB;
4815
4816 // Update machine-CFG edges
4817 BB->addSuccessor(sinkMBB);
4818
4819 // sinkMBB:
4820 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4821 // ...
4822 BB = sinkMBB;
4823
4824 // Use two PHI nodes to select the two results.
4825 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4826 .addReg(MI.getOperand(3).getReg())
4827 .addMBB(thisMBB)
4828 .addReg(MI.getOperand(5).getReg())
4829 .addMBB(copy0MBB);
4830 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
4831 .addReg(MI.getOperand(4).getReg())
4832 .addMBB(thisMBB)
4833 .addReg(MI.getOperand(6).getReg())
4834 .addMBB(copy0MBB);
4835
4836 MI.eraseFromParent(); // The pseudo instruction is gone now.
4837
4838 return BB;
4839}
4840
4841// FIXME? Maybe this could be a TableGen attribute on some registers and
4842// this table could be generated automatically from RegInfo.
4843Register
4844MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
4845 const MachineFunction &MF) const {
4846 // The Linux kernel uses $28 and sp.
4847 if (Subtarget.isGP64bit()) {
4849 .Case("$28", Mips::GP_64)
4850 .Case("sp", Mips::SP_64)
4851 .Default(Register());
4852 if (Reg)
4853 return Reg;
4854 } else {
4856 .Case("$28", Mips::GP)
4857 .Case("sp", Mips::SP)
4858 .Default(Register());
4859 if (Reg)
4860 return Reg;
4861 }
4862 report_fatal_error("Invalid register name global variable");
4863}
4864
4865MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
4866 MachineBasicBlock *BB) const {
4867 MachineFunction *MF = BB->getParent();
4868 MachineRegisterInfo &MRI = MF->getRegInfo();
4869 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4870 const bool IsLittle = Subtarget.isLittle();
4871 DebugLoc DL = MI.getDebugLoc();
4872
4873 Register Dest = MI.getOperand(0).getReg();
4874 Register Address = MI.getOperand(1).getReg();
4875 unsigned Imm = MI.getOperand(2).getImm();
4876
4877 MachineBasicBlock::iterator I(MI);
4878
4879 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4880 // MIPS release 6 can load from an address that is not naturally aligned.
4881 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4882 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4883 .addDef(Temp)
4884 .addUse(Address)
4885 .addImm(Imm);
4886 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
4887 } else {
4888 // Mips release 5 needs to use instructions that can load from an unaligned
4889 // memory address.
4890 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4891 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4892 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4893 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
4894 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4895 .addDef(LoadHalf)
4896 .addUse(Address)
4897 .addImm(Imm + (IsLittle ? 0 : 3))
4898 .addUse(Undef);
4899 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4900 .addDef(LoadFull)
4901 .addUse(Address)
4902 .addImm(Imm + (IsLittle ? 3 : 0))
4903 .addUse(LoadHalf);
4904 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
4905 }
4906
4907 MI.eraseFromParent();
4908 return BB;
4909}
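// For example (illustrative): on little-endian MIPS32r5 the block above
// expands an unaligned MSA word load at offset 0 into
//   lwr $t, 0($addr)
//   lwl $t, 3($addr)
//   fill.w $wD, $t
// whereas on MIPS32r6 a plain lw suffices, since r6 loads tolerate
// misalignment.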
4910
4911MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
4912 MachineBasicBlock *BB) const {
4913 MachineFunction *MF = BB->getParent();
4914 MachineRegisterInfo &MRI = MF->getRegInfo();
4915 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4916 const bool IsLittle = Subtarget.isLittle();
4917 DebugLoc DL = MI.getDebugLoc();
4918
4919 Register Dest = MI.getOperand(0).getReg();
4920 Register Address = MI.getOperand(1).getReg();
4921 unsigned Imm = MI.getOperand(2).getImm();
4922
4923 MachineBasicBlock::iterator I(MI);
4924
4925 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4926 // MIPS release 6 can load from an address that is not naturally aligned.
4927 if (Subtarget.isGP64bit()) {
4928 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
4929 BuildMI(*BB, I, DL, TII->get(Mips::LD))
4930 .addDef(Temp)
4931 .addUse(Address)
4932 .addImm(Imm);
4933 BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
4934 } else {
4935 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4936 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4937 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4938 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4939 .addDef(Lo)
4940 .addUse(Address)
4941 .addImm(Imm + (IsLittle ? 0 : 4));
4942 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4943 .addDef(Hi)
4944 .addUse(Address)
4945 .addImm(Imm + (IsLittle ? 4 : 0));
4946 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
4947 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4948 .addUse(Wtemp)
4949 .addUse(Hi)
4950 .addImm(1);
4951 }
4952 } else {
4953 // Mips release 5 needs to use instructions that can load from an unaligned
4954 // memory address.
4955 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4956 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4957 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4958 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4959 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4960 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4961 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4962 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
4963 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4964 .addDef(LoHalf)
4965 .addUse(Address)
4966 .addImm(Imm + (IsLittle ? 0 : 7))
4967 .addUse(LoUndef);
4968 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4969 .addDef(LoFull)
4970 .addUse(Address)
4971 .addImm(Imm + (IsLittle ? 3 : 4))
4972 .addUse(LoHalf);
4973 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
4974 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4975 .addDef(HiHalf)
4976 .addUse(Address)
4977 .addImm(Imm + (IsLittle ? 4 : 3))
4978 .addUse(HiUndef);
4979 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4980 .addDef(HiFull)
4981 .addUse(Address)
4982 .addImm(Imm + (IsLittle ? 7 : 0))
4983 .addUse(HiHalf);
4984 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
4985 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4986 .addUse(Wtemp)
4987 .addUse(HiFull)
4988 .addImm(1);
4989 }
4990
4991 MI.eraseFromParent();
4992 return BB;
4993}
4994
4995MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
4996 MachineBasicBlock *BB) const {
4997 MachineFunction *MF = BB->getParent();
4998 MachineRegisterInfo &MRI = MF->getRegInfo();
4999 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
5000 const bool IsLittle = Subtarget.isLittle();
5001 DebugLoc DL = MI.getDebugLoc();
5002
5003 Register StoreVal = MI.getOperand(0).getReg();
5004 Register Address = MI.getOperand(1).getReg();
5005 unsigned Imm = MI.getOperand(2).getImm();
5006
5007 MachineBasicBlock::iterator I(MI);
5008
5009 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
5010 // Mips release 6 can store to address that is not naturally-aligned.
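// The COPY re-interprets the store value as a 128-bit MSA register so that
// COPY_S_W can move word lane 0 into a GPR; a single SW then performs the
// store, since r6 handles misaligned word accesses in hardware.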
5011 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5012 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5013 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
5014 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5015 .addDef(Tmp)
5016 .addUse(BitcastW)
5017 .addImm(0);
5018 BuildMI(*BB, I, DL, TII->get(Mips::SW))
5019 .addUse(Tmp)
5020 .addUse(Address)
5021 .addImm(Imm);
5022 } else {
5023 // Mips release 5 needs to use instructions that can store to an unaligned
5024 // memory address.
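// SWR writes the least-significant bytes of Tmp and SWL the most-significant
// bytes; issued at opposite ends of the word's 4-byte span (ends chosen by
// IsLittle), the pair covers all four bytes at any alignment.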
5025 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5026 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5027 .addDef(Tmp)
5028 .addUse(StoreVal)
5029 .addImm(0);
5030 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5031 .addUse(Tmp)
5032 .addUse(Address)
5033 .addImm(Imm + (IsLittle ? 0 : 3));
5034 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5035 .addUse(Tmp)
5036 .addUse(Address)
5037 .addImm(Imm + (IsLittle ? 3 : 0));
5038 }
5039
5040 MI.eraseFromParent();
5041
5042 return BB;
5043}
5044
5045MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
5046 MachineBasicBlock *BB) const {
5047 MachineFunction *MF = BB->getParent();
5048 MachineRegisterInfo &MRI = MF->getRegInfo();
5049 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
5050 const bool IsLittle = Subtarget.isLittle();
5051 DebugLoc DL = MI.getDebugLoc();
5052
5053 Register StoreVal = MI.getOperand(0).getReg();
5054 Register Address = MI.getOperand(1).getReg();
5055 unsigned Imm = MI.getOperand(2).getImm();
5056
5057 MachineBasicBlock::iterator I(MI);
5058
5059 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
5060 // Mips release 6 can store to address that is not naturally-aligned.
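// Mirrors emitSTR_W: the value is viewed as an MSA register, and on 64-bit
// targets COPY_S_D extracts the whole double into a GPR64 for a single SD,
// while 32-bit targets extract word lanes 0 and 1 and emit two SW stores,
// the IsLittle offsets deciding which lane reaches the lower address.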
5061 if (Subtarget.isGP64bit()) {
5062 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
5063 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
5064 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
5065 .addDef(BitcastD)
5066 .addUse(StoreVal);
5067 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
5068 .addDef(Lo)
5069 .addUse(BitcastD)
5070 .addImm(0);
5071 BuildMI(*BB, I, DL, TII->get(Mips::SD))
5072 .addUse(Lo)
5073 .addUse(Address)
5074 .addImm(Imm);
5075 } else {
5076 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5077 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5078 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5079 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
5080 .addDef(BitcastW)
5081 .addUse(StoreVal);
5082 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5083 .addDef(Lo)
5084 .addUse(BitcastW)
5085 .addImm(0);
5086 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5087 .addDef(Hi)
5088 .addUse(BitcastW)
5089 .addImm(1);
5090 BuildMI(*BB, I, DL, TII->get(Mips::SW))
5091 .addUse(Lo)
5092 .addUse(Address)
5093 .addImm(Imm + (IsLittle ? 0 : 4));
5094 BuildMI(*BB, I, DL, TII->get(Mips::SW))
5095 .addUse(Hi)
5096 .addUse(Address)
5097 .addImm(Imm + (IsLittle ? 4 : 0));
5098 }
5099 } else {
5100 // Mips release 5 needs to use instructions that can store to an unaligned
5101 // memory address.
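// Word lanes 0 and 1 are extracted as in the r6 path, but each is stored
// with an SWR/SWL pair whose immediates target opposite ends of its 4-byte
// span, covering every byte at any alignment; the (IsLittle ? ... : ...)
// offsets point each instruction at the correct end for the target's
// endianness.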
5102 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5103 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5104 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5105 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
5106 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5107 .addDef(Lo)
5108 .addUse(Bitcast)
5109 .addImm(0);
5110 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5111 .addDef(Hi)
5112 .addUse(Bitcast)
5113 .addImm(1);
5114 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5115 .addUse(Lo)
5116 .addUse(Address)
5117 .addImm(Imm + (IsLittle ? 0 : 3));
5118 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5119 .addUse(Lo)
5120 .addUse(Address)
5121 .addImm(Imm + (IsLittle ? 3 : 0));
5122 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5123 .addUse(Hi)
5124 .addUse(Address)
5125 .addImm(Imm + (IsLittle ? 4 : 7));
5126 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5127 .addUse(Hi)
5128 .addUse(Address)
5129 .addImm(Imm + (IsLittle ? 7 : 4));
5130 }
5131
5132 MI.eraseFromParent();
5133 return BB;
5134}