LLVM 22.0.0git
XCoreISelLowering.cpp
Go to the documentation of this file.
1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the XCoreTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "XCoreISelLowering.h"
14#include "XCore.h"
16#include "XCoreSubtarget.h"
17#include "XCoreTargetMachine.h"
26#include "llvm/IR/CallingConv.h"
27#include "llvm/IR/Constants.h"
29#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsXCore.h"
33#include "llvm/Support/Debug.h"
37#include <algorithm>
38
39using namespace llvm;
40
41#define DEBUG_TYPE "xcore-lower"
42
// XCoreTargetLowering constructor: configures how generic SelectionDAG
// operations are lowered for the XCore target (register classes, boolean
// contents, extending-load rules, and the custom-lowered node set).
// NOTE(review): this extraction elides many lines (the embedded doxygen
// line numbers jump). The elided lines appear to be further configuration
// calls (e.g. setOperationAction) — consult the full source before editing.
44 const XCoreSubtarget &Subtarget)
45 : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
46
47 // Set up the register classes.
// i32 is the only legal integer type; everything lives in GRRegs.
48 addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
49
50 // Compute derived properties from the register classes
52
54
56
57 // Use i32 for setcc operations results (slt, sgt, ...).
// NOTE(review): the scalar setBooleanContents call (line 58) is elided here.
59 setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
60
61 // XCore does not have the NodeTypes below.
64
65 // 64bit
75
76 // Bit Manipulation
81
83
84 // Jump tables.
86
89
90 // Conversion of i64 -> double produces constantpool nodes
92
93 // Loads
// Configure extending-load legality per integer value type; the elided
// lines 95-99 presumably set the remaining SEXTLOAD/ZEXTLOAD actions.
94 for (MVT VT : MVT::integer_valuetypes()) {
98
100 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
101 }
102
103 // Custom expand misaligned loads / stores.
106
107 // Varargs
112
113 // Dynamic stack
117
118 // Exception handling
121
123
124 // TRAMPOLINE is custom lowered.
127
128 // We want to custom lower some of our intrinsics.
130
134
135 // We have target-specific dag combine patterns for the following nodes:
138
141
142 // This target doesn't implement native atomics.
144}
145
// isZExtFree body — the signature (doxygen line 146) is elided by this
// extraction; per the visible code it takes a value `Val` and a destination
// type `VT2` and returns whether zero-extending `Val` to `VT2` costs nothing.
// Only an i8 value produced by a load qualifies (presumably because byte
// loads already zero-extend on this target — confirm against the ISA).
147 if (Val.getOpcode() != ISD::LOAD)
148 return false;
149
150 EVT VT1 = Val.getValueType();
// Both source and destination must be simple integer types.
151 if (!VT1.isSimple() || !VT1.isInteger() ||
152 !VT2.isSimple() || !VT2.isInteger())
153 return false;
154
155 switch (VT1.getSimpleVT().SimpleTy) {
156 default: break;
157 case MVT::i8:
158 return true;
159 }
160
161 return false;
162}
163
// LowerOperation body — the signature (doxygen lines 164-165) is elided by
// this extraction. Dispatches each custom-lowered opcode to its dedicated
// LowerXXX helper; any opcode not listed here is a lowering bug.
166 switch (Op.getOpcode())
167 {
168 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
169 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
170 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
171 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
172 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
173 case ISD::LOAD: return LowerLOAD(Op, DAG);
174 case ISD::STORE: return LowerSTORE(Op, DAG);
175 case ISD::VAARG: return LowerVAARG(Op, DAG);
176 case ISD::VASTART: return LowerVASTART(Op, DAG);
177 case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
178 case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
179 // FIXME: Remove these when LegalizeDAGTypes lands.
180 case ISD::ADD:
181 case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
182 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
183 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
184 case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
185 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
186 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
187 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
// NOTE(review): the case label on line 188 (presumably ISD::ATOMIC_FENCE,
// matching the handler below) is elided by this extraction.
189 return LowerATOMIC_FENCE(Op, DAG);
190 default:
191 llvm_unreachable("unimplemented operand");
192 }
193}
194
195/// ReplaceNodeResults - Replace the results of node with an illegal result
196/// type with new values built out of custom code.
// NOTE(review): the function signature (doxygen lines 197-198, taking the
// node `N` and a results vector `Results`) is elided by this extraction.
199 SelectionDAG &DAG) const {
200 switch (N->getOpcode()) {
201 default:
202 llvm_unreachable("Don't know how to custom expand this!");
// Only 64-bit add/sub reach here; both are expanded to 32-bit pieces.
203 case ISD::ADD:
204 case ISD::SUB:
205 Results.push_back(ExpandADDSUB(N, DAG));
206 return;
207 }
208}
209
210//===----------------------------------------------------------------------===//
211// Misc Lower Operation implementation
212//===----------------------------------------------------------------------===//
213
214SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
215 const GlobalValue *GV,
216 SelectionDAG &DAG) const {
217 // FIXME there is no actual debug info here
218 SDLoc dl(GA);
219
220 if (GV->getValueType()->isFunctionTy())
221 return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
222
223 const auto *GVar = dyn_cast<GlobalVariable>(GV);
224 if ((GV->hasSection() && GV->getSection().starts_with(".cp.")) ||
225 (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
226 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
227
228 return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
229}
230
// IsSmallObject - Decide whether GV can be addressed directly (small object)
// rather than through the large-code-model constant-pool indirection.
231static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
// NOTE(review): the condition guarding this early return (doxygen line 232)
// is elided by this extraction — presumably a code-model check via XTL;
// confirm against the full source.
233 return true;
234
// An object of unknown size cannot be proven small.
235 Type *ObjType = GV->getValueType();
236 if (!ObjType->isSized())
237 return false;
238
// Small means a nonzero allocation size below CodeModelLargeSize.
239 auto &DL = GV->getDataLayout();
240 unsigned ObjSize = DL.getTypeAllocSize(ObjType);
241 return ObjSize < CodeModelLargeSize && ObjSize != 0;
242}
243
// LowerGlobalAddress - Lower an ISD::GlobalAddress node.  Small objects are
// wrapped directly (folding a word-aligned, non-negative slice of the offset
// into the target node); large objects are reached by loading their address
// from the constant pool.
244SDValue XCoreTargetLowering::
245LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
246{
247 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
248 const GlobalValue *GV = GN->getGlobal();
249 SDLoc DL(GN);
250 int64_t Offset = GN->getOffset();
251 if (IsSmallObject(GV, *this)) {
252 // We can only fold positive offsets that are a multiple of the word size.
253 int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
254 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
255 GA = getGlobalAddressWrapper(GA, GV, DAG);
256 // Handle the rest of the offset.
// Any residue (low two bits, or a negative offset) is added back explicitly.
257 if (Offset != FoldedOffset) {
258 SDValue Remaining =
259 DAG.getSignedConstant(Offset - FoldedOffset, DL, MVT::i32);
260 GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
261 }
262 return GA;
263 } else {
264 // Ideally we would not fold in offset with an index <= 11.
265 Type *Ty = Type::getInt32Ty(*DAG.getContext());
266 Constant *Idx = ConstantInt::get(Ty, Offset);
// NOTE(review): the statement defining `GAI` (doxygen line 267) is elided by
// this extraction — presumably a ConstantExpr GEP over GV by Idx; the line
// below is its trailing argument list. Confirm against the full source.
268 Type::getInt8Ty(*DAG.getContext()), const_cast<GlobalValue *>(GV), Idx);
269 SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
// The address itself lives in the constant pool; load it at ISel time.
270 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
271 DAG.getEntryNode(), CP, MachinePointerInfo());
272 }
273}
274
275SDValue XCoreTargetLowering::
276LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
277{
278 SDLoc DL(Op);
279 auto PtrVT = getPointerTy(DAG.getDataLayout());
280 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
281 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
282
283 return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
284}
285
286SDValue XCoreTargetLowering::
287LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
288{
289 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
290 // FIXME there isn't really debug info here
291 SDLoc dl(CP);
292 EVT PtrVT = Op.getValueType();
293 SDValue Res;
294 if (CP->isMachineConstantPoolEntry()) {
295 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
296 CP->getAlign(), CP->getOffset());
297 } else {
298 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
299 CP->getOffset());
300 }
301 return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
302}
303
306}
307
// LowerBR_JT - Lower a jump-table branch.  Small tables (<= 32 entries) use
// the BR_JT node directly; larger tables use BR_JT32 with the index scaled
// by 2 (presumably selecting 32-bit table entries — confirm against the
// instruction definitions).
308SDValue XCoreTargetLowering::
309LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
310{
311 SDValue Chain = Op.getOperand(0);
312 SDValue Table = Op.getOperand(1);
313 SDValue Index = Op.getOperand(2);
314 SDLoc dl(Op);
315 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
316 unsigned JTI = JT->getIndex();
// NOTE(review): the definition of `MF` (doxygen line 317) is elided by this
// extraction — presumably obtained from DAG.getMachineFunction().
318 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
319 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
320
321 unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
322 if (NumEntries <= 32) {
323 return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
324 }
// The shift below must not overflow the 32-bit index.
325 assert((NumEntries >> 31) == 0);
326 SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
327 DAG.getConstant(1, dl, MVT::i32));
328 return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
329 ScaledIndex);
330}
331
332SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
333 const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
334 SelectionDAG &DAG) const {
335 auto PtrVT = getPointerTy(DAG.getDataLayout());
336 if ((Offset & 0x3) == 0) {
337 return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
338 }
339 // Lower to pair of consecutive word aligned loads plus some bit shifting.
340 int32_t HighOffset = alignTo(Offset, 4);
341 int32_t LowOffset = HighOffset - 4;
342 SDValue LowAddr, HighAddr;
343 if (GlobalAddressSDNode *GASD =
344 dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
345 LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
346 LowOffset);
347 HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
348 HighOffset);
349 } else {
350 LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
351 DAG.getConstant(LowOffset, DL, MVT::i32));
352 HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
353 DAG.getConstant(HighOffset, DL, MVT::i32));
354 }
355 SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
356 SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
357
358 SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
359 SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
360 SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
361 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
362 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
363 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
364 High.getValue(1));
365 SDValue Ops[] = { Result, Chain };
366 return DAG.getMergeValues(Ops, DL);
367}
368
// isWordAligned body — the signature (doxygen line 369) is elided by this
// extraction; per the visible code it takes a `Value` SDValue and the DAG.
// A value is word aligned when its two low bits are provably zero.
370{
371 KnownBits Known = DAG.computeKnownBits(Value);
372 return Known.countMinTrailingZeros() >= 2;
373}
374
// LowerLOAD - Custom-lower a misaligned i32 load.  Strategy, in order:
// (1) if the address is a known word-aligned base plus constant offset,
// reassemble the word from aligned loads; (2) if aligned to 2, use two
// halfword loads; (3) otherwise call the __misaligned_load helper.
375SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
376 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// NOTE(review): doxygen line 377 is elided — presumably defines `Context`
// (the LLVMContext), which is used by getIntPtrType below.
378 LoadSDNode *LD = cast<LoadSDNode>(Op);
379 assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
380 "Unexpected extension type");
381 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
382
// NOTE(review): doxygen line 383 (the head of this condition — presumably a
// "is this access already legal?" check) is elided by this extraction.
384 LD->getMemoryVT(), *LD->getMemOperand()))
385 return SDValue();
386
387 SDValue Chain = LD->getChain();
388 SDValue BasePtr = LD->getBasePtr();
389 SDLoc DL(Op);
390
// Aligned-base rewrites are only valid for non-volatile loads.
391 if (!LD->isVolatile()) {
392 const GlobalValue *GV;
393 int64_t Offset = 0;
394 if (DAG.isBaseWithConstantOffset(BasePtr) &&
395 isWordAligned(BasePtr->getOperand(0), DAG)) {
396 SDValue NewBasePtr = BasePtr->getOperand(0);
397 Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
398 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
399 Offset, DAG);
400 }
// Same rewrite when the base is a suitably aligned global.
401 if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
402 GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
403 SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
404 BasePtr->getValueType(0));
405 return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
406 Offset, DAG);
407 }
408 }
409
// Halfword-aligned: combine a zero-extending low halfword load with a
// high halfword shifted into the top 16 bits.
410 if (LD->getAlign() == Align(2)) {
411 SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
412 LD->getPointerInfo(), MVT::i16, Align(2),
413 LD->getMemOperand()->getFlags());
414 SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
415 DAG.getConstant(2, DL, MVT::i32));
416 SDValue High =
417 DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
418 LD->getPointerInfo().getWithOffset(2), MVT::i16,
419 Align(2), LD->getMemOperand()->getFlags());
420 SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
421 DAG.getConstant(16, DL, MVT::i32));
422 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
423 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
424 High.getValue(1));
425 SDValue Ops[] = { Result, Chain };
426 return DAG.getMergeValues(Ops, DL);
427 }
428
429 // Lower to a call to __misaligned_load(BasePtr).
430 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
// NOTE(review): doxygen lines 431 and 434 (the declarations of `Args` and
// `CLI`) and 438 (an argument of getExternalSymbol) are elided here.
432 Args.emplace_back(BasePtr, IntPtrTy);
433
435 CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
436 CallingConv::C, IntPtrTy,
437 DAG.getExternalSymbol("__misaligned_load",
439 std::move(Args));
440
441 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
442 SDValue Ops[] = { CallResult.first, CallResult.second };
443 return DAG.getMergeValues(Ops, DL);
444}
445
// LowerSTORE - Custom-lower a misaligned i32 store: halfword-aligned stores
// become two truncating halfword stores; anything else calls the
// __misaligned_store helper.
446SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// NOTE(review): doxygen line 447 is elided — presumably defines `Context`
// (the LLVMContext), used by getIntPtrType below.
448 StoreSDNode *ST = cast<StoreSDNode>(Op);
449 assert(!ST->isTruncatingStore() && "Unexpected store type");
450 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
451
// NOTE(review): doxygen line 452 (the head of this condition — presumably a
// "is this access already legal?" check) is elided by this extraction.
453 ST->getMemoryVT(), *ST->getMemOperand()))
454 return SDValue();
455
456 SDValue Chain = ST->getChain();
457 SDValue BasePtr = ST->getBasePtr();
458 SDValue Value = ST->getValue();
459 SDLoc dl(Op);
460
// Halfword-aligned: store the low and high 16 bits separately.
461 if (ST->getAlign() == Align(2)) {
462 SDValue Low = Value;
463 SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
464 DAG.getConstant(16, dl, MVT::i32));
465 SDValue StoreLow =
466 DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
467 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
468 SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
469 DAG.getConstant(2, dl, MVT::i32));
470 SDValue StoreHigh = DAG.getTruncStore(
471 Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
472 MVT::i16, Align(2), ST->getMemOperand()->getFlags());
473 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
474 }
475
476 // Lower to a call to __misaligned_store(BasePtr, Value).
477 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
// NOTE(review): doxygen lines 478 and 482 (the declarations of `Args` and
// `CLI`), 484 (presumably the callee type/convention arguments) and 486
// (an argument of getExternalSymbol) are elided by this extraction.
479 Args.emplace_back(BasePtr, IntPtrTy);
480 Args.emplace_back(Value, IntPtrTy);
481
483 CLI.setDebugLoc(dl).setChain(Chain).setCallee(
485 DAG.getExternalSymbol("__misaligned_store",
487 std::move(Args));
488
// Only the chain matters — the helper returns nothing useful.
489 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
490 return CallResult.second;
491}
492
493SDValue XCoreTargetLowering::
494LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
495{
496 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
497 "Unexpected operand to lower!");
498 SDLoc dl(Op);
499 SDValue LHS = Op.getOperand(0);
500 SDValue RHS = Op.getOperand(1);
501 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
502 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
503 DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
504 LHS, RHS);
505 SDValue Lo(Hi.getNode(), 1);
506 SDValue Ops[] = { Lo, Hi };
507 return DAG.getMergeValues(Ops, dl);
508}
509
510SDValue XCoreTargetLowering::
511LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
512{
513 assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
514 "Unexpected operand to lower!");
515 SDLoc dl(Op);
516 SDValue LHS = Op.getOperand(0);
517 SDValue RHS = Op.getOperand(1);
518 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
519 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
520 DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
521 Zero, Zero);
522 SDValue Lo(Hi.getNode(), 1);
523 SDValue Ops[] = { Lo, Hi };
524 return DAG.getMergeValues(Ops, dl);
525}
526
527/// isADDADDMUL - Return whether Op is in a form that is equivalent to
528/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
529/// each intermediate result in the calculation must also have a single use.
530/// If the Op is in the correct form the constituent parts are written to Mul0,
531/// Mul1, Addend0 and Addend1.
532static bool
533isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
534 SDValue &Addend1, bool requireIntermediatesHaveOneUse)
535{
536 if (Op.getOpcode() != ISD::ADD)
537 return false;
538 SDValue N0 = Op.getOperand(0);
539 SDValue N1 = Op.getOperand(1);
540 SDValue AddOp;
541 SDValue OtherOp;
542 if (N0.getOpcode() == ISD::ADD) {
543 AddOp = N0;
544 OtherOp = N1;
545 } else if (N1.getOpcode() == ISD::ADD) {
546 AddOp = N1;
547 OtherOp = N0;
548 } else {
549 return false;
550 }
551 if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
552 return false;
553 if (OtherOp.getOpcode() == ISD::MUL) {
554 // add(add(a,b),mul(x,y))
555 if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
556 return false;
557 Mul0 = OtherOp.getOperand(0);
558 Mul1 = OtherOp.getOperand(1);
559 Addend0 = AddOp.getOperand(0);
560 Addend1 = AddOp.getOperand(1);
561 return true;
562 }
563 if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
564 // add(add(mul(x,y),a),b)
565 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
566 return false;
567 Mul0 = AddOp.getOperand(0).getOperand(0);
568 Mul1 = AddOp.getOperand(0).getOperand(1);
569 Addend0 = AddOp.getOperand(1);
570 Addend1 = OtherOp;
571 return true;
572 }
573 if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
574 // add(add(a,mul(x,y)),b)
575 if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
576 return false;
577 Mul0 = AddOp.getOperand(1).getOperand(0);
578 Mul1 = AddOp.getOperand(1).getOperand(1);
579 Addend0 = AddOp.getOperand(0);
580 Addend1 = OtherOp;
581 return true;
582 }
583 return false;
584}
585
// TryExpandADDWithMul - If this i64 ADD has a multiply operand, expand it to
// a MACCU/MACCS multiply-accumulate.  Returns the replacement value, or a
// null SDValue if neither operand is a multiply.
586SDValue XCoreTargetLowering::
587TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
588{
589 SDValue Mul;
// NOTE(review): doxygen line 590 is elided by this extraction — presumably
// the declaration of `Other`, which is assigned and used below.
591 if (N->getOperand(0).getOpcode() == ISD::MUL) {
592 Mul = N->getOperand(0);
593 Other = N->getOperand(1);
594 } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
595 Mul = N->getOperand(1);
596 Other = N->getOperand(0);
597 } else {
598 return SDValue();
599 }
600 SDLoc dl(N);
// Split the multiply operands and the addend into 32-bit halves.
601 SDValue LL, RL, AddendL, AddendH;
602 LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
603 Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
604 RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
605 Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
606 AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
607 Other, DAG.getConstant(0, dl, MVT::i32));
608 AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
609 Other, DAG.getConstant(1, dl, MVT::i32));
610 APInt HighMask = APInt::getHighBitsSet(64, 32);
611 unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
612 unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
// Both multiply inputs fit in 32 bits zero-extended: one MACCU suffices.
613 if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
614 DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
615 // The inputs are both zero-extended.
616 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
617 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
618 AddendL, LL, RL);
619 SDValue Lo(Hi.getNode(), 1);
620 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
621 }
// Both inputs fit in 32 bits sign-extended: one MACCS suffices.
622 if (LHSSB > 32 && RHSSB > 32) {
623 // The inputs are both sign-extended.
624 SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
625 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
626 AddendL, LL, RL);
627 SDValue Lo(Hi.getNode(), 1);
628 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
629 }
// General case: full 64x64 product low half = MACCU of the low halves plus
// the two cross products LL*RH and LH*RL added into the high word.
630 SDValue LH, RH;
631 LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
632 Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
633 RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
634 Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
635 SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
636 DAG.getVTList(MVT::i32, MVT::i32), AddendH,
637 AddendL, LL, RL);
638 SDValue Lo(Hi.getNode(), 1);
639 RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
640 LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
641 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
642 Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
643 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
644}
645
646SDValue XCoreTargetLowering::
647ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
648{
649 assert(N->getValueType(0) == MVT::i64 &&
650 (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
651 "Unknown operand to lower!");
652
653 if (N->getOpcode() == ISD::ADD)
654 if (SDValue Result = TryExpandADDWithMul(N, DAG))
655 return Result;
656
657 SDLoc dl(N);
658
659 // Extract components
660 SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
661 N->getOperand(0),
662 DAG.getConstant(0, dl, MVT::i32));
663 SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
664 N->getOperand(0),
665 DAG.getConstant(1, dl, MVT::i32));
666 SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
667 N->getOperand(1),
668 DAG.getConstant(0, dl, MVT::i32));
669 SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
670 N->getOperand(1),
671 DAG.getConstant(1, dl, MVT::i32));
672
673 // Expand
674 unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
675 XCoreISD::LSUB;
676 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
677 SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
678 LHSL, RHSL, Zero);
679 SDValue Carry(Lo.getNode(), 1);
680
681 SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
682 LHSH, RHSH, Carry);
683 SDValue Ignored(Hi.getNode(), 1);
684 // Merge the pieces
685 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
686}
687
// LowerVAARG - Lower va_arg: load the current va_list pointer, bump it past
// the argument, store it back, then load the argument itself.
688SDValue XCoreTargetLowering::
689LowerVAARG(SDValue Op, SelectionDAG &DAG) const
690{
691 // Whist llvm does not support aggregate varargs we can ignore
692 // the possibility of the ValueType being an implicit byVal vararg.
693 SDNode *Node = Op.getNode();
694 EVT VT = Node->getValueType(0); // not an aggregate
695 SDValue InChain = Node->getOperand(0);
696 SDValue VAListPtr = Node->getOperand(1);
697 EVT PtrVT = VAListPtr.getValueType();
698 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
699 SDLoc dl(Node);
700 SDValue VAList =
701 DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
702 // Increment the pointer, VAList, to the next vararg
703 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
704 DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
705 dl));
706 // Store the incremented VAList to the legalized pointer
707 InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
// NOTE(review): doxygen line 708 (the final argument of this getStore call,
// presumably a MachinePointerInfo) is elided by this extraction.
709 // Load the actual argument out of the pointer VAList
710 return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
711}
712
// LowerVASTART - Lower va_start by storing the address of the function's
// varargs frame-index slot to the supplied memory location.
713SDValue XCoreTargetLowering::
714LowerVASTART(SDValue Op, SelectionDAG &DAG) const
715{
716 SDLoc dl(Op);
717 // vastart stores the address of the VarArgsFrameIndex slot into the
718 // memory location argument
// NOTE(review): doxygen lines 719-720 are elided by this extraction —
// presumably they obtain `XFI` (the XCore machine-function info) used below.
721 SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
722 return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
// NOTE(review): doxygen line 723 (the final argument of this getStore call,
// presumably a MachinePointerInfo) is elided by this extraction.
724}
725
726SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
727 SelectionDAG &DAG) const {
728 // This nodes represent llvm.frameaddress on the DAG.
729 // It takes one operand, the index of the frame address to return.
730 // An index of zero corresponds to the current function's frame address.
731 // An index of one to the parent's frame address, and so on.
732 // Depths > 0 not supported yet!
733 if (Op.getConstantOperandVal(0) > 0)
734 return SDValue();
735
// NOTE(review): doxygen line 736 is elided by this extraction — presumably
// the definition of `MF` (the current MachineFunction) used below.
// Depth 0: just read the frame register.
737 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
738 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
739 RegInfo->getFrameRegister(MF), MVT::i32);
740}
741
742SDValue XCoreTargetLowering::
743LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
744 // This nodes represent llvm.returnaddress on the DAG.
745 // It takes one operand, the index of the return address to return.
746 // An index of zero corresponds to the current function's return address.
747 // An index of one to the parent's return address, and so on.
748 // Depths > 0 not supported yet!
749 if (Op.getConstantOperandVal(0) > 0)
750 return SDValue();
751
// NOTE(review): doxygen lines 752-753 are elided by this extraction —
// presumably the definitions of `MF` and `XFI` used below.
// Depth 0: read the saved link register from its spill slot.
754 int FI = XFI->createLRSpillSlot(MF);
755 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
756 return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
757 DAG.getEntryNode(), FIN,
// NOTE(review): doxygen line 758 (the final argument of this getLoad call,
// presumably a MachinePointerInfo for the spill slot) is elided here.
759}
760
761SDValue XCoreTargetLowering::
762LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
763 // This node represents offset from frame pointer to first on-stack argument.
764 // This is needed for correct stack adjustment during unwind.
765 // However, we don't know the offset until after the frame has be finalised.
766 // This is done during the XCoreFTAOElim pass.
767 return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
768}
769
770SDValue XCoreTargetLowering::
771LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
772 // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
773 // This node represents 'eh_return' gcc dwarf builtin, which is used to
774 // return from exception. The general meaning is: adjust stack by OFFSET and
775 // pass execution to HANDLER.
// NOTE(review): doxygen line 776 is elided by this extraction — presumably
// the definition of `MF` (the current MachineFunction) used below.
777 SDValue Chain = Op.getOperand(0);
778 SDValue Offset = Op.getOperand(1);
779 SDValue Handler = Op.getOperand(2);
780 SDLoc dl(Op);
781
782 // Absolute SP = (FP + FrameToArgs) + Offset
783 const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
// NOTE(review): doxygen line 784 is elided — presumably the head of the
// getCopyFromReg call that defines `Stack` from the frame register.
785 RegInfo->getFrameRegister(MF), MVT::i32);
786 SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
787 MVT::i32);
788 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
789 Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
790
791 // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
792 // which leaves 2 caller saved registers, R2 & R3 for us to use.
793 unsigned StackReg = XCore::R2;
794 unsigned HandlerReg = XCore::R3;
795
// Both copies hang off the incoming chain and are joined by a TokenFactor.
796 SDValue OutChains[] = {
797 DAG.getCopyToReg(Chain, dl, StackReg, Stack),
798 DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
799 };
800
801 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
802
803 return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
804 DAG.getRegister(StackReg, MVT::i32),
805 DAG.getRegister(HandlerReg, MVT::i32));
806
807}
808
809SDValue XCoreTargetLowering::
810LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
811 return Op.getOperand(0);
812}
813
814SDValue XCoreTargetLowering::
815LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
816 SDValue Chain = Op.getOperand(0);
817 SDValue Trmp = Op.getOperand(1); // trampoline
818 SDValue FPtr = Op.getOperand(2); // nested function
819 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
820
821 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
822
823 // .align 4
824 // LDAPF_u10 r11, nest
825 // LDW_2rus r11, r11[0]
826 // STWSP_ru6 r11, sp[0]
827 // LDAPF_u10 r11, fptr
828 // LDW_2rus r11, r11[0]
829 // BAU_1r r11
830 // nest:
831 // .word nest
832 // fptr:
833 // .word fptr
834 SDValue OutChains[5];
835
836 SDValue Addr = Trmp;
837
838 SDLoc dl(Op);
839 OutChains[0] =
840 DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
841 MachinePointerInfo(TrmpAddr));
842
843 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
844 DAG.getConstant(4, dl, MVT::i32));
845 OutChains[1] =
846 DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
847 MachinePointerInfo(TrmpAddr, 4));
848
849 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
850 DAG.getConstant(8, dl, MVT::i32));
851 OutChains[2] =
852 DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
853 MachinePointerInfo(TrmpAddr, 8));
854
855 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
856 DAG.getConstant(12, dl, MVT::i32));
857 OutChains[3] =
858 DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
859
860 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
861 DAG.getConstant(16, dl, MVT::i32));
862 OutChains[4] =
863 DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
864
865 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
866}
867
868SDValue XCoreTargetLowering::
869LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
870 SDLoc DL(Op);
871 unsigned IntNo = Op.getConstantOperandVal(0);
872 switch (IntNo) {
873 case Intrinsic::xcore_crc8:
874 EVT VT = Op.getValueType();
875 SDValue Data =
876 DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
877 Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
878 SDValue Crc(Data.getNode(), 1);
879 SDValue Results[] = { Crc, Data };
880 return DAG.getMergeValues(Results, DL);
881 }
882 return SDValue();
883}
884
885SDValue XCoreTargetLowering::
886LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
887 SDLoc DL(Op);
888 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
889}
890
891//===----------------------------------------------------------------------===//
892// Calling Convention Implementation
893//===----------------------------------------------------------------------===//
894
895#include "XCoreGenCallingConv.inc"
896
897//===----------------------------------------------------------------------===//
898// Call Calling Convention Implementation
899//===----------------------------------------------------------------------===//
900
901/// XCore call implementation
// Unpacks CallLoweringInfo and forwards to LowerCCCCallTo; only the C
// calling convention is supported and tail calls are disabled.
// NOTE(review): doxygen line 902 (the return type / qualified name of this
// definition) is elided by this extraction.
903XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
904 SmallVectorImpl<SDValue> &InVals) const {
905 SelectionDAG &DAG = CLI.DAG;
906 SDLoc &dl = CLI.DL;
// NOTE(review): doxygen lines 907 and 909 are elided — presumably the
// references `Outs` and `Ins` (CLI.Outs / CLI.Ins) used below.
908 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
910 SDValue Chain = CLI.Chain;
911 SDValue Callee = CLI.Callee;
912 bool &isTailCall = CLI.IsTailCall;
913 CallingConv::ID CallConv = CLI.CallConv;
914 bool isVarArg = CLI.IsVarArg;
915
916 // XCore target does not yet support tail call optimization.
917 isTailCall = false;
918
919 // For now, only CallingConv::C implemented
920 switch (CallConv)
921 {
922 default:
923 report_fatal_error("Unsupported calling convention");
// NOTE(review): doxygen line 924 is elided — presumably an additional case
// label falling through to the C handling below.
925 case CallingConv::C:
926 return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
927 Outs, OutVals, Ins, dl, DAG, InVals);
928 }
929}
930
931/// LowerCallResult - Lower the result values of a call into the
932/// appropriate copies out of appropriate physical registers / memory locations.
// NOTE(review): the signature head (doxygen line 933, presumably taking
// `Chain` and the glue value `InGlue`) is elided by this extraction.
934 const SmallVectorImpl<CCValAssign> &RVLocs,
935 const SDLoc &dl, SelectionDAG &DAG,
936 SmallVectorImpl<SDValue> &InVals) {
// (offset, InVals-index) pairs for results that come back on the stack.
937 SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
938 // Copy results out of physical registers.
939 for (const CCValAssign &VA : RVLocs) {
940 if (VA.isRegLoc()) {
941 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
942 InGlue).getValue(1);
943 InGlue = Chain.getValue(2);
944 InVals.push_back(Chain.getValue(0));
945 } else {
946 assert(VA.isMemLoc());
947 ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
948 InVals.size()));
949 // Reserve space for this result.
950 InVals.push_back(SDValue());
951 }
952 }
953
954 // Copy results out of memory.
955 SmallVector<SDValue, 4> MemOpChains;
956 for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
957 int offset = ResultMemLocs[i].first;
958 unsigned index = ResultMemLocs[i].second;
959 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
// LDWSP takes a word (not byte) offset, hence the division by 4.
960 SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
961 SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
962 InVals[index] = load;
963 MemOpChains.push_back(load.getValue(1));
964 }
965
966 // Transform all loads nodes into one single node because
967 // all load nodes are independent of each other.
968 if (!MemOpChains.empty())
969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
970
971 return Chain;
972}
973
974/// LowerCCCCallTo - functions arguments are copied from virtual
975/// regs to (physical regs)/(stack frame), CALLSEQ_START and
976/// CALLSEQ_END are emitted.
977/// TODO: isTailCall, sret.
978SDValue XCoreTargetLowering::LowerCCCCallTo(
979 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
980 bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
981 const SmallVectorImpl<SDValue> &OutVals,
982 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
983 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
984
985 // Analyze operands of the call, assigning locations to each operand.
987 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
988 *DAG.getContext());
989
990 // The ABI dictates there should be one stack slot available to the callee
991 // on function entry (for saving lr).
992 CCInfo.AllocateStack(4, Align(4));
993
994 CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
995
997 // Analyze return values to determine the number of bytes of stack required.
998 CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
999 *DAG.getContext());
1000 RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
1001 RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1002
1003 // Get a count of how many bytes are to be pushed on the stack.
1004 unsigned NumBytes = RetCCInfo.getStackSize();
1005
1006 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1007
1009 SmallVector<SDValue, 12> MemOpChains;
1010
1011 // Walk the register/memloc assignments, inserting copies/loads.
1012 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1013 CCValAssign &VA = ArgLocs[i];
1014 SDValue Arg = OutVals[i];
1015
1016 // Promote the value if needed.
1017 switch (VA.getLocInfo()) {
1018 default: llvm_unreachable("Unknown loc info!");
1019 case CCValAssign::Full: break;
1020 case CCValAssign::SExt:
1021 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1022 break;
1023 case CCValAssign::ZExt:
1024 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1025 break;
1026 case CCValAssign::AExt:
1027 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1028 break;
1029 }
1030
1031 // Arguments that can be passed on register must be kept at
1032 // RegsToPass vector
1033 if (VA.isRegLoc()) {
1034 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1035 } else {
1036 assert(VA.isMemLoc());
1037
1038 int Offset = VA.getLocMemOffset();
1039
1040 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1041 Chain, Arg,
1042 DAG.getConstant(Offset/4, dl,
1043 MVT::i32)));
1044 }
1045 }
1046
1047 // Transform all store nodes into one single node because
1048 // all store nodes are independent of each other.
1049 if (!MemOpChains.empty())
1050 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1051
1052 // Build a sequence of copy-to-reg nodes chained together with token
1053 // chain and flag operands which copy the outgoing args into registers.
1054 // The InGlue in necessary since all emitted instructions must be
1055 // stuck together.
1056 SDValue InGlue;
1057 for (const auto &[Reg, N] : RegsToPass) {
1058 Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
1059 InGlue = Chain.getValue(1);
1060 }
1061
1062 // If the callee is a GlobalAddress node (quite common, every direct call is)
1063 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1064 // Likewise ExternalSymbol -> TargetExternalSymbol.
1065 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1066 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1067 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1068 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1069
1070 // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1071 // = Chain, Callee, Reg#1, Reg#2, ...
1072 //
1073 // Returns a chain & a flag for retval copy to use.
1074 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1076 Ops.push_back(Chain);
1077 Ops.push_back(Callee);
1078
1079 // Add argument registers to the end of the list so that they are
1080 // known live into the call.
1081 for (const auto &[Reg, N] : RegsToPass)
1082 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1083
1084 if (InGlue.getNode())
1085 Ops.push_back(InGlue);
1086
1087 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1088 InGlue = Chain.getValue(1);
1089
1090 // Create the CALLSEQ_END node.
1091 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, dl);
1092 InGlue = Chain.getValue(1);
1093
1094 // Handle result values, copying them out of physregs into vregs that we
1095 // return.
1096 return LowerCallResult(Chain, InGlue, RVLocs, dl, DAG, InVals);
1097}
1098
1099//===----------------------------------------------------------------------===//
1100// Formal Arguments Calling Convention Implementation
1101//===----------------------------------------------------------------------===//
1102
1103namespace {
1104 struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1105}
1106
1107/// XCore formal arguments implementation
1108SDValue XCoreTargetLowering::LowerFormalArguments(
1109 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1110 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1111 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1112 switch (CallConv)
1113 {
1114 default:
1115 report_fatal_error("Unsupported calling convention");
1116 case CallingConv::C:
1117 case CallingConv::Fast:
1118 return LowerCCCArguments(Chain, CallConv, isVarArg,
1119 Ins, dl, DAG, InVals);
1120 }
1121}
1122
1123/// LowerCCCArguments - transform physical registers into
1124/// virtual registers and generate load operations for
1125/// arguments places on the stack.
1126/// TODO: sret
1127SDValue XCoreTargetLowering::LowerCCCArguments(
1128 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1129 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1130 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1132 MachineFrameInfo &MFI = MF.getFrameInfo();
1135
1136 // Assign locations to all of the incoming arguments.
1138 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1139 *DAG.getContext());
1140
1141 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1142
1143 unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1144
1145 unsigned LRSaveSize = StackSlotSize;
1146
1147 if (!isVarArg)
1148 XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);
1149
1150 // All getCopyFromReg ops must precede any getMemcpys to prevent the
1151 // scheduler clobbering a register before it has been copied.
1152 // The stages are:
1153 // 1. CopyFromReg (and load) arg & vararg registers.
1154 // 2. Chain CopyFromReg nodes into a TokenFactor.
1155 // 3. Memcpy 'byVal' args & push final InVals.
1156 // 4. Chain mem ops nodes into a TokenFactor.
1157 SmallVector<SDValue, 4> CFRegNode;
1160
1161 // 1a. CopyFromReg (and load) arg registers.
1162 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1163
1164 CCValAssign &VA = ArgLocs[i];
1165 SDValue ArgIn;
1166
1167 if (VA.isRegLoc()) {
1168 // Arguments passed in registers
1169 EVT RegVT = VA.getLocVT();
1170 switch (RegVT.getSimpleVT().SimpleTy) {
1171 default:
1172 {
1173#ifndef NDEBUG
1174 errs() << "LowerFormalArguments Unhandled argument type: "
1175 << RegVT << "\n";
1176#endif
1177 llvm_unreachable(nullptr);
1178 }
1179 case MVT::i32:
1180 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1181 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1182 ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1183 CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1184 }
1185 } else {
1186 // Only arguments passed on the stack should make it here.
1187 assert(VA.isMemLoc());
1188 // Load the argument to a virtual register
1189 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1190 if (ObjSize > StackSlotSize) {
1191 errs() << "LowerFormalArguments Unhandled argument type: "
1192 << VA.getLocVT() << "\n";
1193 }
1194 // Create the frame index object for this incoming parameter...
1195 int FI = MFI.CreateFixedObject(ObjSize,
1196 LRSaveSize + VA.getLocMemOffset(),
1197 true);
1198
1199 // Create the SelectionDAG nodes corresponding to a load
1200 //from this parameter
1201 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1202 ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1204 }
1205 const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1206 ArgData.push_back(ADP);
1207 }
1208
1209 // 1b. CopyFromReg vararg registers.
1210 if (isVarArg) {
1211 // Argument registers
1212 static const MCPhysReg ArgRegs[] = {
1213 XCore::R0, XCore::R1, XCore::R2, XCore::R3
1214 };
1216 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1217 if (FirstVAReg < std::size(ArgRegs)) {
1218 int offset = 0;
1219 // Save remaining registers, storing higher register numbers at a higher
1220 // address
1221 for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1222 // Create a stack slot
1223 int FI = MFI.CreateFixedObject(4, offset, true);
1224 if (i == (int)FirstVAReg) {
1225 XFI->setVarArgsFrameIndex(FI);
1226 }
1227 offset -= StackSlotSize;
1228 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1229 // Move argument from phys reg -> virt reg
1230 Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1231 RegInfo.addLiveIn(ArgRegs[i], VReg);
1232 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1233 CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1234 // Move argument from virt reg -> stack
1235 SDValue Store =
1236 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1237 MemOps.push_back(Store);
1238 }
1239 } else {
1240 // This will point to the next argument passed via stack.
1242 MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
1243 }
1244 }
1245
1246 // 2. chain CopyFromReg nodes into a TokenFactor.
1247 if (!CFRegNode.empty())
1248 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1249
1250 // 3. Memcpy 'byVal' args & push final InVals.
1251 // Aggregates passed "byVal" need to be copied by the callee.
1252 // The callee will use a pointer to this copy, rather than the original
1253 // pointer.
1254 for (const ArgDataPair &ArgDI : ArgData) {
1255 if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
1256 unsigned Size = ArgDI.Flags.getByValSize();
1257 Align Alignment =
1258 std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
1259 // Create a new object on the stack and copy the pointee into it.
1260 int FI = MFI.CreateStackObject(Size, Alignment, false);
1261 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1262 InVals.push_back(FIN);
1263 MemOps.push_back(DAG.getMemcpy(
1264 Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
1265 Alignment, false, false, /*CI=*/nullptr, std::nullopt,
1267 } else {
1268 InVals.push_back(ArgDI.SDV);
1269 }
1270 }
1271
1272 // 4, chain mem ops nodes into a TokenFactor.
1273 if (!MemOps.empty()) {
1274 MemOps.push_back(Chain);
1275 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1276 }
1277
1278 return Chain;
1279}
1280
1281//===----------------------------------------------------------------------===//
1282// Return Value Calling Convention Implementation
1283//===----------------------------------------------------------------------===//
1284
1285bool XCoreTargetLowering::
1286CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1287 bool isVarArg,
1289 LLVMContext &Context, const Type *RetTy) const {
1291 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1292 if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1293 return false;
1294 if (CCInfo.getStackSize() != 0 && isVarArg)
1295 return false;
1296 return true;
1297}
1298
1299SDValue
1300XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1301 bool isVarArg,
1303 const SmallVectorImpl<SDValue> &OutVals,
1304 const SDLoc &dl, SelectionDAG &DAG) const {
1305
1306 XCoreFunctionInfo *XFI =
1309
1310 // CCValAssign - represent the assignment of
1311 // the return value to a location
1313
1314 // CCState - Info about the registers and stack slot.
1315 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1316 *DAG.getContext());
1317
1318 // Analyze return values.
1319 if (!isVarArg)
1320 CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
1321
1322 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1323
1324 SDValue Glue;
1325 SmallVector<SDValue, 4> RetOps(1, Chain);
1326
1327 // Return on XCore is always a "retsp 0"
1328 RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1329
1330 SmallVector<SDValue, 4> MemOpChains;
1331 // Handle return values that must be copied to memory.
1332 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1333 CCValAssign &VA = RVLocs[i];
1334 if (VA.isRegLoc())
1335 continue;
1336 assert(VA.isMemLoc());
1337 if (isVarArg) {
1338 report_fatal_error("Can't return value from vararg function in memory");
1339 }
1340
1341 int Offset = VA.getLocMemOffset();
1342 unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1343 // Create the frame index object for the memory location.
1344 int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1345
1346 // Create a SelectionDAG node corresponding to a store
1347 // to this memory location.
1348 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1349 MemOpChains.push_back(DAG.getStore(
1350 Chain, dl, OutVals[i], FIN,
1352 }
1353
1354 // Transform all store nodes into one single node because
1355 // all stores are independent of each other.
1356 if (!MemOpChains.empty())
1357 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1358
1359 // Now handle return values copied to registers.
1360 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1361 CCValAssign &VA = RVLocs[i];
1362 if (!VA.isRegLoc())
1363 continue;
1364 // Copy the result values into the output registers.
1365 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1366
1367 // guarantee that all emitted copies are
1368 // stuck together, avoiding something bad
1369 Glue = Chain.getValue(1);
1370 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1371 }
1372
1373 RetOps[0] = Chain; // Update chain.
1374
1375 // Add the glue if we have it.
1376 if (Glue.getNode())
1377 RetOps.push_back(Glue);
1378
1379 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1380}
1381
1382//===----------------------------------------------------------------------===//
1383// Other Lowering Code
1384//===----------------------------------------------------------------------===//
1385
1388 MachineBasicBlock *BB) const {
1389 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1390 DebugLoc dl = MI.getDebugLoc();
1391 assert((MI.getOpcode() == XCore::SELECT_CC) &&
1392 "Unexpected instr type to insert");
1393
1394 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1395 // control-flow pattern. The incoming instruction knows the destination vreg
1396 // to set, the condition code register to branch on, the true/false values to
1397 // select between, and a branch opcode to use.
1398 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1400
1401 // thisMBB:
1402 // ...
1403 // TrueVal = ...
1404 // cmpTY ccX, r1, r2
1405 // bCC copy1MBB
1406 // fallthrough --> copy0MBB
1407 MachineBasicBlock *thisMBB = BB;
1408 MachineFunction *F = BB->getParent();
1409 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1410 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1411 F->insert(It, copy0MBB);
1412 F->insert(It, sinkMBB);
1413
1414 // Transfer the remainder of BB and its successor edges to sinkMBB.
1415 sinkMBB->splice(sinkMBB->begin(), BB,
1416 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1418
1419 // Next, add the true and fallthrough blocks as its successors.
1420 BB->addSuccessor(copy0MBB);
1421 BB->addSuccessor(sinkMBB);
1422
1423 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1424 .addReg(MI.getOperand(1).getReg())
1425 .addMBB(sinkMBB);
1426
1427 // copy0MBB:
1428 // %FalseValue = ...
1429 // # fallthrough to sinkMBB
1430 BB = copy0MBB;
1431
1432 // Update machine-CFG edges
1433 BB->addSuccessor(sinkMBB);
1434
1435 // sinkMBB:
1436 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1437 // ...
1438 BB = sinkMBB;
1439 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1440 .addReg(MI.getOperand(3).getReg())
1441 .addMBB(copy0MBB)
1442 .addReg(MI.getOperand(2).getReg())
1443 .addMBB(thisMBB);
1444
1445 MI.eraseFromParent(); // The pseudo instruction is gone now.
1446 return BB;
1447}
1448
1449//===----------------------------------------------------------------------===//
1450// Target Optimization Hooks
1451//===----------------------------------------------------------------------===//
1452
1453SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1454 DAGCombinerInfo &DCI) const {
1455 SelectionDAG &DAG = DCI.DAG;
1456 SDLoc dl(N);
1457 switch (N->getOpcode()) {
1458 default: break;
1460 switch (N->getConstantOperandVal(1)) {
1461 case Intrinsic::xcore_outt:
1462 case Intrinsic::xcore_outct:
1463 case Intrinsic::xcore_chkct: {
1464 SDValue OutVal = N->getOperand(3);
1465 // These instructions ignore the high bits.
1466 if (OutVal.hasOneUse()) {
1467 unsigned BitWidth = OutVal.getValueSizeInBits();
1468 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1469 KnownBits Known;
1470 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1471 !DCI.isBeforeLegalizeOps());
1472 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1473 if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1474 TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1475 DCI.CommitTargetLoweringOpt(TLO);
1476 }
1477 break;
1478 }
1479 case Intrinsic::xcore_setpt: {
1480 SDValue Time = N->getOperand(3);
1481 // This instruction ignores the high bits.
1482 if (Time.hasOneUse()) {
1483 unsigned BitWidth = Time.getValueSizeInBits();
1484 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1485 KnownBits Known;
1486 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1487 !DCI.isBeforeLegalizeOps());
1488 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1489 if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1490 TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1491 DCI.CommitTargetLoweringOpt(TLO);
1492 }
1493 break;
1494 }
1495 }
1496 break;
1497 case XCoreISD::LADD: {
1498 SDValue N0 = N->getOperand(0);
1499 SDValue N1 = N->getOperand(1);
1500 SDValue N2 = N->getOperand(2);
1501 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1502 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1503 EVT VT = N0.getValueType();
1504
1505 // canonicalize constant to RHS
1506 if (N0C && !N1C)
1507 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1508
1509 // fold (ladd 0, 0, x) -> 0, x & 1
1510 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1511 SDValue Carry = DAG.getConstant(0, dl, VT);
1512 SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1513 DAG.getConstant(1, dl, VT));
1514 SDValue Ops[] = { Result, Carry };
1515 return DAG.getMergeValues(Ops, dl);
1516 }
1517
1518 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1519 // low bit set
1520 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1522 VT.getSizeInBits() - 1);
1523 KnownBits Known = DAG.computeKnownBits(N2);
1524 if ((Known.Zero & Mask) == Mask) {
1525 SDValue Carry = DAG.getConstant(0, dl, VT);
1526 SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1527 SDValue Ops[] = { Result, Carry };
1528 return DAG.getMergeValues(Ops, dl);
1529 }
1530 }
1531 }
1532 break;
1533 case XCoreISD::LSUB: {
1534 SDValue N0 = N->getOperand(0);
1535 SDValue N1 = N->getOperand(1);
1536 SDValue N2 = N->getOperand(2);
1537 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1538 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1539 EVT VT = N0.getValueType();
1540
1541 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1542 if (N0C && N0C->isZero() && N1C && N1C->isZero()) {
1544 VT.getSizeInBits() - 1);
1545 KnownBits Known = DAG.computeKnownBits(N2);
1546 if ((Known.Zero & Mask) == Mask) {
1547 SDValue Borrow = N2;
1548 SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1549 DAG.getConstant(0, dl, VT), N2);
1550 SDValue Ops[] = { Result, Borrow };
1551 return DAG.getMergeValues(Ops, dl);
1552 }
1553 }
1554
1555 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1556 // low bit set
1557 if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
1559 VT.getSizeInBits() - 1);
1560 KnownBits Known = DAG.computeKnownBits(N2);
1561 if ((Known.Zero & Mask) == Mask) {
1562 SDValue Borrow = DAG.getConstant(0, dl, VT);
1563 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1564 SDValue Ops[] = { Result, Borrow };
1565 return DAG.getMergeValues(Ops, dl);
1566 }
1567 }
1568 }
1569 break;
1570 case XCoreISD::LMUL: {
1571 SDValue N0 = N->getOperand(0);
1572 SDValue N1 = N->getOperand(1);
1573 SDValue N2 = N->getOperand(2);
1574 SDValue N3 = N->getOperand(3);
1575 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1576 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1577 EVT VT = N0.getValueType();
1578 // Canonicalize multiplicative constant to RHS. If both multiplicative
1579 // operands are constant canonicalize smallest to RHS.
1580 if ((N0C && !N1C) ||
1581 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1582 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1583 N1, N0, N2, N3);
1584
1585 // lmul(x, 0, a, b)
1586 if (N1C && N1C->isZero()) {
1587 // If the high result is unused fold to add(a, b)
1588 if (N->hasNUsesOfValue(0, 0)) {
1589 SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1590 SDValue Ops[] = { Lo, Lo };
1591 return DAG.getMergeValues(Ops, dl);
1592 }
1593 // Otherwise fold to ladd(a, b, 0)
1594 SDValue Result =
1595 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1596 SDValue Carry(Result.getNode(), 1);
1597 SDValue Ops[] = { Carry, Result };
1598 return DAG.getMergeValues(Ops, dl);
1599 }
1600 }
1601 break;
1602 case ISD::ADD: {
1603 // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1604 // lmul(x, y, a, b). The high result of lmul will be ignored.
1605 // This is only profitable if the intermediate results are unused
1606 // elsewhere.
1607 SDValue Mul0, Mul1, Addend0, Addend1;
1608 if (N->getValueType(0) == MVT::i32 &&
1609 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1610 SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1611 DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1612 Mul1, Addend0, Addend1);
1613 SDValue Result(Ignored.getNode(), 1);
1614 return Result;
1615 }
1616 APInt HighMask = APInt::getHighBitsSet(64, 32);
1617 // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1618 // lmul(x, y, a, b) if all operands are zero-extended. We do this
1619 // before type legalization as it is messy to match the operands after
1620 // that.
1621 if (N->getValueType(0) == MVT::i64 &&
1622 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1623 DAG.MaskedValueIsZero(Mul0, HighMask) &&
1624 DAG.MaskedValueIsZero(Mul1, HighMask) &&
1625 DAG.MaskedValueIsZero(Addend0, HighMask) &&
1626 DAG.MaskedValueIsZero(Addend1, HighMask)) {
1627 SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1628 Mul0, DAG.getConstant(0, dl, MVT::i32));
1629 SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1630 Mul1, DAG.getConstant(0, dl, MVT::i32));
1631 SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1632 Addend0, DAG.getConstant(0, dl, MVT::i32));
1633 SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1634 Addend1, DAG.getConstant(0, dl, MVT::i32));
1635 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1636 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1637 Addend0L, Addend1L);
1638 SDValue Lo(Hi.getNode(), 1);
1639 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1640 }
1641 }
1642 break;
1643 case ISD::STORE: {
1644 // Replace unaligned store of unaligned load with memmove.
1645 StoreSDNode *ST = cast<StoreSDNode>(N);
1646 if (!DCI.isBeforeLegalize() ||
1648 ST->getMemoryVT(),
1649 *ST->getMemOperand()) ||
1650 ST->isVolatile() || ST->isIndexed()) {
1651 break;
1652 }
1653 SDValue Chain = ST->getChain();
1654
1655 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1656 assert((StoreBits % 8) == 0 &&
1657 "Store size in bits must be a multiple of 8");
1658 Align Alignment = ST->getAlign();
1659
1660 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1661 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1662 LD->getAlign() == Alignment &&
1663 !LD->isVolatile() && !LD->isIndexed() &&
1665 bool isTail = isInTailCallPosition(DAG, ST, Chain);
1666 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
1667 DAG.getConstant(StoreBits / 8, dl, MVT::i32),
1668 Alignment, false, nullptr, isTail,
1669 ST->getPointerInfo(), LD->getPointerInfo());
1670 }
1671 }
1672 break;
1673 }
1674 }
1675 return SDValue();
1676}
1677
1678void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1679 KnownBits &Known,
1680 const APInt &DemandedElts,
1681 const SelectionDAG &DAG,
1682 unsigned Depth) const {
1683 Known.resetAll();
1684 switch (Op.getOpcode()) {
1685 default: break;
1686 case XCoreISD::LADD:
1687 case XCoreISD::LSUB:
1688 if (Op.getResNo() == 1) {
1689 // Top bits of carry / borrow are clear.
1690 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1691 Known.getBitWidth() - 1);
1692 }
1693 break;
1695 {
1696 unsigned IntNo = Op.getConstantOperandVal(1);
1697 switch (IntNo) {
1698 case Intrinsic::xcore_getts:
1699 // High bits are known to be zero.
1700 Known.Zero =
1701 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 16);
1702 break;
1703 case Intrinsic::xcore_int:
1704 case Intrinsic::xcore_inct:
1705 // High bits are known to be zero.
1706 Known.Zero =
1707 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 8);
1708 break;
1709 case Intrinsic::xcore_testct:
1710 // Result is either 0 or 1.
1711 Known.Zero =
1712 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 1);
1713 break;
1714 case Intrinsic::xcore_testwct:
1715 // Result is in the range 0 - 4.
1716 Known.Zero =
1717 APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 3);
1718 break;
1719 }
1720 }
1721 break;
1722 }
1723}
1724
1725//===----------------------------------------------------------------------===//
1726// Addressing mode description hooks
1727//===----------------------------------------------------------------------===//
1728
1729static inline bool isImmUs(int64_t val)
1730{
1731 return (val >= 0 && val <= 11);
1732}
1733
1734static inline bool isImmUs2(int64_t val)
1735{
1736 return (val%2 == 0 && isImmUs(val/2));
1737}
1738
1739static inline bool isImmUs4(int64_t val)
1740{
1741 return (val%4 == 0 && isImmUs(val/4));
1742}
1743
1744/// isLegalAddressingMode - Return true if the addressing mode represented
1745/// by AM is legal for this target, for a load/store of the specified type.
1747 const AddrMode &AM, Type *Ty,
1748 unsigned AS,
1749 Instruction *I) const {
1750 if (Ty->getTypeID() == Type::VoidTyID)
1751 return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1752
1753 unsigned Size = DL.getTypeAllocSize(Ty);
1754 if (AM.BaseGV) {
1755 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1756 AM.BaseOffs%4 == 0;
1757 }
1758
1759 switch (Size) {
1760 case 1:
1761 // reg + imm
1762 if (AM.Scale == 0) {
1763 return isImmUs(AM.BaseOffs);
1764 }
1765 // reg + reg
1766 return AM.Scale == 1 && AM.BaseOffs == 0;
1767 case 2:
1768 case 3:
1769 // reg + imm
1770 if (AM.Scale == 0) {
1771 return isImmUs2(AM.BaseOffs);
1772 }
1773 // reg + reg<<1
1774 return AM.Scale == 2 && AM.BaseOffs == 0;
1775 default:
1776 // reg + imm
1777 if (AM.Scale == 0) {
1778 return isImmUs4(AM.BaseOffs);
1779 }
1780 // reg + reg<<2
1781 return AM.Scale == 4 && AM.BaseOffs == 0;
1782 }
1783}
1784
1785//===----------------------------------------------------------------------===//
1786// XCore Inline Assembly Support
1787//===----------------------------------------------------------------------===//
1788
1789std::pair<unsigned, const TargetRegisterClass *>
1790XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1791 StringRef Constraint,
1792 MVT VT) const {
1793 if (Constraint.size() == 1) {
1794 switch (Constraint[0]) {
1795 default : break;
1796 case 'r':
1797 return std::make_pair(0U, &XCore::GRRegsRegClass);
1798 }
1799 }
1800 // Use the default implementation in TargetLowering to convert the register
1801 // constraint into a member of a register class.
1802 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1803}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Mark last scratch load
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
Register const TargetRegisterInfo * TRI
uint64_t High
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
static bool isImmUs(int64_t val)
static bool isImmUs4(int64_t val)
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
static bool isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0, SDValue &Addend1, bool requireIntermediatesHaveOneUse)
isADDADDMUL - Return whether Op is in a form that is equivalent to add(add(mul(x,y),...
static bool isImmUs2(int64_t val)
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:296
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
The address of a basic block.
Definition: Constants.h:899
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
int64_t getLocMemOffset() const
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1274
uint64_t getZExtValue() const
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Definition: DataLayout.cpp:850
A debug info location.
Definition: DebugLoc.h:124
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
Definition: GlobalValue.h:530
LLVM_ABI StringRef getSection() const
Definition: Globals.cpp:191
bool hasSection() const
Definition: GlobalValue.h:292
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Definition: Globals.cpp:132
Type * getValueType() const
Definition: GlobalValue.h:298
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
This class is used to represent ISD::LOAD nodes.
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:72
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:229
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:813
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:504
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:839
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:498
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:808
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:493
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:511
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:777
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:581
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:269
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:154
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:83
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
@ VoidTyID
type with no size
Definition: Type.h:63
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:311
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:258
TypeID getTypeID() const
Return the type id for the type.
Definition: Type.h:136
LLVM Value Representation.
Definition: Value.h:75
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition: Value.cpp:953
static int stackSlotSize()
Stack slot size (4 bytes)
XCoreFunctionInfo - This class is derived from MachineFunction private XCore target-specific informat...
void setReturnStackOffset(unsigned value)
int createLRSpillSlot(MachineFunction &MF)
const TargetRegisterInfo * getRegisterInfo() const override
const XCoreInstrInfo * getInstrInfo() const override
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
XCoreTargetLowering(const TargetMachine &TM, const XCoreSubtarget &Subtarget)
self_iterator getIterator()
Definition: ilist_node.h:134
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1236
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1232
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:270
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1265
@ FRAME_TO_ARGS_OFFSET
FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to first (possible) on-stack ar...
Definition: ISDOpcodes.h:140
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:259
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1141
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:835
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:215
@ GlobalAddress
Definition: ISDOpcodes.h:88
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
Definition: ISDOpcodes.h:1338
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1343
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:249
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
Definition: ISDOpcodes.h:1309
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:151
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:826
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1187
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1166
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition: ISDOpcodes.h:242
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1261
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:695
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:756
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:832
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:793
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1151
@ ConstantPool
Definition: ISDOpcodes.h:92
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:110
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:730
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1318
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:200
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:53
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1256
@ BlockAddress
Definition: ISDOpcodes.h:94
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:815
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
Definition: ISDOpcodes.h:1315
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:208
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:477
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static const unsigned CodeModelLargeSize
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Other
Any other memory.
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:235
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:74
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...