LLVM 22.0.0git
M68kISelLowering.cpp
Go to the documentation of this file.
1//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the interfaces that M68k uses to lower LLVM code into a
11/// selection DAG.
12///
13//===----------------------------------------------------------------------===//
14
15#include "M68kISelLowering.h"
16#include "M68kCallingConv.h"
17#include "M68kMachineFunction.h"
19#include "M68kSubtarget.h"
20#include "M68kTargetMachine.h"
23
24#include "llvm/ADT/Statistic.h"
33#include "llvm/IR/CallingConv.h"
37#include "llvm/Support/Debug.h"
41
42using namespace llvm;
43
44#define DEBUG_TYPE "M68k-isel"
45
46STATISTIC(NumTailCalls, "Number of tail calls");
47
// M68kTargetLowering constructor: registers the legal value types and
// per-node legalization actions for the M68k backend.
// NOTE(review): this listing is an excerpt with gaps — the doxygen line
// numbers jump (48, 55-56, 67-69, 80-86, ...), so the constructor signature
// and a number of setOperationAction/setLoadExtAction calls are not visible
// here. Do not assume the visible calls are the complete configuration.
49 const M68kSubtarget &STI)
50 : TargetLowering(TM), Subtarget(STI), TM(TM) {
51
52 MVT PtrVT = MVT::i32;
53
54 // This is based on M68k SetCC (scc) setting the destination byte to all 1s.
55 // See also getSetCCResultType().
57
// The stack pointer register is taken from the subtarget's register info so
// save/restore lowering targets the right physical register.
58 auto *RegInfo = Subtarget.getRegisterInfo();
59 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
60
61 // Set up the register classes.
62 addRegisterClass(MVT::i8, &M68k::DR8RegClass);
63 addRegisterClass(MVT::i16, &M68k::XR16RegClass);
64 addRegisterClass(MVT::i32, &M68k::XR32RegClass);
65
// NOTE(review): the body of this loop (lines 67-69) is missing from the
// excerpt; presumably it configures load-extension actions per integer VT.
66 for (auto VT : MVT::integer_valuetypes()) {
70 }
71
72 // We don't accept any truncstore of integer registers.
73 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
74 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
75 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
76 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
77 setTruncStoreAction(MVT::i32, MVT::i8, Expand);
78 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
79
// NOTE(review): the actions chosen in both branches of this if/else (lines
// 83 and 85-86) are on lines missing from the excerpt.
82 if (Subtarget.atLeastM68020())
84 else
87
// Division/remainder-style ops: 8-bit promoted, 16-bit native, 32-bit via
// libcall. The op list itself (lines 89-90) is missing from the excerpt.
88 for (auto OP :
91 setOperationAction(OP, MVT::i8, Promote);
92 setOperationAction(OP, MVT::i16, Legal);
93 setOperationAction(OP, MVT::i32, LibCall);
94 }
95
96 for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
97 setOperationAction(OP, MVT::i8, Expand);
98 setOperationAction(OP, MVT::i16, Expand);
99 }
100
// Multiply-with-overflow is custom-lowered for all three integer widths.
101 for (auto OP : {ISD::SMULO, ISD::UMULO}) {
102 setOperationAction(OP, MVT::i8, Custom);
103 setOperationAction(OP, MVT::i16, Custom);
104 setOperationAction(OP, MVT::i32, Custom);
105 }
106
// NOTE(review): the loop header naming these ops (line 107) is missing.
108 setOperationAction(OP, MVT::i32, Custom);
109
110 // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
111 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
116 }
117
118 // SADDO and friends are legal with this setup, i hope
119 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
124 }
125
128
// NOTE(review): the bodies of the next two per-VT loops (lines 130-134 and
// 138-141) are missing from the excerpt.
129 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
135 }
136
137 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
142 }
143
150
155
158
160
162
163 // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
164 // for subtarget < M68020
166 setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32},
167 Subtarget.atLeastM68020() ? Legal : LibCall);
168
170
171 // M68k does not have native read-modify-write support, so expand all of them
172 // to `__sync_fetch_*` for target < M68020, otherwise expand to CmpxChg.
173 // See `shouldExpandAtomicRMWInIR` below.
// NOTE(review): the atomic-RMW opcode list (lines 176-186) is missing here.
175 {
187 },
188 {MVT::i8, MVT::i16, MVT::i32}, LibCall);
189
191}
192
// NOTE(review): tail of a member function whose signature and first result
// line (source lines 193-194, 196-197) are missing from this excerpt. From
// the constructor comment "See `shouldExpandAtomicRMWInIR` below", this is
// presumably shouldExpandAtomicRMWInIR, choosing between two expansion
// kinds based on whether the subtarget is at least a 68020 — TODO confirm
// against the full source.
195 return Subtarget.atLeastM68020()
198}
199
// NOTE(review): the signature (source lines 200-201) is missing from this
// excerpt; the visible body returns data register D0. Presumably this is
// the exception-pointer register accessor — confirm against the full file.
202 return M68k::D0;
203}
204
// NOTE(review): the signature (source lines 205-206) is missing from this
// excerpt; the visible body returns data register D1. Presumably this is
// the exception-selector register accessor — confirm against the full file.
207 return M68k::D1;
208}
209
// Maps an inline-asm constraint string to an InlineAsm::ConstraintCode via
// StringSwitch.
// NOTE(review): the signature (lines 210-211) and the Case/Default entries
// (lines 213, 215-216) are missing from this excerpt; only the 'U' borrow
// note survives.
212 return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
214 // We borrow ConstraintCode::Um for 'U'.
217}
218
// getSetCCResultType: the result of a SETCC on M68k is always an i8
// (scc writes a whole byte of 0x00 or 0xFF), regardless of the compared VT.
// NOTE(review): the first signature line (source line 219) is missing from
// this excerpt.
220 LLVMContext &Context, EVT VT) const {
221 // M68k SETcc produces either 0x00 or 0xFF
222 return MVT::i8;
223}
224
// Returns the type to use for a scalar shift amount: the value's own simple
// VT when it has one, otherwise an integer type as wide as a pointer.
// NOTE(review): the first signature line (source line 225) is missing from
// this excerpt.
226 EVT Ty) const {
227 if (Ty.isSimple()) {
228 return Ty.getSimpleVT();
229 }
// Non-simple types fall back to the pointer-sized integer type of address
// space 0.
230 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
231}
232
233#include "M68kGenCallingConv.inc"
234
236
// Classifies a call's struct-return style from the first outgoing argument:
// no sret flag -> NotStructReturn; sret+inreg -> RegStructReturn;
// plain sret -> StackStructReturn.
// NOTE(review): the function's own signature line (source line 238) is
// missing from this excerpt; from the uses of Outs below it takes the
// outgoing-argument list — confirm against the full file.
237static StructReturnType
239 if (Outs.empty())
240 return NotStructReturn;
241
242 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
243 if (!Flags.isSRet())
244 return NotStructReturn;
245 if (Flags.isInReg())
246 return RegStructReturn;
247 return StackStructReturn;
248}
249
250/// Determines whether a function uses struct return semantics.
// Same classification as the call-side variant above, but driven by the
// first *incoming* argument's sret/inreg flags.
// NOTE(review): the signature line (source line 252) is missing from this
// excerpt; from the uses of Ins below it takes the incoming-argument list.
251static StructReturnType
253 if (Ins.empty())
254 return NotStructReturn;
255
256 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
257 if (!Flags.isSRet())
258 return NotStructReturn;
259 if (Flags.isInReg())
260 return RegStructReturn;
261 return StackStructReturn;
262}
263
264/// Make a copy of an aggregate at address specified by "Src" to address
265/// "Dst" with size and alignment information specified by the specific
266/// parameter attribute. The copy will be passed as a byval function parameter.
// NOTE(review): the first signature line (source line 267, declaring Dst and
// Src) is missing from this excerpt.
268 SDValue Chain, ISD::ArgFlagsTy Flags,
269 SelectionDAG &DAG, const SDLoc &DL) {
// The byval size comes from the argument flags and is materialized as an
// i32 constant for the memcpy length operand.
270 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
271
// AlwaysInline=true: the copy must not become a libcall, since it runs
// during call lowering.
272 return DAG.getMemcpy(
273 Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
274 /*isVolatile=*/false, /*AlwaysInline=*/true,
275 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
276}
277
278/// Return true if the calling convention is one that we can guarantee TCO for.
279static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
280
281/// Return true if we might ever do TCO for calls with this calling convention.
// NOTE(review): the signature line (source line 282) is missing from this
// excerpt; from the switch below it takes a CallingConv::ID — presumably
// `static bool mayTailCallThisCC(CallingConv::ID CC)`.
283 switch (CC) {
284 // C calling conventions:
285 case CallingConv::C:
286 return true;
// Everything else may tail-call only if TCO can be guaranteed for it.
287 default:
288 return canGuaranteeTCO(CC);
289 }
290}
291
292/// Return true if the function is being made into a tailcall target by
293/// changing its ABI.
294static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
295 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
296}
297
298/// Return true if the given stack call argument is already available in the
299/// same position (relatively) of the caller's incoming argument stack.
// NOTE(review): parameter lines 301-302 are missing from this excerpt; the
// body also uses Flags, MRI and MFI, which are presumably declared there.
300static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
303 const M68kInstrInfo *TII,
304 const CCValAssign &VA) {
305 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
306
307 for (;;) {
308 // Look through nodes that don't alter the bits of the incoming value.
// NOTE(review): the opcode test guarding this look-through (source line
// 310) is missing from the excerpt.
309 unsigned Op = Arg.getOpcode();
311 Arg = Arg.getOperand(0);
312 continue;
313 }
// A truncate of an AssertZext back to the truncated type is bit-preserving,
// so look through it as well.
314 if (Op == ISD::TRUNCATE) {
315 const SDValue &TruncInput = Arg.getOperand(0);
316 if (TruncInput.getOpcode() == ISD::AssertZext &&
317 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
318 Arg.getValueType()) {
319 Arg = TruncInput.getOperand(0);
320 continue;
321 }
322 }
323 break;
324 }
325
326 int FI = INT_MAX;
327 if (Arg.getOpcode() == ISD::CopyFromReg) {
// NOTE(review): a check on VR (source line 329) is missing from the excerpt
// before the early return below.
328 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
330 return false;
331 MachineInstr *Def = MRI->getVRegDef(VR);
332 if (!Def)
333 return false;
334 if (!Flags.isByVal()) {
335 if (!TII->isLoadFromStackSlot(*Def, FI))
336 return false;
337 } else {
// For byval, accept an LEA of a frame index and compare the byval size
// instead of the SDValue's type size.
338 unsigned Opcode = Def->getOpcode();
339 if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
340 Def->getOperand(1).isFI()) {
341 FI = Def->getOperand(1).getIndex();
342 Bytes = Flags.getByValSize();
343 } else
344 return false;
345 }
346 } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
347 if (Flags.isByVal())
348 // ByVal argument is passed in as a pointer but it's now being
349 // dereferenced. e.g.
350 // define @foo(%struct.X* %A) {
351 // tail call @bar(%struct.X* byval %A)
352 // }
353 return false;
354 SDValue Ptr = Ld->getBasePtr();
355 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
356 if (!FINode)
357 return false;
358 FI = FINode->getIndex();
359 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
360 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
361 FI = FINode->getIndex();
362 Bytes = Flags.getByValSize();
363 } else
364 return false;
365
// At this point FI must have been resolved; it must name a fixed (incoming
// argument area) object at exactly the requested offset.
366 assert(FI != INT_MAX);
367 if (!MFI.isFixedObjectIndex(FI))
368 return false;
369
370 if (Offset != MFI.getObjectOffset(FI))
371 return false;
372
373 if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
374 // If the argument location is wider than the argument type, check that any
375 // extension flags match.
376 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
377 Flags.isSExt() != MFI.isObjectSExt(FI)) {
378 return false;
379 }
380 }
381
382 return Bytes == MFI.getObjectSize(FI);
383}
384
// Returns (lazily creating on first use) the frame index that addresses the
// slot holding this function's return address, as a FrameIndex SDValue.
// NOTE(review): source lines 385 and 387-388 are missing from this excerpt;
// they presumably declare the return type and the MF/FuncInfo locals used
// below — confirm against the full file.
386M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
389 int ReturnAddrIndex = FuncInfo->getRAIndex();
390
// A cached index of 0 means "not created yet".
391 if (ReturnAddrIndex == 0) {
392 // Set up a frame object for the return address.
// The slot sits just below the incoming frame (offset -SlotSize) and is
// remembered in the function info so it is only created once.
393 unsigned SlotSize = Subtarget.getSlotSize();
394 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
395 SlotSize, -(int64_t)SlotSize, false);
396 FuncInfo->setRAIndex(ReturnAddrIndex);
397 }
398
399 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
400}
401
// Loads the caller's saved return address (used when a tail call resizes the
// frame and the return address must be relocated). The loaded value is
// handed back through OutRetAddr; the function returns the load's second
// result, i.e. its output chain, so callers can sequence after the load.
402SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
403 SDValue &OutRetAddr,
404 SDValue Chain,
405 bool IsTailCall, int FPDiff,
406 const SDLoc &DL) const {
// NOTE(review): IsTailCall and FPDiff are accepted but not read in the
// visible body.
407 EVT VT = getPointerTy(DAG.getDataLayout());
408 OutRetAddr = getReturnAddressFrameIndex(DAG);
409
410 // Load the "old" Return address.
411 OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
412 return SDValue(OutRetAddr.getNode(), 1);
413}
414
// Stores the previously loaded return address (RetFI) into the relocated
// slot of the resized tail-call frame. A zero FPDiff means the frame is not
// moving, so the store is unnecessary and the chain is returned unchanged.
415SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
416 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
417 EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
418 if (!FPDiff)
419 return Chain;
420
421 // Calculate the new stack slot for the return address.
// The new fixed object sits at FPDiff - SlotSize relative to the incoming
// frame, i.e. where the return address lands after the frame is shifted.
422 int NewFO = MF.getFrameInfo().CreateFixedObject(
423 SlotSize, (int64_t)FPDiff - SlotSize, false);
424
425 SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
426 // Store the return address to the appropriate stack slot.
// NOTE(review): the store's MachinePointerInfo argument (source line 429)
// is missing from this excerpt.
427 Chain = DAG.getStore(
428 Chain, DL, RetFI, NewFI,
430 return Chain;
431}
432
// Lowers one stack-passed formal argument: creates the fixed stack object
// for the incoming slot and either returns its address (byval) or a load of
// the value, truncating when the location was extended.
// NOTE(review): source lines 433 and 435 (return type and the Ins parameter
// line) and the condition on line 446 are missing from this excerpt.
434M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
436 const SDLoc &DL, SelectionDAG &DAG,
437 const CCValAssign &VA,
438 MachineFrameInfo &MFI,
439 unsigned ArgIdx) const {
440 // Create the nodes corresponding to a load from this parameter slot.
441 ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
442 EVT ValVT;
443
444 // If value is passed by pointer we have address passed instead of the value
445 // itself.
// NOTE(review): the `if` guarding this choice (source line 446) is missing
// from the excerpt.
447 ValVT = VA.getLocVT();
448 else
449 ValVT = VA.getValVT();
450
451 // Because we are dealing with BE architecture we need to offset loading of
452 // partial types
// Big-endian: a sub-word value lives in the low-addressed... rather, the
// last bytes of its 4-byte slot, so bias the offset by 3 (i8) or 2 (i16).
453 int Offset = VA.getLocMemOffset();
454 if (VA.getValVT() == MVT::i8) {
455 Offset += 3;
456 } else if (VA.getValVT() == MVT::i16) {
457 Offset += 2;
458 }
459
460 // TODO Interrupt handlers
461 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
462 // taken by a return address.
463
464 // FIXME For now, all byval parameter objects are marked mutable. This can
465 // be changed with more analysis. In case of tail call optimization mark all
466 // arguments mutable. Since they could be overwritten by lowering of arguments
467 // in case of a tail call.
468 bool AlwaysUseMutable = shouldGuaranteeTCO(
469 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
470 bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();
471
// byval arguments are represented by the *address* of their stack copy, not
// by a load of the bytes.
472 if (Flags.isByVal()) {
473 unsigned Bytes = Flags.getByValSize();
474 if (Bytes == 0)
475 Bytes = 1; // Don't create zero-sized stack objects.
476 int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
477 // TODO Interrupt handlers
478 // Adjust SP offset of interrupt parameter.
479 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
480 } else {
481 int FI =
482 MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);
483
484 // Set SExt or ZExt flag.
485 if (VA.getLocInfo() == CCValAssign::ZExt) {
486 MFI.setObjectZExt(FI, true);
487 } else if (VA.getLocInfo() == CCValAssign::SExt) {
488 MFI.setObjectSExt(FI, true);
489 }
490
491 // TODO Interrupt handlers
492 // Adjust SP offset of interrupt parameter.
493
// NOTE(review): the load's MachinePointerInfo argument (source line 497) is
// missing from this excerpt.
494 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
495 SDValue Val = DAG.getLoad(
496 ValVT, DL, Chain, FIN,
498 return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
499 : Val;
500 }
501}
502
// Emits the store (or byval copy) of one outgoing stack argument at
// StackPtr + the location's memory offset, returning the resulting chain.
503SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
504 SDValue Arg, const SDLoc &DL,
505 SelectionDAG &DAG,
506 const CCValAssign &VA,
507 ISD::ArgFlagsTy Flags) const {
508 unsigned LocMemOffset = VA.getLocMemOffset();
509 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
510 PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
511 StackPtr, PtrOff);
// byval aggregates are copied to the slot instead of stored as one value.
512 if (Flags.isByVal())
513 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
514
// NOTE(review): the store's MachinePointerInfo argument (source line 517)
// is missing from this excerpt.
515 return DAG.getStore(
516 Chain, DL, Arg, PtrOff,
518}
519
520//===----------------------------------------------------------------------===//
521// Call
522//===----------------------------------------------------------------------===//
523
// LowerCall: lowers an outgoing call — classifies tail calls/sibcalls,
// assigns argument locations, materializes register and stack argument
// copies, relocates the return address for frame-resizing tail calls, forms
// the CALL/TC_RETURN node, and finally copies results out via
// LowerCallResult.
// NOTE(review): this listing is an excerpt with gaps; several declaration
// lines (e.g. MF, Outs/Ins, SR, ArgLocs, RegsToPass, StackPtr, MFI, Ops and
// some case labels / call arguments) fall on doxygen lines missing here.
524SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
525 SmallVectorImpl<SDValue> &InVals) const {
526 SelectionDAG &DAG = CLI.DAG;
527 SDLoc &DL = CLI.DL;
529 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
531 SDValue Chain = CLI.Chain;
532 SDValue Callee = CLI.Callee;
533 CallingConv::ID CallConv = CLI.CallConv;
534 bool &IsTailCall = CLI.IsTailCall;
535 bool IsVarArg = CLI.IsVarArg;
536
539 bool IsSibcall = false;
541 // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
542
// Interrupt handlers use a different frame layout and cannot be targets of
// ordinary calls.
543 if (CallConv == CallingConv::M68k_INTR)
544 report_fatal_error("M68k interrupts may not be called directly");
545
546 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
547 if (Attr.getValueAsBool())
548 IsTailCall = false;
549
550 // FIXME Add tailcalls support
551
552 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
553 if (IsMustTail) {
554 // Force this to be a tail call. The verifier rules are enough to ensure
555 // that we can lower this successfully without moving the return address
556 // around.
557 IsTailCall = true;
558 } else if (IsTailCall) {
559 // Check if it's really possible to do a tail call.
560 IsTailCall = IsEligibleForTailCallOptimization(
561 Callee, CallConv, IsVarArg, SR != NotStructReturn,
562 MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
563 DAG);
564
565 // Sibcalls are automatically detected tailcalls which do not require
566 // ABI changes.
567 if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
568 IsSibcall = true;
569
570 if (IsTailCall)
571 ++NumTailCalls;
572 }
573
574 assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
575 "Var args not supported with calling convention fastcc");
576
577 // Analyze operands of the call, assigning locations to each operand.
579 SmallVector<Type *, 4> ArgTypes;
580 for (const auto &Arg : CLI.getArgs())
581 ArgTypes.emplace_back(Arg.Ty);
582 M68kCCState CCInfo(ArgTypes, CallConv, IsVarArg, MF, ArgLocs,
583 *DAG.getContext());
584 CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
585
586 // Get a count of how many bytes are to be pushed on the stack.
587 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
588 if (IsSibcall) {
589 // This is a sibcall. The memory operands are available in caller's
590 // own caller's stack.
591 NumBytes = 0;
592 } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
593 canGuaranteeTCO(CallConv)) {
594 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
595 }
596
// FPDiff is how far the frame (and return address) must move for an
// ABI-changing tail call: caller's popped bytes minus this call's bytes.
597 int FPDiff = 0;
598 if (IsTailCall && !IsSibcall && !IsMustTail) {
599 // Lower arguments at fp - stackoffset + fpdiff.
600 unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();
601
602 FPDiff = NumBytesCallerPushed - NumBytes;
603
604 // Set the delta of movement of the returnaddr stackslot.
605 // But only set if delta is greater than previous delta.
606 if (FPDiff < MFI->getTCReturnAddrDelta())
607 MFI->setTCReturnAddrDelta(FPDiff);
608 }
609
610 unsigned NumBytesToPush = NumBytes;
611 unsigned NumBytesToPop = NumBytes;
612
613 // If we have an inalloca argument, all stack space has already been allocated
614 // for us and be right at the top of the stack. We don't support multiple
615 // arguments passed in memory when using inalloca.
616 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
617 NumBytesToPush = 0;
618 if (!ArgLocs.back().isMemLoc())
619 report_fatal_error("cannot use inalloca attribute on a register "
620 "parameter");
621 if (ArgLocs.back().getLocMemOffset() != 0)
622 report_fatal_error("any parameter with the inalloca attribute must be "
623 "the only memory argument");
624 }
625
626 if (!IsSibcall)
627 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
628 NumBytes - NumBytesToPush, DL);
629
630 SDValue RetFI;
631 // Load return address for tail calls.
632 if (IsTailCall && FPDiff)
633 Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);
634
636 SmallVector<SDValue, 8> MemOpChains;
638
639 // Walk the register/memloc assignments, inserting copies/loads. In the case
640 // of tail call optimization arguments are handle later.
641 const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
642 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
643 ISD::ArgFlagsTy Flags = Outs[i].Flags;
644
645 // Skip inalloca arguments, they have already been written.
646 if (Flags.isInAlloca())
647 continue;
648
649 CCValAssign &VA = ArgLocs[i];
650 EVT RegVT = VA.getLocVT();
651 SDValue Arg = OutVals[i];
652 bool IsByVal = Flags.isByVal();
653
654 // Promote the value if needed.
// NOTE(review): the `case` labels of this switch fall on lines missing from
// the excerpt; only the per-case bodies are visible.
655 switch (VA.getLocInfo()) {
656 default:
657 llvm_unreachable("Unknown loc info!");
659 break;
661 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
662 break;
664 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
665 break;
667 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
668 break;
670 Arg = DAG.getBitcast(RegVT, Arg);
671 break;
673 // Store the argument.
674 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
675 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
676 Chain = DAG.getStore(
677 Chain, DL, Arg, SpillSlot,
679 Arg = SpillSlot;
680 break;
681 }
682 }
683
684 if (VA.isRegLoc()) {
685 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
686 } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
687 assert(VA.isMemLoc());
688 if (!StackPtr.getNode()) {
689 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
691 }
692 MemOpChains.push_back(
693 LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
694 }
695 }
696
697 if (!MemOpChains.empty())
698 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
699
700 // FIXME Make sure PIC style GOT works as expected
701 // The only time GOT is really needed is for Medium-PIC static data
702 // otherwise we are happy with pc-rel or static references
703
704 if (IsVarArg && IsMustTail) {
705 const auto &Forwards = MFI->getForwardedMustTailRegParms();
706 for (const auto &F : Forwards) {
707 SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
708 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
709 }
710 }
711
712 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
713 // don't need this because the eligibility check rejects calls that require
714 // shuffling arguments passed in memory.
715 if (!IsSibcall && IsTailCall) {
716 // Force all the incoming stack arguments to be loaded from the stack
717 // before any new outgoing arguments are stored to the stack, because the
718 // outgoing stack slots may alias the incoming argument stack slots, and
719 // the alias isn't otherwise explicit. This is slightly more conservative
720 // than necessary, because it means that each store effectively depends
721 // on every argument instead of just those arguments it would clobber.
722 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
723
724 SmallVector<SDValue, 8> MemOpChains2;
725 SDValue FIN;
726 int FI = 0;
727 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
728 CCValAssign &VA = ArgLocs[i];
729 if (VA.isRegLoc())
730 continue;
731 assert(VA.isMemLoc());
732 SDValue Arg = OutVals[i];
733 ISD::ArgFlagsTy Flags = Outs[i].Flags;
734 // Skip inalloca arguments. They don't require any work.
735 if (Flags.isInAlloca())
736 continue;
737 // Create frame index.
738 int32_t Offset = VA.getLocMemOffset() + FPDiff;
739 uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
740 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
741 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
742
743 if (Flags.isByVal()) {
744 // Copy relative to framepointer.
// NOTE(review): the declaration of `Source` used below (lines 745/750-751)
// is missing from this excerpt.
746 if (!StackPtr.getNode()) {
747 StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
749 }
752
753 MemOpChains2.push_back(
754 CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
755 } else {
756 // Store relative to framepointer.
757 MemOpChains2.push_back(DAG.getStore(
758 ArgChain, DL, Arg, FIN,
760 }
761 }
762
763 if (!MemOpChains2.empty())
764 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);
765
766 // Store the return address to the appropriate stack slot.
767 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
769 Subtarget.getSlotSize(), FPDiff, DL);
770 }
771
772 // Build a sequence of copy-to-reg nodes chained together with token chain
773 // and flag operands which copy the outgoing args into registers.
774 SDValue InGlue;
775 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
776 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
777 RegsToPass[i].second, InGlue);
778 InGlue = Chain.getValue(1);
779 }
780
781 if (Callee->getOpcode() == ISD::GlobalAddress) {
782 // If the callee is a GlobalAddress node (quite common, every direct call
783 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
784 // it.
785 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
786
787 // We should use extra load for direct calls to dllimported functions in
788 // non-JIT mode.
789 const GlobalValue *GV = G->getGlobal();
790 if (!GV->hasDLLImportStorageClass()) {
791 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
792
// NOTE(review): the assignment head for this TargetGlobalAddress (line 793)
// is missing from the excerpt.
794 GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
795
796 if (OpFlags == M68kII::MO_GOTPCREL) {
797
798 // Add a wrapper.
799 Callee = DAG.getNode(M68kISD::WrapperPC, DL,
800 getPointerTy(DAG.getDataLayout()), Callee);
801
802 // Add extra indirection
803 Callee = DAG.getLoad(
804 getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee,
806 }
807 }
808 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
810 unsigned char OpFlags =
811 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
812
814 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
815 }
816
// NOTE(review): the declaration of the call-operand vector `Ops` (line 817)
// is missing from the excerpt.
818
819 if (!IsSibcall && IsTailCall) {
820 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, DL);
821 InGlue = Chain.getValue(1);
822 }
823
824 Ops.push_back(Chain);
825 Ops.push_back(Callee);
826
// Tail calls carry the frame-pointer delta as an explicit operand.
827 if (IsTailCall)
828 Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));
829
830 // Add argument registers to the end of the list so that they are known live
831 // into the call.
832 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
833 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
834 RegsToPass[i].second.getValueType()));
835
836 // Add a register mask operand representing the call-preserved registers.
837 const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
838 assert(Mask && "Missing call preserved mask for calling convention");
839
840 Ops.push_back(DAG.getRegisterMask(Mask));
841
842 if (InGlue.getNode())
843 Ops.push_back(InGlue);
844
845 if (IsTailCall) {
847 return DAG.getNode(M68kISD::TC_RETURN, DL, MVT::Other, Ops);
848 }
849
850 // Returns a chain & a flag for retval copy to use.
851 Chain = DAG.getNode(M68kISD::CALL, DL, {MVT::Other, MVT::Glue}, Ops);
852 InGlue = Chain.getValue(1);
853
854 // Create the CALLSEQ_END node.
855 unsigned NumBytesForCalleeToPop;
// NOTE(review): the trailing argument(s) of isCalleePop (line 857) are
// missing from this excerpt.
856 if (M68k::isCalleePop(CallConv, IsVarArg,
858 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
859 } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
860 // If this is a call to a struct-return function, the callee
861 // pops the hidden struct pointer, so we have to push it back.
862 NumBytesForCalleeToPop = 4;
863 } else {
864 NumBytesForCalleeToPop = 0; // Callee pops nothing.
865 }
866
867 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
868 // No need to reset the stack after the call if the call doesn't return. To
869 // make the MI verify, we'll pretend the callee does it for us.
870 NumBytesForCalleeToPop = NumBytes;
871 }
872
873 // Returns a flag for retval copy to use.
874 if (!IsSibcall) {
875 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
876 InGlue, DL);
877 InGlue = Chain.getValue(1);
878 }
879
880 // Handle result values, copying them out of physregs into vregs that we
881 // return.
882 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
883 InVals);
884}
885
// Copies call results out of their assigned physical registers into InVals,
// threading the chain and glue through each CopyFromReg so the copies stay
// ordered immediately after the call.
// NOTE(review): the declaration of RVLocs (source line 892) is missing from
// this excerpt; presumably a SmallVector<CCValAssign, ...>.
886SDValue M68kTargetLowering::LowerCallResult(
887 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
888 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
889 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
890
891 // Assign locations to each value returned by this call.
893 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
894 *DAG.getContext());
895 CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);
896
897 // Copy all of the result registers out of their specified physreg.
898 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
899 CCValAssign &VA = RVLocs[i];
900 EVT CopyVT = VA.getLocVT();
901
902 /// ??? is this correct?
// CopyFromReg produces (value, chain, glue); result 1 is the new chain,
// result 0 the copied value, result 2 the glue consumed by the next copy.
903 Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InGlue)
904 .getValue(1);
905 SDValue Val = Chain.getValue(0);
906
// i1 results arrive extended in a wider register; truncate back to i1.
907 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
908 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
909
910 InGlue = Chain.getValue(2);
911 InVals.push_back(Val);
912 }
913
914 return Chain;
915}
916
917//===----------------------------------------------------------------------===//
918// Formal Arguments Calling Convention Implementation
919//===----------------------------------------------------------------------===//
920
// LowerFormalArguments: materializes this function's incoming arguments —
// register arguments become live-in CopyFromReg values (with assert-ext /
// truncate fixups), stack arguments go through LowerMemArgument — then
// handles sret copying, va_start frame index, musttail register forwarding,
// and the callee-pop byte count.
// NOTE(review): this listing has gaps; lines declaring MF, ArgLocs, MMFI,
// the indirect-argument condition (983), the sret vreg creation (1003), the
// Forwards vector (1032-1033) and two trailing conditions (1047, 1052) are
// missing from the excerpt.
921SDValue M68kTargetLowering::LowerFormalArguments(
922 SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
923 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
924 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
927 // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();
928
929 MachineFrameInfo &MFI = MF.getFrameInfo();
930
931 // Assign locations to all of the incoming arguments.
933 SmallVector<Type *, 4> ArgTypes;
934 for (const Argument &Arg : MF.getFunction().args())
935 ArgTypes.emplace_back(Arg.getType());
936 M68kCCState CCInfo(ArgTypes, CCID, IsVarArg, MF, ArgLocs, *DAG.getContext());
937
938 CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);
939
940 unsigned LastVal = ~0U;
941 SDValue ArgValue;
942 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
943 CCValAssign &VA = ArgLocs[i];
// Each value number must appear at most once in the location list.
944 assert(VA.getValNo() != LastVal && "Same value in different locations");
945 (void)LastVal;
946
947 LastVal = VA.getValNo();
948
949 if (VA.isRegLoc()) {
950 EVT RegVT = VA.getLocVT();
951 const TargetRegisterClass *RC;
952 if (RegVT == MVT::i32)
953 RC = &M68k::XR32RegClass;
954 else
955 llvm_unreachable("Unknown argument type!");
956
957 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
958 ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
959
960 // If this is an 8 or 16-bit value, it is really passed promoted to 32
961 // bits. Insert an assert[sz]ext to capture this, then truncate to the
962 // right size.
963 if (VA.getLocInfo() == CCValAssign::SExt) {
964 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
965 DAG.getValueType(VA.getValVT()));
966 } else if (VA.getLocInfo() == CCValAssign::ZExt) {
967 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
968 DAG.getValueType(VA.getValVT()));
969 } else if (VA.getLocInfo() == CCValAssign::BCvt) {
970 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
971 }
972
973 if (VA.isExtInLoc()) {
974 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
975 }
976 } else {
977 assert(VA.isMemLoc());
978 ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
979 }
980
981 // If value is passed via pointer - do a load.
982 // TODO Make sure this handling on indirect arguments is correct
// NOTE(review): the condition guarding this load (source line 983) is
// missing from the excerpt.
984 ArgValue =
985 DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());
986
987 InVals.push_back(ArgValue);
988 }
989
990 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
991 // Swift calling convention does not require we copy the sret argument
992 // into %D0 for the return. We don't set SRetReturnReg for Swift.
993 if (CCID == CallingConv::Swift)
994 continue;
995
996 // ABI require that for returning structs by value we copy the sret argument
997 // into %D0 for the return. Save the argument into a virtual register so
998 // that we can access it from the return points.
999 if (Ins[i].Flags.isSRet()) {
1000 unsigned Reg = MMFI->getSRetReturnReg();
1001 if (!Reg) {
// NOTE(review): the creation of the virtual register (source line 1003) is
// missing from the excerpt.
1002 MVT PtrTy = getPointerTy(DAG.getDataLayout());
1004 MMFI->setSRetReturnReg(Reg);
1005 }
1006 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
1007 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
// Only one sret argument is possible; stop after handling it.
1008 break;
1009 }
1010 }
1011
1012 unsigned StackSize = CCInfo.getStackSize();
1013 // Align stack specially for tail calls.
// NOTE(review): the condition guarding this realignment (line 1014) is
// missing from the excerpt.
1015 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1016
1017 // If the function takes variable number of arguments, make a frame index for
1018 // the start of the first vararg value... for expansion of llvm.va_start. We
1019 // can skip this if there are no va_start calls.
1020 if (MFI.hasVAStart()) {
1021 MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1022 }
1023
1024 if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
1025 // We forward some GPRs and some vector types.
1026 SmallVector<MVT, 2> RegParmTypes;
1027 MVT IntVT = MVT::i32;
1028 RegParmTypes.push_back(IntVT);
1029
1030 // Compute the set of forwarded registers. The rest are scratch.
1031 // ??? what is this for?
1034 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);
1035
1036 // Copy all forwards from physical to virtual registers.
1037 for (ForwardedRegister &F : Forwards) {
1038 // FIXME Can we use a less constrained schedule?
1039 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
1041 Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
1042 }
1043 }
1044
1045 // Some CCs need callee pop.
// NOTE(review): the trailing argument(s) of isCalleePop (line 1047) and the
// sret condition before line 1053 are missing from the excerpt.
1046 if (M68k::isCalleePop(CCID, IsVarArg,
1048 MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
1049 } else {
1050 MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
1051 // If this is an sret function, the return should pop the hidden pointer.
1053 MMFI->setBytesToPopOnReturn(4);
1054 }
1055
1056 MMFI->setArgumentStackSize(StackSize);
1057
1058 return Chain;
1059}
1060
1061//===----------------------------------------------------------------------===//
1062// Return Value Calling Convention Implementation
1063//===----------------------------------------------------------------------===//
1064
// CanLowerReturn: checks whether all return values fit the M68k return-value
// calling convention (RetCC_M68k) without falling back to sret demotion.
// NOTE(review): the declaration of RVLocs (source line 1069) is missing from
// this excerpt; presumably a SmallVector<CCValAssign, ...>.
1065bool M68kTargetLowering::CanLowerReturn(
1066 CallingConv::ID CCID, MachineFunction &MF, bool IsVarArg,
1067 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
1068 const Type *RetTy) const {
1070 CCState CCInfo(CCID, IsVarArg, MF, RVLocs, Context);
1071 return CCInfo.CheckReturn(Outs, RetCC_M68k);
1072}
1073
/// Lower outgoing return values: each value is promoted/bitcast to its
/// assigned location type, copied into the physical register chosen by
/// RetCC_M68k, and the copies are glued onto the final M68kISD::RET node.
SDValue
M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
                                bool IsVarArg,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {

  CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_M68k);

  SDValue Glue;
  // Operand #0 = Chain (updated below)
  RetOps.push_back(Chain);
  // Operand #1 = Bytes To Pop
  RetOps.push_back(
      DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt) {
      // For i1 vectors sign-extend so that each lane is all-ones/all-zeros.
      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
        ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
      else
        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
    } else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);

    // Glue each copy to the previous one so they stay adjacent to the RET.
    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Swift calling convention does not require we copy the sret argument
  // into %d0 for the return, and SRetReturnReg is not set for Swift.

  // ABI require that for returning structs by value we copy the sret argument
  // into %D0 for the return. Save the argument into a virtual register so that
  // we can access it from the return points.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If MFI.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case MFI->setSRetReturnReg() will have been called.
  if (unsigned SRetReg = MFI->getSRetReturnReg()) {
    // ??? Can i just move this to the top and escape this explanation?
    // When we have both sret and another return value, we should use the
    // original Chain stored in RetOps[0], instead of the current Chain updated
    // in the above loop. If we only have sret, RetOps[0] equals to Chain.

    // For the case of sret and another return value, we have
    //   Chain_0 at the function entry
    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
    // If we use Chain_1 in getCopyFromReg, we will have
    //   Val = getCopyFromReg(Chain_1)
    //   Chain_2 = getCopyToReg(Chain_1, Val) from below

    // getCopyToReg(Chain_0) will be glued together with
    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
    //    Data dependency from Unit B to Unit A due to usage of Val in
    //      getCopyToReg(Chain_1, Val)
    //    Chain dependency from Unit A to Unit B

    // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
    SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,

    // ??? How will this work if CC does not use registers for args passing?
    // ??? What if I return multiple structs?
    unsigned RetValReg = M68k::D0;
    Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Glue);
    Glue = Chain.getValue(1);

    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
}
1173
1174//===----------------------------------------------------------------------===//
1175// Fast Calling Convention (tail call) implementation
1176//===----------------------------------------------------------------------===//
1177
// Like std call, the callee cleans up the arguments. This scheme was adapted
// from the x86 backend, where one register was reserved for the tail-called
// function address and only two registers were free for 'inreg' argument
// passing; on M68k the call address is materialized in %A0/%A1 instead (see
// IsEligibleForTailCallOptimization below). Tail call optimization is
// performed provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// With GOT-style position independent code, only local (within-module) calls
// are supported at the moment. To keep the stack aligned according to the
// platform ABI, GetAlignedArgumentStackSize ensures that the argument delta
// is always a multiple of the stack alignment. (Dynamic linkers need this -
// darwin's dyld for example.) If a tail-called callee has more arguments than
// the caller, the caller needs to make sure that there is room to move the
// RETADDR to. This is achieved by reserving an area the size of the argument
// delta right after the original RETADDR, but before the saved frame pointer
// or the spilled registers, e.g. caller(arg1, arg2) calls
// callee(arg1, arg2, arg3, arg4); stack layout:
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible saved FP)
//   callee-saved registers
//   local1 ..
1203
/// Round the stack size up so that it is congruent to
/// (StackAlignment - SlotSize) modulo StackAlignment, e.g. of the form
/// 16n + 12 for a 16-byte stack alignment with 4-byte slots.
1206unsigned
1207M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1208 SelectionDAG &DAG) const {
1209 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
1210 unsigned StackAlignment = TFI.getStackAlignment();
1211 uint64_t AlignMask = StackAlignment - 1;
1212 int64_t Offset = StackSize;
1213 unsigned SlotSize = Subtarget.getSlotSize();
1214 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1215 // Number smaller than 12 so just add the difference.
1216 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1217 } else {
1218 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1219 Offset =
1220 ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
1221 }
1222 return Offset;
1223}
1224
/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
/// Returns true only when the callee's calling convention, argument layout,
/// preserved-register mask and callee-pop behavior make a sibcall safe.
bool M68kTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
    bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  const auto &CallerF = MF.getFunction();

  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

    // Guaranteed TCO only applies when caller and callee agree on a
    // TCO-capable convention.
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  LLVMContext &C = *DAG.getContext();
  if (IsVarArg && !Outs.empty()) {

    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
                                  RetCC_M68k))
    return false;

  // The callee has to preserve all registers the caller needs to preserve.
  const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    StackArgsSize = CCInfo.getStackSize();

    if (StackArgsSize) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const M68kInstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
          return false;
        if (!VA.isRegLoc()) {
          // Stack argument must already sit at the caller's matching fixed
          // stack slot, or we would have to shuffle the stack.
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
                                   TII, VA))
            return false;
        }
      }
    }

    bool PositionIndependent = isPositionIndependent();
    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. The call address can
    // only target %A0 or %A1 since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if ((!isa<GlobalAddressSDNode>(Callee) &&
         !isa<ExternalSymbolSDNode>(Callee)) ||
        PositionIndependent) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs = PositionIndependent ? 1 : 2;

      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        Register Reg = VA.getLocReg();
        switch (Reg) {
        default:
          break;
        case M68k::A0:
        case M68k::A1:
          // One address register too many is taken by arguments.
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  bool CalleeWillPop = M68k::isCalleePop(
      CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}
1376
1377//===----------------------------------------------------------------------===//
1378// Custom Lower
1379//===----------------------------------------------------------------------===//
1380
                                            SelectionDAG &DAG) const {
  // Dispatch every opcode registered as Custom (see the constructor's
  // setOperationAction calls) to its dedicated lowering helper.
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Should not custom lower this!");
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    return LowerXALUO(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY:
    return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT:
    return LowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return LowerBRCOND(Op, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
    return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::SHL_PARTS:
    return LowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return LowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return LowerShiftRightParts(Op, DAG, false);
  case ISD::ATOMIC_FENCE:
    return LowerATOMICFENCE(Op, DAG);
    return LowerGlobalTLSAddress(Op, DAG);
  }
}
1432
/// Build and lower a runtime-library call to the external symbol
/// \p SymbolName with the given argument list, chained to the DAG entry
/// node. Used for the TLS helper calls below.
SDValue M68kTargetLowering::LowerExternalSymbolCall(SelectionDAG &DAG,
                                                    SDLoc Loc,
                                                    llvm::StringRef SymbolName,
                                                    ArgListTy &&ArgList) const {
  PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);
  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(Loc)
      .setChain(DAG.getEntryNode())
          DAG.getExternalSymbol(SymbolName.data(),
      std::move(ArgList));
  // LowerCallTo returns {result, chain}; only the call result is needed.
  return LowerCallTo(CLI).first;
}
1447
/// Emit a call to __tls_get_addr for \p GA. The single pointer argument is
/// GOT plus the target global address tagged with \p TargetFlags
/// (MO_TLSGD or MO_TLSLDM, see the callers below).
SDValue M68kTargetLowering::getTLSGetAddr(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG,
                                          unsigned TargetFlags) const {
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
      GA->getGlobal(), GA, GA->getValueType(0), GA->getOffset(), TargetFlags);
  // Argument = GOT + tagged address of the global.
  SDValue Arg = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, GOT, TGA);

  PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);

  ArgListTy Args;
  Args.emplace_back(Arg, PtrTy);
  return LowerExternalSymbolCall(DAG, SDLoc(GA), "__tls_get_addr",
                                 std::move(Args));
}
1463
1464SDValue M68kTargetLowering::getM68kReadTp(SDLoc Loc, SelectionDAG &DAG) const {
1465 return LowerExternalSymbolCall(DAG, Loc, "__m68k_read_tp", ArgListTy());
1466}
1467
1468SDValue M68kTargetLowering::LowerTLSGeneralDynamic(GlobalAddressSDNode *GA,
1469 SelectionDAG &DAG) const {
1470 return getTLSGetAddr(GA, DAG, M68kII::MO_TLSGD);
1471}
1472
/// Lower a TLS address under the local-dynamic model: one __tls_get_addr
/// call (tagged TLSLDM) yields the module's TLS block, then the variable's
/// offset inside that block is added.
SDValue M68kTargetLowering::LowerTLSLocalDynamic(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDValue Addr = getTLSGetAddr(GA, DAG, M68kII::MO_TLSLDM);
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
  return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Addr);
}
1481
/// Lower a TLS address under the initial-exec model: load the variable's
/// thread-pointer offset from its GOT slot and add it to the thread pointer
/// returned by __m68k_read_tp.
SDValue M68kTargetLowering::LowerTLSInitialExec(GlobalAddressSDNode *GA,
                                               SelectionDAG &DAG) const {
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
  SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
  // Address of the GOT slot holding the offset.
  SDValue Addr = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, GOT);
  SDValue Offset =
      DAG.getLoad(MVT::i32, SDLoc(GA), DAG.getEntryNode(), Addr,

  return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, Offset, Tp);
}
1496
/// Lower a TLS address under the local-exec model: the offset is a
/// link-time constant added directly to the thread pointer from
/// __m68k_read_tp.
SDValue M68kTargetLowering::LowerTLSLocalExec(GlobalAddressSDNode *GA,
                                              SelectionDAG &DAG) const {
  SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
  return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Tp);
}
1505
/// Dispatch a GlobalTLSAddress node to the lowering routine matching the
/// TLS access model the target machine selected for the global.
SDValue M68kTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // TLS lowering here is only implemented for ELF targets.
  assert(Subtarget.isTargetELF());

  auto *GA = cast<GlobalAddressSDNode>(Op);
  TLSModel::Model AccessModel = DAG.getTarget().getTLSModel(GA->getGlobal());

  switch (AccessModel) {
    return LowerTLSGeneralDynamic(GA, DAG);
    return LowerTLSLocalDynamic(GA, DAG);
    return LowerTLSInitialExec(GA, DAG);
    return LowerTLSLocalExec(GA, DAG);
  }

  llvm_unreachable("Unexpected TLS access model type");
}
1526
1527bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
1528 SDValue C) const {
1529 // Shifts and add instructions in M68000 and M68010 support
1530 // up to 32 bits, but mul only has 16-bit variant. So it's almost
1531 // certainly beneficial to lower 8/16/32-bit mul to their
1532 // add / shifts counterparts. But for 64-bits mul, it might be
1533 // safer to just leave it to compiler runtime implementations.
1534 return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
1535}
1536
1537static bool isOverflowArithmetic(unsigned Opcode) {
1538 switch (Opcode) {
1539 case ISD::UADDO:
1540 case ISD::SADDO:
1541 case ISD::USUBO:
1542 case ISD::SSUBO:
1543 case ISD::UMULO:
1544 case ISD::SMULO:
1545 return true;
1546 default:
1547 return false;
1548 }
1549}
1550
                                    SDValue &Result, SDValue &CCR,
                                    unsigned &CC) {
  SDNode *N = Op.getNode();
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(Op);

  unsigned TruncOp = 0;
  auto PromoteMULO = [&](unsigned ExtOp) {
    // We don't have 8-bit multiplications, so promote i8 version of U/SMULO
    // to i16.
    // Ideally this should be done by legalizer but sadly there is no promotion
    // rule for U/SMULO at this moment.
    if (VT == MVT::i8) {
      LHS = DAG.getNode(ExtOp, DL, MVT::i16, LHS);
      RHS = DAG.getNode(ExtOp, DL, MVT::i16, RHS);
      VT = MVT::i16;
      TruncOp = ISD::TRUNCATE;
    }
  };

  // Pick the CCR-producing base opcode and the condition code under which
  // the operation is considered to have overflowed.
  bool NoOverflow = false;
  unsigned BaseOp = 0;
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = M68kISD::ADD;
    CC = M68k::COND_VS; // signed overflow -> V flag
    break;
  case ISD::UADDO:
    BaseOp = M68kISD::ADD;
    CC = M68k::COND_CS; // unsigned overflow -> C flag
    break;
  case ISD::SSUBO:
    BaseOp = M68kISD::SUB;
    CC = M68k::COND_VS;
    break;
  case ISD::USUBO:
    BaseOp = M68kISD::SUB;
    CC = M68k::COND_CS;
    break;
  case ISD::UMULO:
    PromoteMULO(ISD::ZERO_EXTEND);
    // After promotion a sub-i32 multiply cannot overflow its result type.
    NoOverflow = VT != MVT::i32;
    BaseOp = NoOverflow ? (unsigned)ISD::MUL : (unsigned)M68kISD::UMUL;
    CC = M68k::COND_VS;
    break;
  case ISD::SMULO:
    PromoteMULO(ISD::SIGN_EXTEND);
    NoOverflow = VT != MVT::i32;
    BaseOp = NoOverflow ? (unsigned)ISD::MUL : (unsigned)M68kISD::SMUL;
    CC = M68k::COND_VS;
    break;
  }

  SDVTList VTs;
  if (NoOverflow)
    VTs = DAG.getVTList(VT);
  else
    // Also sets CCR.
    VTs = DAG.getVTList(VT, MVT::i8);

  SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
  Result = Arith.getValue(0);
  if (TruncOp)
    // Right now the only place to truncate is from i16 to i8.
    Result = DAG.getNode(TruncOp, DL, MVT::i8, Arith);

  if (NoOverflow)
    // Overflow is statically impossible: report a constant-zero CCR.
    CCR = DAG.getConstant(0, DL, N->getValueType(1));
  else
    CCR = Arith.getValue(1);
}
1627
1628SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
1629 SDNode *N = Op.getNode();
1630 SDLoc DL(Op);
1631
1632 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
1633 // a "setcc" instruction that checks the overflow flag.
1634 SDValue Result, CCR;
1635 unsigned CC;
1636 lowerOverflowArithmetic(Op, DAG, Result, CCR, CC);
1637
1638 SDValue Overflow;
1639 if (isa<ConstantSDNode>(CCR)) {
1640 // It's likely a result of operations that will not overflow
1641 // hence no setcc is needed.
1642 Overflow = CCR;
1643 } else {
1644 // Generate a M68kISD::SETCC.
1645 Overflow = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
1646 DAG.getConstant(CC, DL, MVT::i8), CCR);
1647 }
1648
1649 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Overflow);
1650}
1651
/// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
/// condition according to equal/not-equal condition code \p CC.
                                 const SDLoc &DL, SelectionDAG &DAG) {
  // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
  // instruction. Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok.
  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
    Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match. Since
  // BTST ignores high bits (like shifts) we can use anyextend.
  if (Src.getValueType() != BitNo.getValueType())
    BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);

  SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);

  // NOTE BTST sets CCR.Z flag if bit is 0, same as AND with bitmask
  // Materialize the flag as an i8 condition value via M68kISD::SETCC.
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(Cond, DL, MVT::i8), BTST);
}
1674
/// Result of 'and' is compared against zero. Change to a BTST node if possible.
                             SelectionDAG &DAG) {
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  // Look through truncates on both operands.
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue LHS, RHS;
  // Canonicalize any (shl 1, N) operand into Op0.
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    // (and X, (shl 1, N)) -> test bit N of X.
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        auto Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      LHS = Op1;
      RHS = Op0.getOperand(1);
    }
  } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    // (and (srl X, N), 1) -> test bit N of X.
    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }

    // Use BTST if the immediate can't be encoded in a TEST instruction.
    if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
      LHS = AndLHS;
      RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
    }
  }

  // If a bit-test pattern was matched, emit the BTST-based condition.
  if (LHS.getNode())
    return getBitTestCondition(LHS, RHS, CC, DL, DAG);

  return SDValue();
}
1723
  // Map an integer ISD predicate onto the matching M68k condition code:
  // signed compares use GT/GE/LT/LE, unsigned ones use the carry-based
  // CS/CC/HI/LS codes.
  switch (SetCCOpcode) {
  default:
    llvm_unreachable("Invalid integer condition!");
  case ISD::SETEQ:
    return M68k::COND_EQ;
  case ISD::SETGT:
    return M68k::COND_GT;
  case ISD::SETGE:
    return M68k::COND_GE;
  case ISD::SETLT:
    return M68k::COND_LT;
  case ISD::SETLE:
    return M68k::COND_LE;
  case ISD::SETNE:
    return M68k::COND_NE;
  case ISD::SETULT:
    return M68k::COND_CS;
  case ISD::SETUGE:
    return M68k::COND_CC;
  case ISD::SETUGT:
    return M68k::COND_HI;
  case ISD::SETULE:
    return M68k::COND_LS;
  }
}
1750
1751/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
1752/// condition code, returning the condition code and the LHS/RHS of the
1753/// comparison to make.
1754static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
1755 bool IsFP, SDValue &LHS, SDValue &RHS,
1756 SelectionDAG &DAG) {
1757 if (!IsFP) {
1758 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1759 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
1760 // X > -1 -> X == 0, jump !sign.
1761 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1762 return M68k::COND_PL;
1763 }
1764 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
1765 // X < 0 -> X == 0, jump on sign.
1766 return M68k::COND_MI;
1767 }
1768 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
1769 // X < 1 -> X <= 0
1770 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1771 return M68k::COND_LE;
1772 }
1773 }
1774
1775 return TranslateIntegerM68kCC(SetCCOpcode);
1776 }
1777
1778 // First determine if it is required or is profitable to flip the operands.
1779
1780 // If LHS is a foldable load, but RHS is not, flip the condition.
1781 if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
1782 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
1783 std::swap(LHS, RHS);
1784 }
1785
1786 switch (SetCCOpcode) {
1787 default:
1788 break;
1789 case ISD::SETOLT:
1790 case ISD::SETOLE:
1791 case ISD::SETUGT:
1792 case ISD::SETUGE:
1793 std::swap(LHS, RHS);
1794 break;
1795 }
1796
1797 // On a floating point condition, the flags are set as follows:
1798 // ZF PF CF op
1799 // 0 | 0 | 0 | X > Y
1800 // 0 | 0 | 1 | X < Y
1801 // 1 | 0 | 0 | X == Y
1802 // 1 | 1 | 1 | unordered
1803 switch (SetCCOpcode) {
1804 default:
1805 llvm_unreachable("Condcode should be pre-legalized away");
1806 case ISD::SETUEQ:
1807 case ISD::SETEQ:
1808 return M68k::COND_EQ;
1809 case ISD::SETOLT: // flipped
1810 case ISD::SETOGT:
1811 case ISD::SETGT:
1812 return M68k::COND_HI;
1813 case ISD::SETOLE: // flipped
1814 case ISD::SETOGE:
1815 case ISD::SETGE:
1816 return M68k::COND_CC;
1817 case ISD::SETUGT: // flipped
1818 case ISD::SETULT:
1819 case ISD::SETLT:
1820 return M68k::COND_CS;
1821 case ISD::SETUGE: // flipped
1822 case ISD::SETULE:
1823 case ISD::SETLE:
1824 return M68k::COND_LS;
1825 case ISD::SETONE:
1826 case ISD::SETNE:
1827 return M68k::COND_NE;
1828 case ISD::SETOEQ:
1829 case ISD::SETUNE:
1830 return M68k::COND_INVALID;
1831 }
1832}
1833
// Convert (truncate (srl X, N) to i1) to (bt X, N)
                                 const SDLoc &DL, SelectionDAG &DAG) {

  assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
         "Expected TRUNCATE to i1 node");

  // Only (truncate (srl X, N)) can be turned into a bit test.
  if (Op.getOperand(0).getOpcode() != ISD::SRL)
    return SDValue();

  // The truncated bit is bit N of X: test it with BTST.
  SDValue ShiftRight = Op.getOperand(0);
  return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
                             CC, DL, DAG);
}
1848
/// \brief return true if \c Op has a use that doesn't just read flags.
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = UI->getUser();
    unsigned UOpNo = UI->getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past truncate.
      UOpNo = User->use_begin()->getOperandNo();
      User = User->use_begin()->getUser();
    }

    // BRCOND, SETCC and the condition operand (operand 0) of SELECT only
    // consume the flags; any other user needs the actual value.
    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
1867
1868SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
1869 const SDLoc &DL, SelectionDAG &DAG) const {
1870
1871 // CF and OF aren't always set the way we want. Determine which
1872 // of these we need.
1873 bool NeedCF = false;
1874 bool NeedOF = false;
1875 switch (M68kCC) {
1876 default:
1877 break;
1878 case M68k::COND_HI:
1879 case M68k::COND_CC:
1880 case M68k::COND_CS:
1881 case M68k::COND_LS:
1882 NeedCF = true;
1883 break;
1884 case M68k::COND_GT:
1885 case M68k::COND_GE:
1886 case M68k::COND_LT:
1887 case M68k::COND_LE:
1888 case M68k::COND_VS:
1889 case M68k::COND_VC: {
1890 // Check if we really need to set the
1891 // Overflow flag. If NoSignedWrap is present
1892 // that is not actually needed.
1893 switch (Op->getOpcode()) {
1894 case ISD::ADD:
1895 case ISD::SUB:
1896 case ISD::MUL:
1897 case ISD::SHL: {
1898 if (Op.getNode()->getFlags().hasNoSignedWrap())
1899 break;
1900 [[fallthrough]];
1901 }
1902 default:
1903 NeedOF = true;
1904 break;
1905 }
1906 break;
1907 }
1908 }
1909 // See if we can use the CCR value from the operand instead of
1910 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
1911 // we prove that the arithmetic won't overflow, we can't use OF or CF.
1912 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
1913 // Emit a CMP with 0, which is the TEST pattern.
1914 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1915 DAG.getConstant(0, DL, Op.getValueType()), Op);
1916 }
1917 unsigned Opcode = 0;
1918 unsigned NumOperands = 0;
1919
1920 // Truncate operations may prevent the merge of the SETCC instruction
1921 // and the arithmetic instruction before it. Attempt to truncate the operands
1922 // of the arithmetic instruction and use a reduced bit-width instruction.
1923 bool NeedTruncation = false;
1924 SDValue ArithOp = Op;
1925 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
1926 SDValue Arith = Op->getOperand(0);
1927 // Both the trunc and the arithmetic op need to have one user each.
1928 if (Arith->hasOneUse())
1929 switch (Arith.getOpcode()) {
1930 default:
1931 break;
1932 case ISD::ADD:
1933 case ISD::SUB:
1934 case ISD::AND:
1935 case ISD::OR:
1936 case ISD::XOR: {
1937 NeedTruncation = true;
1938 ArithOp = Arith;
1939 }
1940 }
1941 }
1942
1943 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
1944 // which may be the result of a CAST. We use the variable 'Op', which is the
1945 // non-casted variable when we check for possible users.
1946 switch (ArithOp.getOpcode()) {
1947 case ISD::ADD:
1948 Opcode = M68kISD::ADD;
1949 NumOperands = 2;
1950 break;
1951 case ISD::SHL:
1952 case ISD::SRL:
1953 // If we have a constant logical shift that's only used in a comparison
1954 // against zero turn it into an equivalent AND. This allows turning it into
1955 // a TEST instruction later.
1956 if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
1957 Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
1958 !hasNonFlagsUse(Op)) {
1959 EVT VT = Op.getValueType();
1960 unsigned BitWidth = VT.getSizeInBits();
1961 unsigned ShAmt = Op->getConstantOperandVal(1);
1962 if (ShAmt >= BitWidth) // Avoid undefined shifts.
1963 break;
1964 APInt Mask = ArithOp.getOpcode() == ISD::SRL
1966 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
1967 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
1968 break;
1969 Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
1970 DAG.getConstant(Mask, DL, VT));
1971 }
1972 break;
1973
1974 case ISD::AND:
1975 // If the primary 'and' result isn't used, don't bother using
1976 // M68kISD::AND, because a TEST instruction will be better.
1977 if (!hasNonFlagsUse(Op)) {
1978 SDValue Op0 = ArithOp->getOperand(0);
1979 SDValue Op1 = ArithOp->getOperand(1);
1980 EVT VT = ArithOp.getValueType();
1981 bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
1982 bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
1983
1984 // But if we can combine this into an ANDN operation, then create an AND
1985 // now and allow it to be pattern matched into an ANDN.
1986 if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
1987 break;
1988 }
1989 [[fallthrough]];
1990 case ISD::SUB:
1991 case ISD::OR:
1992 case ISD::XOR:
1993 // Due to the ISEL shortcoming noted above, be conservative if this op is
1994 // likely to be selected as part of a load-modify-store instruction.
1995 for (const auto *U : Op.getNode()->users())
1996 if (U->getOpcode() == ISD::STORE)
1997 goto default_case;
1998
1999 // Otherwise use a regular CCR-setting instruction.
2000 switch (ArithOp.getOpcode()) {
2001 default:
2002 llvm_unreachable("unexpected operator!");
2003 case ISD::SUB:
2004 Opcode = M68kISD::SUB;
2005 break;
2006 case ISD::XOR:
2007 Opcode = M68kISD::XOR;
2008 break;
2009 case ISD::AND:
2010 Opcode = M68kISD::AND;
2011 break;
2012 case ISD::OR:
2013 Opcode = M68kISD::OR;
2014 break;
2015 }
2016
2017 NumOperands = 2;
2018 break;
2019 case M68kISD::ADD:
2020 case M68kISD::SUB:
2021 case M68kISD::OR:
2022 case M68kISD::XOR:
2023 case M68kISD::AND:
2024 return SDValue(Op.getNode(), 1);
2025 default:
2026 default_case:
2027 break;
2028 }
2029
2030 // If we found that truncation is beneficial, perform the truncation and
2031 // update 'Op'.
2032 if (NeedTruncation) {
2033 EVT VT = Op.getValueType();
2034 SDValue WideVal = Op->getOperand(0);
2035 EVT WideVT = WideVal.getValueType();
2036 unsigned ConvertedOp = 0;
2037 // Use a target machine opcode to prevent further DAGCombine
2038 // optimizations that may separate the arithmetic operations
2039 // from the setcc node.
2040 switch (WideVal.getOpcode()) {
2041 default:
2042 break;
2043 case ISD::ADD:
2044 ConvertedOp = M68kISD::ADD;
2045 break;
2046 case ISD::SUB:
2047 ConvertedOp = M68kISD::SUB;
2048 break;
2049 case ISD::AND:
2050 ConvertedOp = M68kISD::AND;
2051 break;
2052 case ISD::OR:
2053 ConvertedOp = M68kISD::OR;
2054 break;
2055 case ISD::XOR:
2056 ConvertedOp = M68kISD::XOR;
2057 break;
2058 }
2059
2060 if (ConvertedOp) {
2061 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2062 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
2063 SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
2064 SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
2065 Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
2066 }
2067 }
2068 }
2069
2070 if (Opcode == 0) {
2071 // Emit a CMP with 0, which is the TEST pattern.
2072 return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
2073 DAG.getConstant(0, DL, Op.getValueType()), Op);
2074 }
2075 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
2076 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
2077
2078 SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
2079 DAG.ReplaceAllUsesWith(Op, New);
2080 return SDValue(New.getNode(), 1);
2081}
2082
2083/// \brief Return true if the condition is an unsigned comparison operation.
2084static bool isM68kCCUnsigned(unsigned M68kCC) {
2085 switch (M68kCC) {
2086 default:
2087 llvm_unreachable("Invalid integer condition!");
2088 case M68k::COND_EQ:
2089 case M68k::COND_NE:
2090 case M68k::COND_CS:
2091 case M68k::COND_HI:
2092 case M68k::COND_LS:
2093 case M68k::COND_CC:
2094 return true;
2095 case M68k::COND_GT:
2096 case M68k::COND_GE:
2097 case M68k::COND_LT:
2098 case M68k::COND_LE:
2099 return false;
2100 }
2101}
2102
/// Emit a comparison of \p Op0 against \p Op1 and return the CCR-producing
/// value (result #1 of the node). A compare against the constant zero is
/// delegated to EmitTest, which emits the cheaper TEST-style pattern.
/// Integer compares are emitted as M68kISD::SUB so they can be CSE'd with an
/// identical arithmetic subtraction; everything else becomes M68kISD::CMP.
SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  if (isNullConstant(Op1))
    return EmitTest(Op0, M68kCC, DL, DAG);

  assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
         "Unexpected comparison operation for MVT::i1 operands");

  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Only promote the compare up to I32 if it is a 16 bit operation
    // with an immediate. 16 bit immediates are to be avoided.
    if ((Op0.getValueType() == MVT::i16 &&
         (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
      // Sign- or zero-extend both sides up to i32 before comparing.
      unsigned ExtendOp =
      Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
    SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
    // Result #1 of the SUB node is the CCR value.
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
}
2130
2131/// Result of 'and' or 'trunc to i1' is compared against zero.
2132/// Change to a BTST node if possible.
2133SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
2134 const SDLoc &DL,
2135 SelectionDAG &DAG) const {
2136 if (Op.getOpcode() == ISD::AND)
2137 return LowerAndToBTST(Op, CC, DL, DAG);
2138 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
2139 return LowerTruncateToBTST(Op, CC, DL, DAG);
2140 return SDValue();
2141}
2142
/// Lower an ISD::SETCC into a M68kISD::SETCC that reads the CCR produced by
/// an explicit comparison, after first trying cheaper patterns: a BTST
/// bit-test, reuse (or inversion) of an existing M68kISD::SETCC, and
/// xor-against-zero simplifications for i1 operands.
SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BTST if possible.
  // Lower (X & (1 << N)) == 0 to BTST(X, N).
  // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
  // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
  // Lower (trunc (X >> N) to i1) to BTST(X, N).
  if (Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == M68kISD::SETCC) {
      // Invert iff exactly one of {compare-is-NE, compared-against-0} holds.
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = M68k::GetOppositeBranchCondition(CCode);
      SDValue SetCC =
          DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                      DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
      return SetCC;
    }
  }
  // i1 operands: rewrite everything as a compare against zero.
  if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (isOneConstant(Op1)) {
      return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
    }
    if (!isNullConstant(Op1)) {
      // (setcc i1 X, Y) -> (setcc (xor X, Y), 0) with the same condition.
      SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
      return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
    }
  }

  // Generic path: translate the condition, emit the compare, and read CCR.
  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
  if (M68kCC == M68k::COND_INVALID)
    return SDValue();

  SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
}
2208
2209SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
2210 SelectionDAG &DAG) const {
2211 SDValue LHS = Op.getOperand(0);
2212 SDValue RHS = Op.getOperand(1);
2213 SDValue Carry = Op.getOperand(2);
2214 SDValue Cond = Op.getOperand(3);
2215 SDLoc DL(Op);
2216
2217 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
2218 M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
2219
2220 EVT CarryVT = Carry.getValueType();
2221 APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
2222 Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
2223 DAG.getConstant(NegOne, DL, CarryVT));
2224
2225 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
2226 SDValue Cmp =
2227 DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));
2228
2229 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2230 DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
2231}
2232
/// Return true if opcode is a M68k logical comparison, i.e. a node whose
/// flag result can be consumed directly as a comparison outcome: an explicit
/// M68kISD::CMP, or an arithmetic/logic node observed through its flag
/// result number.
  unsigned Opc = Op.getNode()->getOpcode();
  // An explicit compare always qualifies.
  if (Opc == M68kISD::CMP)
    return true;
  // Arithmetic/logic nodes expose their flags as result #1.
  if (Op.getResNo() == 1 &&
      (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
       Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
       Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
    return true;

  // UMUL additionally exposes a usable result #2.
  if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
    return true;

  return false;
}
2249
  // Only a TRUNCATE can be looked through here.
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  // The truncation is lossless iff every bit it drops is already known to be
  // zero in the wide input value.
  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0,
                               APInt::getHighBitsSet(InBits, InBits - Bits));
}
2260
/// Lower ISD::SELECT into M68kISD::CMOV, first trying several cheaper
/// patterns: sign-bit/carry tricks for selects between -1/0 and a value,
/// reuse of an existing flag-producing node as the condition, and BTST
/// matching for and-based conditions.
SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;

  // Lower a SETCC condition first so the patterns below can match the
  // resulting M68kISD::SETCC.
  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == M68kISD::SETCC &&
      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(0))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode = Cond.getConstantOperandVal(0);

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
      // Y is whichever select arm is not the all-ones constant.
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(1);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {

        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

        // 0 - x sets the carry flag iff x != 0.
        SDValue Neg =
            DAG.getNode(M68kISD::SUB, DL, VTs,
                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);

        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                                  DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }

      // Compare 1 with x: carry is set iff x == 0.
      Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                        DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);

      SDValue Res = // Res = 0 or -1.
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    bool IllegalFPCMov = false;

    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      addTest = false;
    }
  } else if (isOverflowArithmetic(CondOpcode)) {
    // Result is unused here.
    unsigned CCode;
    lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);
    CC = DAG.getConstant(CCode, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    // No usable flag producer was found; emit an explicit test against zero.
    CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
  }

  // a < b ? -1 : 0 -> RES = ~setcc_carry
  // a < b ? 0 : -1 -> RES = setcc_carry
  // a >= b ? -1 : 0 -> RES = setcc_carry
  // a >= b ? 0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == M68kISD::SUB) {
    unsigned CondCode = CC->getAsZExtVal();

    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // M68k doesn't have an i8 cmov. If both operands are the result of a
  // truncate widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
      Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Block CopyFromReg so partial register stalls are avoided.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDValue Cmov =
          DAG.getNode(M68kISD::CMOV, DL, T1.getValueType(), T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // Simple optimization when Cond is a constant to avoid generating
  // M68kISD::CMOV if possible.
  // TODO: Generalize this to use SelectionDAG::computeKnownBits.
  if (auto *Const = dyn_cast<ConstantSDNode>(Cond.getNode())) {
    const APInt &C = Const->getAPIntValue();
    if (C.countr_zero() >= 5)
      return Op2;
    else if (C.countr_one() >= 5)
      return Op1;
  }

  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDValue Ops[] = {Op2, Op1, CC, Cond};
  return DAG.getNode(M68kISD::CMOV, DL, Op.getValueType(), Ops);
}
2426
2427/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
2428/// each of which has no other use apart from the AND / OR.
2429static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
2430 Opc = Op.getOpcode();
2431 if (Opc != ISD::OR && Opc != ISD::AND)
2432 return false;
2433 return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
2434 Op.getOperand(0).hasOneUse() &&
2435 M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
2436 Op.getOperand(1).hasOneUse());
2437}
2438
/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
/// SETCC node has a single use. Such a xor inverts the SETCC's condition,
/// so the caller may fold it by flipping the condition code.
  if (Op.getOpcode() != ISD::XOR)
    return false;
  // Match (xor (M68kISD::SETCC ...), 1) where the SETCC has no other users.
  if (isOneConstant(Op.getOperand(1)))
    return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  return false;
}
2449
/// Lower ISD::BRCOND into M68kISD::BRCOND, reusing an existing flag-setting
/// node as the condition where possible (CMP/BTST, overflow arithmetic,
/// and/or of SETCCs, xor-inverted SETCC) and otherwise emitting an explicit
/// test against zero.
SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool AddTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
      // Branch on the overflow bit being clear: remember to invert.
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      if (SDValue NewCond = LowerSETCC(Cond, DAG))
        Cond = NewCond;
    }
  }

  // Look pass (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      AddTest = false;
    } else {
      switch (CC->getAsZExtVal()) {
      default:
        break;
      case M68k::COND_VS:
      case M68k::COND_CS:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        AddTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (isOverflowArithmetic(CondOpcode)) {
    unsigned CCode;
    lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);

    if (Inverted)
    CC = DAG.getConstant(CCode, DL, MVT::i8);

    AddTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                              Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          AddTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          M68k::CondCode CCode =
              (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = M68k::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, DL, MVT::i8);
          SDNode *User = *Op.getNode()->user_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
                DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                                Dest, CC, Cmp);
            M68k::CondCode CCode =
                (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = M68k::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, DL, MVT::i8);
            Cond = Cmp;
            AddTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
      // It should be transformed during dag combiner except when the condition
      // is set by a arithmetics with overflow node.
      M68k::CondCode CCode =
          (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = M68k::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, DL, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      AddTest = false;
    }
  }

  if (AddTest) {
    // Look pass the truncate if the high bits are known zero.
      Cond = Cond.getOperand(0);

    // We know the result is compared against zero. Try to match it to BT.
    if (Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    // Fall back to an explicit test; honor the inversion recorded above.
    M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    Cond = EmitTest(Cond, MxCond, DL, DAG);
  }
  return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
                     Cond);
}
2606
2607SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
2608 SelectionDAG &DAG) const {
2609 MVT VT = Op.getNode()->getSimpleValueType(0);
2610
2611 // Let legalize expand this if it isn't a legal type yet.
2612 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2613 return SDValue();
2614
2615 SDVTList VTs = DAG.getVTList(VT, MVT::i8);
2616
2617 unsigned Opc;
2618 bool ExtraOp = false;
2619 switch (Op.getOpcode()) {
2620 default:
2621 llvm_unreachable("Invalid code");
2622 case ISD::ADDC:
2623 Opc = M68kISD::ADD;
2624 break;
2625 case ISD::ADDE:
2626 Opc = M68kISD::ADDX;
2627 ExtraOp = true;
2628 break;
2629 case ISD::SUBC:
2630 Opc = M68kISD::SUB;
2631 break;
2632 case ISD::SUBE:
2633 Opc = M68kISD::SUBX;
2634 ExtraOp = true;
2635 break;
2636 }
2637
2638 if (!ExtraOp)
2639 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2640 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2641 Op.getOperand(2));
2642}
2643
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the M68kISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references use the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  MVT PtrVT = getPointerTy(DAG.getDataLayout());
      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);

  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2679
/// Lower an ExternalSymbolSDNode: wrap the target symbol, add the PIC base
/// when required, and load through a stub when the reference is indirect.
SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
                                                SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);

  // PC-relative references use the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlag)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
  }

  return Result;
}
2716
2717SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
2718 SelectionDAG &DAG) const {
2719 unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
2720 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2721 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
2722 SDLoc DL(Op);
2723 auto PtrVT = getPointerTy(DAG.getDataLayout());
2724
2725 // Create the TargetBlockAddressAddress node.
2726 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
2727
2728 if (M68kII::isPCRelBlockReference(OpFlags)) {
2729 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2730 } else {
2731 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2732 }
2733
2734 // With PIC, the address is actually $g + Offset.
2735 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2736 Result =
2737 DAG.getNode(ISD::ADD, DL, PtrVT,
2738 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2739 }
2740
2741 return Result;
2742}
2743
/// Lower a reference to \p GV at \p Offset into a wrapped TargetGlobalAddress,
/// adding the PIC base, a stub load, and/or an explicit offset addition as
/// required by the reference classification.
SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
                                               const SDLoc &DL, int64_t Offset,
                                               SelectionDAG &DAG) const {
  unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  if (M68kII::isDirectGlobalReference(OpFlags)) {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  }

  // Wrap the address; PC-relative references use the PC-based wrapper.
  if (M68kII::isPCRelGlobalReference(OpFlags))
    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
  else
    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlags)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));
  }

  return Result;
}
2788
2789SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
2790 SelectionDAG &DAG) const {
2791 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2792 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
2793 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
2794}
2795
2796//===----------------------------------------------------------------------===//
2797// Custom Lower Jump Table
2798//===----------------------------------------------------------------------===//
2799
/// Lower a JumpTableSDNode into a wrapped TargetJumpTable, adding the PIC
/// base register when required.
SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
                                           SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references use the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2827
2829 return Subtarget.getJumpTableEncoding();
2830}
2831
2833 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
2834 unsigned uid, MCContext &Ctx) const {
2836}
2837
2839 SelectionDAG &DAG) const {
2841 return DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(),
2843
2844 // MachineJumpTableInfo::EK_LabelDifference32 entry
2845 return Table;
2846}
2847
2848// NOTE This only used for MachineJumpTableInfo::EK_LabelDifference32 entries
2850 const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
2851 return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
2852}
2853
  // Classify the M68k-specific single-letter constraints ('a'/'d' register
  // classes, 'I'..'P' immediate ranges, 'Q'/'U' memory) and the 'C'-prefixed
  // two-letter immediate constraints; anything else falls through to the
  // generic TargetLowering handling.
  if (Constraint.size() > 0) {
    switch (Constraint[0]) {
    // 'a' = address register, 'd' = data register.
    case 'a':
    case 'd':
      return C_RegisterClass;
    // Immediate-range constraints; the actual ranges are validated in
    // LowerAsmOperandForConstraint.
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Immediate;
    // 'C0', 'Ci', 'Cj' are additional immediate constraints.
    case 'C':
      if (Constraint.size() == 2)
        switch (Constraint[1]) {
        case '0':
        case 'i':
        case 'j':
          return C_Immediate;
        default:
          break;
        }
      break;
    case 'Q':
    case 'U':
      return C_Memory;
    default:
      break;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
2891
                                                      StringRef Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  SDValue Result;

  // Validate the M68k immediate constraints ('I'..'P' and 'C0'/'Ci'/'Cj');
  // on success a target constant is appended to Ops, otherwise we return
  // without adding anything so the constraint is reported as unsatisfied.
  if (Constraint.size() == 1) {
    // Constant constraints
    switch (Constraint[0]) {
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P': {
      auto *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return;

      int64_t Val = C->getSExtValue();
      switch (Constraint[0]) {
      case 'I': // constant integer in the range [1,8]
        if (Val > 0 && Val <= 8)
          break;
        return;
      case 'J': // constant signed 16-bit integer
        if (isInt<16>(Val))
          break;
        return;
      case 'K': // constant that is NOT in the range of [-0x80, 0x80)
        if (Val < -0x80 || Val >= 0x80)
          break;
        return;
      case 'L': // constant integer in the range [-8,-1]
        if (Val < 0 && Val >= -8)
          break;
        return;
      case 'M': // constant that is NOT in the range of [-0x100, 0x100]
        if (Val < -0x100 || Val >= 0x100)
          break;
        return;
      case 'N': // constant integer in the range [24,31]
        if (Val >= 24 && Val <= 31)
          break;
        return;
      case 'O': // constant integer 16
        if (Val == 16)
          break;
        return;
      case 'P': // constant integer in the range [8,15]
        if (Val >= 8 && Val <= 15)
          break;
        return;
      default:
        llvm_unreachable("Unhandled constant constraint");
      }

      // Value is in range: materialize it as a signed target constant.
      Result = DAG.getSignedTargetConstant(Val, SDLoc(Op), Op.getValueType());
      break;
    }
    default:
      break;
    }
  }

  if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    case 'C':
      // Constant constraints start with 'C'
      switch (Constraint[1]) {
      case '0':
      case 'i':
      case 'j': {
        auto *C = dyn_cast<ConstantSDNode>(Op);
        if (!C)
          break;

        int64_t Val = C->getSExtValue();
        switch (Constraint[1]) {
        case '0': // constant integer 0
          if (!Val)
            break;
          return;
        case 'i': // constant integer
          break;
        case 'j': // integer constant that doesn't fit in 16 bits
          if (!isInt<16>(C->getSExtValue()))
            break;
          return;
        default:
          llvm_unreachable("Unhandled constant constraint");
        }

        Result = DAG.getSignedTargetConstant(Val, SDLoc(Op), Op.getValueType());
        break;
      }
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Not a target-specific constraint: defer to the generic implementation.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
3006
3007std::pair<unsigned, const TargetRegisterClass *>
3009 StringRef Constraint,
3010 MVT VT) const {
3011 if (Constraint.size() == 1) {
3012 switch (Constraint[0]) {
3013 case 'r':
3014 case 'd':
3015 switch (VT.SimpleTy) {
3016 case MVT::i8:
3017 return std::make_pair(0U, &M68k::DR8RegClass);
3018 case MVT::i16:
3019 return std::make_pair(0U, &M68k::DR16RegClass);
3020 case MVT::i32:
3021 return std::make_pair(0U, &M68k::DR32RegClass);
3022 default:
3023 break;
3024 }
3025 break;
3026 case 'a':
3027 switch (VT.SimpleTy) {
3028 case MVT::i16:
3029 return std::make_pair(0U, &M68k::AR16RegClass);
3030 case MVT::i32:
3031 return std::make_pair(0U, &M68k::AR32RegClass);
3032 default:
3033 break;
3034 }
3035 break;
3036 default:
3037 break;
3038 }
3039 }
3040
3041 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3042}
3043
3044/// Determines whether the callee is required to pop its own arguments.
3045/// Callee pop is necessary to support tail calls.
3046bool M68k::isCalleePop(CallingConv::ID CC, bool IsVarArg, bool GuaranteeTCO) {
3047 return CC == CallingConv::M68k_RTD && !IsVarArg;
3048}
3049
3050// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
3051// together with other CMOV pseudo-opcodes into a single basic-block with
3052// conditional jump around it.
3054 switch (MI.getOpcode()) {
3055 case M68k::CMOV8d:
3056 case M68k::CMOV16d:
3057 case M68k::CMOV32r:
3058 return true;
3059
3060 default:
3061 return false;
3062 }
3063}
3064
3065// The CCR operand of SelectItr might be missing a kill marker
3066// because there were multiple uses of CCR, and ISel didn't know
3067// which to mark. Figure out whether SelectItr should have had a
3068// kill marker, and set it if it should. Returns the correct kill
3069// marker value.
3072 const TargetRegisterInfo *TRI) {
3073 // Scan forward through BB for a use/def of CCR.
3074 MachineBasicBlock::iterator miI(std::next(SelectItr));
3075 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
3076 const MachineInstr &mi = *miI;
3077 if (mi.readsRegister(M68k::CCR, /*TRI=*/nullptr))
3078 return false;
3079 if (mi.definesRegister(M68k::CCR, /*TRI=*/nullptr))
3080 break; // Should have kill-flag - update below.
3081 }
3082
3083 // If we hit the end of the block, check whether CCR is live into a
3084 // successor.
3085 if (miI == BB->end())
3086 for (const auto *SBB : BB->successors())
3087 if (SBB->isLiveIn(M68k::CCR))
3088 return false;
3089
3090 // We found a def, or hit the end of the basic block and CCR wasn't live
3091 // out. SelectMI should have a kill flag on CCR.
3092 SelectItr->addRegisterKilled(M68k::CCR, TRI);
3093 return true;
3094}
3095
M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  //
  // CMOV pseudo operand layout used throughout this function:
  //   0 = dest vreg, 1 = false value, 2 = true value, 3 = condition code imm.
  const BasicBlock *BB = MBB->getBasicBlock();

  // ThisMBB:
  // ...
  // TrueVal = ...
  // cmp ccX, r1, r2
  // bcc Copy1MBB
  // fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = MBB;

  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
  // as described above, by inserting a MBB, and then making a PHI at the join
  // point to select the true and false operands of the CMOV in the PHI.
  //
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all which are based on
  // the same condition setting (or the exact opposite condition setting).
  // In this case we can lower all the CMOVs using a single inserted MBB, and
  // then make a number of PHIs at the join point to model the CMOVs. The only
  // trickiness here, is that in a case like:
  //
  // t2 = CMOV cond1 t1, f1
  // t3 = CMOV cond1 t2, f2
  //
  // when rewriting this into PHIs, we have to perform some renaming on the
  // temps since you cannot have a PHI operand refer to a PHI result earlier
  // in the same block. The "simple" but wrong lowering would be:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t2(BB1), f2(BB2)
  //
  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
  // renaming is to note that on the path through BB1, t2 is really just a
  // copy of t1, and do that renaming, properly generating:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t1(BB1), f2(BB2)
  //
  // Case 2, we lower cascaded CMOVs such as
  //
  // (CMOV (CMOV F, T, cc1), T, cc2)
  //
  // to two successive branches.
  MachineInstr *CascadedCMOV = nullptr;
  MachineInstr *LastCMOV = &MI;
  M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
      std::next(MachineBasicBlock::iterator(MI));

  // Check for case 1, where there are multiple CMOVs with the same condition
  // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
  // number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVS with the same condition. CMOVs with
    // the opposite condition (OppCC) can share the diamond too - their PHI
    // operands are simply swapped below.
    while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
    }
  }

  // This checks for case 2, but only do this if we didn't already find
  // case 1, as indicated by LastCMOV == MI.
  // The pattern requires the next CMOV to consume this CMOV's result as its
  // false operand (with a kill), sharing the same true operand register.
  if (LastCMOV == &MI && NextMIIt != MBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    CascadedCMOV = &*NextMIIt;
  }

  MachineBasicBlock *Jcc1MBB = nullptr;

  // If we have a cascaded CMOV, we lower it to two successive branches to
  // the same block. CCR is used by both, so mark it as live in the second.
  if (CascadedCMOV) {
    Jcc1MBB = F->CreateMachineBasicBlock(BB);
    F->insert(It, Jcc1MBB);
    Jcc1MBB->addLiveIn(M68k::CCR);
  }

  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, Copy0MBB);
  F->insert(It, SinkMBB);

  // Set the call frame size on entry to the new basic blocks.
  unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
  Copy0MBB->setCallFrameSize(CallFrameSize);
  SinkMBB->setCallFrameSize(CallFrameSize);

  // If the CCR register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
  if (!LastCCRSUser->killsRegister(M68k::CCR, /*TRI=*/nullptr) &&
      !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
    Copy0MBB->addLiveIn(M68k::CCR);
    SinkMBB->addLiveIn(M68k::CCR);
  }

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());

  // Add the true and fallthrough blocks as its successors.
  if (CascadedCMOV) {
    // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
    MBB->addSuccessor(Jcc1MBB);

    // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
    // jump to the SinkMBB.
    Jcc1MBB->addSuccessor(Copy0MBB);
    Jcc1MBB->addSuccessor(SinkMBB);
  } else {
    MBB->addSuccessor(Copy0MBB);
  }

  // The true block target of the first (or only) branch is always SinkMBB.
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc = M68k::GetCondBranchFromCond(CC);
  BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);

  if (CascadedCMOV) {
    unsigned Opc2 = M68k::GetCondBranchFromCond(
        (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
    BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
  }

  // Copy0MBB:
  // %FalseValue = ...
  // # fallthrough to SinkMBB
  Copy0MBB->addSuccessor(SinkMBB);

  // SinkMBB:
  // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    // Rewrite references to earlier PHI results: on the Copy0MBB (false)
    // path an earlier PHI's value is its .first input; on the ThisMBB (true)
    // path it is its .second input.
    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB =
        BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
            .addReg(Op1Reg)
            .addMBB(Copy0MBB)
            .addReg(Op2Reg)
            .addMBB(ThisMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // If we have a cascaded CMOV, the second Jcc provides the same incoming
  // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
  if (CascadedCMOV) {
    MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
    // Copy the PHI result to the register defined by the second CMOV.
    BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
            DL, TII->get(TargetOpcode::COPY),
            CascadedCMOV->getOperand(0).getReg())
        .addReg(MI.getOperand(0).getReg());
    CascadedCMOV->eraseFromParent();
  }

  // Now remove the CMOV(s).
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
    (MIIt++)->eraseFromParent();

  return SinkMBB;
}
3315
3317M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
3318 MachineBasicBlock *BB) const {
3319 llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
3320}
3321
3324 MachineBasicBlock *BB) const {
3325 switch (MI.getOpcode()) {
3326 default:
3327 llvm_unreachable("Unexpected instr type to insert");
3328 case M68k::CMOV8d:
3329 case M68k::CMOV16d:
3330 case M68k::CMOV32r:
3331 return EmitLoweredSelect(MI, BB);
3332 case M68k::SALLOCA:
3333 return EmitLoweredSegAlloca(MI, BB);
3334 }
3335}
3336
3337SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3339 auto PtrVT = getPointerTy(MF.getDataLayout());
3341
3342 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3343 SDLoc DL(Op);
3344
3345 // vastart just stores the address of the VarArgsFrameIndex slot into the
3346 // memory location argument.
3347 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3348 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
3349 MachinePointerInfo(SV));
3350}
3351
3352SDValue M68kTargetLowering::LowerATOMICFENCE(SDValue Op,
3353 SelectionDAG &DAG) const {
3354 // Lower to a memory barrier created from inline asm.
3355 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3356 LLVMContext &Ctx = *DAG.getContext();
3357
3358 const unsigned Flags = InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore |
3360 const SDValue AsmOperands[4] = {
3361 Op.getOperand(0), // Input chain
3363 "", TLI.getProgramPointerTy(
3364 DAG.getDataLayout())), // Empty inline asm string
3365 DAG.getMDNode(MDNode::get(Ctx, {})), // (empty) srcloc
3366 DAG.getTargetConstant(Flags, SDLoc(Op),
3367 TLI.getPointerTy(DAG.getDataLayout())), // Flags
3368 };
3369
3370 return DAG.getNode(ISD::INLINEASM, SDLoc(Op),
3371 DAG.getVTList(MVT::Other, MVT::Glue), AsmOperands);
3372}
3373
3374// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
3375// Calls to _alloca are needed to probe the stack when allocating more than 4k
3376// bytes in one go. Touching the stack at 4K increments is necessary to ensure
3377// that the guard pages used by the OS virtual memory manager are allocated in
3378// correct sequence.
3379SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3380 SelectionDAG &DAG) const {
3382 bool SplitStack = MF.shouldSplitStack();
3383
3384 SDLoc DL(Op);
3385
3386 // Get the inputs.
3387 SDNode *Node = Op.getNode();
3388 SDValue Chain = Op.getOperand(0);
3389 SDValue Size = Op.getOperand(1);
3390 unsigned Align = Op.getConstantOperandVal(2);
3391 EVT VT = Node->getValueType(0);
3392
3393 // Chain the dynamic stack allocation so that it doesn't modify the stack
3394 // pointer when other instructions are using the stack.
3395 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
3396
3398 if (SplitStack) {
3399 auto &MRI = MF.getRegInfo();
3400 auto SPTy = getPointerTy(DAG.getDataLayout());
3401 auto *ARClass = getRegClassFor(SPTy);
3402 Register Vreg = MRI.createVirtualRegister(ARClass);
3403 Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
3404 Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
3405 DAG.getRegister(Vreg, SPTy));
3406 } else {
3407 auto &TLI = DAG.getTargetLoweringInfo();
3409 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
3410 " not tell us which reg is the stack pointer!");
3411
3412 SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
3413 Chain = SP.getValue(1);
3414 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3415 unsigned StackAlign = TFI.getStackAlignment();
3416 Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
3417 if (Align > StackAlign)
3418 Result = DAG.getNode(ISD::AND, DL, VT, Result,
3419 DAG.getSignedConstant(-(uint64_t)Align, DL, VT));
3420 Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
3421 }
3422
3423 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);
3424
3425 SDValue Ops[2] = {Result, Chain};
3426 return DAG.getMergeValues(Ops, DL);
3427}
3428
3429SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
3430 SelectionDAG &DAG) const {
3431 SDLoc DL(Op);
3432 SDValue Lo = Op.getOperand(0);
3433 SDValue Hi = Op.getOperand(1);
3434 SDValue Shamt = Op.getOperand(2);
3435 EVT VT = Lo.getValueType();
3436
3437 // if Shamt - register size < 0: // Shamt < register size
3438 // Lo = Lo << Shamt
3439 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (register size - 1 ^ Shamt))
3440 // else:
3441 // Lo = 0
3442 // Hi = Lo << (Shamt - register size)
3443
3444 SDValue Zero = DAG.getConstant(0, DL, VT);
3445 SDValue One = DAG.getConstant(1, DL, VT);
3446 SDValue MinusRegisterSize = DAG.getSignedConstant(-32, DL, VT);
3447 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3448 SDValue ShamtMinusRegisterSize =
3449 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3450 SDValue RegisterSizeMinus1Shamt =
3451 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3452
3453 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3454 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3455 SDValue ShiftRightLo =
3456 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
3457 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3458 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3459 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);
3460
3461 SDValue CC =
3462 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3463
3464 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3465 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3466
3467 return DAG.getMergeValues({Lo, Hi}, DL);
3468}
3469
3470SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3471 bool IsSRA) const {
3472 SDLoc DL(Op);
3473 SDValue Lo = Op.getOperand(0);
3474 SDValue Hi = Op.getOperand(1);
3475 SDValue Shamt = Op.getOperand(2);
3476 EVT VT = Lo.getValueType();
3477
3478 // SRA expansion:
3479 // if Shamt - register size < 0: // Shamt < register size
3480 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3481 // Hi = Hi >>s Shamt
3482 // else:
3483 // Lo = Hi >>s (Shamt - register size);
3484 // Hi = Hi >>s (register size - 1)
3485 //
3486 // SRL expansion:
3487 // if Shamt - register size < 0: // Shamt < register size
3488 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3489 // Hi = Hi >>u Shamt
3490 // else:
3491 // Lo = Hi >>u (Shamt - register size);
3492 // Hi = 0;
3493
3494 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3495
3496 SDValue Zero = DAG.getConstant(0, DL, VT);
3497 SDValue One = DAG.getConstant(1, DL, VT);
3498 SDValue MinusRegisterSize = DAG.getSignedConstant(-32, DL, VT);
3499 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3500 SDValue ShamtMinusRegisterSize =
3501 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3502 SDValue RegisterSizeMinus1Shamt =
3503 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3504
3505 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3506 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3507 SDValue ShiftLeftHi =
3508 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
3509 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3510 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3511 SDValue LoFalse =
3512 DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
3513 SDValue HiFalse =
3514 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;
3515
3516 SDValue CC =
3517 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3518
3519 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3520 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3521
3522 return DAG.getMergeValues({Lo, Hi}, DL);
3523}
3524
3525//===----------------------------------------------------------------------===//
3526// DAG Combine
3527//===----------------------------------------------------------------------===//
3528
3530 SelectionDAG &DAG) {
3531 return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
3532 DAG.getConstant(Cond, dl, MVT::i8), CCR);
3533}
3534// When legalizing carry, we create carries via add X, -1
3535// If that comes from an actual carry, via setcc, we use the
3536// carry directly.
3538 if (CCR.getOpcode() == M68kISD::ADD) {
3539 if (isAllOnesConstant(CCR.getOperand(1))) {
3540 SDValue Carry = CCR.getOperand(0);
3541 while (Carry.getOpcode() == ISD::TRUNCATE ||
3542 Carry.getOpcode() == ISD::ZERO_EXTEND ||
3543 Carry.getOpcode() == ISD::SIGN_EXTEND ||
3544 Carry.getOpcode() == ISD::ANY_EXTEND ||
3545 (Carry.getOpcode() == ISD::AND &&
3546 isOneConstant(Carry.getOperand(1))))
3547 Carry = Carry.getOperand(0);
3548 if (Carry.getOpcode() == M68kISD::SETCC ||
3549 Carry.getOpcode() == M68kISD::SETCC_CARRY) {
3550 if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
3551 return Carry.getOperand(1);
3552 }
3553 }
3554 }
3555
3556 return SDValue();
3557}
3558
3559/// Optimize a CCR definition used according to the condition code \p CC into
3560/// a simpler CCR value, potentially returning a new \p CC and replacing uses
3561/// of chain values.
3563 SelectionDAG &DAG,
3564 const M68kSubtarget &Subtarget) {
3565 if (CC == M68k::COND_CS)
3566 if (SDValue Flags = combineCarryThroughADD(CCR))
3567 return Flags;
3568
3569 return SDValue();
3570}
3571
3572// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
3574 const M68kSubtarget &Subtarget) {
3575 SDLoc DL(N);
3576 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
3577 SDValue CCR = N->getOperand(1);
3578
3579 // Try to simplify the CCR and condition code operands.
3580 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
3581 return getSETCC(CC, Flags, DL, DAG);
3582
3583 return SDValue();
3584}
3586 const M68kSubtarget &Subtarget) {
3587 SDLoc DL(N);
3588 M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
3589 SDValue CCR = N->getOperand(3);
3590
3591 // Try to simplify the CCR and condition code operands.
3592 // Make sure to not keep references to operands, as combineSetCCCCR can
3593 // RAUW them under us.
3594 if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
3595 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
3596 return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
3597 N->getOperand(1), Cond, Flags);
3598 }
3599
3600 return SDValue();
3601}
3602
3604 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3605 MVT VT = N->getSimpleValueType(0);
3606 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3607 return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
3608 N->getOperand(1), Flags);
3609 }
3610
3611 return SDValue();
3612}
3613
3614// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
3617 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
3618 MVT VT = N->getSimpleValueType(0);
3619 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3620 return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
3621 N->getOperand(1), Flags);
3622 }
3623
3624 return SDValue();
3625}
3626
3627SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
3628 DAGCombinerInfo &DCI) const {
3629 SelectionDAG &DAG = DCI.DAG;
3630 switch (N->getOpcode()) {
3631 case M68kISD::SUBX:
3632 return combineSUBX(N, DAG);
3633 case M68kISD::ADDX:
3634 return combineADDX(N, DAG, DCI);
3635 case M68kISD::SETCC:
3636 return combineM68kSetCC(N, DAG, Subtarget);
3637 case M68kISD::BRCOND:
3638 return combineM68kBrCond(N, DAG, Subtarget);
3639 }
3640
3641 return SDValue();
3642}
3643
3645 bool IsVarArg) const {
3646 if (Return)
3647 return RetCC_M68k_C;
3648 else
3649 return CC_M68k_C;
3650}
unsigned const MachineRegisterInfo * MRI
static SDValue getSETCC(AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL, SelectionDAG &DAG)
Helper function to create 'CSET', which is equivalent to 'CSINC <Wd>, WZR, WZR, invert(<cond>)'.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
return RetTy
uint64_t Addr
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
This file contains the custom routines for the M68k Calling Convention that aren't done by tablegen.
static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
static void lowerOverflowArithmetic(SDValue Op, SelectionDAG &DAG, SDValue &Result, SDValue &CCR, unsigned &CC)
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc)
Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes each of which has no other use...
static bool hasNonFlagsUse(SDValue Op)
return true if Op has a use that doesn't just read flags.
static bool isM68kCCUnsigned(unsigned M68kCC)
Return true if the condition is an unsigned comparison operation.
static StructReturnType callIsStructReturn(const SmallVectorImpl< ISD::OutputArg > &Outs)
static bool isXor1OfSetCC(SDValue Op)
Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the SETCC node has a single use...
static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Result of 'and' is compared against zero. Change to a BTST node if possible.
static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode)
static StructReturnType argsAreStructReturn(const SmallVectorImpl< ISD::InputArg > &Ins)
Determines whether a function uses struct return semantics.
static bool isCMOVPseudo(MachineInstr &MI)
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
static bool isM68kLogicalCmp(SDValue Op)
Return true if opcode is a M68k logical comparison.
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
Optimize a CCR definition used according to the condition code CC into a simpler CCR value,...
static SDValue combineCarryThroughADD(SDValue CCR)
static bool isOverflowArithmetic(unsigned Opcode)
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const M68kInstrInfo *TII, const CCValAssign &VA)
Return true if the given stack call argument is already available in the same position (relatively) o...
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Create a BTST (Bit Test) node - Test bit BitNo in Src and set condition according to equal/not-equal ...
StructReturnType
@ NotStructReturn
@ RegStructReturn
@ StackStructReturn
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG)
static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG)
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL, bool IsFP, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG)
Do a one-to-one translation of a ISD::CondCode to the M68k-specific condition code,...
This file defines the interfaces that M68k uses to lower LLVM code into a selection DAG.
This file contains the declarations of the M68k MCAsmInfo properties.
This file declares the M68k specific subclass of MachineFunctionInfo.
This file declares the M68k specific subclass of TargetSubtargetInfo.
This file declares the M68k specific subclass of TargetMachine.
This file contains declarations for M68k ELF object file lowering.
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
Register const TargetRegisterInfo * TRI
#define T1
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
#define OP(OPC)
Definition: Instruction.h:46
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:296
This class represents an incoming formal argument to a Function.
Definition: Argument.h:32
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
The address of a basic block.
Definition: Constants.h:899
CCState - This class holds information needed while lowering arguments and return values.
static LLVM_ABI bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
unsigned getValNo() const
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:124
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
iterator end()
Definition: DenseMap.h:87
iterator_range< arg_iterator > args()
Definition: Function.h:890
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:762
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:703
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:270
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:687
const GlobalValue * getGlobal() const
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:280
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:663
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setBytesToPopOnReturn(unsigned bytes)
void setArgumentStackSize(unsigned size)
unsigned char classifyExternalReference(const Module &M) const
Classify a external variable reference for the current subtarget according to how we should reference...
unsigned char classifyBlockAddressReference() const
Classify a blockaddress reference for the current subtarget according to how we should reference it i...
unsigned getSlotSize() const
getSlotSize - Stack slot size in bytes.
const M68kInstrInfo * getInstrInfo() const override
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
Classify a global variable reference for the current subtarget according to how we should reference i...
unsigned getJumpTableEncoding() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
const M68kRegisterInfo * getRegisterInfo() const override
bool atLeastM68020() const
Definition: M68kSubtarget.h:89
unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, const Module &M) const
Classify a global function reference for the current subtarget.
bool isTargetELF() const
const M68kFrameLowering * getFrameLowering() const override
ConstraintType getConstraintType(StringRef ConstraintStr) const override
Given a constraint, return the type of constraint it is for this target.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
CCAssignFn * getCCAssignFn(CallingConv::ID CC, bool Return, bool IsVarArg) const
M68kTargetLowering(const M68kTargetMachine &TM, const M68kSubtarget &STI)
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Context object for machine code objects.
Definition: MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition: MCExpr.h:214
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1565
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
void setObjectSExt(int ObjectIdx, bool IsSExt)
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:72
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:595
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
Class to represent pointers.
Definition: DerivedTypes.h:700
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:61
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:229
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:813
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:504
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:839
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:498
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:719
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:499
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:707
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:808
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:493
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:511
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:777
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:581
bool empty() const
Definition: SmallVector.h:82
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:154
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:68
R Default(T Value)
Definition: StringSwitch.h:177
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
TargetOptions Options
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:232
LLVM Value Representation.
Definition: Value.h:75
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:439
use_iterator use_begin()
Definition: Value.h:364
self_iterator getIterator()
Definition: ilist_node.h:134
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ M68k_INTR
Used for M68k interrupt routines.
Definition: CallingConv.h:235
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ M68k_RTD
Used for M68k rtd-based CC (similar to X86's stdcall).
Definition: CallingConv.h:252
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:801
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:256
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1236
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1232
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:270
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1379
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:765
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1265
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1381
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1382
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:289
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:259
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:835
@ GlobalAddress
Definition: ISDOpcodes.h:88
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1343
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:275
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1377
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:975
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1378
@ GlobalTLSAddress
Definition: ISDOpcodes.h:89
@ FrameIndex
Definition: ISDOpcodes.h:90
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:826
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:1157
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition: ISDOpcodes.h:809
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1187
@ SSUBO
Same for subtraction.
Definition: ISDOpcodes.h:347
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1380
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1166
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:778
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1261
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:225
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:343
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:695
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:756
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1375
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:832
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:793
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1358
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1383
@ SMULO
Same for multiplication.
Definition: ISDOpcodes.h:351
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1151
@ ConstantPool
Definition: ISDOpcodes.h:92
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1373
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1374
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:730
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:299
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1372
@ ExternalSymbol
Definition: ISDOpcodes.h:93
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1204
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:838
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1256
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1180
@ BlockAddress
Definition: ISDOpcodes.h:94
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:815
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:62
@ AssertZext
Definition: ISDOpcodes.h:63
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1685
static bool isPCRelBlockReference(unsigned char Flag)
Return True if the Block is referenced using PC.
Definition: M68kBaseInfo.h:243
static bool isGlobalRelativeToPICBase(unsigned char TargetFlag)
Return true if the specified global value reference is relative to a 32-bit PIC base (M68kISD::GLOBAL...
Definition: M68kBaseInfo.h:221
static bool isGlobalStubReference(unsigned char TargetFlag)
Return true if the specified TargetFlag operand is a reference to a stub for a global,...
Definition: M68kBaseInfo.h:195
static bool isPCRelGlobalReference(unsigned char Flag)
Return True if the specified GlobalValue requires PC addressing mode.
Definition: M68kBaseInfo.h:232
@ MO_TLSLDM
On a symbol operand, this indicates that the immediate is the offset to the slot in GOT which stores ...
Definition: M68kBaseInfo.h:177
@ MO_TLSLE
On a symbol operand, this indicates that the immediate is the offset to the variable within in the th...
Definition: M68kBaseInfo.h:189
@ MO_TLSGD
On a symbol operand, this indicates that the immediate is the offset to the slot in GOT which stores ...
Definition: M68kBaseInfo.h:165
@ MO_GOTPCREL
On a symbol operand this indicates that the immediate is offset to the GOT entry for the symbol name ...
Definition: M68kBaseInfo.h:153
@ MO_TLSIE
On a symbol operand, this indicates that the immediate is the offset to the variable within the threa...
Definition: M68kBaseInfo.h:183
@ MO_TLSLD
On a symbol operand, this indicates that the immediate is the offset to variable within the thread lo...
Definition: M68kBaseInfo.h:171
static bool isDirectGlobalReference(unsigned char Flag)
Return True if the specified GlobalValue is a direct reference for a symbol.
Definition: M68kBaseInfo.h:207
static bool IsSETCC(unsigned SETCC)
static unsigned GetCondBranchFromCond(M68k::CondCode CC)
Definition: M68kInstrInfo.h:97
bool isCalleePop(CallingConv::ID CallingConv, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
static M68k::CondCode GetOppositeBranchCondition(M68k::CondCode CC)
Definition: M68kInstrInfo.h:58
@ GeneralDynamic
Definition: CodeGen.h:46
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition: MathExtras.h:355
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition: Error.cpp:167
@ Mod
The access may modify the value stored in memory.
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:380
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
Definition: ValueTypes.h:303
Describes a register that needs to be forwarded from the prologue to a musttail call.
Custom state to propagate llvm type info to register CC assigner.
Matching combinators.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)