//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "bpf-lower"

static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));

static cl::opt<unsigned> BPFMinimumJumpTableEntries(
    "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table on BPF"));

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
                 SDValue Val = {}) {
  std::string Str;
  if (Val) {
    raw_string_ostream OS(Str);
    Val->print(OS);
    OS << ' ';
  }
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
      MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
}
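
// Note: fail() reports the problem through the LLVMContext's diagnostic
// handler instead of aborting, so lowering keeps going and one compilation
// can surface several "unsupported" diagnostics before it finally fails.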

BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  if (!STI.hasGotox())
    setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Custom);

  setOperationAction({ISD::GlobalAddress, ISD::ConstantPool}, MVT::i64, Custom);
  if (STI.hasGotox())
    setOperationAction({ISD::BlockAddress, ISD::JumpTable}, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Mark unsupported atomic operations as Custom so we can emit better
  // error messages than the fatal errors coming out of SelectionDAG.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    if (VT == MVT::i32) {
      if (STI.getHasAlu32())
        continue;
    } else {
      setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    }

    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
  }

  for (auto VT : {MVT::i32, MVT::i64}) {
    setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    if (!STI.hasSdivSmod()) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
    }
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!STI.hasMovsx()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  }

  // Extended load operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    if (!STI.hasLdsx()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    }
  }

  setBooleanContents(ZeroOrOneBooleanContent);
  setMaxAtomicSizeInBitsSupported(64);
  setMinimumJumpTableEntries(BPFMinimumJumpTableEntries);

  // Function alignments
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes. The
    // loads and stores could therefore be moved apart from each other, which
    // causes trouble for the memcpy pattern matcher inside kernel eBPF JIT
    // compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order. Hence
    // we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 128;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
    MaxLoadsPerMemcmp = 0;
  } else {
    // Inline memcpy() so the kernel sees explicit copies.
    unsigned CommonMaxStores =
        STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
    MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = CommonMaxStores;
  }

  // CPU/Feature control
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
  HasMovsx = STI.hasMovsx();
}
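
// Example (assumed invocation; the flags are defined in this file and in the
// BPF subtarget): the features consulted above are driven from the command
// line, e.g.
//   llc -march=bpfel -mattr=+alu32 -bpf-expand-memcpy-in-order foo.ll
// enables the GPR32 register class and the in-order memcpy expansion
// configured by this constructor.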

bool BPFTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return false;
}

bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
    MVT MT1 = VT1.getSimpleVT().SimpleTy;
    MVT MT2 = VT2.getSimpleVT().SimpleTy;
    if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
        (MT2 == MVT::i32 || MT2 == MVT::i64))
      return true;
  }
  return TargetLoweringBase::isZExtFree(Val, VT2);
}
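
// BPF load instructions zero-extend the loaded value into the full 64-bit
// destination register, so widening a freshly loaded i8/i16/i32 value needs
// no extra instruction; that is what the special case above encodes.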

unsigned BPFTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_BlockAddress;
}

BPFTargetLowering::ConstraintType
BPFTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'w':
      return C_RegisterClass;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    case 'w':
      if (HasAlu32)
        return std::make_pair(0U, &BPF::GPR32RegClass);
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void BPFTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  const char *Msg;
  uint32_t Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
      Msg = "unsupported atomic operation, please use 32/64 bit version";
    else
      Msg = "unsupported atomic operation, please use 64 bit version";
    break;
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
    return;
  }

  SDLoc DL(N);
  // We'll still produce a fatal error downstream, but this diagnostic is more
  // user-friendly.
  fail(DL, DAG, Msg);
}

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SDIV:
  case ISD::SREM:
    return LowerSDIVSREM(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
    return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::TRAP:
    return LowerTRAP(Op, DAG);
  }
}

// Calling Convention Implementation
#include "BPFGenCallingConv.inc"

SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  bool HasMemArgs = false;
  for (size_t I = 0; I < ArgLocs.size(); ++I) {
    auto &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        std::string Str;
        {
          raw_string_ostream OS(Str);
          RegVT.print(OS);
        }
        report_fatal_error("unhandled argument type: " + Twine(Str));
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert
        // an assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);

        break;
      }
    } else {
      if (VA.isMemLoc())
        HasMemArgs = true;
      else
        report_fatal_error("unhandled argument location");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }
  if (HasMemArgs)
    fail(DL, DAG, "stack arguments are not supported");
  if (IsVarArg)
    fail(DL, DAG, "variadic functions are not supported");
  if (MF.getFunction().hasStructRetAttr())
    fail(DL, DAG, "aggregate returns are not supported");

  return Chain;
}

const size_t BPFTargetLowering::MaxArgs = 5;

static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
                            MCRegister Reg) {
  for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
    RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
}
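
// Register masks store one bit per physical register; a set bit means the
// register is preserved across the call. Clearing the bits for Reg and all
// of its sub-registers therefore marks them as clobbered by the call.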

static uint32_t *regMaskFromTemplate(const TargetRegisterInfo *TRI,
                                     MachineFunction &MF,
                                     const uint32_t *BaseRegMask) {
  uint32_t *RegMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
  memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
  return RegMask;
}

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("unsupported calling convention: " + Twine(CallConv));
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getStackSize();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many arguments", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;
    fail(CLI.DL, DAG, "pass by value not supported", Callee);
    break;
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk arg assignments
  for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue &Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      report_fatal_error("stack arguments are not supported");
  }

  SDValue InGlue;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InGlue
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (StringRef(E->getSymbol()) != BPF_TRAP) {
      Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
      fail(CLI.DL, DAG,
           Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
                 "' is not supported."));
    }
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  bool HasFastCall =
      (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (HasFastCall) {
    uint32_t *RegMask = regMaskFromTemplate(
        TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
    for (auto const &RegPair : RegsToPass)
      resetRegMaskBit(TRI, RegMask, RegPair.first);
    if (!CLI.CB->getType()->isVoidTy())
      resetRegMaskBit(TRI, RegMask, BPF::R0);
    Ops.push_back(DAG.getRegisterMask(RegMask));
  } else {
    Ops.push_back(
        DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
  }
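
  // For a bpf_fastcall call site the mask starts from PreserveAll (everything
  // preserved) and then clears only the registers the call actually uses for
  // arguments and, for non-void calls, R0 for the return value. This lets the
  // register allocator keep values live in the remaining scratch registers
  // across the call.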

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_GLUE;

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "aggregate returns are not supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (size_t i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      report_fatal_error("stack return values are not supported");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);

    // Guarantee that all emitted copies are stuck together,
    // so the scheduler cannot reorder them.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() > 1) {
    fail(DL, DAG, "only small returns supported");
    for (auto &In : Ins)
      InVals.push_back(DAG.getConstant(0, DL, In.VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physregs.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InGlue).getValue(1);
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}
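
// Example: without the JmpExt extension (jlt/jle/jslt/jsle), a branch on
// "LHS < RHS" is rewritten by NegateCC as "RHS > LHS", which maps onto the
// always-available jgt/jsgt encodings.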

SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG,
       "unsupported signed division, please convert to unsigned div/mod.");
  return DAG.getUNDEF(Op->getValueType(0));
}

SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG, "unsupported dynamic stack allocation");
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
      AtomicOrdering::SequentiallyConsistent)
    fail(DL, DAG,
         "sequentially consistent (seq_cst) "
         "atomic load/store is not supported");

  return Op;
}

static Function *createBPFUnreachable(Module *M) {
  if (auto *Fn = M->getFunction(BPF_TRAP))
    return Fn;

  FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
  Function *NewF =
      Function::Create(FT, GlobalValue::ExternalWeakLinkage, BPF_TRAP, M);
  NewF->setDSOLocal(true);
  NewF->setCallingConv(CallingConv::C);
  NewF->setSection(".ksyms");

  if (M->debug_compile_units().empty())
    return NewF;

  DIBuilder DBuilder(*M);
  DITypeRefArray ParamTypes =
      DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
  DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
  DICompileUnit *CU = *M->debug_compile_units_begin();
  DISubprogram *SP =
      DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
                              DINode::FlagZero, DISubprogram::SPFlagZero);
  NewF->setSubprogram(SP);
  return NewF;
}
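
// The BPF_TRAP helper (see BPF.h) is declared extern-weak and placed in the
// ".ksyms" section; loaders such as libbpf conventionally resolve symbols in
// that section against kernel symbols, so (assuming that loader behavior) a
// program referencing the trap helper still links on kernels that provide it.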

SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  TargetLowering::CallLoweringInfo CLI(DAG);
  SmallVector<SDValue> InVals;
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  Function *Fn = createBPFUnreachable(MF.getFunction().getParent());
  auto PtrVT = getPointerTy(MF.getDataLayout());
  CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
  CLI.Chain = N->getOperand(0);
  CLI.IsTailCall = false;
  CLI.CallConv = CallingConv::C;
  CLI.IsVarArg = false;
  CLI.DL = DL;
  CLI.NoMerge = false;
  CLI.DoesNotReturn = true;
  return LowerCall(CLI, InVals);
}

SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
  return getAddr(N, DAG);
}

const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((BPFISD::NodeType)Opcode) {
  case BPFISD::FIRST_NUMBER:
    break;
  case BPFISD::RET_GLUE:
    return "BPFISD::RET_GLUE";
  case BPFISD::CALL:
    return "BPFISD::CALL";
  case BPFISD::SELECT_CC:
    return "BPFISD::SELECT_CC";
  case BPFISD::BR_CC:
    return "BPFISD::BR_CC";
  case BPFISD::Wrapper:
    return "BPFISD::Wrapper";
  case BPFISD::MEMCPY:
    return "BPFISD::MEMCPY";
  }
  return nullptr;
}

static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

static SDValue getTargetNode(JumpTableSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

template <class NodeTy>
SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                   unsigned Flags) const {
  SDLoc DL(N);

  SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}

SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  if (N->getOffset() != 0)
    report_fatal_error("invalid offset for global address: " +
                       Twine(N->getOffset()));

  const GlobalValue *GVal = N->getGlobal();
  SDLoc DL(Op);

  // Wrap it in a TargetGlobalAddress.
  SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);

  // Emit pseudo instruction.
  return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
}

SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc DL(Op);

  // Wrap it in a TargetBlockAddress.
  SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);

  // Emit pseudo instruction.
  return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
}

unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();

  if (!isSigned) {
    Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    return PromotedReg0;
  }
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  if (HasMovsx) {
    BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg2).addReg(Reg);
  } else {
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
        .addReg(PromotedReg0).addImm(32);
    BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
        .addReg(PromotedReg1).addImm(32);
  }

  return PromotedReg2;
}
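
// Without movsx, the signed promotion above materializes the classic shift
// pair; e.g. for a 32-bit value in w1:
//   r1 = w1       ; MOV_32_64, zero-extends into the 64-bit register
//   r1 <<= 32     ; SLL_ri
//   r1 s>>= 32    ; SRA_ri, replicates the sign bit back down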

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
    const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion during lowering BPFISD::MEMCPY which
  // only has two register operands from memcpy semantics, the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
  // a third scratch register to serve as the destination register of the load
  // and the source register of the store.
  //
  // The scratch register carries the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to convince the machine verifier that an Undef value isn't a
  // problem, as we are loading memory into it anyway. The Dead flag is needed
  // because the value in the scratch register isn't supposed to be used by
  // any other instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}

MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  // Build the address-taken map for GlobalVariables and BlockAddresses.
  DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
  for (MachineBasicBlock &MBB : *MF) {
    if (const BasicBlock *BB = MBB.getBasicBlock())
      if (BB->hasAddressTaken())
        AddressTakenBBs[BB] = &MBB;
  }

  MachineOperand &MO = MI.getOperand(1);
  assert(MO.isBlockAddress() || MO.isGlobal());

  MCRegister ResultReg = MI.getOperand(0).getReg();
  Register TmpReg = RegInfo.createVirtualRegister(RC);

  std::vector<MachineBasicBlock *> Targets;
  unsigned JTI;

  if (MO.isBlockAddress()) {
    auto *BA = MO.getBlockAddress();
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);

    Targets.push_back(TgtMBB);
    JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
              ->createJumpTableIndex(Targets);

    BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
        .addJumpTableIndex(JTI);
    BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
        .addReg(TmpReg)
        .addImm(0);
    MI.eraseFromParent();
    return BB;
  }

  // Helper: emit LD_imm64 with a GlobalAddress or JumpTable operand.
  auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
    auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
    if (GV)
      MIB.addGlobalAddress(GV);
    else
      MIB.addJumpTableIndex(JTI);
    MI.eraseFromParent();
    return BB;
  };

  // Must be a global at this point.
  const GlobalValue *GVal = MO.getGlobal();
  const auto *GV = dyn_cast<GlobalVariable>(GVal);

  if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
      !GV->isConstant() || !GV->hasInitializer())
    return emitLDImm64(GVal);

  const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return emitLDImm64(GVal);

  for (const Use &Op : CA->operands()) {
    if (!isa<BlockAddress>(Op))
      return emitLDImm64(GVal);
    auto *BA = cast<BlockAddress>(Op);
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);
    Targets.push_back(TgtMBB);
  }

  JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
            ->createJumpTableIndex(Targets);
  return emitLDImm64(nullptr, JTI);
}
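
// A block address therefore becomes a one-entry jump table: LD_imm64 loads
// the address of the table itself and LDD dereferences slot 0 to obtain the
// actual basic-block address. A private constant array of block addresses is
// turned into a multi-entry jump table addressed the same way.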

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;
  bool isLDimm64Op = Opc == BPF::LDIMM64;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
    report_fatal_error("unhandled instruction type: " + Twine(Opc));
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  if (isLDimm64Op)
    return EmitInstrWithCustomInserterLDimm64(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  // TrueVal = ...
  // jmp_XX r1, r2 goto Copy1MBB
  // fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y) \
  case ISD::X: \
    if (is32BitCmp && HasJmp32) \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
    else \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparisons. Any 32-bit comparison
  // needs to be promoted; however, if the 32-bit comparison operands are
  // destination registers then they are implicitly zero-extended already, so
  // there is no need for an explicit zero-extend sequence for them.
  //
  // We simply do the extension for all situations in this method, and try to
  // remove the unnecessary ones in the BPFMIPeephole pass.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // Check before we build the J*_ri instruction.
    if (!isInt<32>(imm32))
      report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  // %FalseValue = ...
  // # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &DL,
                                          LLVMContext &Context,
                                          EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}

bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}
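
// In other words, the only addressing mode BPF memory instructions support is
// a single base register plus an immediate offset ("r+i", as in
// "*(u64 *)(r1 + 8)"); scaled-index and reg+reg forms are rejected so that
// address computations are materialized with explicit ALU instructions.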