#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "X86-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
  // From the X86InstructionSelector class declaration:
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;
#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
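// The constructor caches the subtarget's instruction and register info and
// runs the TableGen'erated predicate and temporaries initializers pulled in
// from X86GenGlobalISel.inc.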
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }
  if (RB.getID() == X86::PSRRegBankID) {
    if (Ty.getSizeInBits() == 80)
      return &X86::RFP80RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::RFP64RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::RFP32RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}
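// Convenience overload that looks up Reg's bank first, plus a helper that
// maps a GPR class to the sub-register index used when narrowing into it.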
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, Register Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }
  return SubIdx;
}
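// For a physical general-purpose register, return the class matching its
// width.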
static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
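// Constrain the virtual register operands of a debug instruction. Debug
// uses must never affect codegen, so an unexpected size/bank only emits a
// warning instead of failing selection.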
bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,
                                              MachineRegisterInfo &MRI) const {
  for (MachineOperand &MO : I.operands()) {
    if (!MO.isReg() || !MO.getReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg.isPhysical())
      continue;
    LLT Ty = MRI.getType(Reg);
    const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
    const TargetRegisterClass *RC =
        dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
    if (!RC) {
      const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
      RC = getRegClass(Ty, RB);
      if (!RC) {
        LLVM_DEBUG(
            dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
        break;
      }
    }
    RBI.constrainGenericRegister(Reg, *RC, MRI);
  }

  return true;
}
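// Select a COPY that may touch physical registers: widen or narrow across
// GPR classes with sub-register indices when the two sides disagree in
// size, then constrain the virtual side and rewrite to a target COPY.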
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {
      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform a truncate.
    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg; it will get constrained when we hit another
  // of its uses or its defs. Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
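// Main entry point: try the TableGen'erated selector first (selectImpl) and
// fall back to the hand-written routines below for the opcodes it rejects.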
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.
    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    if (I.isDebugInstr())
      return selectDebugInstr(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_USUBO:
    return selectUAddSub(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectMulDivRem(I, MRI, MF);
  case TargetOpcode::G_SELECT:
    return selectSelect(I, MRI, MF);
  }

  return false;
}
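// Pick the MOV32/MOV64 load/store opcode for a pointer-typed G_LOAD/G_STORE;
// returns Opc unchanged when no match applies.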
unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,
                                                   const RegisterBank &RB,
                                                   unsigned Opc) const {
  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");
  bool IsLoad = (Opc == TargetOpcode::G_LOAD);

  if (Ty == LLT::pointer(0, 32) && X86::GPRRegBankID == RB.getID())
    return IsLoad ? X86::MOV32rm : X86::MOV32mr;
  if (Ty == LLT::pointer(0, 64) && X86::GPRRegBankID == RB.getID())
    return IsLoad ? X86::MOV64rm : X86::MOV64mr;
  return Opc;
}
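// Pick a load/store opcode for a scalar or vector type on a given bank,
// preferring aligned moves when the access alignment allows it and using the
// AVX/AVX-512/VLX variants the subtarget provides.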
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;
  } else if (Ty == LLT::scalar(64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
    if (X86::PSRRegBankID == RB.getID())
      return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;
  } else if (Ty == LLT::scalar(80)) {
    return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                       : HasAVX512
                           ? X86::VMOVAPSZ128rm_NOVLX
                           : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                       : HasAVX512
                           ? X86::VMOVAPSZ128mr_NOVLX
                           : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                       : HasAVX512
                           ? X86::VMOVUPSZ128rm_NOVLX
                           : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                       : HasAVX512
                           ? X86::VMOVUPSZ128mr_NOVLX
                           : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                       : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                   : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                       : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                   : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                       : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                   : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                       : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                   : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
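// Fold a pointer-defining instruction (frame index, G_PTR_ADD with a small
// constant offset, global value, constant pool) into an X86AddressMode.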
static bool X86SelectAddress(MachineInstr &I, const X86TargetMachine &TM,
                             const MachineRegisterInfo &MRI,
                             const X86Subtarget &STI, X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  switch (I.getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FRAME_INDEX:
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return true;
  case TargetOpcode::G_PTR_ADD: {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return true;
      }
    }
    break;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      return false; // TODO: we don't support TLS yet.
    }
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;
    AM.GV = GV;
    AM.GVOpFlags = STI.classifyGlobalReference(GV);
    // TODO: The ABI requires an extra load, not supported yet.
    if (isGlobalStubReference(AM.GVOpFlags))
      return false;
    // TODO: This reference is relative to the pic base, not supported yet.
    if (isGlobalRelativeToPICBase(AM.GVOpFlags))
      return false;
    if (STI.isPICStyleRIPRel()) {
      // Use rip-relative addressing.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0 &&
             "RIP-relative addresses can't have additional register operands");
      AM.Base.Reg = X86::RIP;
    }
    return true;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    // TODO: Need a separate move for the Large code model.
    if (TM.getCodeModel() == CodeModel::Large)
      return false;

    AM.GVOpFlags = STI.classifyLocalReference(nullptr);
    if (AM.GVOpFlags == X86II::MO_GOTOFF)
      AM.Base.Reg = STI.getInstrInfo()->getGlobalBaseReg(I.getMF());
    else if (STI.is64Bit())
      AM.Base.Reg = X86::RIP;
    AM.Disp = I.getOperand(1).getIndex();
    return true;
  }
  }

  // Default behavior: the address is the pointer register itself.
  AM.Base.Reg = I.getOperand(0).getReg();
  return true;
}
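// Select G_LOAD/G_STORE by folding the address computation into the memory
// operand; only unordered atomics are supported here.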
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "Only G_STORE and G_LOAD are expected for selection");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // For unordered operations the MMO already on the instruction carries
    // the atomic information, so selecting an opcode of the right width is
    // sufficient. Anything stronger is rejected.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), TM, MRI,
                        STI, AM))
    return false;

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.removeOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr); the X86 store instruction is (Addr, VAL).
    I.removeOperand(0);
    I.removeOperand(1);
    addFullAddress(MIB, AM);
    MIB.addUse(DefReg);
  }
  bool Constrained = constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  I.addImplicitDefUseOperands(MF);
  return Constrained;
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}
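// Lower G_FRAME_INDEX and simple G_PTR_ADD into an LEA of the address.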
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
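// Lower G_GLOBAL_VALUE into an LEA of the computed address mode.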
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  X86AddressMode AM;
  if (!X86SelectAddress(I, TM, MRI, STI, AM))
    return false;

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.removeOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
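// Materialize a GPR G_CONSTANT with the narrowest MOVri form that fits.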
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), could use MOV32ri with implicit
    // zero-extension.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
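// A trunc/anyext between a scalar FR32/FR64 class and a 128-bit vector
// class is just a register-class change, so it can be selected as a COPY.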
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,
    const TargetRegisterClass *DstRC, const Register SrcReg,
    const TargetRegisterClass *SrcRC) const {
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
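// G_TRUNC/G_PTRTOINT on the GPR bank become a sub-register COPY.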
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this truncates a value that lives in a vector class into a floating
  // class, replace it with a COPY; it is selectable as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
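// G_ZEXT from s1: widen with INSERT_SUBREG when needed, then mask the low
// bit with AND. Wider integer zexts are matched by the TableGen patterns,
// as the asserts below document.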
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri32;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
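// G_ANYEXT: either a plain COPY when both sides share a register class, or a
// SUBREG_TO_REG that reads the source as a sub-register of the result.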
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this extends a value that lives in a floating class into a vector
  // class, replace it with a COPY; it is selectable as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
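// G_ICMP: a CMPrr of the operands followed by SETCC with the translated x86
// condition code.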
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
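// G_FCMP: UCOMIS*/UCOM_FpIr* compare. FCMP_OEQ and FCMP_UNE cannot be
// expressed with a single flag test, so they combine two SETCCs with
// AND/OR.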
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  assert(LhsReg.isVirtual() && RhsReg.isVirtual() &&
         "Both arguments of FCMP need to be virtual!");
  auto *LhsBank = RBI.getRegBank(LhsReg, MRI, TRI);
  [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);
  assert((LhsBank == RhsBank) &&
         "Both banks assigned to FCMP arguments need to be same!");

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32
                                                  : X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64
                                                  : X86::UCOMISDrr;
    break;
  case 80:
    OpCmp = X86::UCOM_FpIr80;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(X86::SETCCr), FlagReg1)
        .addImm(SETFOpc[0]);
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(X86::SETCCr), FlagReg2)
        .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
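// G_UADDO/G_UADDE/G_USUBO/G_USUBE: ADD/ADC/SUB/SBB through EFLAGS, wiring
// the carry in from a recognized producer and copying the carry out from
// EFLAGS.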
bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE ||
          I.getOpcode() == TargetOpcode::G_UADDO ||
          I.getOpcode() == TargetOpcode::G_USUBE ||
          I.getOpcode() == TargetOpcode::G_USUBO) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
               I.getOpcode() == TargetOpcode::G_USUBO;
  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
                    I.getOpcode() == TargetOpcode::G_USUBE;

  const LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

  // TODO: Handle immediate argument variants?
  unsigned OpADC, OpADD, OpSBB, OpSUB;
  switch (DstTy.getSizeInBits()) {
  case 8:
    OpADC = X86::ADC8rr;
    OpADD = X86::ADD8rr;
    OpSBB = X86::SBB8rr;
    OpSUB = X86::SUB8rr;
    break;
  case 16:
    OpADC = X86::ADC16rr;
    OpADD = X86::ADD16rr;
    OpSBB = X86::SBB16rr;
    OpSUB = X86::SUB16rr;
    break;
  case 32:
    OpADC = X86::ADC32rr;
    OpADD = X86::ADD32rr;
    OpSBB = X86::SBB32rr;
    OpSUB = X86::SUB32rr;
    break;
  case 64:
    OpADC = X86::ADC64rr;
    OpADD = X86::ADD64rr;
    OpSBB = X86::SBB64rr;
    OpSUB = X86::SUB64rr;
    break;
  default:
    llvm_unreachable("selectUAddSub unsupported type.");
  }

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

  unsigned Opcode = IsSub ? OpSUB : OpADD;

  // G_UADDE/G_USUBE - find the CarryIn def instruction.
  if (HasCarryIn) {
    Register CarryInReg = I.getOperand(4).getReg();
    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
      CarryInReg = Def->getOperand(1).getReg();
      Def = MRI.getVRegDef(CarryInReg);
    }

    // TODO: handle more CF-generating instructions.
    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
        Def->getOpcode() == TargetOpcode::G_UADDO ||
        Def->getOpcode() == TargetOpcode::G_USUBE ||
        Def->getOpcode() == TargetOpcode::G_USUBO) {
      // Carry set by a previous ADD/SUB.
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
              X86::EFLAGS)
          .addReg(CarryInReg);

      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
        return false;

      Opcode = IsSub ? OpSBB : OpADC;
    } else if (auto Val = getIConstantVRegVal(CarryInReg, MRI)) {
      // The carry is constant; only zero is supported.
      if (*Val != 0)
        return false;

      Opcode = IsSub ? OpSUB : OpADD;
    } else
      return false;
  }

  MachineInstr &Inst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(Inst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
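// G_EXTRACT of a subvector: a sub-register copy at index 0, a VEXTRACT*
// instruction otherwise.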
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not an extract of a subvector.

  if (Index == 0) {
    // Replace with a sub-register copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rri));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
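// Emit a COPY that reads SrcReg through sub_xmm/sub_ymm.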
bool X86InstructionSelector::emitExtractSubreg(Register DstReg,
                                               Register SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}
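// Emit a COPY that defines the low sub_xmm/sub_ymm lanes of DstReg.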
bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
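// G_INSERT of a subvector: a sub-register copy when inserting at index 0
// over an IMPLICIT_DEF, a VINSERT* instruction otherwise.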
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not an insert of a subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace with a sub-register copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rri));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();
  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
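// G_UNMERGE_VALUES: split into one G_EXTRACT per result and reselect them.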
bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
                                                 MachineRegisterInfo &MRI,
                                                 MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}
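// G_MERGE_VALUES/G_CONCAT_VECTORS: build up the result with a chain of
// G_INSERTs and reselect them.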
bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split into inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source use insertSubReg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}
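// G_BRCOND: TEST8ri of the condition's low bit followed by JNE.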
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) &&
         "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
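// G_FCONSTANT: place the constant in the constant pool and load it back.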
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  const DataLayout &DL = MF.getDataLayout();
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the x86-64 large code model, constant-pool addresses don't fit
    // in an immediate field, so materialize the address first.
    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);
  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // The address fits in the immediate field: x86-32 always, and x86-64
    // with the small code model. x86-32 PIC would need a PIC base register,
    // which is not supported here.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF)
      return false;
    if (STI.is64Bit() && TM.isPositionIndependent())
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
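// G_IMPLICIT_DEF/G_PHI only need a register class on the result before being
// rewritten to their target forms.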
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}
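// G_MUL/G_*MULH/G_*DIV/G_*REM via the fixed-register MUL/IMUL/DIV/IDIV
// forms. The table below mirrors the X86FastISel scheme: the dividend lives
// in AX/DX:AX/EDX:EAX/RDX:RAX and the result is read from the low or high
// half depending on the operation.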
bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,
                                             MachineRegisterInfo &MRI,
                                             MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_MUL ||
          I.getOpcode() == TargetOpcode::G_SMULH ||
          I.getOpcode() == TargetOpcode::G_UMULH ||
          I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 7;   // SDiv, SRem, UDiv, URem, Mul, SMulH, UMulH
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;

  const static struct MulDivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct MulDivRemResult {
      unsigned OpMulDivRem;  // The specific opcode.
      unsigned OpSignExtend; // Opcode for sign-extending lowreg into highreg,
                             // or copying a zero into highreg.
      unsigned OpCopy;       // Opcode for copying the dividend into lowreg, or
                             // zero/sign-extending into lowreg for i8.
      unsigned ResultReg;    // Register containing the desired result.
      bool IsOpSigned;       // Whether to use the signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S}, // Mul
           {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SMulH
           {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},  // UMulH
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},     // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},     // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},  // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},  // URem
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S}, // Mul
           {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S}, // SMulH
           {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},  // UMulH
       }},                                                 // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},     // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},     // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},  // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},  // URem
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S}, // Mul
           {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S}, // SMulH
           {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},  // UMulH
       }},                                                  // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},     // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},     // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},  // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},  // URem
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S}, // Mul
           {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S}, // SMulH
           {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},  // UMulH
       }},                                                  // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected mul/div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  case TargetOpcode::G_MUL:
    OpIndex = 4;
    break;
  case TargetOpcode::G_SMULH:
    OpIndex = 5;
    break;
  case TargetOpcode::G_UMULH:
    OpIndex = 6;
    break;
  }

  const MulDivRemEntry &TypeEntry = *OpEntryIt;
  const MulDivRemEntry::MulDivRemResult &OpEntry =
      TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);

  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register; the needed operations are not uniform enough to fit into
      // the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }

  // Generate the DIV/IDIV/MUL/IMUL instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))
      .addReg(Op2Reg);

  // For the i8 remainder, AH cannot be referenced directly under REX
  // encoding, as that would produce bogus copies like %r9b = COPY %ah.
  // Reference AX instead and shift the result into place.
  if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(ResultSuperReg, 0, X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.ResultReg);
  }
  I.eraseFromParent();
  return true;
}
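// G_SELECT: TEST the condition, then CMOV (or a CMOV pseudo on subtargets
// without CMOV).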
bool X86InstructionSelector::selectSelect(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  GSelect &Sel = cast<GSelect>(I);
  Register DstReg = Sel.getReg(0);
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::TEST32rr))
      .addReg(Sel.getCondReg())
      .addReg(Sel.getCondReg());

  unsigned OpCmp;
  LLT Ty = MRI.getType(DstReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMOV_GR8;
    break;
  case 16:
    OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
    break;
  case 32:
    OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
    break;
  case 64:
    assert(STI.is64Bit() && STI.canUseCMOV());
    OpCmp = X86::CMOV64rr;
    break;
  }
  BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
      .addReg(Sel.getTrueReg())
      .addReg(Sel.getFalseReg())
      .addImm(X86::COND_E);

  const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
    return false;
  }

  Sel.eraseFromParent();
  return true;
}
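// Complex-pattern renderer for addressing modes, plus the factory function
// hooked into the X86 pass pipeline.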
InstructionSelector::ComplexRendererFns
X86InstructionSelector::selectAddr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(Root.getReg()), TM, MRI, STI, AM);

  if (AM.IndexReg)
    return std::nullopt;

  return {{
      [=](MachineInstrBuilder &MIB) { // Base
        if (AM.BaseType == X86AddressMode::RegBase)
          MIB.addUse(AM.Base.Reg);
        else {
          assert(AM.BaseType == X86AddressMode::FrameIndexBase &&
                 "Unknown type of address base");
          MIB.addFrameIndex(AM.Base.FrameIndex);
        }
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); }, // Scale
      [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },        // Index
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Disp); },  // Disp
      [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },        // Segment
  }};
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   const X86Subtarget &Subtarget,
                                   const X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}