32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
// Map a branch-hint immediate to the SPIR-V SelectionControl mask (used on
// OpSelectionMerge). NOTE(review): the if-conditions guarding the first two
// returns (original lines 49/51/53) are elided from this chunk; only the three
// possible results -- Flatten, DontFlatten, None -- are visible here.
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
    int Imm) {
 50  return SPIRV::SelectionControl::Flatten;
 52  return SPIRV::SelectionControl::DontFlatten;
// Fallback: no selection-control hint requested.
 54  return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
210 template <
bool Signed>
213 template <
bool Signed>
231 bool IsSigned,
unsigned Opcode)
const;
233 bool IsSigned)
const;
239 bool IsSigned)
const;
278 GL::GLSLExtInst GLInst)
const;
308 std::pair<Register, bool>
310 const SPIRVType *ResType =
nullptr)
const;
322 SPIRV::StorageClass::StorageClass SC)
const;
329 SPIRV::StorageClass::StorageClass SC,
341 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
344 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
// Returns true when an image handle's sampled type is a signed integer.
// Plain "spirv.Image" target types report false-ish handling; the signed
// variant is encoded as the distinct "spirv.SignedImage" target type.
// NOTE(review): the declaration of TET (presumably a cast of HandleType to
// TargetExtType) sits on a line elided from this chunk -- confirm upstream.
351bool sampledTypeIsSignedInteger(
    const llvm::Type *HandleType) {
 353  if (
      TET->getTargetExtName() ==
      "spirv.Image") {
// If it is not "spirv.Image", the only other handle spelling accepted here
// is "spirv.SignedImage".
 356  assert(
      TET->getTargetExtName() ==
      "spirv.SignedImage");
// Signed iff the sampled (first) type parameter is an integer type.
 357  return TET->getTypeParameter(0)->isIntegerTy();
361#define GET_GLOBALISEL_IMPL
362#include "SPIRVGenGlobalISel.inc"
363#undef GET_GLOBALISEL_IMPL
369 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
372#include
"SPIRVGenGlobalISel.inc"
375#include
"SPIRVGenGlobalISel.inc"
387 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
// Normalizes virtual-register LLT types and register classes for MF before
// selection. Runs once per MachineFunction (memoized via HasVRegsReset).
391void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
// Already processed this function -- nothing to do.
 392  if (HasVRegsReset == &MF)
// First pass: walk every virtual register of the function.
// NOTE(review): the loop body beyond reading the register's LLT is elided
// from this chunk.
 397  for (
      unsigned I = 0,
               E =
                   MRI.getNumVirtRegs();
      I !=
      E; ++
      I) {
 399    LLT RegType =
        MRI.getType(
            Reg);
// Second pass: for every ASSIGN_TYPE pseudo, make its def register agree
// with its source register in both LLT type and register class.
 407  for (
      const auto &
          MBB : MF) {
 408    for (
        const auto &
            MI :
        MBB) {
 411      if (
          MI.getOpcode() != SPIRV::ASSIGN_TYPE)
 415      LLT DstType =
          MRI.getType(DstReg);
 417      LLT SrcType =
          MRI.getType(SrcReg);
// Propagate the source LLT onto the destination when they disagree.
 418      if (DstType != SrcType)
 419        MRI.setType(DstReg,
                      MRI.getType(SrcReg));
 421      const TargetRegisterClass *DstRC =
          MRI.getRegClassOrNull(DstReg);
 422      const TargetRegisterClass *SrcRC =
          MRI.getRegClassOrNull(SrcReg);
// Likewise align the register class, but only when the source has one.
 423      if (DstRC != SrcRC && SrcRC)
 424        MRI.setRegClass(DstReg, SrcRC);
440 case TargetOpcode::G_CONSTANT:
441 case TargetOpcode::G_FCONSTANT:
443 case TargetOpcode::G_INTRINSIC:
444 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
445 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
447 Intrinsic::spv_const_composite;
448 case TargetOpcode::G_BUILD_VECTOR:
449 case TargetOpcode::G_SPLAT_VECTOR: {
460 case SPIRV::OpConstantTrue:
461 case SPIRV::OpConstantFalse:
462 case SPIRV::OpConstantI:
463 case SPIRV::OpConstantF:
464 case SPIRV::OpConstantComposite:
465 case SPIRV::OpConstantCompositeContinuedINTEL:
466 case SPIRV::OpConstantSampler:
467 case SPIRV::OpConstantNull:
469 case SPIRV::OpConstantFunctionPointerINTEL:
485 for (
const auto &MO :
MI.all_defs()) {
487 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
490 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
491 MI.isLifetimeMarker())
495 if (
MI.mayStore() ||
MI.isCall() ||
496 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
497 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
// GlobalISel entry point: selects one generic MachineInstr into SPIR-V MIR.
// Handles the SPIRV::ASSIGN_TYPE pseudo specially, then dispatches everything
// else through spvSelect(). Returns true on successful selection.
502bool SPIRVInstructionSelector::select(MachineInstr &
                                      I) {
// Ensure vreg types/classes are normalized for this function first.
 503  resetVRegsType(*
      I.getParent()->getParent());
 505  assert(
      I.getParent() &&
      "Instruction should be in a basic block!");
 506  assert(
      I.getParent()->getParent() &&
      "Instruction should be in a function!");
// ASSIGN_TYPE glues a type to a value; it is folded away here rather than
// emitted as a real instruction.
 511  if (Opcode == SPIRV::ASSIGN_TYPE) {
 512    Register DstReg =
        I.getOperand(0).getReg();
 513    Register SrcReg =
        I.getOperand(1).getReg();
 514    auto *
        Def =
            MRI->getVRegDef(SrcReg);
// Non-constant sources need their defining instruction selected eagerly.
 516        Def->getOpcode() != TargetOpcode::G_CONSTANT &&
 517        Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
// G_SELECT feeding ASSIGN_TYPE: retarget the select's def to DstReg and
// drop both the old def and the ASSIGN_TYPE.
 519      if (
          Def->getOpcode() == TargetOpcode::G_SELECT) {
 520        Register SelectDstReg =
            Def->getOperand(0).getReg();
 524        Def->removeFromParent();
 525        MRI->replaceRegWith(DstReg, SelectDstReg);
 527        I.removeFromParent();
// Otherwise try the TableGen-generated selector on the source def.
 529        Res = selectImpl(
            I, *CoverageInfo);
// Only G_CONSTANT is allowed to remain unselected at this point.
 531        if (!Res &&
            Def->getOpcode() != TargetOpcode::G_CONSTANT) {
 532          dbgs() <<
              "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
 536        assert(Res ||
               Def->getOpcode() == TargetOpcode::G_CONSTANT);
// Merge the source register into the destination and erase the pseudo.
 543    MRI->setRegClass(SrcReg,
                       MRI->getRegClass(DstReg));
 544    MRI->replaceRegWith(SrcReg, DstReg);
 546    I.removeFromParent();
 548  }
  else if (
      I.getNumDefs() == 1) {
// Instructions proven dead earlier are skipped entirely.
 555    if (DeadMIs.contains(&
                           I)) {
// Generic opcodes are not expected to carry implicit operands.
 565  if (
      I.getNumOperands() !=
      I.getNumExplicitOperands()) {
 566    LLVM_DEBUG(
        errs() <<
        "Generic instr has unexpected implicit operands\n");
 572  bool HasDefs =
      I.getNumDefs() > 0;
// Every def-producing opcode must have a SPIR-V result type, except the two
// opcodes that legitimately lack one at this stage.
 575  assert(!HasDefs || ResType ||
         I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
 576         I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
// Main dispatch; on success the original generic instr is removed.
 577  if (spvSelect(ResVReg, ResType,
                  I)) {
 579    for (
        unsigned i = 0; i <
                        I.getNumDefs(); ++i)
 582    I.removeFromParent();
590 case TargetOpcode::G_CONSTANT:
591 case TargetOpcode::G_FCONSTANT:
593 case TargetOpcode::G_SADDO:
594 case TargetOpcode::G_SSUBO:
601 MachineInstr &
I)
const {
602 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
603 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
604 if (DstRC != SrcRC && SrcRC)
605 MRI->setRegClass(DestReg, SrcRC);
606 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
607 TII.get(TargetOpcode::COPY))
613bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
615 MachineInstr &
I)
const {
616 const unsigned Opcode =
I.getOpcode();
618 return selectImpl(
I, *CoverageInfo);
620 case TargetOpcode::G_CONSTANT:
621 case TargetOpcode::G_FCONSTANT:
622 return selectConst(ResVReg, ResType,
I);
623 case TargetOpcode::G_GLOBAL_VALUE:
624 return selectGlobalValue(ResVReg,
I);
625 case TargetOpcode::G_IMPLICIT_DEF:
626 return selectOpUndef(ResVReg, ResType,
I);
627 case TargetOpcode::G_FREEZE:
628 return selectFreeze(ResVReg, ResType,
I);
630 case TargetOpcode::G_INTRINSIC:
631 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
632 case TargetOpcode::G_INTRINSIC_CONVERGENT:
633 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
634 return selectIntrinsic(ResVReg, ResType,
I);
635 case TargetOpcode::G_BITREVERSE:
636 return selectBitreverse(ResVReg, ResType,
I);
638 case TargetOpcode::G_BUILD_VECTOR:
639 return selectBuildVector(ResVReg, ResType,
I);
640 case TargetOpcode::G_SPLAT_VECTOR:
641 return selectSplatVector(ResVReg, ResType,
I);
643 case TargetOpcode::G_SHUFFLE_VECTOR: {
644 MachineBasicBlock &BB = *
I.getParent();
645 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
648 .
addUse(
I.getOperand(1).getReg())
649 .
addUse(
I.getOperand(2).getReg());
650 for (
auto V :
I.getOperand(3).getShuffleMask())
654 case TargetOpcode::G_MEMMOVE:
655 case TargetOpcode::G_MEMCPY:
656 case TargetOpcode::G_MEMSET:
657 return selectMemOperation(ResVReg,
I);
659 case TargetOpcode::G_ICMP:
660 return selectICmp(ResVReg, ResType,
I);
661 case TargetOpcode::G_FCMP:
662 return selectFCmp(ResVReg, ResType,
I);
664 case TargetOpcode::G_FRAME_INDEX:
665 return selectFrameIndex(ResVReg, ResType,
I);
667 case TargetOpcode::G_LOAD:
668 return selectLoad(ResVReg, ResType,
I);
669 case TargetOpcode::G_STORE:
670 return selectStore(
I);
672 case TargetOpcode::G_BR:
673 return selectBranch(
I);
674 case TargetOpcode::G_BRCOND:
675 return selectBranchCond(
I);
677 case TargetOpcode::G_PHI:
678 return selectPhi(ResVReg, ResType,
I);
680 case TargetOpcode::G_FPTOSI:
681 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
682 case TargetOpcode::G_FPTOUI:
683 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
685 case TargetOpcode::G_FPTOSI_SAT:
686 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
687 case TargetOpcode::G_FPTOUI_SAT:
688 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
690 case TargetOpcode::G_SITOFP:
691 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
692 case TargetOpcode::G_UITOFP:
693 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
695 case TargetOpcode::G_CTPOP:
696 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
697 case TargetOpcode::G_SMIN:
698 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
699 case TargetOpcode::G_UMIN:
700 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
702 case TargetOpcode::G_SMAX:
703 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
704 case TargetOpcode::G_UMAX:
705 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
707 case TargetOpcode::G_SCMP:
708 return selectSUCmp(ResVReg, ResType,
I,
true);
709 case TargetOpcode::G_UCMP:
710 return selectSUCmp(ResVReg, ResType,
I,
false);
712 case TargetOpcode::G_STRICT_FMA:
713 case TargetOpcode::G_FMA:
714 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
716 case TargetOpcode::G_STRICT_FLDEXP:
717 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
719 case TargetOpcode::G_FPOW:
720 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
721 case TargetOpcode::G_FPOWI:
722 return selectExtInst(ResVReg, ResType,
I, CL::pown);
724 case TargetOpcode::G_FEXP:
725 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
726 case TargetOpcode::G_FEXP2:
727 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
729 case TargetOpcode::G_FLOG:
730 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
731 case TargetOpcode::G_FLOG2:
732 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
733 case TargetOpcode::G_FLOG10:
734 return selectLog10(ResVReg, ResType,
I);
736 case TargetOpcode::G_FABS:
737 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
738 case TargetOpcode::G_ABS:
739 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
741 case TargetOpcode::G_FMINNUM:
742 case TargetOpcode::G_FMINIMUM:
743 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
744 case TargetOpcode::G_FMAXNUM:
745 case TargetOpcode::G_FMAXIMUM:
746 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
748 case TargetOpcode::G_FCOPYSIGN:
749 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
751 case TargetOpcode::G_FCEIL:
752 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
753 case TargetOpcode::G_FFLOOR:
754 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
756 case TargetOpcode::G_FCOS:
757 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
758 case TargetOpcode::G_FSIN:
759 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
760 case TargetOpcode::G_FTAN:
761 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
762 case TargetOpcode::G_FACOS:
763 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
764 case TargetOpcode::G_FASIN:
765 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
766 case TargetOpcode::G_FATAN:
767 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
768 case TargetOpcode::G_FATAN2:
769 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
770 case TargetOpcode::G_FCOSH:
771 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
772 case TargetOpcode::G_FSINH:
773 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
774 case TargetOpcode::G_FTANH:
775 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
777 case TargetOpcode::G_STRICT_FSQRT:
778 case TargetOpcode::G_FSQRT:
779 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
781 case TargetOpcode::G_CTTZ:
782 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
783 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
784 case TargetOpcode::G_CTLZ:
785 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
786 return selectExtInst(ResVReg, ResType,
I, CL::clz);
788 case TargetOpcode::G_INTRINSIC_ROUND:
789 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
790 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
791 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
792 case TargetOpcode::G_INTRINSIC_TRUNC:
793 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
794 case TargetOpcode::G_FRINT:
795 case TargetOpcode::G_FNEARBYINT:
796 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
798 case TargetOpcode::G_SMULH:
799 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
800 case TargetOpcode::G_UMULH:
801 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
803 case TargetOpcode::G_SADDSAT:
804 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
805 case TargetOpcode::G_UADDSAT:
806 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
807 case TargetOpcode::G_SSUBSAT:
808 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
809 case TargetOpcode::G_USUBSAT:
810 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
812 case TargetOpcode::G_UADDO:
813 return selectOverflowArith(ResVReg, ResType,
I,
814 ResType->
getOpcode() == SPIRV::OpTypeVector
815 ? SPIRV::OpIAddCarryV
816 : SPIRV::OpIAddCarryS);
817 case TargetOpcode::G_USUBO:
818 return selectOverflowArith(ResVReg, ResType,
I,
819 ResType->
getOpcode() == SPIRV::OpTypeVector
820 ? SPIRV::OpISubBorrowV
821 : SPIRV::OpISubBorrowS);
822 case TargetOpcode::G_UMULO:
823 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
824 case TargetOpcode::G_SMULO:
825 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
827 case TargetOpcode::G_SEXT:
828 return selectExt(ResVReg, ResType,
I,
true);
829 case TargetOpcode::G_ANYEXT:
830 case TargetOpcode::G_ZEXT:
831 return selectExt(ResVReg, ResType,
I,
false);
832 case TargetOpcode::G_TRUNC:
833 return selectTrunc(ResVReg, ResType,
I);
834 case TargetOpcode::G_FPTRUNC:
835 case TargetOpcode::G_FPEXT:
836 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
838 case TargetOpcode::G_PTRTOINT:
839 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
840 case TargetOpcode::G_INTTOPTR:
841 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
842 case TargetOpcode::G_BITCAST:
843 return selectBitcast(ResVReg, ResType,
I);
844 case TargetOpcode::G_ADDRSPACE_CAST:
845 return selectAddrSpaceCast(ResVReg, ResType,
I);
846 case TargetOpcode::G_PTR_ADD: {
848 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
852 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
853 (*II).getOpcode() == TargetOpcode::COPY ||
854 (*II).getOpcode() == SPIRV::OpVariable) &&
857 bool IsGVInit =
false;
859 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
860 UseEnd =
MRI->use_instr_end();
861 UseIt != UseEnd; UseIt = std::next(UseIt)) {
862 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
863 (*UseIt).getOpcode() == SPIRV::OpVariable) {
873 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
876 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
877 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
886 "incompatible result and operand types in a bitcast");
888 MachineInstrBuilder MIB =
889 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
896 ? SPIRV::OpInBoundsAccessChain
897 : SPIRV::OpInBoundsPtrAccessChain))
901 .
addUse(
I.getOperand(2).getReg())
904 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
908 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
910 .
addUse(
I.getOperand(2).getReg())
918 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
921 .
addImm(
static_cast<uint32_t
>(
922 SPIRV::Opcode::InBoundsPtrAccessChain))
925 .
addUse(
I.getOperand(2).getReg());
929 case TargetOpcode::G_ATOMICRMW_OR:
930 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
931 case TargetOpcode::G_ATOMICRMW_ADD:
932 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
933 case TargetOpcode::G_ATOMICRMW_AND:
934 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
935 case TargetOpcode::G_ATOMICRMW_MAX:
936 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
937 case TargetOpcode::G_ATOMICRMW_MIN:
938 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
939 case TargetOpcode::G_ATOMICRMW_SUB:
940 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
941 case TargetOpcode::G_ATOMICRMW_XOR:
942 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
943 case TargetOpcode::G_ATOMICRMW_UMAX:
944 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
945 case TargetOpcode::G_ATOMICRMW_UMIN:
946 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
947 case TargetOpcode::G_ATOMICRMW_XCHG:
948 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
949 case TargetOpcode::G_ATOMIC_CMPXCHG:
950 return selectAtomicCmpXchg(ResVReg, ResType,
I);
952 case TargetOpcode::G_ATOMICRMW_FADD:
953 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
954 case TargetOpcode::G_ATOMICRMW_FSUB:
956 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
958 case TargetOpcode::G_ATOMICRMW_FMIN:
959 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
960 case TargetOpcode::G_ATOMICRMW_FMAX:
961 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
963 case TargetOpcode::G_FENCE:
964 return selectFence(
I);
966 case TargetOpcode::G_STACKSAVE:
967 return selectStackSave(ResVReg, ResType,
I);
968 case TargetOpcode::G_STACKRESTORE:
969 return selectStackRestore(
I);
971 case TargetOpcode::G_UNMERGE_VALUES:
977 case TargetOpcode::G_TRAP:
978 case TargetOpcode::G_DEBUGTRAP:
979 case TargetOpcode::G_UBSANTRAP:
980 case TargetOpcode::DBG_LABEL:
// selectExtInst overload for instructions that exist only in the GLSL.std.450
// extended set. Reports a descriptive error when the GLSL set is unavailable
// on the current target, otherwise forwards to the list-based overload.
988bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
 991                                             GL::GLSLExtInst GLInst)
    const {
// Target cannot use GLSL.std.450 -- build a diagnostic naming the instr.
 993          SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
 995    raw_string_ostream OS(DiagMsg);
 996    I.print(OS,
            true,
            false,
            false,
            false);
 997    DiagMsg +=
        " is only supported with the GLSL extended instruction set.\n";
// Delegate to the generic overload with a single GLSL candidate.
1000  return selectExtInst(ResVReg, ResType,
                          I,
 1001                       {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
// selectExtInst overload for OpenCL-only extended instructions: forwards to
// the list-based overload with a single OpenCL.std candidate.
1004bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
 1007                                             CL::OpenCLExtInst CLInst)
    const {
 1008  return selectExtInst(ResVReg, ResType,
                           I,
 1009                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// selectExtInst overload for instructions available in both extended sets:
// offers the OpenCL.std and GLSL.std.450 spellings (in that order) and lets
// the list-based overload pick whichever set the target supports.
1012bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
 1015                                             CL::OpenCLExtInst CLInst,
 1016                                             GL::GLSLExtInst GLInst)
    const {
 1017  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
 1018                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
 1019  return selectExtInst(ResVReg, ResType,
                           I, ExtInsts);
// Core selectExtInst: tries each (instruction-set, opcode) candidate in order
// and emits an OpExtInst for the first set the target can use.
// NOTE(review): the set-availability check and the final return are on lines
// elided from this chunk.
1022bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
 1027  for (
       const auto &Ex : Insts) {
 1028    SPIRV::InstructionSet::InstructionSet
         Set = Ex.first;
 1029    uint32_t Opcode = Ex.second;
// OpExtInst result := <set-id> <opcode> <operands...>
 1032    auto MIB =
         BuildMI(BB,
                 I,
                 I.getDebugLoc(),
                 TII.get(SPIRV::OpExtInst))
 1035                   .
                   addImm(
                       static_cast<uint32_t
                                   >(Set))
 1037    const unsigned NumOps =
         I.getNumOperands();
// Skip the intrinsic-ID operand; only forward real value operands.
 1040        I.getOperand(Index).getType() ==
 1041            MachineOperand::MachineOperandType::MO_IntrinsicID)
 1044      MIB.
          add(
              I.getOperand(Index));
// Emits a single instruction `Opcode` producing ResVReg (of ResType) from the
// given source registers, inserted at I with I's debug location.
// NOTE(review): the operand-appending loop and the constrain/return tail are
// on lines elided from this chunk.
1051bool SPIRVInstructionSelector::selectOpWithSrcs(
    Register ResVReg,
 1054                                                std::vector<Register> Srcs,
 1055                                                unsigned Opcode)
    const {
 1056  auto MIB =
       BuildMI(*
                   I.getParent(),
               I,
               I.getDebugLoc(),
               TII.get(Opcode))
// Selects a unary conversion `Opcode` on I's single source operand. When the
// source is defined by a global value / OpVariable, pointer<->int conversions
// must be emitted as OpSpecConstantOp instead of a plain instruction.
1065bool SPIRVInstructionSelector::selectUnOp(
    Register ResVReg,
 1068                                          unsigned Opcode)
    const {
 1070  Register SrcReg =
       I.getOperand(1).getReg();
// Scan defs of the source to detect the global/OpVariable case.
 1073       MRI->def_instr_begin(SrcReg);
 1074       DefIt !=
       MRI->def_instr_end(); DefIt = std::next(DefIt)) {
 1075    if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
 1076        (*DefIt).getOpcode() == SPIRV::OpVariable) {
// Translate the MIR opcode into the SPIR-V spec-constant opcode literal.
 1082  uint32_t SpecOpcode = 0;
 1084  case SPIRV::OpConvertPtrToU:
 1085    SpecOpcode =
        static_cast<uint32_t
                    >(SPIRV::Opcode::ConvertPtrToU);
 1087  case SPIRV::OpConvertUToPtr:
 1088    SpecOpcode =
        static_cast<uint32_t
                    >(SPIRV::Opcode::ConvertUToPtr);
// Emit the conversion as OpSpecConstantOp so it is valid on constants.
 1092    return BuildMI(*
                        I.getParent(),
                    I,
                    I.getDebugLoc(),
 1093                   TII.get(SPIRV::OpSpecConstantOp))
// Ordinary case: one-source instruction via the shared helper.
 1101  return selectOpWithSrcs(ResVReg, ResType,
                              I, {
                                  I.getOperand(1).getReg()},
// Selects G_BITCAST as SPIR-V OpBitcast via the generic unary-op path.
// NOTE(review): an early-out (likely when source and result types already
// match) sits on lines elided from this chunk.
1105bool SPIRVInstructionSelector::selectBitcast(
    Register ResVReg,
 1107                                             MachineInstr &
                                                 I)
    const {
 1108  Register OpReg =
       I.getOperand(1).getReg();
 1112  return selectUnOp(ResVReg, ResType,
                        I, SPIRV::OpBitcast);
1120 if (
MemOp->isVolatile())
1121 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1122 if (
MemOp->isNonTemporal())
1123 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1124 if (
MemOp->getAlign().value())
1125 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1131 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1132 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1136 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1138 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1142 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1146 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1148 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1160 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1162 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1164 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1168bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1170 MachineInstr &
I)
const {
1177 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1178 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1180 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1182 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1184 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1188 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1189 return generateImageRead(ResVReg, ResType, NewHandleReg, IdxReg,
1190 I.getDebugLoc(),
I);
1194 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1198 if (!
I.getNumMemOperands()) {
1199 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1201 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1204 MachineIRBuilder MIRBuilder(
I);
1210bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1212 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1218 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1219 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1221 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1224 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1228 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1229 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1230 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1231 TII.get(SPIRV::OpImageWrite))
1237 if (sampledTypeIsSignedInteger(LLVMHandleType))
1240 return BMI.constrainAllUses(
TII,
TRI, RBI);
1245 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1248 if (!
I.getNumMemOperands()) {
1249 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1251 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1254 MachineIRBuilder MIRBuilder(
I);
// Selects llvm.stacksave as OpSaveMemoryINTEL. Requires the
// SPV_INTEL_variable_length_array extension; otherwise reports an error.
1260bool SPIRVInstructionSelector::selectStackSave(
    Register ResVReg,
 1262                                               MachineInstr &
                                                   I)
    const {
 1263  if (!STI.
            canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
 1265        "llvm.stacksave intrinsic: this instruction requires the following "
 1266        "SPIR-V extension: SPV_INTEL_variable_length_array",
 1269  return BuildMI(BB,
                     I,
                     I.getDebugLoc(),
                     TII.get(SPIRV::OpSaveMemoryINTEL))
// Selects llvm.stackrestore as OpRestoreMemoryINTEL. Requires the
// SPV_INTEL_variable_length_array extension; otherwise reports an error.
1275bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
                                                      I)
    const {
 1276  if (!STI.
            canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
 1278        "llvm.stackrestore intrinsic: this instruction requires the following "
 1279        "SPIR-V extension: SPV_INTEL_variable_length_array",
// The saved-stack operand must be a register.
 1281  if (!
       I.getOperand(0).isReg())
 1284  return BuildMI(BB,
                     I,
                     I.getDebugLoc(),
                     TII.get(SPIRV::OpRestoreMemoryINTEL))
 1285      .
      addUse(
          I.getOperand(0).getReg())
1289bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1290 MachineInstr &
I)
const {
1292 Register SrcReg =
I.getOperand(1).getReg();
1294 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1295 MachineIRBuilder MIRBuilder(
I);
1296 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1299 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1300 Type *ArrTy = ArrayType::get(ValTy, Num);
1302 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1305 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1312 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1317 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1320 .
addImm(SPIRV::StorageClass::UniformConstant)
1329 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1331 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1333 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1334 .
addUse(
I.getOperand(0).getReg())
1336 .
addUse(
I.getOperand(2).getReg());
1337 if (
I.getNumMemOperands()) {
1338 MachineIRBuilder MIRBuilder(
I);
// Selects a G_ATOMICRMW_* into the given SPIR-V atomic opcode. Builds i32
// constants for the memory scope and semantics from the MachineMemOperand.
// NegateOpcode != 0 requests negating the value first (used to lower
// fsub/sub-style RMWs onto an add-style atomic).
1347bool SPIRVInstructionSelector::selectAtomicRMW(
    Register ResVReg,
 1351                                               unsigned NegateOpcode)
    const {
 1354  const MachineMemOperand *MemOp = *
                                       I.memoperands_begin();
// Scope operand: an i32 constant derived from the memop's sync scope.
 1357  auto ScopeConstant = buildI32Constant(Scope,
                                            I);
 1358  Register ScopeReg = ScopeConstant.first;
 1359  Result &= ScopeConstant.second;
// Memory-semantics operand, likewise an i32 constant.
 1367  auto MemSemConstant = buildI32Constant(MemSem ,
                                             I);
 1368  Register MemSemReg = MemSemConstant.first;
 1369  Result &= MemSemConstant.second;
 1371  Register ValueReg =
       I.getOperand(2).getReg();
// Optionally negate the value into a temp register first.
 1372  if (NegateOpcode != 0) {
 1375    Result &= selectOpWithSrcs(TmpReg, ResType,
                                   I, {ValueReg}, NegateOpcode);
 1380      BuildMI(*
                  I.getParent(),
              I,
              I.getDebugLoc(),
              TII.get(NewOpcode))
// Selects G_UNMERGE_VALUES of a vector: one OpCompositeExtract per def, each
// pulling element i out of the vector source (the last operand of I).
1390bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
                                                       I)
    const {
// The source vector is the final operand.
 1391  unsigned ArgI =
       I.getNumOperands() - 1;
 1393      I.getOperand(ArgI).isReg() ?
                                     I.getOperand(ArgI).getReg() :
                                     Register(0);
// Only vector sources can be unmerged here.
 1396  if (!DefType || DefType->
                          getOpcode() != SPIRV::OpTypeVector)
 1398        "cannot select G_UNMERGE_VALUES with a non-vector argument");
 1404  for (
       unsigned i = 0; i <
                       I.getNumDefs(); ++i) {
 1405    Register ResVReg =
         I.getOperand(i).getReg();
 1409      ResType = ScalarType;
// Extract element i of the source vector into this def.
 1415        BuildMI(BB,
                    I,
                    I.getDebugLoc(),
                    TII.get(SPIRV::OpCompositeExtract))
 1419            .
            addImm(
                static_cast<int64_t
                            >(i));
// Selects G_FENCE as OpMemoryBarrier with i32-constant scope and memory-
// semantics operands derived from the fence's ordering/scope.
1425bool SPIRVInstructionSelector::selectFence(MachineInstr &
                                               I)
    const {
 1428  auto MemSemConstant = buildI32Constant(MemSem,
                                             I);
 1429  Register MemSemReg = MemSemConstant.first;
 1430  bool Result = MemSemConstant.second;
 1432  uint32_t
      Scope =
          static_cast<uint32_t
                      >(
 1434  auto ScopeConstant = buildI32Constant(Scope,
                                            I);
 1435  Register ScopeReg = ScopeConstant.first;
 1436  Result &= ScopeConstant.second;
 1439      BuildMI(BB,
              I,
              I.getDebugLoc(),
              TII.get(SPIRV::OpMemoryBarrier))
1445bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1448 unsigned Opcode)
const {
1449 Type *ResTy =
nullptr;
1453 "Not enough info to select the arithmetic with overflow instruction");
1456 "with overflow instruction");
1462 MachineIRBuilder MIRBuilder(
I);
1464 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1465 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1471 Register ZeroReg = buildZerosVal(ResType,
I);
1474 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1476 if (ResName.
size() > 0)
1481 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1484 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1485 MIB.
addUse(
I.getOperand(i).getReg());
1490 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1491 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1493 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1494 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1502 .
addDef(
I.getOperand(1).getReg())
// Selects G_ATOMIC_CMPXCHG as OpAtomicCompareExchange. Scope and the two
// memory-semantics operands (Equal = success ordering AO, Unequal = failure
// ordering FO) are built as i32 constants from the MachineMemOperand; for the
// intrinsic form they are taken directly from operands 5-7 instead.
1509bool SPIRVInstructionSelector::selectAtomicCmpXchg(
    Register ResVReg,
 1511                                                    MachineInstr &
                                                          I)
    const {
 1519    const MachineMemOperand *MemOp = *
                                         I.memoperands_begin();
// Scope operand.
 1522    auto ScopeConstant = buildI32Constant(Scope,
                                              I);
 1523    ScopeReg = ScopeConstant.first;
 1524    Result &= ScopeConstant.second;
 1526    unsigned ScSem =
        static_cast<uint32_t
                    >(
// "Equal" semantics: success ordering AO combined with the storage-class
// semantics bits.
 1529    unsigned MemSemEq =
        static_cast<uint32_t
                    >(
            getMemSemantics(AO)) | ScSem;
 1530    auto MemSemEqConstant = buildI32Constant(MemSemEq,
                                                 I);
 1531    MemSemEqReg = MemSemEqConstant.first;
 1532    Result &= MemSemEqConstant.second;
// "Unequal" semantics: failure ordering FO combined with the same bits.
 1534    unsigned MemSemNeq =
        static_cast<uint32_t
                    >(
            getMemSemantics(FO)) | ScSem;
// Reuse the Equal constant when both orderings map to the same semantics.
 1535    if (MemSemEq == MemSemNeq)
 1536      MemSemNeqReg = MemSemEqReg;
// FIX: build the Unequal constant from MemSemNeq (the failure ordering),
// not MemSemEq -- otherwise the branch above is pointless and the emitted
// OpAtomicCompareExchange carries the wrong Unequal semantics operand.
 1538      auto MemSemNeqConstant = buildI32Constant(MemSemNeq,
                                                    I);
 1539      MemSemNeqReg = MemSemNeqConstant.first;
 1540      Result &= MemSemNeqConstant.second;
// Intrinsic form: scope/semantics registers are explicit operands.
 1543    ScopeReg =
        I.getOperand(5).getReg();
 1544    MemSemEqReg =
        I.getOperand(6).getReg();
 1545    MemSemNeqReg =
        I.getOperand(7).getReg();
 1549  Register Val =
       I.getOperand(4).getReg();
// Emit the compare-exchange itself ...
 1554      BuildMI(*
                  I.getParent(),
              I,
              DL,
              TII.get(SPIRV::OpAtomicCompareExchange))
// ... and pack the {old value, success flag} pair into the result struct.
 1581      BuildMI(*
                  I.getParent(),
              I,
              DL,
              TII.get(SPIRV::OpCompositeInsert))
1592 case SPIRV::StorageClass::DeviceOnlyINTEL:
1593 case SPIRV::StorageClass::HostOnlyINTEL:
1602 bool IsGRef =
false;
1603 bool IsAllowedRefs =
1604 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1605 unsigned Opcode = It.getOpcode();
1606 if (Opcode == SPIRV::OpConstantComposite ||
1607 Opcode == SPIRV::OpVariable ||
1608 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1609 return IsGRef = true;
1610 return Opcode == SPIRV::OpName;
1612 return IsAllowedRefs && IsGRef;
// Returns the type-register for a pointer-to-i8 in storage class SC, built
// from the current function's LLVM context.
1615Register SPIRVInstructionSelector::getUcharPtrTypeReg(
 1616    MachineInstr &
               I, SPIRV::StorageClass::StorageClass SC)
    const {
 1618          Type::getInt8Ty(
                  I.getMF()->getFunction().getContext()),
          I, SC));
// Builds an OpSpecConstantOp with result Dest performing the SPIR-V `Opcode`
// literal; used where a cast must remain valid in constant context.
1622SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
                                                  I,
                                              Register Dest,
 1624                                              uint32_t Opcode)
    const {
 1625  return BuildMI(*
                      I.getParent(),
                  I,
                  I.getDebugLoc(),
 1626                 TII.get(SPIRV::OpSpecConstantOp))
// Materializes SrcPtr cast into the Generic storage class as a constant:
// creates a fresh pointer vreg and emits OpSpecConstantOp PtrCastToGeneric.
1634SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
                                                   I,
                                               Register SrcPtr,
 1638  Register Tmp =
       MRI->createVirtualRegister(&SPIRV::pIDRegClass);
 1640                                          SPIRV::StorageClass::Generic),
 1642  MachineFunction *MF =
       I.getParent()->getParent();
 1644  MachineInstrBuilder MIB = buildSpecConstantOp(
 1646      static_cast<uint32_t
                  >(SPIRV::Opcode::PtrCastToGeneric));
1656bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1658 MachineInstr &
I)
const {
1662 Register SrcPtr =
I.getOperand(1).getReg();
1666 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1667 ResType->
getOpcode() != SPIRV::OpTypePointer)
1668 return BuildCOPY(ResVReg, SrcPtr,
I);
1678 unsigned SpecOpcode =
1680 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1683 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1690 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1691 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1692 .constrainAllUses(
TII,
TRI, RBI);
1694 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1696 buildSpecConstantOp(
1698 getUcharPtrTypeReg(
I, DstSC),
1699 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1700 .constrainAllUses(
TII,
TRI, RBI);
1706 return BuildCOPY(ResVReg, SrcPtr,
I);
1708 if ((SrcSC == SPIRV::StorageClass::Function &&
1709 DstSC == SPIRV::StorageClass::Private) ||
1710 (DstSC == SPIRV::StorageClass::Function &&
1711 SrcSC == SPIRV::StorageClass::Private))
1712 return BuildCOPY(ResVReg, SrcPtr,
I);
1716 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1719 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1740 return selectUnOp(ResVReg, ResType,
I,
1741 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1743 return selectUnOp(ResVReg, ResType,
I,
1744 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1746 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1748 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1758 return SPIRV::OpFOrdEqual;
1760 return SPIRV::OpFOrdGreaterThanEqual;
1762 return SPIRV::OpFOrdGreaterThan;
1764 return SPIRV::OpFOrdLessThanEqual;
1766 return SPIRV::OpFOrdLessThan;
1768 return SPIRV::OpFOrdNotEqual;
1770 return SPIRV::OpOrdered;
1772 return SPIRV::OpFUnordEqual;
1774 return SPIRV::OpFUnordGreaterThanEqual;
1776 return SPIRV::OpFUnordGreaterThan;
1778 return SPIRV::OpFUnordLessThanEqual;
1780 return SPIRV::OpFUnordLessThan;
1782 return SPIRV::OpFUnordNotEqual;
1784 return SPIRV::OpUnordered;
1794 return SPIRV::OpIEqual;
1796 return SPIRV::OpINotEqual;
1798 return SPIRV::OpSGreaterThanEqual;
1800 return SPIRV::OpSGreaterThan;
1802 return SPIRV::OpSLessThanEqual;
1804 return SPIRV::OpSLessThan;
1806 return SPIRV::OpUGreaterThanEqual;
1808 return SPIRV::OpUGreaterThan;
1810 return SPIRV::OpULessThanEqual;
1812 return SPIRV::OpULessThan;
1821 return SPIRV::OpPtrEqual;
1823 return SPIRV::OpPtrNotEqual;
1834 return SPIRV::OpLogicalEqual;
1836 return SPIRV::OpLogicalNotEqual;
1870bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
1873 unsigned OpAnyOrAll)
const {
1874 assert(
I.getNumOperands() == 3);
1875 assert(
I.getOperand(2).isReg());
1877 Register InputRegister =
I.getOperand(2).getReg();
1884 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
1885 if (IsBoolTy && !IsVectorTy) {
1886 assert(ResVReg ==
I.getOperand(0).getReg());
1887 return BuildCOPY(ResVReg, InputRegister,
I);
1891 unsigned SpirvNotEqualId =
1892 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1899 IsBoolTy ? InputRegister
1908 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
1928bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
1930 MachineInstr &
I)
const {
1931 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
1934bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
1936 MachineInstr &
I)
const {
1937 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
1941bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
1943 MachineInstr &
I)
const {
1944 assert(
I.getNumOperands() == 4);
1945 assert(
I.getOperand(2).isReg());
1946 assert(
I.getOperand(3).isReg());
1953 "dot product requires a vector of at least 2 components");
1961 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
1964 .
addUse(
I.getOperand(2).getReg())
1965 .
addUse(
I.getOperand(3).getReg())
1969bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
1973 assert(
I.getNumOperands() == 4);
1974 assert(
I.getOperand(2).isReg());
1975 assert(
I.getOperand(3).isReg());
1978 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1982 .
addUse(
I.getOperand(2).getReg())
1983 .
addUse(
I.getOperand(3).getReg())
1989bool SPIRVInstructionSelector::selectIntegerDotExpansion(
1991 assert(
I.getNumOperands() == 4);
1992 assert(
I.getOperand(2).isReg());
1993 assert(
I.getOperand(3).isReg());
1997 Register Vec0 =
I.getOperand(2).getReg();
1998 Register Vec1 =
I.getOperand(3).getReg();
2011 "dot product requires a vector of at least 2 components");
2025 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2048bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2050 MachineInstr &
I)
const {
2052 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2055 .
addUse(
I.getOperand(2).getReg())
2059template <
bool Signed>
2060bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2062 MachineInstr &
I)
const {
2063 assert(
I.getNumOperands() == 5);
2064 assert(
I.getOperand(2).isReg());
2065 assert(
I.getOperand(3).isReg());
2066 assert(
I.getOperand(4).isReg());
2069 Register Acc =
I.getOperand(2).getReg();
2073 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2093template <
bool Signed>
2094bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2096 assert(
I.getNumOperands() == 5);
2097 assert(
I.getOperand(2).isReg());
2098 assert(
I.getOperand(3).isReg());
2099 assert(
I.getOperand(4).isReg());
2104 Register Acc =
I.getOperand(2).getReg();
2110 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2114 for (
unsigned i = 0; i < 4; i++) {
2116 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2127 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2147 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2159 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2175bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2177 MachineInstr &
I)
const {
2178 assert(
I.getNumOperands() == 3);
2179 assert(
I.getOperand(2).isReg());
2181 Register VZero = buildZerosValF(ResType,
I);
2182 Register VOne = buildOnesValF(ResType,
I);
2184 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2187 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2189 .
addUse(
I.getOperand(2).getReg())
2195bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2197 MachineInstr &
I)
const {
2198 assert(
I.getNumOperands() == 3);
2199 assert(
I.getOperand(2).isReg());
2201 Register InputRegister =
I.getOperand(2).getReg();
2203 auto &
DL =
I.getDebugLoc();
2213 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2215 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2217 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2224 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2229 if (NeedsConversion) {
2230 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2241bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2244 unsigned Opcode)
const {
2248 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2254 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2255 BMI.addUse(
I.getOperand(J).getReg());
2261bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2267 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2268 SPIRV::OpGroupNonUniformBallot);
2272 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2277 .
addImm(SPIRV::GroupOperation::Reduce)
2284bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2287 bool IsUnsigned)
const {
2288 assert(
I.getNumOperands() == 3);
2289 assert(
I.getOperand(2).isReg());
2291 Register InputRegister =
I.getOperand(2).getReg();
2300 auto IntegerOpcodeType =
2301 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2302 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2303 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2308 .
addImm(SPIRV::GroupOperation::Reduce)
2309 .
addUse(
I.getOperand(2).getReg())
2313bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2315 MachineInstr &
I)
const {
2316 assert(
I.getNumOperands() == 3);
2317 assert(
I.getOperand(2).isReg());
2319 Register InputRegister =
I.getOperand(2).getReg();
2329 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2330 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2335 .
addImm(SPIRV::GroupOperation::Reduce)
2336 .
addUse(
I.getOperand(2).getReg());
2339bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2341 MachineInstr &
I)
const {
2343 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2346 .
addUse(
I.getOperand(1).getReg())
2350bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2352 MachineInstr &
I)
const {
2358 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2360 Register OpReg =
I.getOperand(1).getReg();
2361 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2362 if (
Def->getOpcode() == TargetOpcode::COPY)
2363 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2365 switch (
Def->getOpcode()) {
2366 case SPIRV::ASSIGN_TYPE:
2367 if (MachineInstr *AssignToDef =
2368 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2369 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2370 Reg =
Def->getOperand(2).getReg();
2373 case SPIRV::OpUndef:
2374 Reg =
Def->getOperand(1).getReg();
2377 unsigned DestOpCode;
2379 DestOpCode = SPIRV::OpConstantNull;
2381 DestOpCode = TargetOpcode::COPY;
2384 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2385 .
addDef(
I.getOperand(0).getReg())
2392bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2394 MachineInstr &
I)
const {
2396 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2398 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2402 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2407 for (
unsigned i =
I.getNumExplicitDefs();
2408 i <
I.getNumExplicitOperands() && IsConst; ++i)
2412 if (!IsConst &&
N < 2)
2414 "There must be at least two constituent operands in a vector");
2417 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2418 TII.get(IsConst ? SPIRV::OpConstantComposite
2419 : SPIRV::OpCompositeConstruct))
2422 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2423 MIB.
addUse(
I.getOperand(i).getReg());
2427bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2429 MachineInstr &
I)
const {
2431 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2433 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2439 if (!
I.getOperand(
OpIdx).isReg())
2446 if (!IsConst &&
N < 2)
2448 "There must be at least two constituent operands in a vector");
2451 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2452 TII.get(IsConst ? SPIRV::OpConstantComposite
2453 : SPIRV::OpCompositeConstruct))
2456 for (
unsigned i = 0; i <
N; ++i)
2461bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2463 MachineInstr &
I)
const {
2468 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2470 Opcode = SPIRV::OpDemoteToHelperInvocation;
2472 Opcode = SPIRV::OpKill;
2474 if (MachineInstr *NextI =
I.getNextNode()) {
2476 NextI->removeFromParent();
2481 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2485bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2488 MachineInstr &
I)
const {
2489 Register Cmp0 =
I.getOperand(2).getReg();
2490 Register Cmp1 =
I.getOperand(3).getReg();
2493 "CMP operands should have the same type");
2494 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2502bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2504 MachineInstr &
I)
const {
2505 auto Pred =
I.getOperand(1).getPredicate();
2508 Register CmpOperand =
I.getOperand(2).getReg();
2515 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2518std::pair<Register, bool>
2519SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2525 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2533 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2536 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2539 .
addImm(APInt(32, Val).getZExtValue());
2541 GR.
add(ConstInt,
MI);
2546bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2548 MachineInstr &
I)
const {
2550 return selectCmp(ResVReg, ResType, CmpOp,
I);
2554 MachineInstr &
I)
const {
2557 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2563 MachineInstr &
I)
const {
2567 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2573 MachineInstr &
I)
const {
2577 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2584 MachineInstr &
I)
const {
2588 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2593bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2595 MachineInstr &
I)
const {
2596 Register SelectFirstArg =
I.getOperand(2).getReg();
2597 Register SelectSecondArg =
I.getOperand(3).getReg();
2606 SPIRV::OpTypeVector;
2613 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2614 }
else if (IsPtrTy) {
2615 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2617 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2621 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2622 }
else if (IsPtrTy) {
2623 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2625 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2628 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2631 .
addUse(
I.getOperand(1).getReg())
2637bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2640 bool IsSigned)
const {
2642 Register ZeroReg = buildZerosVal(ResType,
I);
2643 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2647 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2648 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2651 .
addUse(
I.getOperand(1).getReg())
2657bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2659 MachineInstr &
I,
bool IsSigned,
2660 unsigned Opcode)
const {
2661 Register SrcReg =
I.getOperand(1).getReg();
2667 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2672 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2674 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2677bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2679 MachineInstr &
I,
bool IsSigned)
const {
2680 Register SrcReg =
I.getOperand(1).getReg();
2682 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2685 if (SrcType == ResType)
2686 return BuildCOPY(ResVReg, SrcReg,
I);
2688 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2689 return selectUnOp(ResVReg, ResType,
I, Opcode);
2692bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2695 bool IsSigned)
const {
2696 MachineIRBuilder MIRBuilder(
I);
2697 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2712 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2713 : SPIRV::OpULessThanEqual))
2716 .
addUse(
I.getOperand(1).getReg())
2717 .
addUse(
I.getOperand(2).getReg())
2723 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2726 .
addUse(
I.getOperand(1).getReg())
2727 .
addUse(
I.getOperand(2).getReg())
2735 unsigned SelectOpcode =
2736 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2741 .
addUse(buildOnesVal(
true, ResType,
I))
2742 .
addUse(buildZerosVal(ResType,
I))
2749 .
addUse(buildOnesVal(
false, ResType,
I))
2753bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2760 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2761 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2763 Register One = buildOnesVal(
false, IntTy,
I);
2779bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2781 MachineInstr &
I)
const {
2782 Register IntReg =
I.getOperand(1).getReg();
2785 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2786 if (ArgType == ResType)
2787 return BuildCOPY(ResVReg, IntReg,
I);
2789 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2790 return selectUnOp(ResVReg, ResType,
I, Opcode);
2793bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2795 MachineInstr &
I)
const {
2796 unsigned Opcode =
I.getOpcode();
2797 unsigned TpOpcode = ResType->
getOpcode();
2799 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2800 assert(Opcode == TargetOpcode::G_CONSTANT &&
2801 I.getOperand(1).getCImm()->isZero());
2802 MachineBasicBlock &DepMBB =
I.getMF()->front();
2805 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2812 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
2815bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2817 MachineInstr &
I)
const {
2818 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2824bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2826 MachineInstr &
I)
const {
2828 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2832 .
addUse(
I.getOperand(3).getReg())
2834 .
addUse(
I.getOperand(2).getReg());
2835 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
2840bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2842 MachineInstr &
I)
const {
2844 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2847 .
addUse(
I.getOperand(2).getReg());
2848 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
2853bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
2855 MachineInstr &
I)
const {
2857 return selectInsertVal(ResVReg, ResType,
I);
2859 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
2862 .
addUse(
I.getOperand(2).getReg())
2863 .
addUse(
I.getOperand(3).getReg())
2864 .
addUse(
I.getOperand(4).getReg())
2868bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
2870 MachineInstr &
I)
const {
2872 return selectExtractVal(ResVReg, ResType,
I);
2874 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
2877 .
addUse(
I.getOperand(2).getReg())
2878 .
addUse(
I.getOperand(3).getReg())
2882bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
2884 MachineInstr &
I)
const {
2885 const bool IsGEPInBounds =
I.getOperand(2).getImm();
2891 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2892 : SPIRV::OpAccessChain)
2893 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
2894 :
SPIRV::OpPtrAccessChain);
2896 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2900 .
addUse(
I.getOperand(3).getReg());
2902 const unsigned StartingIndex =
2903 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2906 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
2907 Res.addUse(
I.getOperand(i).getReg());
2908 return Res.constrainAllUses(
TII,
TRI, RBI);
2912bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2915 unsigned Lim =
I.getNumExplicitOperands();
2916 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2917 Register OpReg =
I.getOperand(i).getReg();
2918 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
2920 SmallPtrSet<SPIRVType *, 4> Visited;
2921 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
2922 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2929 MachineFunction *MF =
I.getMF();
2941 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2942 TII.get(SPIRV::OpSpecConstantOp))
2945 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
2947 GR.
add(OpDefine, MIB);
2955bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
2957 MachineInstr &
I)
const {
2961 case Intrinsic::spv_load:
2962 return selectLoad(ResVReg, ResType,
I);
2963 case Intrinsic::spv_store:
2964 return selectStore(
I);
2965 case Intrinsic::spv_extractv:
2966 return selectExtractVal(ResVReg, ResType,
I);
2967 case Intrinsic::spv_insertv:
2968 return selectInsertVal(ResVReg, ResType,
I);
2969 case Intrinsic::spv_extractelt:
2970 return selectExtractElt(ResVReg, ResType,
I);
2971 case Intrinsic::spv_insertelt:
2972 return selectInsertElt(ResVReg, ResType,
I);
2973 case Intrinsic::spv_gep:
2974 return selectGEP(ResVReg, ResType,
I);
2975 case Intrinsic::spv_unref_global:
2976 case Intrinsic::spv_init_global: {
2977 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
2978 MachineInstr *Init =
I.getNumExplicitOperands() > 2
2979 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
2982 Register GVarVReg =
MI->getOperand(0).getReg();
2983 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
2987 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
2989 MI->removeFromParent();
2993 case Intrinsic::spv_undef: {
2994 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2999 case Intrinsic::spv_const_composite: {
3001 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3007 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3009 MachineIRBuilder MIR(
I);
3011 MIR, SPIRV::OpConstantComposite, 3,
3012 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3014 for (
auto *Instr : Instructions) {
3015 Instr->setDebugLoc(
I.getDebugLoc());
3021 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3027 case Intrinsic::spv_assign_name: {
3028 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3029 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3030 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3031 i <
I.getNumExplicitOperands(); ++i) {
3032 MIB.
addImm(
I.getOperand(i).getImm());
3036 case Intrinsic::spv_switch: {
3037 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3038 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3039 if (
I.getOperand(i).isReg())
3040 MIB.
addReg(
I.getOperand(i).getReg());
3041 else if (
I.getOperand(i).isCImm())
3042 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3043 else if (
I.getOperand(i).isMBB())
3044 MIB.
addMBB(
I.getOperand(i).getMBB());
3050 case Intrinsic::spv_loop_merge: {
3051 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3052 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3053 if (
I.getOperand(i).isMBB())
3054 MIB.
addMBB(
I.getOperand(i).getMBB());
3060 case Intrinsic::spv_selection_merge: {
3062 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3063 assert(
I.getOperand(1).isMBB() &&
3064 "operand 1 to spv_selection_merge must be a basic block");
3065 MIB.
addMBB(
I.getOperand(1).getMBB());
3066 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3069 case Intrinsic::spv_cmpxchg:
3070 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3071 case Intrinsic::spv_unreachable:
3072 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3074 case Intrinsic::spv_alloca:
3075 return selectFrameIndex(ResVReg, ResType,
I);
3076 case Intrinsic::spv_alloca_array:
3077 return selectAllocaArray(ResVReg, ResType,
I);
3078 case Intrinsic::spv_assume:
3080 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3081 .
addUse(
I.getOperand(1).getReg())
3084 case Intrinsic::spv_expect:
3086 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3089 .
addUse(
I.getOperand(2).getReg())
3090 .
addUse(
I.getOperand(3).getReg())
3093 case Intrinsic::arithmetic_fence:
3096 TII.get(SPIRV::OpArithmeticFenceEXT))
3099 .
addUse(
I.getOperand(2).getReg())
3102 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3104 case Intrinsic::spv_thread_id:
3110 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3112 case Intrinsic::spv_thread_id_in_group:
3118 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3120 case Intrinsic::spv_group_id:
3126 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3128 case Intrinsic::spv_flattened_thread_id_in_group:
3135 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3137 case Intrinsic::spv_workgroup_size:
3138 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3140 case Intrinsic::spv_global_size:
3141 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3143 case Intrinsic::spv_global_offset:
3144 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3146 case Intrinsic::spv_num_workgroups:
3147 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3149 case Intrinsic::spv_subgroup_size:
3150 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3152 case Intrinsic::spv_num_subgroups:
3153 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3155 case Intrinsic::spv_subgroup_id:
3156 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3157 case Intrinsic::spv_subgroup_local_invocation_id:
3158 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3159 ResVReg, ResType,
I);
3160 case Intrinsic::spv_subgroup_max_size:
3161 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3163 case Intrinsic::spv_fdot:
3164 return selectFloatDot(ResVReg, ResType,
I);
3165 case Intrinsic::spv_udot:
3166 case Intrinsic::spv_sdot:
3167 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3169 return selectIntegerDot(ResVReg, ResType,
I,
3170 IID == Intrinsic::spv_sdot);
3171 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3172 case Intrinsic::spv_dot4add_i8packed:
3173 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3175 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3176 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3177 case Intrinsic::spv_dot4add_u8packed:
3178 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3180 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3181 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3182 case Intrinsic::spv_all:
3183 return selectAll(ResVReg, ResType,
I);
3184 case Intrinsic::spv_any:
3185 return selectAny(ResVReg, ResType,
I);
3186 case Intrinsic::spv_cross:
3187 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3188 case Intrinsic::spv_distance:
3189 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3190 case Intrinsic::spv_lerp:
3191 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3192 case Intrinsic::spv_length:
3193 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3194 case Intrinsic::spv_degrees:
3195 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3196 case Intrinsic::spv_faceforward:
3197 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3198 case Intrinsic::spv_frac:
3199 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3200 case Intrinsic::spv_isinf:
3201 return selectOpIsInf(ResVReg, ResType,
I);
3202 case Intrinsic::spv_normalize:
3203 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3204 case Intrinsic::spv_refract:
3205 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3206 case Intrinsic::spv_reflect:
3207 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3208 case Intrinsic::spv_rsqrt:
3209 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3210 case Intrinsic::spv_sign:
3211 return selectSign(ResVReg, ResType,
I);
3212 case Intrinsic::spv_smoothstep:
3213 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3214 case Intrinsic::spv_firstbituhigh:
3215 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3216 case Intrinsic::spv_firstbitshigh:
3217 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3218 case Intrinsic::spv_firstbitlow:
3219 return selectFirstBitLow(ResVReg, ResType,
I);
3220 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3222 auto MemSemConstant =
3223 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3224 Register MemSemReg = MemSemConstant.first;
3225 Result &= MemSemConstant.second;
3226 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3227 Register ScopeReg = ScopeConstant.first;
3228 Result &= ScopeConstant.second;
3231 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3237 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3238 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3239 SPIRV::StorageClass::StorageClass ResSC =
3243 "Generic storage class");
3245 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3252 case Intrinsic::spv_lifetime_start:
3253 case Intrinsic::spv_lifetime_end: {
3254 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3255 : SPIRV::OpLifetimeStop;
3256 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3257 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3265 case Intrinsic::spv_saturate:
3266 return selectSaturate(ResVReg, ResType,
I);
3267 case Intrinsic::spv_nclamp:
3268 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3269 case Intrinsic::spv_uclamp:
3270 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3271 case Intrinsic::spv_sclamp:
3272 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3273 case Intrinsic::spv_wave_active_countbits:
3274 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3275 case Intrinsic::spv_wave_all:
3276 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3277 case Intrinsic::spv_wave_any:
3278 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3279 case Intrinsic::spv_wave_is_first_lane:
3280 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3281 case Intrinsic::spv_wave_reduce_umax:
3282 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3283 case Intrinsic::spv_wave_reduce_max:
3284 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3285 case Intrinsic::spv_wave_reduce_sum:
3286 return selectWaveReduceSum(ResVReg, ResType,
I);
3287 case Intrinsic::spv_wave_readlane:
3288 return selectWaveOpInst(ResVReg, ResType,
I,
3289 SPIRV::OpGroupNonUniformShuffle);
3290 case Intrinsic::spv_step:
3291 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3292 case Intrinsic::spv_radians:
3293 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3297 case Intrinsic::instrprof_increment:
3298 case Intrinsic::instrprof_increment_step:
3299 case Intrinsic::instrprof_value_profile:
3302 case Intrinsic::spv_value_md:
3304 case Intrinsic::spv_resource_handlefrombinding: {
3305 return selectHandleFromBinding(ResVReg, ResType,
I);
3307 case Intrinsic::spv_resource_store_typedbuffer: {
3308 return selectImageWriteIntrinsic(
I);
3310 case Intrinsic::spv_resource_load_typedbuffer: {
3311 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3313 case Intrinsic::spv_resource_getpointer: {
3314 return selectResourceGetPointer(ResVReg, ResType,
I);
3316 case Intrinsic::spv_discard: {
3317 return selectDiscard(ResVReg, ResType,
I);
3319 case Intrinsic::modf: {
3320 return selectModf(ResVReg, ResType,
I);
3323 std::string DiagMsg;
3324 raw_string_ostream OS(DiagMsg);
3326 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3333bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3335 MachineInstr &
I)
const {
3338 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3345bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3354 Register ImageReg =
I.getOperand(2).getReg();
3356 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3362 Register IdxReg =
I.getOperand(3).getReg();
3364 MachineInstr &Pos =
I;
3366 return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
3369bool SPIRVInstructionSelector::generateImageRead(
Register &ResVReg,
3373 MachineInstr &Pos)
const {
3376 "ImageReg is not an image type.");
3377 bool IsSignedInteger =
3381 if (ResultSize == 4) {
3388 if (IsSignedInteger)
3393 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3400 if (IsSignedInteger)
3406 if (ResultSize == 1) {
3408 TII.get(SPIRV::OpCompositeExtract))
3415 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3418bool SPIRVInstructionSelector::selectResourceGetPointer(
3420 Register ResourcePtr =
I.getOperand(2).getReg();
3422 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3431 MachineIRBuilder MIRBuilder(
I);
3433 Register IndexReg =
I.getOperand(3).getReg();
3436 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3437 TII.get(SPIRV::OpAccessChain))
3446bool SPIRVInstructionSelector::extractSubvector(
3448 MachineInstr &InsertionPoint)
const {
3450 [[maybe_unused]] uint64_t InputSize =
3453 assert(InputSize > 1 &&
"The input must be a vector.");
3454 assert(ResultSize > 1 &&
"The result must be a vector.");
3455 assert(ResultSize < InputSize &&
3456 "Cannot extract more element than there are in the input.");
3459 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3460 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3461 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3464 TII.get(SPIRV::OpCompositeExtract))
3475 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3477 TII.get(SPIRV::OpCompositeConstruct))
3481 for (
Register ComponentReg : ComponentRegisters)
3482 MIB.
addUse(ComponentReg);
3486bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3487 MachineInstr &
I)
const {
3494 Register ImageReg =
I.getOperand(1).getReg();
3496 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3502 Register CoordinateReg =
I.getOperand(2).getReg();
3503 Register DataReg =
I.getOperand(3).getReg();
3506 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3507 TII.get(SPIRV::OpImageWrite))
3514Register SPIRVInstructionSelector::buildPointerToResource(
3515 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3516 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3517 bool IsNonUniform, StringRef Name, MachineIRBuilder MIRBuilder)
const {
3519 if (ArraySize == 1) {
3523 "SpirvResType did not have an explicit layout.");
3528 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3532 VarPointerType, Set,
Binding, Name, MIRBuilder);
3541 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3542 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3554bool SPIRVInstructionSelector::selectFirstBitSet16(
3556 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3558 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3562 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3565bool SPIRVInstructionSelector::selectFirstBitSet32(
3567 Register SrcReg,
unsigned BitSetOpcode)
const {
3568 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3571 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3577bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3579 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3586 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3588 MachineIRBuilder MIRBuilder(
I);
3596 std::vector<Register> PartialRegs;
3599 unsigned CurrentComponent = 0;
3600 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3606 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3607 TII.get(SPIRV::OpVectorShuffle))
3612 .
addImm(CurrentComponent)
3613 .
addImm(CurrentComponent + 1);
3621 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3622 BitSetOpcode, SwapPrimarySide))
3625 PartialRegs.push_back(SubVecBitSetReg);
3629 if (CurrentComponent != ComponentCount) {
3635 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
3636 SPIRV::OpVectorExtractDynamic))
3642 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
3643 BitSetOpcode, SwapPrimarySide))
3646 PartialRegs.push_back(FinalElemBitSetReg);
3651 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
3652 SPIRV::OpCompositeConstruct);
3655bool SPIRVInstructionSelector::selectFirstBitSet64(
3657 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3670 if (ComponentCount > 2) {
3671 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
3672 BitSetOpcode, SwapPrimarySide);
3676 MachineIRBuilder MIRBuilder(
I);
3678 BaseType, 2 * ComponentCount, MIRBuilder,
false);
3682 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
3688 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
3695 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
3698 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
3699 SPIRV::OpVectorExtractDynamic))
3701 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
3702 SPIRV::OpVectorExtractDynamic))
3706 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3707 TII.get(SPIRV::OpVectorShuffle))
3715 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
3722 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3723 TII.get(SPIRV::OpVectorShuffle))
3731 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
3752 SelectOp = SPIRV::OpSelectSISCond;
3753 AddOp = SPIRV::OpIAddS;
3761 SelectOp = SPIRV::OpSelectVIVCond;
3762 AddOp = SPIRV::OpIAddV;
3772 if (SwapPrimarySide) {
3773 PrimaryReg = LowReg;
3774 SecondaryReg = HighReg;
3775 PrimaryShiftReg = Reg0;
3776 SecondaryShiftReg = Reg32;
3781 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
3787 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
3793 if (!selectOpWithSrcs(ValReg, ResType,
I,
3794 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
3797 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
3800bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
3803 bool IsSigned)
const {
3805 Register OpReg =
I.getOperand(2).getReg();
3808 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3809 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
3813 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3815 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3817 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3821 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
3825bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
3827 MachineInstr &
I)
const {
3829 Register OpReg =
I.getOperand(2).getReg();
3834 unsigned ExtendOpcode = SPIRV::OpUConvert;
3835 unsigned BitSetOpcode = GL::FindILsb;
3839 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3841 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3843 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3850bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
3852 MachineInstr &
I)
const {
3856 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
3857 TII.get(SPIRV::OpVariableLengthArrayINTEL))
3860 .
addUse(
I.getOperand(2).getReg())
3863 unsigned Alignment =
I.getOperand(3).getImm();
3869bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
3871 MachineInstr &
I)
const {
3875 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
3876 TII.get(SPIRV::OpVariable))
3879 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
3882 unsigned Alignment =
I.getOperand(2).getImm();
3889bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
3894 const MachineInstr *PrevI =
I.getPrevNode();
3896 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
3897 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3900 .
addMBB(
I.getOperand(0).getMBB())
3904 .
addMBB(
I.getOperand(0).getMBB())
3908bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
3919 const MachineInstr *NextI =
I.getNextNode();
3921 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
3927 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
3928 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3929 .
addUse(
I.getOperand(0).getReg())
3930 .
addMBB(
I.getOperand(1).getMBB())
3935bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
3937 MachineInstr &
I)
const {
3938 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
3941 const unsigned NumOps =
I.getNumOperands();
3942 for (
unsigned i = 1; i <
NumOps; i += 2) {
3943 MIB.
addUse(
I.getOperand(i + 0).getReg());
3944 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
3952bool SPIRVInstructionSelector::selectGlobalValue(
3953 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
3955 MachineIRBuilder MIRBuilder(
I);
3956 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
3959 std::string GlobalIdent;
3961 unsigned &
ID = UnnamedGlobalIDs[GV];
3963 ID = UnnamedGlobalIDs.size();
3964 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
3991 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
3998 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4001 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4002 MachineInstrBuilder MIB1 =
4003 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4006 MachineInstrBuilder MIB2 =
4008 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4012 GR.
add(ConstVal, MIB2);
4018 MachineInstrBuilder MIB3 =
4019 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4022 GR.
add(ConstVal, MIB3);
4025 assert(NewReg != ResVReg);
4026 return BuildCOPY(ResVReg, NewReg,
I);
4038 SPIRV::LinkageType::LinkageType LnkType =
4040 ? SPIRV::LinkageType::Import
4043 ? SPIRV::LinkageType::LinkOnceODR
4044 : SPIRV::LinkageType::Export);
4052 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
4056bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4058 MachineInstr &
I)
const {
4060 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4068 MachineIRBuilder MIRBuilder(
I);
4074 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4077 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4079 .
add(
I.getOperand(1))
4084 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4087 ResType->
getOpcode() == SPIRV::OpTypeVector
4094 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4095 ? SPIRV::OpVectorTimesScalar
4105bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4107 MachineInstr &
I)
const {
4123 MachineIRBuilder MIRBuilder(
I);
4126 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4137 MachineBasicBlock &EntryBB =
I.getMF()->front();
4141 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4144 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4148 assert(
I.getNumOperands() == 4 &&
4149 "Expected 4 operands for modf instruction");
4153 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4156 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4159 .
add(
I.getOperand(3))
4163 Register IntegralPartReg =
I.getOperand(1).getReg();
4164 if (IntegralPartReg.
isValid()) {
4166 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4175 assert(
false &&
"GLSL::Modf is deprecated.");
4186bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4187 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4188 const SPIRVType *ResType, MachineInstr &
I)
const {
4189 MachineIRBuilder MIRBuilder(
I);
4193 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4205 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4206 SPIRV::LinkageType::Import, MIRBuilder,
false);
4209 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4210 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4216 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4223 assert(
I.getOperand(2).isReg());
4224 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4228 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4238bool SPIRVInstructionSelector::loadBuiltinInputID(
4239 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4240 const SPIRVType *ResType, MachineInstr &
I)
const {
4241 MachineIRBuilder MIRBuilder(
I);
4243 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4258 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4259 SPIRV::LinkageType::Import, MIRBuilder,
false);
4262 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4271 MachineInstr &
I)
const {
4272 MachineIRBuilder MIRBuilder(
I);
4273 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4277 if (VectorSize == 4)
4285bool SPIRVInstructionSelector::loadHandleBeforePosition(
4287 MachineInstr &Pos)
const {
4290 Intrinsic::spv_resource_handlefrombinding);
4297 bool IsNonUniform =
false;
4301 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4302 MachineIRBuilder MIRBuilder(HandleDef);
4304 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4306 if (IsStructuredBuffer) {
4312 buildPointerToResource(VarType, SC, Set,
Binding, ArraySize, IndexReg,
4313 IsNonUniform, Name, MIRBuilder);
4321 uint32_t LoadOpcode =
4322 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4325 TII.get(LoadOpcode))
4333InstructionSelector *
4337 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
bool hasInternalLinkage() const
bool hasLinkOnceODRLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE