32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
int Imm) {
50 return SPIRV::SelectionControl::Flatten;
52 return SPIRV::SelectionControl::DontFlatten;
54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
215 template <
bool Signed>
218 template <
bool Signed>
236 bool IsSigned,
unsigned Opcode)
const;
238 bool IsSigned)
const;
244 bool IsSigned)
const;
283 GL::GLSLExtInst GLInst)
const;
288 GL::GLSLExtInst GLInst)
const;
310 bool selectCounterHandleFromBinding(
Register &ResVReg,
326 std::pair<Register, bool>
328 const SPIRVType *ResType =
nullptr)
const;
340 SPIRV::StorageClass::StorageClass SC)
const;
347 SPIRV::StorageClass::StorageClass SC,
359 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
362 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
369bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
371 if (
TET->getTargetExtName() ==
"spirv.Image") {
374 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
375 return TET->getTypeParameter(0)->isIntegerTy();
379#define GET_GLOBALISEL_IMPL
380#include "SPIRVGenGlobalISel.inc"
381#undef GET_GLOBALISEL_IMPL
387 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
390#include
"SPIRVGenGlobalISel.inc"
393#include
"SPIRVGenGlobalISel.inc"
405 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
409void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
410 if (HasVRegsReset == &MF)
415 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
417 LLT RegType =
MRI.getType(
Reg);
425 for (
const auto &
MBB : MF) {
426 for (
const auto &
MI :
MBB) {
429 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
433 LLT DstType =
MRI.getType(DstReg);
435 LLT SrcType =
MRI.getType(SrcReg);
436 if (DstType != SrcType)
437 MRI.setType(DstReg,
MRI.getType(SrcReg));
439 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
440 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
441 if (DstRC != SrcRC && SrcRC)
442 MRI.setRegClass(DstReg, SrcRC);
458 case TargetOpcode::G_CONSTANT:
459 case TargetOpcode::G_FCONSTANT:
461 case TargetOpcode::G_INTRINSIC:
462 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
463 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
465 Intrinsic::spv_const_composite;
466 case TargetOpcode::G_BUILD_VECTOR:
467 case TargetOpcode::G_SPLAT_VECTOR: {
478 case SPIRV::OpConstantTrue:
479 case SPIRV::OpConstantFalse:
480 case SPIRV::OpConstantI:
481 case SPIRV::OpConstantF:
482 case SPIRV::OpConstantComposite:
483 case SPIRV::OpConstantCompositeContinuedINTEL:
484 case SPIRV::OpConstantSampler:
485 case SPIRV::OpConstantNull:
487 case SPIRV::OpConstantFunctionPointerINTEL:
503 for (
const auto &MO :
MI.all_defs()) {
505 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
508 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
509 MI.isLifetimeMarker())
513 if (
MI.mayStore() ||
MI.isCall() ||
514 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
515 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
520bool SPIRVInstructionSelector::select(MachineInstr &
I) {
521 resetVRegsType(*
I.getParent()->getParent());
523 assert(
I.getParent() &&
"Instruction should be in a basic block!");
524 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
529 if (Opcode == SPIRV::ASSIGN_TYPE) {
530 Register DstReg =
I.getOperand(0).getReg();
531 Register SrcReg =
I.getOperand(1).getReg();
532 auto *
Def =
MRI->getVRegDef(SrcReg);
534 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
535 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
537 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
538 Register SelectDstReg =
Def->getOperand(0).getReg();
542 Def->removeFromParent();
543 MRI->replaceRegWith(DstReg, SelectDstReg);
545 I.removeFromParent();
547 Res = selectImpl(
I, *CoverageInfo);
549 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
550 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
554 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
561 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
562 MRI->replaceRegWith(SrcReg, DstReg);
564 I.removeFromParent();
566 }
else if (
I.getNumDefs() == 1) {
573 if (DeadMIs.contains(&
I)) {
583 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
584 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
590 bool HasDefs =
I.getNumDefs() > 0;
593 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
594 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
595 if (spvSelect(ResVReg, ResType,
I)) {
597 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
600 I.removeFromParent();
608 case TargetOpcode::G_CONSTANT:
609 case TargetOpcode::G_FCONSTANT:
611 case TargetOpcode::G_SADDO:
612 case TargetOpcode::G_SSUBO:
619 MachineInstr &
I)
const {
620 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
621 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
622 if (DstRC != SrcRC && SrcRC)
623 MRI->setRegClass(DestReg, SrcRC);
624 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
625 TII.get(TargetOpcode::COPY))
631bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
633 MachineInstr &
I)
const {
634 const unsigned Opcode =
I.getOpcode();
636 return selectImpl(
I, *CoverageInfo);
638 case TargetOpcode::G_CONSTANT:
639 case TargetOpcode::G_FCONSTANT:
640 return selectConst(ResVReg, ResType,
I);
641 case TargetOpcode::G_GLOBAL_VALUE:
642 return selectGlobalValue(ResVReg,
I);
643 case TargetOpcode::G_IMPLICIT_DEF:
644 return selectOpUndef(ResVReg, ResType,
I);
645 case TargetOpcode::G_FREEZE:
646 return selectFreeze(ResVReg, ResType,
I);
648 case TargetOpcode::G_INTRINSIC:
649 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
650 case TargetOpcode::G_INTRINSIC_CONVERGENT:
651 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
652 return selectIntrinsic(ResVReg, ResType,
I);
653 case TargetOpcode::G_BITREVERSE:
654 return selectBitreverse(ResVReg, ResType,
I);
656 case TargetOpcode::G_BUILD_VECTOR:
657 return selectBuildVector(ResVReg, ResType,
I);
658 case TargetOpcode::G_SPLAT_VECTOR:
659 return selectSplatVector(ResVReg, ResType,
I);
661 case TargetOpcode::G_SHUFFLE_VECTOR: {
662 MachineBasicBlock &BB = *
I.getParent();
663 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
666 .
addUse(
I.getOperand(1).getReg())
667 .
addUse(
I.getOperand(2).getReg());
668 for (
auto V :
I.getOperand(3).getShuffleMask())
672 case TargetOpcode::G_MEMMOVE:
673 case TargetOpcode::G_MEMCPY:
674 case TargetOpcode::G_MEMSET:
675 return selectMemOperation(ResVReg,
I);
677 case TargetOpcode::G_ICMP:
678 return selectICmp(ResVReg, ResType,
I);
679 case TargetOpcode::G_FCMP:
680 return selectFCmp(ResVReg, ResType,
I);
682 case TargetOpcode::G_FRAME_INDEX:
683 return selectFrameIndex(ResVReg, ResType,
I);
685 case TargetOpcode::G_LOAD:
686 return selectLoad(ResVReg, ResType,
I);
687 case TargetOpcode::G_STORE:
688 return selectStore(
I);
690 case TargetOpcode::G_BR:
691 return selectBranch(
I);
692 case TargetOpcode::G_BRCOND:
693 return selectBranchCond(
I);
695 case TargetOpcode::G_PHI:
696 return selectPhi(ResVReg, ResType,
I);
698 case TargetOpcode::G_FPTOSI:
699 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
700 case TargetOpcode::G_FPTOUI:
701 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
703 case TargetOpcode::G_FPTOSI_SAT:
704 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
705 case TargetOpcode::G_FPTOUI_SAT:
706 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
708 case TargetOpcode::G_SITOFP:
709 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
710 case TargetOpcode::G_UITOFP:
711 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
713 case TargetOpcode::G_CTPOP:
714 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
715 case TargetOpcode::G_SMIN:
716 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
717 case TargetOpcode::G_UMIN:
718 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
720 case TargetOpcode::G_SMAX:
721 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
722 case TargetOpcode::G_UMAX:
723 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
725 case TargetOpcode::G_SCMP:
726 return selectSUCmp(ResVReg, ResType,
I,
true);
727 case TargetOpcode::G_UCMP:
728 return selectSUCmp(ResVReg, ResType,
I,
false);
729 case TargetOpcode::G_LROUND:
730 case TargetOpcode::G_LLROUND: {
732 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
733 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
735 regForLround, *(
I.getParent()->getParent()));
737 I, CL::round, GL::Round);
739 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
745 case TargetOpcode::G_STRICT_FMA:
746 case TargetOpcode::G_FMA:
747 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
749 case TargetOpcode::G_STRICT_FLDEXP:
750 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
752 case TargetOpcode::G_FPOW:
753 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
754 case TargetOpcode::G_FPOWI:
755 return selectExtInst(ResVReg, ResType,
I, CL::pown);
757 case TargetOpcode::G_FEXP:
758 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
759 case TargetOpcode::G_FEXP2:
760 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
761 case TargetOpcode::G_FMODF:
762 return selectModf(ResVReg, ResType,
I);
764 case TargetOpcode::G_FLOG:
765 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
766 case TargetOpcode::G_FLOG2:
767 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
768 case TargetOpcode::G_FLOG10:
769 return selectLog10(ResVReg, ResType,
I);
771 case TargetOpcode::G_FABS:
772 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
773 case TargetOpcode::G_ABS:
774 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
776 case TargetOpcode::G_FMINNUM:
777 case TargetOpcode::G_FMINIMUM:
778 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
779 case TargetOpcode::G_FMAXNUM:
780 case TargetOpcode::G_FMAXIMUM:
781 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
783 case TargetOpcode::G_FCOPYSIGN:
784 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
786 case TargetOpcode::G_FCEIL:
787 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
788 case TargetOpcode::G_FFLOOR:
789 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
791 case TargetOpcode::G_FCOS:
792 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
793 case TargetOpcode::G_FSIN:
794 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
795 case TargetOpcode::G_FTAN:
796 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
797 case TargetOpcode::G_FACOS:
798 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
799 case TargetOpcode::G_FASIN:
800 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
801 case TargetOpcode::G_FATAN:
802 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
803 case TargetOpcode::G_FATAN2:
804 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
805 case TargetOpcode::G_FCOSH:
806 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
807 case TargetOpcode::G_FSINH:
808 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
809 case TargetOpcode::G_FTANH:
810 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
812 case TargetOpcode::G_STRICT_FSQRT:
813 case TargetOpcode::G_FSQRT:
814 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
816 case TargetOpcode::G_CTTZ:
817 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
818 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
819 case TargetOpcode::G_CTLZ:
820 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
821 return selectExtInst(ResVReg, ResType,
I, CL::clz);
823 case TargetOpcode::G_INTRINSIC_ROUND:
824 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
825 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
826 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
827 case TargetOpcode::G_INTRINSIC_TRUNC:
828 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
829 case TargetOpcode::G_FRINT:
830 case TargetOpcode::G_FNEARBYINT:
831 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
833 case TargetOpcode::G_SMULH:
834 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
835 case TargetOpcode::G_UMULH:
836 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
838 case TargetOpcode::G_SADDSAT:
839 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
840 case TargetOpcode::G_UADDSAT:
841 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
842 case TargetOpcode::G_SSUBSAT:
843 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
844 case TargetOpcode::G_USUBSAT:
845 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
847 case TargetOpcode::G_FFREXP:
848 return selectFrexp(ResVReg, ResType,
I);
850 case TargetOpcode::G_UADDO:
851 return selectOverflowArith(ResVReg, ResType,
I,
852 ResType->
getOpcode() == SPIRV::OpTypeVector
853 ? SPIRV::OpIAddCarryV
854 : SPIRV::OpIAddCarryS);
855 case TargetOpcode::G_USUBO:
856 return selectOverflowArith(ResVReg, ResType,
I,
857 ResType->
getOpcode() == SPIRV::OpTypeVector
858 ? SPIRV::OpISubBorrowV
859 : SPIRV::OpISubBorrowS);
860 case TargetOpcode::G_UMULO:
861 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
862 case TargetOpcode::G_SMULO:
863 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
865 case TargetOpcode::G_SEXT:
866 return selectExt(ResVReg, ResType,
I,
true);
867 case TargetOpcode::G_ANYEXT:
868 case TargetOpcode::G_ZEXT:
869 return selectExt(ResVReg, ResType,
I,
false);
870 case TargetOpcode::G_TRUNC:
871 return selectTrunc(ResVReg, ResType,
I);
872 case TargetOpcode::G_FPTRUNC:
873 case TargetOpcode::G_FPEXT:
874 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
876 case TargetOpcode::G_PTRTOINT:
877 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
878 case TargetOpcode::G_INTTOPTR:
879 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
880 case TargetOpcode::G_BITCAST:
881 return selectBitcast(ResVReg, ResType,
I);
882 case TargetOpcode::G_ADDRSPACE_CAST:
883 return selectAddrSpaceCast(ResVReg, ResType,
I);
884 case TargetOpcode::G_PTR_ADD: {
886 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
890 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
891 (*II).getOpcode() == TargetOpcode::COPY ||
892 (*II).getOpcode() == SPIRV::OpVariable) &&
895 bool IsGVInit =
false;
897 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
898 UseEnd =
MRI->use_instr_end();
899 UseIt != UseEnd; UseIt = std::next(UseIt)) {
900 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
901 (*UseIt).getOpcode() == SPIRV::OpVariable) {
911 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
914 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
915 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
924 "incompatible result and operand types in a bitcast");
926 MachineInstrBuilder MIB =
927 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
934 ? SPIRV::OpInBoundsAccessChain
935 : SPIRV::OpInBoundsPtrAccessChain))
939 .
addUse(
I.getOperand(2).getReg())
942 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
946 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
948 .
addUse(
I.getOperand(2).getReg())
956 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
959 .
addImm(
static_cast<uint32_t
>(
960 SPIRV::Opcode::InBoundsPtrAccessChain))
963 .
addUse(
I.getOperand(2).getReg());
967 case TargetOpcode::G_ATOMICRMW_OR:
968 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
969 case TargetOpcode::G_ATOMICRMW_ADD:
970 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
971 case TargetOpcode::G_ATOMICRMW_AND:
972 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
973 case TargetOpcode::G_ATOMICRMW_MAX:
974 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
975 case TargetOpcode::G_ATOMICRMW_MIN:
976 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
977 case TargetOpcode::G_ATOMICRMW_SUB:
978 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
979 case TargetOpcode::G_ATOMICRMW_XOR:
980 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
981 case TargetOpcode::G_ATOMICRMW_UMAX:
982 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
983 case TargetOpcode::G_ATOMICRMW_UMIN:
984 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
985 case TargetOpcode::G_ATOMICRMW_XCHG:
986 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
987 case TargetOpcode::G_ATOMIC_CMPXCHG:
988 return selectAtomicCmpXchg(ResVReg, ResType,
I);
990 case TargetOpcode::G_ATOMICRMW_FADD:
991 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
992 case TargetOpcode::G_ATOMICRMW_FSUB:
994 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
996 case TargetOpcode::G_ATOMICRMW_FMIN:
997 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
998 case TargetOpcode::G_ATOMICRMW_FMAX:
999 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1001 case TargetOpcode::G_FENCE:
1002 return selectFence(
I);
1004 case TargetOpcode::G_STACKSAVE:
1005 return selectStackSave(ResVReg, ResType,
I);
1006 case TargetOpcode::G_STACKRESTORE:
1007 return selectStackRestore(
I);
1009 case TargetOpcode::G_UNMERGE_VALUES:
1015 case TargetOpcode::G_TRAP:
1016 case TargetOpcode::G_UBSANTRAP:
1017 case TargetOpcode::DBG_LABEL:
1019 case TargetOpcode::G_DEBUGTRAP:
1020 return selectDebugTrap(ResVReg, ResType,
I);
1027bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1029 MachineInstr &
I)
const {
1030 unsigned Opcode = SPIRV::OpNop;
1032 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1036bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1039 GL::GLSLExtInst GLInst)
const {
1041 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1042 std::string DiagMsg;
1043 raw_string_ostream OS(DiagMsg);
1044 I.print(OS,
true,
false,
false,
false);
1045 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1048 return selectExtInst(ResVReg, ResType,
I,
1049 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
1052bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1055 CL::OpenCLExtInst CLInst)
const {
1056 return selectExtInst(ResVReg, ResType,
I,
1057 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
1060bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1063 CL::OpenCLExtInst CLInst,
1064 GL::GLSLExtInst GLInst)
const {
1065 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1066 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1067 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1070bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1075 for (
const auto &Ex : Insts) {
1076 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1077 uint32_t Opcode = Ex.second;
1080 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1083 .
addImm(
static_cast<uint32_t
>(Set))
1086 const unsigned NumOps =
I.getNumOperands();
1089 I.getOperand(Index).getType() ==
1090 MachineOperand::MachineOperandType::MO_IntrinsicID)
1093 MIB.
add(
I.getOperand(Index));
1099bool SPIRVInstructionSelector::selectExtInstForLRound(
1101 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
const {
1102 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1103 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1104 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1107bool SPIRVInstructionSelector::selectExtInstForLRound(
1110 for (
const auto &Ex : Insts) {
1111 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1112 uint32_t Opcode = Ex.second;
1115 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1118 .
addImm(
static_cast<uint32_t
>(Set))
1120 const unsigned NumOps =
I.getNumOperands();
1123 I.getOperand(Index).getType() ==
1124 MachineOperand::MachineOperandType::MO_IntrinsicID)
1127 MIB.
add(
I.getOperand(Index));
1135bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1137 MachineInstr &
I)
const {
1138 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1139 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1140 for (
const auto &Ex : ExtInsts) {
1141 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1142 uint32_t Opcode = Ex.second;
1146 MachineIRBuilder MIRBuilder(
I);
1149 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1154 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1155 TII.get(SPIRV::OpVariable))
1158 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1162 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1165 .
addImm(
static_cast<uint32_t
>(Ex.first))
1167 .
add(
I.getOperand(2))
1172 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1173 .
addDef(
I.getOperand(1).getReg())
1182bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1185 std::vector<Register> Srcs,
1186 unsigned Opcode)
const {
1187 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1196bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1199 unsigned Opcode)
const {
1201 Register SrcReg =
I.getOperand(1).getReg();
1204 MRI->def_instr_begin(SrcReg);
1205 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1206 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1207 (*DefIt).getOpcode() == SPIRV::OpVariable) {
1213 uint32_t SpecOpcode = 0;
1215 case SPIRV::OpConvertPtrToU:
1216 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1218 case SPIRV::OpConvertUToPtr:
1219 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1223 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1224 TII.get(SPIRV::OpSpecConstantOp))
1232 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1236bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1238 MachineInstr &
I)
const {
1239 Register OpReg =
I.getOperand(1).getReg();
1243 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1251 if (
MemOp->isVolatile())
1252 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1253 if (
MemOp->isNonTemporal())
1254 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1255 if (
MemOp->getAlign().value())
1256 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1262 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1263 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1267 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1269 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1273 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1277 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1279 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1291 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1293 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1295 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1299bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1301 MachineInstr &
I)
const {
1308 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1309 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1311 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1313 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1315 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1319 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1320 return generateImageRead(ResVReg, ResType, NewHandleReg, IdxReg,
1321 I.getDebugLoc(),
I);
1325 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1329 if (!
I.getNumMemOperands()) {
1330 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1332 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1335 MachineIRBuilder MIRBuilder(
I);
1341bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1343 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1349 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1350 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1352 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1355 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1359 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1360 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1361 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1362 TII.get(SPIRV::OpImageWrite))
1368 if (sampledTypeIsSignedInteger(LLVMHandleType))
1371 return BMI.constrainAllUses(
TII,
TRI, RBI);
1376 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1379 if (!
I.getNumMemOperands()) {
1380 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1382 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1385 MachineIRBuilder MIRBuilder(
I);
1391bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1393 MachineInstr &
I)
const {
1394 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1396 "llvm.stacksave intrinsic: this instruction requires the following "
1397 "SPIR-V extension: SPV_INTEL_variable_length_array",
1400 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1406bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1407 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1409 "llvm.stackrestore intrinsic: this instruction requires the following "
1410 "SPIR-V extension: SPV_INTEL_variable_length_array",
1412 if (!
I.getOperand(0).isReg())
1415 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1416 .
addUse(
I.getOperand(0).getReg())
1420bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1421 MachineInstr &
I)
const {
1423 Register SrcReg =
I.getOperand(1).getReg();
1425 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1426 MachineIRBuilder MIRBuilder(
I);
1427 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1430 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1431 Type *ArrTy = ArrayType::get(ValTy, Num);
1433 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1436 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1443 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1448 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1451 .
addImm(SPIRV::StorageClass::UniformConstant)
1460 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1462 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1464 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1465 .
addUse(
I.getOperand(0).getReg())
1467 .
addUse(
I.getOperand(2).getReg());
1468 if (
I.getNumMemOperands()) {
1469 MachineIRBuilder MIRBuilder(
I);
1478bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1482 unsigned NegateOpcode)
const {
1485 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1488 auto ScopeConstant = buildI32Constant(Scope,
I);
1489 Register ScopeReg = ScopeConstant.first;
1490 Result &= ScopeConstant.second;
1498 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1499 Register MemSemReg = MemSemConstant.first;
1500 Result &= MemSemConstant.second;
1502 Register ValueReg =
I.getOperand(2).getReg();
1503 if (NegateOpcode != 0) {
1506 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1511 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1521bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1522 unsigned ArgI =
I.getNumOperands() - 1;
1524 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1527 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1529 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1535 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1536 Register ResVReg =
I.getOperand(i).getReg();
1540 ResType = ScalarType;
1546 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1550 .
addImm(
static_cast<int64_t
>(i));
1556bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1559 auto MemSemConstant = buildI32Constant(MemSem,
I);
1560 Register MemSemReg = MemSemConstant.first;
1561 bool Result = MemSemConstant.second;
1563 uint32_t
Scope =
static_cast<uint32_t
>(
1565 auto ScopeConstant = buildI32Constant(Scope,
I);
1566 Register ScopeReg = ScopeConstant.first;
1567 Result &= ScopeConstant.second;
1570 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1576bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1579 unsigned Opcode)
const {
1580 Type *ResTy =
nullptr;
1584 "Not enough info to select the arithmetic with overflow instruction");
1587 "with overflow instruction");
1593 MachineIRBuilder MIRBuilder(
I);
1595 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1596 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1602 Register ZeroReg = buildZerosVal(ResType,
I);
1605 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1607 if (ResName.
size() > 0)
1612 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1615 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1616 MIB.
addUse(
I.getOperand(i).getReg());
1621 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1622 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1624 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1625 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1633 .
addDef(
I.getOperand(1).getReg())
1640bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1642 MachineInstr &
I)
const {
1650 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1653 auto ScopeConstant = buildI32Constant(Scope,
I);
1654 ScopeReg = ScopeConstant.first;
1655 Result &= ScopeConstant.second;
1657 unsigned ScSem =
static_cast<uint32_t
>(
1660 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1661 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1662 MemSemEqReg = MemSemEqConstant.first;
1663 Result &= MemSemEqConstant.second;
1665 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1666 if (MemSemEq == MemSemNeq)
1667 MemSemNeqReg = MemSemEqReg;
1669 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1670 MemSemNeqReg = MemSemNeqConstant.first;
1671 Result &= MemSemNeqConstant.second;
1674 ScopeReg =
I.getOperand(5).getReg();
1675 MemSemEqReg =
I.getOperand(6).getReg();
1676 MemSemNeqReg =
I.getOperand(7).getReg();
1680 Register Val =
I.getOperand(4).getReg();
1685 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1712 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1723 case SPIRV::StorageClass::DeviceOnlyINTEL:
1724 case SPIRV::StorageClass::HostOnlyINTEL:
1733 bool IsGRef =
false;
1734 bool IsAllowedRefs =
1735 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1736 unsigned Opcode = It.getOpcode();
1737 if (Opcode == SPIRV::OpConstantComposite ||
1738 Opcode == SPIRV::OpVariable ||
1739 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1740 return IsGRef = true;
1741 return Opcode == SPIRV::OpName;
1743 return IsAllowedRefs && IsGRef;
1746Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1747 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1749 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
1753SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
1755 uint32_t Opcode)
const {
1756 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1757 TII.get(SPIRV::OpSpecConstantOp))
1765SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
1769 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1771 SPIRV::StorageClass::Generic),
1773 MachineFunction *MF =
I.getParent()->getParent();
1775 MachineInstrBuilder MIB = buildSpecConstantOp(
1777 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
1787bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1789 MachineInstr &
I)
const {
1793 Register SrcPtr =
I.getOperand(1).getReg();
1797 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1798 ResType->
getOpcode() != SPIRV::OpTypePointer)
1799 return BuildCOPY(ResVReg, SrcPtr,
I);
1809 unsigned SpecOpcode =
1811 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1814 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1821 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1822 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1823 .constrainAllUses(
TII,
TRI, RBI);
1825 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1827 buildSpecConstantOp(
1829 getUcharPtrTypeReg(
I, DstSC),
1830 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1831 .constrainAllUses(
TII,
TRI, RBI);
1837 return BuildCOPY(ResVReg, SrcPtr,
I);
1839 if ((SrcSC == SPIRV::StorageClass::Function &&
1840 DstSC == SPIRV::StorageClass::Private) ||
1841 (DstSC == SPIRV::StorageClass::Function &&
1842 SrcSC == SPIRV::StorageClass::Private))
1843 return BuildCOPY(ResVReg, SrcPtr,
I);
1847 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1850 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1871 return selectUnOp(ResVReg, ResType,
I,
1872 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1874 return selectUnOp(ResVReg, ResType,
I,
1875 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1877 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1879 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1889 return SPIRV::OpFOrdEqual;
1891 return SPIRV::OpFOrdGreaterThanEqual;
1893 return SPIRV::OpFOrdGreaterThan;
1895 return SPIRV::OpFOrdLessThanEqual;
1897 return SPIRV::OpFOrdLessThan;
1899 return SPIRV::OpFOrdNotEqual;
1901 return SPIRV::OpOrdered;
1903 return SPIRV::OpFUnordEqual;
1905 return SPIRV::OpFUnordGreaterThanEqual;
1907 return SPIRV::OpFUnordGreaterThan;
1909 return SPIRV::OpFUnordLessThanEqual;
1911 return SPIRV::OpFUnordLessThan;
1913 return SPIRV::OpFUnordNotEqual;
1915 return SPIRV::OpUnordered;
1925 return SPIRV::OpIEqual;
1927 return SPIRV::OpINotEqual;
1929 return SPIRV::OpSGreaterThanEqual;
1931 return SPIRV::OpSGreaterThan;
1933 return SPIRV::OpSLessThanEqual;
1935 return SPIRV::OpSLessThan;
1937 return SPIRV::OpUGreaterThanEqual;
1939 return SPIRV::OpUGreaterThan;
1941 return SPIRV::OpULessThanEqual;
1943 return SPIRV::OpULessThan;
1952 return SPIRV::OpPtrEqual;
1954 return SPIRV::OpPtrNotEqual;
1965 return SPIRV::OpLogicalEqual;
1967 return SPIRV::OpLogicalNotEqual;
2001bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2004 unsigned OpAnyOrAll)
const {
2005 assert(
I.getNumOperands() == 3);
2006 assert(
I.getOperand(2).isReg());
2008 Register InputRegister =
I.getOperand(2).getReg();
2015 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2016 if (IsBoolTy && !IsVectorTy) {
2017 assert(ResVReg ==
I.getOperand(0).getReg());
2018 return BuildCOPY(ResVReg, InputRegister,
I);
2022 unsigned SpirvNotEqualId =
2023 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2030 IsBoolTy ? InputRegister
2039 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2059bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2061 MachineInstr &
I)
const {
2062 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2065bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2067 MachineInstr &
I)
const {
2068 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2072bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2074 MachineInstr &
I)
const {
2075 assert(
I.getNumOperands() == 4);
2076 assert(
I.getOperand(2).isReg());
2077 assert(
I.getOperand(3).isReg());
2084 "dot product requires a vector of at least 2 components");
2092 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2095 .
addUse(
I.getOperand(2).getReg())
2096 .
addUse(
I.getOperand(3).getReg())
2100bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2104 assert(
I.getNumOperands() == 4);
2105 assert(
I.getOperand(2).isReg());
2106 assert(
I.getOperand(3).isReg());
2109 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2113 .
addUse(
I.getOperand(2).getReg())
2114 .
addUse(
I.getOperand(3).getReg())
2120bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2122 assert(
I.getNumOperands() == 4);
2123 assert(
I.getOperand(2).isReg());
2124 assert(
I.getOperand(3).isReg());
2128 Register Vec0 =
I.getOperand(2).getReg();
2129 Register Vec1 =
I.getOperand(3).getReg();
2142 "dot product requires a vector of at least 2 components");
2156 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2179bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2181 MachineInstr &
I)
const {
2183 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2186 .
addUse(
I.getOperand(2).getReg())
2190bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2192 MachineInstr &
I)
const {
2194 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2197 .
addUse(
I.getOperand(2).getReg())
2201template <
bool Signed>
2202bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2204 MachineInstr &
I)
const {
2205 assert(
I.getNumOperands() == 5);
2206 assert(
I.getOperand(2).isReg());
2207 assert(
I.getOperand(3).isReg());
2208 assert(
I.getOperand(4).isReg());
2211 Register Acc =
I.getOperand(2).getReg();
2215 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2235template <
bool Signed>
2236bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2238 assert(
I.getNumOperands() == 5);
2239 assert(
I.getOperand(2).isReg());
2240 assert(
I.getOperand(3).isReg());
2241 assert(
I.getOperand(4).isReg());
2246 Register Acc =
I.getOperand(2).getReg();
2252 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2256 for (
unsigned i = 0; i < 4; i++) {
2258 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2269 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2289 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2301 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2317bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2319 MachineInstr &
I)
const {
2320 assert(
I.getNumOperands() == 3);
2321 assert(
I.getOperand(2).isReg());
2323 Register VZero = buildZerosValF(ResType,
I);
2324 Register VOne = buildOnesValF(ResType,
I);
2326 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2329 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2331 .
addUse(
I.getOperand(2).getReg())
2337bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2339 MachineInstr &
I)
const {
2340 assert(
I.getNumOperands() == 3);
2341 assert(
I.getOperand(2).isReg());
2343 Register InputRegister =
I.getOperand(2).getReg();
2345 auto &
DL =
I.getDebugLoc();
2355 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2357 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2359 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2366 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2371 if (NeedsConversion) {
2372 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2383bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2386 unsigned Opcode)
const {
2390 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2396 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2397 BMI.addUse(
I.getOperand(J).getReg());
2403bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2409 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2410 SPIRV::OpGroupNonUniformBallot);
2414 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2419 .
addImm(SPIRV::GroupOperation::Reduce)
2426bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2429 bool IsUnsigned)
const {
2430 assert(
I.getNumOperands() == 3);
2431 assert(
I.getOperand(2).isReg());
2433 Register InputRegister =
I.getOperand(2).getReg();
2442 auto IntegerOpcodeType =
2443 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2444 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2445 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2450 .
addImm(SPIRV::GroupOperation::Reduce)
2451 .
addUse(
I.getOperand(2).getReg())
2455bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2457 MachineInstr &
I)
const {
2458 assert(
I.getNumOperands() == 3);
2459 assert(
I.getOperand(2).isReg());
2461 Register InputRegister =
I.getOperand(2).getReg();
2471 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2472 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2477 .
addImm(SPIRV::GroupOperation::Reduce)
2478 .
addUse(
I.getOperand(2).getReg());
2481bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2483 MachineInstr &
I)
const {
2485 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2488 .
addUse(
I.getOperand(1).getReg())
2492bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2494 MachineInstr &
I)
const {
2500 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2502 Register OpReg =
I.getOperand(1).getReg();
2503 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2504 if (
Def->getOpcode() == TargetOpcode::COPY)
2505 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2507 switch (
Def->getOpcode()) {
2508 case SPIRV::ASSIGN_TYPE:
2509 if (MachineInstr *AssignToDef =
2510 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2511 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2512 Reg =
Def->getOperand(2).getReg();
2515 case SPIRV::OpUndef:
2516 Reg =
Def->getOperand(1).getReg();
2519 unsigned DestOpCode;
2521 DestOpCode = SPIRV::OpConstantNull;
2523 DestOpCode = TargetOpcode::COPY;
2526 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2527 .
addDef(
I.getOperand(0).getReg())
2534bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2536 MachineInstr &
I)
const {
2538 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2540 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2544 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2549 for (
unsigned i =
I.getNumExplicitDefs();
2550 i <
I.getNumExplicitOperands() && IsConst; ++i)
2554 if (!IsConst &&
N < 2)
2556 "There must be at least two constituent operands in a vector");
2559 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2560 TII.get(IsConst ? SPIRV::OpConstantComposite
2561 : SPIRV::OpCompositeConstruct))
2564 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2565 MIB.
addUse(
I.getOperand(i).getReg());
2569bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2571 MachineInstr &
I)
const {
2573 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2575 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2581 if (!
I.getOperand(
OpIdx).isReg())
2588 if (!IsConst &&
N < 2)
2590 "There must be at least two constituent operands in a vector");
2593 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2594 TII.get(IsConst ? SPIRV::OpConstantComposite
2595 : SPIRV::OpCompositeConstruct))
2598 for (
unsigned i = 0; i <
N; ++i)
2603bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2605 MachineInstr &
I)
const {
2610 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2612 Opcode = SPIRV::OpDemoteToHelperInvocation;
2614 Opcode = SPIRV::OpKill;
2616 if (MachineInstr *NextI =
I.getNextNode()) {
2618 NextI->removeFromParent();
2623 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2627bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2630 MachineInstr &
I)
const {
2631 Register Cmp0 =
I.getOperand(2).getReg();
2632 Register Cmp1 =
I.getOperand(3).getReg();
2635 "CMP operands should have the same type");
2636 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2645bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2647 MachineInstr &
I)
const {
2648 auto Pred =
I.getOperand(1).getPredicate();
2651 Register CmpOperand =
I.getOperand(2).getReg();
2658 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2661std::pair<Register, bool>
2662SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2668 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2676 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2679 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2682 .
addImm(APInt(32, Val).getZExtValue());
2684 GR.
add(ConstInt,
MI);
2689bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2691 MachineInstr &
I)
const {
2693 return selectCmp(ResVReg, ResType, CmpOp,
I);
2697 MachineInstr &
I)
const {
2700 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2706 MachineInstr &
I)
const {
2710 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2716 MachineInstr &
I)
const {
2720 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2727 MachineInstr &
I)
const {
2731 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2736bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2738 MachineInstr &
I)
const {
2739 Register SelectFirstArg =
I.getOperand(2).getReg();
2740 Register SelectSecondArg =
I.getOperand(3).getReg();
2749 SPIRV::OpTypeVector;
2756 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2757 }
else if (IsPtrTy) {
2758 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2760 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2764 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2765 }
else if (IsPtrTy) {
2766 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2768 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2771 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2774 .
addUse(
I.getOperand(1).getReg())
2780bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2783 bool IsSigned)
const {
2785 Register ZeroReg = buildZerosVal(ResType,
I);
2786 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2790 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2791 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2794 .
addUse(
I.getOperand(1).getReg())
2800bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2802 MachineInstr &
I,
bool IsSigned,
2803 unsigned Opcode)
const {
2804 Register SrcReg =
I.getOperand(1).getReg();
2810 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2815 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2817 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2820bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2822 MachineInstr &
I,
bool IsSigned)
const {
2823 Register SrcReg =
I.getOperand(1).getReg();
2825 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2828 if (SrcType == ResType)
2829 return BuildCOPY(ResVReg, SrcReg,
I);
2831 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2832 return selectUnOp(ResVReg, ResType,
I, Opcode);
2835bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2838 bool IsSigned)
const {
2839 MachineIRBuilder MIRBuilder(
I);
2840 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2855 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2856 : SPIRV::OpULessThanEqual))
2859 .
addUse(
I.getOperand(1).getReg())
2860 .
addUse(
I.getOperand(2).getReg())
2866 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2869 .
addUse(
I.getOperand(1).getReg())
2870 .
addUse(
I.getOperand(2).getReg())
2878 unsigned SelectOpcode =
2879 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2884 .
addUse(buildOnesVal(
true, ResType,
I))
2885 .
addUse(buildZerosVal(ResType,
I))
2892 .
addUse(buildOnesVal(
false, ResType,
I))
2896bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2903 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2904 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2906 Register One = buildOnesVal(
false, IntTy,
I);
2922bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2924 MachineInstr &
I)
const {
2925 Register IntReg =
I.getOperand(1).getReg();
2928 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2929 if (ArgType == ResType)
2930 return BuildCOPY(ResVReg, IntReg,
I);
2932 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2933 return selectUnOp(ResVReg, ResType,
I, Opcode);
2936bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2938 MachineInstr &
I)
const {
2939 unsigned Opcode =
I.getOpcode();
2940 unsigned TpOpcode = ResType->
getOpcode();
2942 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2943 assert(Opcode == TargetOpcode::G_CONSTANT &&
2944 I.getOperand(1).getCImm()->isZero());
2945 MachineBasicBlock &DepMBB =
I.getMF()->front();
2948 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2955 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
2958bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2960 MachineInstr &
I)
const {
2961 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2967bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2969 MachineInstr &
I)
const {
2971 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2975 .
addUse(
I.getOperand(3).getReg())
2977 .
addUse(
I.getOperand(2).getReg());
2978 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
2983bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2985 MachineInstr &
I)
const {
2987 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2990 .
addUse(
I.getOperand(2).getReg());
2991 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
2996bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
2998 MachineInstr &
I)
const {
3000 return selectInsertVal(ResVReg, ResType,
I);
3002 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3005 .
addUse(
I.getOperand(2).getReg())
3006 .
addUse(
I.getOperand(3).getReg())
3007 .
addUse(
I.getOperand(4).getReg())
3011bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3013 MachineInstr &
I)
const {
3015 return selectExtractVal(ResVReg, ResType,
I);
3017 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3020 .
addUse(
I.getOperand(2).getReg())
3021 .
addUse(
I.getOperand(3).getReg())
3025bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3027 MachineInstr &
I)
const {
3028 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3034 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3035 : SPIRV::OpAccessChain)
3036 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3037 :
SPIRV::OpPtrAccessChain);
3039 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3043 .
addUse(
I.getOperand(3).getReg());
3045 const unsigned StartingIndex =
3046 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3049 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3050 Res.addUse(
I.getOperand(i).getReg());
3051 return Res.constrainAllUses(
TII,
TRI, RBI);
3055bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3058 unsigned Lim =
I.getNumExplicitOperands();
3059 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3060 Register OpReg =
I.getOperand(i).getReg();
3061 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3063 SmallPtrSet<SPIRVType *, 4> Visited;
3064 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3065 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3072 MachineFunction *MF =
I.getMF();
3084 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3085 TII.get(SPIRV::OpSpecConstantOp))
3088 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3090 GR.
add(OpDefine, MIB);
3098bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3100 MachineInstr &
I)
const {
3104 case Intrinsic::spv_load:
3105 return selectLoad(ResVReg, ResType,
I);
3106 case Intrinsic::spv_store:
3107 return selectStore(
I);
3108 case Intrinsic::spv_extractv:
3109 return selectExtractVal(ResVReg, ResType,
I);
3110 case Intrinsic::spv_insertv:
3111 return selectInsertVal(ResVReg, ResType,
I);
3112 case Intrinsic::spv_extractelt:
3113 return selectExtractElt(ResVReg, ResType,
I);
3114 case Intrinsic::spv_insertelt:
3115 return selectInsertElt(ResVReg, ResType,
I);
3116 case Intrinsic::spv_gep:
3117 return selectGEP(ResVReg, ResType,
I);
3118 case Intrinsic::spv_unref_global:
3119 case Intrinsic::spv_init_global: {
3120 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3121 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3122 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3125 Register GVarVReg =
MI->getOperand(0).getReg();
3126 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3130 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3132 MI->removeFromParent();
3136 case Intrinsic::spv_undef: {
3137 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3142 case Intrinsic::spv_const_composite: {
3144 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3150 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3152 MachineIRBuilder MIR(
I);
3154 MIR, SPIRV::OpConstantComposite, 3,
3155 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3157 for (
auto *Instr : Instructions) {
3158 Instr->setDebugLoc(
I.getDebugLoc());
3164 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3170 case Intrinsic::spv_assign_name: {
3171 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3172 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3173 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3174 i <
I.getNumExplicitOperands(); ++i) {
3175 MIB.
addImm(
I.getOperand(i).getImm());
3179 case Intrinsic::spv_switch: {
3180 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3181 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3182 if (
I.getOperand(i).isReg())
3183 MIB.
addReg(
I.getOperand(i).getReg());
3184 else if (
I.getOperand(i).isCImm())
3185 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3186 else if (
I.getOperand(i).isMBB())
3187 MIB.
addMBB(
I.getOperand(i).getMBB());
3193 case Intrinsic::spv_loop_merge: {
3194 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3195 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3196 if (
I.getOperand(i).isMBB())
3197 MIB.
addMBB(
I.getOperand(i).getMBB());
3203 case Intrinsic::spv_selection_merge: {
3205 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3206 assert(
I.getOperand(1).isMBB() &&
3207 "operand 1 to spv_selection_merge must be a basic block");
3208 MIB.
addMBB(
I.getOperand(1).getMBB());
3209 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3212 case Intrinsic::spv_cmpxchg:
3213 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3214 case Intrinsic::spv_unreachable:
3215 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3217 case Intrinsic::spv_alloca:
3218 return selectFrameIndex(ResVReg, ResType,
I);
3219 case Intrinsic::spv_alloca_array:
3220 return selectAllocaArray(ResVReg, ResType,
I);
3221 case Intrinsic::spv_assume:
3223 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3224 .
addUse(
I.getOperand(1).getReg())
3227 case Intrinsic::spv_expect:
3229 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3232 .
addUse(
I.getOperand(2).getReg())
3233 .
addUse(
I.getOperand(3).getReg())
3236 case Intrinsic::arithmetic_fence:
3239 TII.get(SPIRV::OpArithmeticFenceEXT))
3242 .
addUse(
I.getOperand(2).getReg())
3245 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3247 case Intrinsic::spv_thread_id:
3253 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3255 case Intrinsic::spv_thread_id_in_group:
3261 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3263 case Intrinsic::spv_group_id:
3269 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3271 case Intrinsic::spv_flattened_thread_id_in_group:
3278 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3280 case Intrinsic::spv_workgroup_size:
3281 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3283 case Intrinsic::spv_global_size:
3284 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3286 case Intrinsic::spv_global_offset:
3287 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3289 case Intrinsic::spv_num_workgroups:
3290 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3292 case Intrinsic::spv_subgroup_size:
3293 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3295 case Intrinsic::spv_num_subgroups:
3296 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3298 case Intrinsic::spv_subgroup_id:
3299 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3300 case Intrinsic::spv_subgroup_local_invocation_id:
3301 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3302 ResVReg, ResType,
I);
3303 case Intrinsic::spv_subgroup_max_size:
3304 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3306 case Intrinsic::spv_fdot:
3307 return selectFloatDot(ResVReg, ResType,
I);
3308 case Intrinsic::spv_udot:
3309 case Intrinsic::spv_sdot:
3310 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3312 return selectIntegerDot(ResVReg, ResType,
I,
3313 IID == Intrinsic::spv_sdot);
3314 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3315 case Intrinsic::spv_dot4add_i8packed:
3316 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3318 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3319 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3320 case Intrinsic::spv_dot4add_u8packed:
3321 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3323 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3324 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3325 case Intrinsic::spv_all:
3326 return selectAll(ResVReg, ResType,
I);
3327 case Intrinsic::spv_any:
3328 return selectAny(ResVReg, ResType,
I);
3329 case Intrinsic::spv_cross:
3330 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3331 case Intrinsic::spv_distance:
3332 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3333 case Intrinsic::spv_lerp:
3334 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3335 case Intrinsic::spv_length:
3336 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3337 case Intrinsic::spv_degrees:
3338 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3339 case Intrinsic::spv_faceforward:
3340 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3341 case Intrinsic::spv_frac:
3342 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3343 case Intrinsic::spv_isinf:
3344 return selectOpIsInf(ResVReg, ResType,
I);
3345 case Intrinsic::spv_isnan:
3346 return selectOpIsNan(ResVReg, ResType,
I);
3347 case Intrinsic::spv_normalize:
3348 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3349 case Intrinsic::spv_refract:
3350 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3351 case Intrinsic::spv_reflect:
3352 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3353 case Intrinsic::spv_rsqrt:
3354 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3355 case Intrinsic::spv_sign:
3356 return selectSign(ResVReg, ResType,
I);
3357 case Intrinsic::spv_smoothstep:
3358 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3359 case Intrinsic::spv_firstbituhigh:
3360 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3361 case Intrinsic::spv_firstbitshigh:
3362 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3363 case Intrinsic::spv_firstbitlow:
3364 return selectFirstBitLow(ResVReg, ResType,
I);
3365 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3367 auto MemSemConstant =
3368 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3369 Register MemSemReg = MemSemConstant.first;
3370 Result &= MemSemConstant.second;
3371 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3372 Register ScopeReg = ScopeConstant.first;
3373 Result &= ScopeConstant.second;
3376 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3382 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3383 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3384 SPIRV::StorageClass::StorageClass ResSC =
3388 "Generic storage class");
3390 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3397 case Intrinsic::spv_lifetime_start:
3398 case Intrinsic::spv_lifetime_end: {
3399 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3400 : SPIRV::OpLifetimeStop;
3401 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3402 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3410 case Intrinsic::spv_saturate:
3411 return selectSaturate(ResVReg, ResType,
I);
3412 case Intrinsic::spv_nclamp:
3413 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3414 case Intrinsic::spv_uclamp:
3415 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3416 case Intrinsic::spv_sclamp:
3417 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3418 case Intrinsic::spv_wave_active_countbits:
3419 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3420 case Intrinsic::spv_wave_all:
3421 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3422 case Intrinsic::spv_wave_any:
3423 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3424 case Intrinsic::spv_wave_is_first_lane:
3425 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3426 case Intrinsic::spv_wave_reduce_umax:
3427 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3428 case Intrinsic::spv_wave_reduce_max:
3429 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3430 case Intrinsic::spv_wave_reduce_sum:
3431 return selectWaveReduceSum(ResVReg, ResType,
I);
3432 case Intrinsic::spv_wave_readlane:
3433 return selectWaveOpInst(ResVReg, ResType,
I,
3434 SPIRV::OpGroupNonUniformShuffle);
3435 case Intrinsic::spv_step:
3436 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3437 case Intrinsic::spv_radians:
3438 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3442 case Intrinsic::instrprof_increment:
3443 case Intrinsic::instrprof_increment_step:
3444 case Intrinsic::instrprof_value_profile:
3447 case Intrinsic::spv_value_md:
3449 case Intrinsic::spv_resource_handlefrombinding: {
3450 return selectHandleFromBinding(ResVReg, ResType,
I);
3452 case Intrinsic::spv_resource_counterhandlefrombinding:
3453 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
3454 case Intrinsic::spv_resource_updatecounter:
3455 return selectUpdateCounter(ResVReg, ResType,
I);
3456 case Intrinsic::spv_resource_store_typedbuffer: {
3457 return selectImageWriteIntrinsic(
I);
3459 case Intrinsic::spv_resource_load_typedbuffer: {
3460 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3462 case Intrinsic::spv_resource_getpointer: {
3463 return selectResourceGetPointer(ResVReg, ResType,
I);
3465 case Intrinsic::spv_discard: {
3466 return selectDiscard(ResVReg, ResType,
I);
3469 std::string DiagMsg;
3470 raw_string_ostream OS(DiagMsg);
3472 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3479bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3481 MachineInstr &
I)
const {
3484 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3491bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3494 assert(Intr.getIntrinsicID() ==
3495 Intrinsic::spv_resource_counterhandlefrombinding);
3498 Register MainHandleReg = Intr.getOperand(2).getReg();
3500 assert(MainHandleDef->getIntrinsicID() ==
3501 Intrinsic::spv_resource_handlefrombinding);
3505 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
3506 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3507 const bool IsNonUniform =
false;
3508 std::string CounterName =
3513 MachineIRBuilder MIRBuilder(
I);
3514 Register CounterVarReg = buildPointerToResource(
3516 Binding, ArraySize, IndexReg, IsNonUniform, CounterName, MIRBuilder);
3518 return BuildCOPY(ResVReg, CounterVarReg,
I);
3521bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
3523 MachineInstr &
I)
const {
3525 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3527 Register CounterHandleReg = Intr.getOperand(2).getReg();
3528 Register IncrReg = Intr.getOperand(3).getReg();
3536 assert(CounterVarPointeeType &&
3537 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
3538 "Counter variable must be a struct");
3540 SPIRV::StorageClass::StorageBuffer &&
3541 "Counter variable must be in the storage buffer storage class");
3543 "Counter variable must have exactly 1 member in the struct");
3547 "Counter variable struct must have a single i32 member");
3551 MachineIRBuilder MIRBuilder(
I);
3553 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
3556 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3558 auto Zero = buildI32Constant(0,
I);
3564 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3565 TII.get(SPIRV::OpAccessChain))
3568 .
addUse(CounterHandleReg)
3576 auto Scope = buildI32Constant(SPIRV::Scope::Device,
I);
3579 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
3580 if (!Semantics.second)
3584 auto Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
3589 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
3600 return BuildCOPY(ResVReg, AtomicRes,
I);
3608 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
3615bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3624 Register ImageReg =
I.getOperand(2).getReg();
3626 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3632 Register IdxReg =
I.getOperand(3).getReg();
3634 MachineInstr &Pos =
I;
3636 return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
3639bool SPIRVInstructionSelector::generateImageRead(
Register &ResVReg,
3643 MachineInstr &Pos)
const {
3646 "ImageReg is not an image type.");
3647 bool IsSignedInteger =
3651 if (ResultSize == 4) {
3658 if (IsSignedInteger)
3663 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3670 if (IsSignedInteger)
3676 if (ResultSize == 1) {
3678 TII.get(SPIRV::OpCompositeExtract))
3685 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3688bool SPIRVInstructionSelector::selectResourceGetPointer(
3690 Register ResourcePtr =
I.getOperand(2).getReg();
3692 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3701 MachineIRBuilder MIRBuilder(
I);
3703 Register IndexReg =
I.getOperand(3).getReg();
3706 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3707 TII.get(SPIRV::OpAccessChain))
3716bool SPIRVInstructionSelector::extractSubvector(
3718 MachineInstr &InsertionPoint)
const {
3720 [[maybe_unused]] uint64_t InputSize =
3723 assert(InputSize > 1 &&
"The input must be a vector.");
3724 assert(ResultSize > 1 &&
"The result must be a vector.");
3725 assert(ResultSize < InputSize &&
3726 "Cannot extract more element than there are in the input.");
3729 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3730 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3731 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3734 TII.get(SPIRV::OpCompositeExtract))
3745 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3747 TII.get(SPIRV::OpCompositeConstruct))
3751 for (
Register ComponentReg : ComponentRegisters)
3752 MIB.
addUse(ComponentReg);
3756bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3757 MachineInstr &
I)
const {
3764 Register ImageReg =
I.getOperand(1).getReg();
3766 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3772 Register CoordinateReg =
I.getOperand(2).getReg();
3773 Register DataReg =
I.getOperand(3).getReg();
3776 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3777 TII.get(SPIRV::OpImageWrite))
3784Register SPIRVInstructionSelector::buildPointerToResource(
3785 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3786 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3787 bool IsNonUniform, StringRef Name, MachineIRBuilder MIRBuilder)
const {
3789 if (ArraySize == 1) {
3793 "SpirvResType did not have an explicit layout.");
3798 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3802 VarPointerType, Set,
Binding, Name, MIRBuilder);
3811 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3812 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3824bool SPIRVInstructionSelector::selectFirstBitSet16(
3826 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3828 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3832 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3835bool SPIRVInstructionSelector::selectFirstBitSet32(
3837 Register SrcReg,
unsigned BitSetOpcode)
const {
3838 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3841 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3847bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3849 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3856 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3858 MachineIRBuilder MIRBuilder(
I);
3866 std::vector<Register> PartialRegs;
3869 unsigned CurrentComponent = 0;
3870 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3876 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3877 TII.get(SPIRV::OpVectorShuffle))
3882 .
addImm(CurrentComponent)
3883 .
addImm(CurrentComponent + 1);
3891 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3892 BitSetOpcode, SwapPrimarySide))
3895 PartialRegs.push_back(SubVecBitSetReg);
3899 if (CurrentComponent != ComponentCount) {
3905 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
3906 SPIRV::OpVectorExtractDynamic))
3912 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
3913 BitSetOpcode, SwapPrimarySide))
3916 PartialRegs.push_back(FinalElemBitSetReg);
3921 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
3922 SPIRV::OpCompositeConstruct);
3925bool SPIRVInstructionSelector::selectFirstBitSet64(
3927 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3940 if (ComponentCount > 2) {
3941 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
3942 BitSetOpcode, SwapPrimarySide);
3946 MachineIRBuilder MIRBuilder(
I);
3948 BaseType, 2 * ComponentCount, MIRBuilder,
false);
3952 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
3958 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
3965 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
3968 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
3969 SPIRV::OpVectorExtractDynamic))
3971 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
3972 SPIRV::OpVectorExtractDynamic))
3976 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3977 TII.get(SPIRV::OpVectorShuffle))
3985 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
3992 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3993 TII.get(SPIRV::OpVectorShuffle))
4001 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
4022 SelectOp = SPIRV::OpSelectSISCond;
4023 AddOp = SPIRV::OpIAddS;
4031 SelectOp = SPIRV::OpSelectVIVCond;
4032 AddOp = SPIRV::OpIAddV;
4042 if (SwapPrimarySide) {
4043 PrimaryReg = LowReg;
4044 SecondaryReg = HighReg;
4045 PrimaryShiftReg = Reg0;
4046 SecondaryShiftReg = Reg32;
4051 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
4057 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
4063 if (!selectOpWithSrcs(ValReg, ResType,
I,
4064 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4067 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
4070bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
4073 bool IsSigned)
const {
4075 Register OpReg =
I.getOperand(2).getReg();
4078 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4079 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4083 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4085 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4087 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4091 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4095bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
4097 MachineInstr &
I)
const {
4099 Register OpReg =
I.getOperand(2).getReg();
4104 unsigned ExtendOpcode = SPIRV::OpUConvert;
4105 unsigned BitSetOpcode = GL::FindILsb;
4109 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4111 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4113 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4120bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
4122 MachineInstr &
I)
const {
4126 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
4127 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4130 .
addUse(
I.getOperand(2).getReg())
4133 unsigned Alignment =
I.getOperand(3).getImm();
4139bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4141 MachineInstr &
I)
const {
4145 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4146 TII.get(SPIRV::OpVariable))
4149 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4152 unsigned Alignment =
I.getOperand(2).getImm();
4159bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4164 const MachineInstr *PrevI =
I.getPrevNode();
4166 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4167 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4170 .
addMBB(
I.getOperand(0).getMBB())
4174 .
addMBB(
I.getOperand(0).getMBB())
4178bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4189 const MachineInstr *NextI =
I.getNextNode();
4191 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4197 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4198 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4199 .
addUse(
I.getOperand(0).getReg())
4200 .
addMBB(
I.getOperand(1).getMBB())
4205bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4207 MachineInstr &
I)
const {
4208 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4211 const unsigned NumOps =
I.getNumOperands();
4212 for (
unsigned i = 1; i <
NumOps; i += 2) {
4213 MIB.
addUse(
I.getOperand(i + 0).getReg());
4214 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4222bool SPIRVInstructionSelector::selectGlobalValue(
4223 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4225 MachineIRBuilder MIRBuilder(
I);
4226 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4229 std::string GlobalIdent;
4231 unsigned &
ID = UnnamedGlobalIDs[GV];
4233 ID = UnnamedGlobalIDs.size();
4234 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4261 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4268 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4271 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4272 MachineInstrBuilder MIB1 =
4273 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4276 MachineInstrBuilder MIB2 =
4278 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4282 GR.
add(ConstVal, MIB2);
4288 MachineInstrBuilder MIB3 =
4289 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4292 GR.
add(ConstVal, MIB3);
4295 assert(NewReg != ResVReg);
4296 return BuildCOPY(ResVReg, NewReg,
I);
4308 SPIRV::LinkageType::LinkageType LnkType =
4310 ? SPIRV::LinkageType::Import
4313 ? SPIRV::LinkageType::LinkOnceODR
4314 : SPIRV::LinkageType::Export);
4322 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
4326bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4328 MachineInstr &
I)
const {
4330 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4338 MachineIRBuilder MIRBuilder(
I);
4344 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4347 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4349 .
add(
I.getOperand(1))
4354 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4357 ResType->
getOpcode() == SPIRV::OpTypeVector
4364 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4365 ? SPIRV::OpVectorTimesScalar
4375bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4377 MachineInstr &
I)
const {
4393 MachineIRBuilder MIRBuilder(
I);
4396 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4408 MachineBasicBlock &EntryBB =
I.getMF()->front();
4412 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4415 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4421 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4424 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4427 .
add(
I.getOperand(
I.getNumExplicitDefs()))
4431 Register IntegralPartReg =
I.getOperand(1).getReg();
4432 if (IntegralPartReg.
isValid()) {
4434 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4443 assert(
false &&
"GLSL::Modf is deprecated.");
4454bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4455 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4456 const SPIRVType *ResType, MachineInstr &
I)
const {
4457 MachineIRBuilder MIRBuilder(
I);
4461 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4473 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4474 SPIRV::LinkageType::Import, MIRBuilder,
false);
4477 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4478 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4484 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4491 assert(
I.getOperand(2).isReg());
4492 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4496 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4506bool SPIRVInstructionSelector::loadBuiltinInputID(
4507 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4508 const SPIRVType *ResType, MachineInstr &
I)
const {
4509 MachineIRBuilder MIRBuilder(
I);
4511 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4526 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4527 SPIRV::LinkageType::Import, MIRBuilder,
false);
4530 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4539 MachineInstr &
I)
const {
4540 MachineIRBuilder MIRBuilder(
I);
4541 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4545 if (VectorSize == 4)
4553bool SPIRVInstructionSelector::loadHandleBeforePosition(
4555 MachineInstr &Pos)
const {
4558 Intrinsic::spv_resource_handlefrombinding);
4565 bool IsNonUniform =
false;
4569 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4570 MachineIRBuilder MIRBuilder(HandleDef);
4572 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4574 if (IsStructuredBuffer) {
4580 buildPointerToResource(VarType, SC, Set,
Binding, ArraySize, IndexReg,
4581 IsNonUniform, Name, MIRBuilder);
4589 uint32_t LoadOpcode =
4590 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4593 TII.get(LoadOpcode))
4601InstructionSelector *
4605 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
bool hasInternalLinkage() const
bool hasLinkOnceODRLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMIntType(unsigned NumBits)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other side effects.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE