#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

// Alias name restored from later uses of this list type (assumption).
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
llvm::SPIRV::SelectionControl::SelectionControl
getSelectionOperandForImm(int Imm) {
  // The if-conditions are restored by assumption: Imm encodes the requested
  // selection-control hint.
  if (Imm == 2)
    return SPIRV::SelectionControl::Flatten;
  if (Imm == 1)
    return SPIRV::SelectionControl::DontFlatten;
  if (Imm == 0)
    return SPIRV::SelectionControl::None;
  llvm_unreachable("Invalid immediate");
}
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
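// The GET_GLOBALISEL_* blocks above each pull a different slice out of the
// same TableGen-generated file: first the predicate bitset type, then the
// predicate declarations, then the matcher's temporary state.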
                    unsigned BitSetOpcode) const;
                    unsigned BitSetOpcode) const;
                    unsigned BitSetOpcode, bool SwapPrimarySide) const;
                    unsigned BitSetOpcode, bool SwapPrimarySide) const;
                    unsigned Opcode) const;
                    unsigned Opcode) const;
                    unsigned NegateOpcode = 0) const;
  template <bool Signed>
  template <bool Signed>
                    bool IsSigned, unsigned Opcode) const;
                    bool IsSigned) const;
                    bool IsSigned) const;
                    GL::GLSLExtInst GLInst) const;
  std::pair<Register, bool>
                    const SPIRVType *ResType = nullptr) const;
                    SPIRV::StorageClass::StorageClass SC) const;
                    SPIRV::StorageClass::StorageClass SC,
  bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
  bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
bool sampledTypeIsSignedInteger(const llvm::Type *HandleType) {
  // TET definition restored from context (assumption).
  const auto *TET = cast<TargetExtType>(HandleType);
  if (TET->getTargetExtName() == "spirv.Image") {
    return false;
  }
  assert(TET->getTargetExtName() == "spirv.SignedImage");
  return TET->getTypeParameter(0)->isIntegerTy();
}
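// This helper feeds the image-access selection below: handles declared as
// "spirv.SignedImage" report a signed sampled type, which changes the image
// operands emitted for reads and writes.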
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
// The *_INIT guards around the next two includes are restored per the
// standard GlobalISel constructor pattern (assumption).
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
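// Before selection proper, re-walk the function's virtual registers so that
// both sides of every ASSIGN_TYPE agree on LLT type and register class.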
  if (HasVRegsReset == &MF)
    return;
  // ...
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I); // restored (assumption)
    LLT RegType = MRI.getType(Reg);
    // ...
  }

  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      // ...
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      // Dst/Src register and class lookups restored from context (assumption).
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    // ...
  }

  case SPIRV::OpConstantTrue:
  case SPIRV::OpConstantFalse:
  case SPIRV::OpConstantI:
  case SPIRV::OpConstantF:
  case SPIRV::OpConstantComposite:
  case SPIRV::OpConstantCompositeContinuedINTEL:
  case SPIRV::OpConstantSampler:
  case SPIRV::OpConstantNull:
  // ...
  case SPIRV::OpConstantFunctionPointerINTEL:
    return true;
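// An instruction may be dropped as dead only if none of its defs is used and
// it has no side effects that the checks below recognize.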
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg(); // restored from context (assumption)
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
      MI.isLifetimeMarker())
    return false;
  // ...
  if (MI.mayStore() || MI.isCall() ||
      (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
      MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo())
    return false;
  return true;
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  // Function head restored from the InstructionSelector interface
  // (assumption).
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");
  // ...
  const unsigned Opcode = I.getOpcode(); // restored (assumption)
  if (Opcode == SPIRV::ASSIGN_TYPE) {
    Register DstReg = I.getOperand(0).getReg();
    Register SrcReg = I.getOperand(1).getReg();
    auto *Def = MRI->getVRegDef(SrcReg);
    if (/* ... */
        Def->getOpcode() != TargetOpcode::G_CONSTANT &&
        Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
      bool Res = false;
      if (Def->getOpcode() == TargetOpcode::G_SELECT) {
        Register SelectDstReg = Def->getOperand(0).getReg();
        Res = selectSelect(SelectDstReg, GR.getSPIRVTypeForVReg(SelectDstReg),
                           /* ... */);
        GR.invalidateMachineInstr(Def);
        Def->removeFromParent();
        MRI->replaceRegWith(DstReg, SelectDstReg);
        GR.invalidateMachineInstr(&I);
        I.removeFromParent();
      } else {
        Res = selectImpl(I, *CoverageInfo);
      }
      if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
        dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
        // ...
      }
      assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
      return Res;
    }
    MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
    MRI->replaceRegWith(SrcReg, DstReg);
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
    return true;
  } else if (I.getNumDefs() == 1) {
    // ...
  }

  if (DeadMIs.contains(&I)) {
    // ...
    GR.invalidateMachineInstr(&I);
    // ...
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0); // (assumption)
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
         I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
  if (spvSelect(ResVReg, ResType, I)) {
    // ...
    for (unsigned i = 0; i < I.getNumDefs(); ++i) {
      // ...
    }
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
    return true;
  }
  return false;
}
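// Helpers used by select() above follow: the switch decides which generic
// opcodes bypass the TableGen patterns, and BuildCOPY emits a plain COPY that
// inherits the source's register class.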
// Head restored from the call in spvSelect below (assumption).
static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }
  // ...
}

bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
                                         MachineInstr &I) const {
  // Head restored from call sites (assumption).
  const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(DestReg);
  const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
  if (DstRC != SrcRC && SrcRC)
    MRI->setRegClass(DestReg, SrcRC);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::COPY))
      .addDef(DestReg)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
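// spvSelect is the core dispatch: each generic opcode maps either to a native
// SPIR-V instruction or to an OpExtInst from the OpenCL (CL::*) or GLSL
// (GL::*) extended instruction set, whichever the subtarget supports.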
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode)) // guard restored (assumption)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectConst(ResVReg, ResType, I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V); // loop body restored (assumption)
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);
  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_FPTOSI_SAT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI_SAT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_SCMP:
    return selectSUCmp(ResVReg, ResType, I, true);
  case TargetOpcode::G_UCMP:
    return selectSUCmp(ResVReg, ResType, I, false);
  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_STRICT_FLDEXP:
    return selectExtInst(ResVReg, ResType, I, CL::ldexp);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_STRICT_FSQRT:
  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);
  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);
  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    // ...
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) /* && ... */);
    // ...
    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }
    // ...
    SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
    SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
    SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
    if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
      // Bring the global to the result's pointee type via a bitcast first.
      Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
      MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
      // ...
      if (!GR.isBitcastCompatible(ResType, GVType))
        report_fatal_error(
            "incompatible result and operand types in a bitcast");
      Register ResTypeReg = GR.getSPIRVTypeID(ResType);
      // ...
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
          /* ... */;
      // ...
      return BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(STI.isLogicalSPIRV()
                                 ? SPIRV::OpInBoundsAccessChain
                                 : SPIRV::OpInBoundsPtrAccessChain))
          // ...
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI); // tail restored (assumption)
    }
    if (IsGVInit) { // guard restored (assumption)
      // ...
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
          // ...
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(
              static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
          // ...
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI); // tail restored (assumption)
    }
    // ...
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   // ...
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   // ...
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI); // tail restored (assumption)
  }
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // FSUB is translated to OpAtomicFAddEXT with a negated value operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate); // negate opcode (assumption)
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);
  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I); // restored (assumption)

  // Traps and debug labels intentionally select to a no-op.
  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;

  default:
    return false;
  }
}
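// Four selectExtInst overloads follow: GLSL-only, OpenCL-only, both, and the
// generic list form that picks the first extended instruction set the
// subtarget can actually use.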
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             GL::GLSLExtInst GLInst) const {
  if (!STI.canUseExtInstSet(
          SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg); // DiagMsg/OS restored (assumption)
    I.print(OS, true, false, false, false);
    DiagMsg += " is only supported with the GLSL extended instruction set.\n";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second; // restored (assumption)
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1; // restored (assumption)
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
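// For example, G_FMA arrives here from spvSelect as
// {{OpenCL_std, CL::fma}, {GLSL_std_450, GL::Fma}} and is emitted as a single
// OpExtInst carrying the chosen set and opcode, followed by the operands.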
bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                std::vector<Register> Srcs,
                                                unsigned Opcode) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (Register SReg : Srcs)
    MIB.addUse(SReg); // loop restored (assumption)
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isPhysicalSPIRV() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false; // flag and iterator head restored (assumption)
    for (MachineRegisterInfo::def_instr_iterator
             DefIt = MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*DefIt).getOpcode() == SPIRV::OpVariable) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
                          Opcode);
}
bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType =
      OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
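// The next two helpers translate MachineMemOperand information into the
// SPIR-V memory-operand mask: one from a full MachineMemOperand (alignment
// plus INTEL aliasing metadata), one from bare MachineMemOperand::Flags.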
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB,
                              const SPIRVSubtarget *ST) {
  // Function head restored from usage (assumption); the body is from the
  // excerpt.
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
    if (auto *MD = MemOp->getAAInfo().Scope) {
      // ...
      SpvMemOp |=
          static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
    }
    if (auto *MD = MemOp->getAAInfo().NoAlias) {
      // ...
      SpvMemOp |=
          static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
    }
  }

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp); // restored (assumption)
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value()); // restored (assumption)
  }
}
static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  // Head restored from usage (assumption).
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg(); // restored (assumption)
  auto *PtrDef = getVRegDef(*MRI, Ptr);               // restored (assumption)
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register HandleReg = IntPtrDef->getOperand(2).getReg();
    SPIRVType *HandleType = GR.getSPIRVTypeForVReg(HandleReg);
    if (HandleType->getOpcode() == SPIRV::OpTypeImage) {
      Register NewHandleReg =
          MRI->createVirtualRegister(MRI->getRegClass(HandleReg));
      auto *HandleDef = cast<GIntrinsic>(getVRegDef(*MRI, HandleReg));
      if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef, I)) {
        return false; // restored (assumption)
      }
      Register IdxReg = IntPtrDef->getOperand(3).getReg();
      return generateImageRead(ResVReg, ResType, NewHandleReg, IdxReg,
                               I.getDebugLoc(), I);
    }
  }

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(ResVReg) // restored (assumption)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(Ptr); // restored (assumption)
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    // ...
  }
  // ...
  return MIB.constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
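// selectStore mirrors selectLoad: a pointer produced by
// spv_resource_getpointer on an image handle becomes OpImageWrite rather than
// OpStore.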
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg(); // restored (assumption)
  auto *PtrDef = getVRegDef(*MRI, Ptr);               // restored (assumption)
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register HandleReg = IntPtrDef->getOperand(2).getReg();
    Register NewHandleReg =
        MRI->createVirtualRegister(MRI->getRegClass(HandleReg));
    auto *HandleDef = cast<GIntrinsic>(getVRegDef(*MRI, HandleReg));
    SPIRVType *HandleType = GR.getSPIRVTypeForVReg(HandleReg);
    if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef, I)) {
      return false; // restored (assumption)
    }

    Register IdxReg = IntPtrDef->getOperand(3).getReg();
    if (HandleType->getOpcode() == SPIRV::OpTypeImage) {
      auto BMI = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                         TII.get(SPIRV::OpImageWrite))
                     // image, coordinate, texel (operand order per
                     // OpImageWrite; restored by assumption)
                     .addUse(NewHandleReg)
                     .addUse(IdxReg)
                     .addUse(StoreVal);
      const llvm::Type *LLVMHandleType = GR.getTypeForSPIRVType(HandleType);
      if (sampledTypeIsSignedInteger(LLVMHandleType))
        BMI.addImm(SPIRV::ImageOperand::SignExtend); // (assumption)
      return BMI.constrainAllUses(TII, TRI, RBI);
    }
  }

  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)       // restored (assumption)
                 .addUse(StoreVal); // restored (assumption)
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    // ...
  }
  // ...
  return MIB.constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
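// For G_MEMSET with constant fill value and length, the selector materializes
// a UniformConstant array holding the pattern and copies from it with
// OpCopyMemorySized.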
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register SrcReg = I.getOperand(1).getReg();
  bool Result = true;
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    // ...
    Type *ArrTy = ArrayType::get(ValTy, Num);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
    // ...
    SPIRVType *SpvArrTy = GR.getOrCreateSPIRVType(
        ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false);
    // ...
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg) // restored (assumption)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        /* ... */;
    // ...
    GR.addGlobalObject(GV, GR.CurMF, VarReg);
    // ...
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, SPIRV::StorageClass::UniformConstant);
    // ...
    selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg) // restored (assumption)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands()) {
    // ...
  }
  // ...
  return Result && MIB.constrainAllUses(TII, TRI, RBI); // (assumption)
}
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  bool Result = true;
  // ...
  auto Scope = static_cast<uint32_t>(getMemScope( // head restored (assumption)
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;
  // ...
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  Result &= MemSemConstant.second;

  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType)); // (assumption)
    Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
    ValueReg = TmpReg;
  }

  return Result &&
         BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
             .addDef(ResVReg) // restored (assumption)
             .addUse(GR.getSPIRVTypeID(ResType))
             // pointer, scope, semantics, value (assumption)
             .addUse(I.getOperand(1).getReg())
             .addUse(ScopeReg)
             .addUse(MemSemReg)
             .addUse(ValueReg)
             .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType = // name restored from usage (assumption)
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");
  // ...
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  bool Res = true;                        // restored (assumption)
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) { // guard restored (assumption)
      // ...
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg,
                   LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg) // restored (assumption)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg) // restored (assumption)
            .addImm(static_cast<int64_t>(i));
    Res &= MIB.constrainAllUses(TII, TRI, RBI); // restored (assumption)
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  // ...
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  bool Result = MemSemConstant.second;
  // ...
  auto Scope = static_cast<uint32_t>( // head restored (assumption)
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  return Result &&
         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
             .addUse(ScopeReg)  // restored (assumption)
             .addUse(MemSemReg) // restored (assumption)
             .constrainAllUses(TII, TRI, RBI);
}
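// Overflow-checked arithmetic yields a SPIR-V struct {result, overflow}; the
// selector extracts both elements back into the instruction's two defs.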
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName; // restored from usage (assumption)
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy()) // guard restored (assumption)
    report_fatal_error("Expect struct type result for the arithmetic "
                       "with overflow instruction");
  // "Result" is the struct's first element; "overflow" is the second.
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
  // ...
  SPIRVType *StructType = GR.getOrCreateSPIRVType( // name assumed
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enought operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII); // (assumption)
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1) // guard restored (assumption)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  // ...
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  // ...
  if (ResName.size() > 0) {
    // ...
  }
  // ...
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg) // restored (assumption)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  // ...
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(StructVReg) // restored (assumption)
            .addImm(i);         // restored (assumption)
    // ...
  }
  // Compare the overflow element against zero to form the bool def.
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpINotEqual)) // opcode assumed
      .addDef(I.getOperand(1).getReg())
      .addUse(BoolTypeReg)
      .addUse(HigherVReg) // restored (assumption)
      .addUse(ZeroReg)    // restored (assumption)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  bool Result = true;
  Register ScopeReg, MemSemEqReg, MemSemNeqReg; // restored (assumption)
  // ...
  if (!isa<GIntrinsic>(I)) {
    // ...
    auto Scope = static_cast<uint32_t>(getMemScope( // head restored (assumption)
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    auto ScopeConstant = buildI32Constant(Scope, I);
    ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;

    unsigned ScSem = static_cast<uint32_t>(
        /* ... */);
    // ...
    auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
    MemSemEqReg = MemSemEqConstant.first;
    Result &= MemSemEqConstant.second;
    // ...
    if (MemSemEq == MemSemNeq)
      MemSemNeqReg = MemSemEqReg;
    else {
      // Fixed: the unequal path must build the constant from MemSemNeq, not
      // MemSemEq.
      auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
      MemSemNeqReg = MemSemNeqConstant.first;
      Result &= MemSemNeqConstant.second;
    }
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }
  // ...
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  // ...
  Result &=
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          // ...
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          /* ... */;
  // The comparison of the returned value against the expected one:
  // ...
      .addUse(GR.getSPIRVTypeID(BoolTy))
  // ...
      .addUse(GR.getSPIRVTypeID(ResType))
      // ...
      .addUse(GR.getOrCreateUndef(I, ResType, TII))
      /* ... */;
  // ...
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                // ...
                .addUse(GR.getSPIRVTypeID(ResType))
                /* ... */;
  // ...
  return Result;
}
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true; // restored (assumption)
  // ...
  bool IsGRef = false;
  bool IsAllowedRefs =
      llvm::all_of(MRI->use_instructions(ResVReg), [&IsGRef](auto const &It) {
        unsigned Opcode = It.getOpcode();
        if (Opcode == SPIRV::OpConstantComposite ||
            Opcode == SPIRV::OpVariable ||
            isSpvIntrinsic(It, Intrinsic::spv_init_global))
          return IsGRef = true;
        return Opcode == SPIRV::OpName;
      }); // lambda closed (assumption)
  return IsAllowedRefs && IsGRef;
Register SPIRVInstructionSelector::getUcharPtrTypeReg(
    MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
  return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
      /* ... */));
}

MachineInstrBuilder SPIRVInstructionSelector::buildSpecConstantOp(
    MachineInstr &I, Register Dest, Register Src, Register DestType,
    uint32_t Opcode) const {
  // Head and operand order restored from call sites (assumption).
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpSpecConstantOp))
      .addDef(Dest)
      .addUse(DestType)
      .addImm(Opcode)
      .addUse(Src);
}

  // From a helper that casts a pointer to Generic as a spec constant:
  SPIRVType *GenericPtrTy = // name restored from usage (assumption)
      GR.changePointerStorageClass(SrcPtrTy, SPIRV::StorageClass::Generic, I);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
  MRI->setType(Tmp, // head restored (assumption)
               LLT::pointer(storageClassToAddressSpace(
                                SPIRV::StorageClass::Generic),
                            GR.getPointerSize()));
  // ...
  GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
  MachineInstrBuilder MIB = buildSpecConstantOp( // head restored (assumption)
      I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
      static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
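// Address-space casts are only meaningful between concrete storage classes
// and Generic; all other combinations either collapse to a COPY or are
// rejected.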
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);

  // A value without pointer type (e.g. a null constant represented by
  // OpTypeInt) needs no cast.
  if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
      ResType->getOpcode() != SPIRV::OpTypePointer)
    return BuildCOPY(ResVReg, SrcPtr, I);

  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);

  if (/* ... */) { // cast inside a global-variable initializer
    unsigned SpecOpcode =
        DstSC == SPIRV::StorageClass::Generic && /* ... */
            ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
            : (SrcSC == SPIRV::StorageClass::Generic && /* ... */
                   ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
                   : 0);
    // ...
    if (SpecOpcode)
      return buildSpecConstantOp(I, ResVReg, SrcPtr,
                                 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
          .constrainAllUses(TII, TRI, RBI);
    // ...
    return buildSpecConstantOp(
               I, ResVReg, /* ... */,
               getUcharPtrTypeReg(I, DstSC),
               static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
        .constrainAllUses(TII, TRI, RBI);
  }

  // Identical storage classes need no cast.
  if (SrcSC == DstSC) // guard restored (assumption)
    return BuildCOPY(ResVReg, SrcPtr, I);

  if ((SrcSC == SPIRV::StorageClass::Function &&
       DstSC == SPIRV::StorageClass::Private) ||
      (DstSC == SPIRV::StorageClass::Function &&
       SrcSC == SPIRV::StorageClass::Private))
    return BuildCOPY(ResVReg, SrcPtr, I);

  // Casting to or from Generic.
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // Casting between two eligible pointers using Generic as an intermediary.
  if (/* ... */) {
    SPIRVType *GenericPtrTy = GR.changePointerStorageClass(
        SrcPtrTy, SPIRV::StorageClass::Generic, I);
    Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass); // (assumption)
    bool Success = BuildMI(BB, I, I.getDebugLoc(),
                           TII.get(SPIRV::OpPtrCastToGeneric)) // (assumption)
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, I.getDebugLoc(),
                              TII.get(SPIRV::OpGenericCastToPtr)) // (assumption)
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // SPV_INTEL_usm_storage_classes conversions; each return sits behind its
  // own storage-class guard.
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (/* ... */)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // ...
}
// Only the return statements of these opcode-mapping helpers survive in the
// excerpt; the function heads and case labels are restored from the returned
// opcode names (assumption).
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
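// any()/all() map to OpAny/OpAll, but those require a vector of booleans, so
// non-bool inputs are first compared against zero with OpFOrdNotEqual or
// OpINotEqual.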
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  // ...
  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildCOPY(ResVReg, InputRegister, I);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy; // restored (assumption)
  Register NotEqualReg =
      IsBoolTy ? InputRegister
               : MRI->createVirtualRegister(&SPIRV::IDRegClass); // (assumption)
  if (IsVectorTy) {
    // ...
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) { // guard restored (assumption)
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId)) // (assumption)
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  // ...
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll)) // (assumption)
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}
bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  SPIRVType *VecType = // name restored from usage (assumption)
      GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());

  assert(VecType->getOpcode() == SPIRV::OpTypeVector && // head restored (assumption)
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");
  // ...
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                bool Signed) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp)) // (assumption)
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
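// Fallback used when native integer dot-product instructions are unavailable:
// multiply the vectors elementwise, then extract and accumulate each lane.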
bool SPIRVInstructionSelector::selectIntegerDotExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)

  // Multiply the vectors, then sum the results.
  Register Vec0 = I.getOperand(2).getReg();
  Register Vec1 = I.getOperand(3).getReg();
  Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
  SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);
  bool Result = BuildMI(BB, I, I.getDebugLoc(),
                        TII.get(SPIRV::OpIMulV)) // opcode assumed
                    .addDef(TmpVec)
                    .addUse(GR.getSPIRVTypeID(VecType))
                    .addUse(Vec0)
                    .addUse(Vec1)
                    .constrainAllUses(TII, TRI, RBI);

  assert(VecType->getOpcode() == SPIRV::OpTypeVector && // head restored (assumption)
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Result &= BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpCompositeExtract)) // (assumption)
                .addDef(Res)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(TmpVec)
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);

  for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
    Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));
    Result &=
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(Elt) // restored (assumption)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(TmpVec) // restored (assumption)
            .addImm(i)      // restored (assumption)
            .constrainAllUses(TII, TRI, RBI);

    Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
                       ? MRI->createVirtualRegister(GR.getRegClass(ResType))
                       : ResVReg;
    Result &= BuildMI(BB, I, I.getDebugLoc(),
                      TII.get(SPIRV::OpIAddS)) // opcode assumed
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Res)
                  .addUse(Elt)
                  .constrainAllUses(TII, TRI, RBI);
    Res = Sum; // carry the partial sum forward (assumption)
  }
  return Result;
}
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)

  Register Acc = I.getOperand(2).getReg();
  // ...
  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  Register Dot = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp)) // (assumption)
                    .addDef(Dot)
                    .addUse(GR.getSPIRVTypeID(ResType))
                    .addUse(I.getOperand(3).getReg())
                    .addUse(I.getOperand(4).getReg())
                    .constrainAllUses(TII, TRI, RBI);

  return Result && BuildMI(BB, I, I.getDebugLoc(),
                           TII.get(SPIRV::OpIAddS)) // opcode assumed
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(Dot)
                       .addUse(Acc)
                       .constrainAllUses(TII, TRI, RBI);
}
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)

  bool Result = true;
  // ...
  Register Acc = I.getOperand(2).getReg();
  SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8, I, TII);
  auto ExtractOp =
      Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
  bool ZeroAsNull = !STI.isShader();

  // Extract each packed i8 lane, multiply, mask, and add to the accumulator.
  for (unsigned i = 0; i < 4; i++) {
    // A[i]
    Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) // (assumption)
                  .addDef(AElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(3).getReg()) // (assumption)
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
                  .constrainAllUses(TII, TRI, RBI);

    // B[i]
    Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) // (assumption)
                  .addDef(BElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(4).getReg()) // (assumption)
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII, ZeroAsNull))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
                  .constrainAllUses(TII, TRI, RBI);

    // A[i] * B[i]
    Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass); // (assumption)
    Result &= BuildMI(BB, I, I.getDebugLoc(),
                      TII.get(SPIRV::OpIMulS)) // opcode assumed
                  .addDef(Mul)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(AElt)
                  .addUse(BElt)
                  .constrainAllUses(TII, TRI, RBI);

    // Discard the 24 highest bits so the stored i32 register is i8 equivalent.
    Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) // (assumption)
                  .addDef(MaskMul)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Mul) // (assumption)
                  .addUse(GR.getOrCreateConstInt(0, I, EltType, TII, ZeroAsNull))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII, ZeroAsNull))
                  .constrainAllUses(TII, TRI, RBI);

    Register Sum =
        i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
    Result &= BuildMI(BB, I, I.getDebugLoc(),
                      TII.get(SPIRV::OpIAddS)) // opcode assumed
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Acc)
                  .addUse(MaskMul)
                  .constrainAllUses(TII, TRI, RBI);
    Acc = Sum; // carry the accumulator forward (assumption)
  }
  return Result;
}
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp) // ext-inst opcode assumed: saturate = clamp(x, 0, 1)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSign(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  auto &DL = I.getDebugLoc();
  // ...
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);

  unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
  unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);

  bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;

  auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
  Register SignReg = NeedsConversion // name restored (assumption)
                         ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
                         : ResVReg;

  bool Result =
      BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst)) // (assumption)
          .addDef(SignReg)
          .addUse(GR.getSPIRVTypeID(InputType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(SignOpcode)
          .addUse(InputRegister)
          .constrainAllUses(TII, TRI, RBI);

  if (NeedsConversion) {
    auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
    Result &= BuildMI(BB, I, DL, TII.get(ConvertOpcode)) // (assumption)
                  .addDef(ResVReg)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(SignReg)
                  .constrainAllUses(TII, TRI, RBI);
  }
  return Result;
}
bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                unsigned Opcode) const {
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);

  auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg) // restored (assumption)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
                                                IntTy, TII, !STI.isShader()));

  for (unsigned J = 2; J < I.getNumOperands(); J++) {
    BMI.addUse(I.getOperand(J).getReg());
  }
  return BMI.constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectWaveActiveCountBits(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
  Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
  bool Result = selectWaveOpInst(BallotReg, BallotType, I,
                                 SPIRV::OpGroupNonUniformBallot);

  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Result &= BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
                .addDef(ResVReg) // restored (assumption)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy,
                                               TII, !STI.isShader()))
                .addImm(SPIRV::GroupOperation::Reduce)
                .addUse(BallotReg) // restored (assumption)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
bool SPIRVInstructionSelector::selectWaveReduceMax(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   bool IsUnsigned) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  // ...
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Pick the operation based on the input type.
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  auto IntegerOpcodeType =
      IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
  auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
                                     !STI.isShader()))
      .addImm(SPIRV::GroupOperation::Reduce)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  // ...
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // ...
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  auto Opcode = // name restored (assumption)
      IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
                                     !STI.isShader()))
      .addImm(SPIRV::GroupOperation::Reduce)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // freeze cannot be implemented exactly in SPIR-V; this handles only the
  // simple static case where an undef/poison operand is obvious.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) { // guard restored (assumption)
    if (Def->getOpcode() == TargetOpcode::COPY)
      Def = MRI->getVRegDef(Def->getOperand(1).getReg());
    Register Reg; // restored (assumption)
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef = // restored (assumption)
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg; // restored (assumption)
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        /* ... */;
  }
  return false; // restored (assumption)
}
bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0; // restored (assumption)
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray) {
    // ...
  }
  // ...
  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N) {
    // ...
  }

  // Check whether a constant vector can be constructed.
  bool IsConst = true; // restored (assumption)
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i) {
    // ...
  }

  if (!IsConst && N < 2)
    report_fatal_error( // head restored (assumption)
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg) // restored (assumption)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0; // restored (assumption)
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray) {
    // ...
  }
  // ...
  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg()) {
    // ...
  }
  // ...
  if (!IsConst && N < 2)
    report_fatal_error( // head restored (assumption)
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg) // restored (assumption)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i) {
    MIB.addUse(OpReg); // splatted operand; name assumed
  }
  return MIB.constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  unsigned Opcode; // restored (assumption)

  if (STI.canUseExtension(
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
      /* ... */) {
    Opcode = SPIRV::OpDemoteToHelperInvocation;
  } else {
    Opcode = SPIRV::OpKill;
    // OpKill must terminate the block, so drop any trailing instruction.
    if (MachineInstr *NextI = I.getNextNode()) { // guard restored (assumption)
      GR.invalidateMachineInstr(NextI);
      NextI->removeFromParent();
    }
  }

  MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .constrainAllUses(TII, TRI, RBI); // restored (assumption)
}
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0) // restored (assumption)
      .addUse(Cmp1) // restored (assumption)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc; // restored (assumption)

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred); // (assumption)
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred); // (assumption)
  else
    CmpOpc = getICmpOpcode(Pred); // (assumption)
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  // Signature restored from the declaration fragment above (assumption).
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // ...
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  bool Result = true; // restored (assumption)
  if (!NewReg.isValid()) {
    // ...
    MachineBasicBlock &BB = *I.getParent(); // restored (assumption)
    MachineInstr *MI =
        Val == 0
            ? BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
                  .addDef(NewReg) // restored (assumption)
                  .addUse(GR.getSPIRVTypeID(SpvI32Ty))
            : BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                  .addDef(NewReg) // restored (assumption)
                  .addUse(GR.getSPIRVTypeID(SpvI32Ty))
                  .addImm(Val); // restored (assumption)
    GR.add(ConstInt, MI);
  }
  return {NewReg, Result}; // restored (assumption)
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate()); // (assumption)
  return selectCmp(ResVReg, ResType, CmpOp, I);
}
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // Heads of these build* helpers are restored from their call sites
  // (assumption).
  bool ZeroAsNull = !STI.isShader();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  bool ZeroAsNull = !STI.isShader();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType)); // (assumption)
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = !STI.isShader();
  APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType)); // (assumption)
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = // value restored (assumption)
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt(BitWidth, 1);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII); // (assumption)
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII); // (assumption)
}
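// G_SELECT picks among the OpSelect* pseudo variants according to the shape
// of the operands: the V/S letters encode vector-vs-scalar data and
// condition, and F/P/I encode float, pointer, or integer element types.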
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  Register SelectFirstArg = I.getOperand(2).getReg();
  Register SelectSecondArg = I.getOperand(3).getReg();
  assert(ResType == GR.getSPIRVTypeForVReg(SelectFirstArg) &&
         ResType == GR.getSPIRVTypeForVReg(SelectSecondArg));

  bool IsFloatTy =
      GR.isScalarOrVectorOfType(SelectFirstArg, SPIRV::OpTypeFloat);
  bool IsPtrTy =
      GR.isScalarOrVectorOfType(SelectFirstArg, SPIRV::OpTypePointer);
  bool IsVectorTy = GR.getSPIRVTypeForVReg(SelectFirstArg)->getOpcode() ==
                    SPIRV::OpTypeVector;
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);

  unsigned Opcode;            // restored (assumption)
  if (IsVectorTy) {           // branch structure restored (assumption)
    if (IsFloatTy) {
      Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
    } else if (IsPtrTy) {
      Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
    } else {
      Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
    }
  } else {
    if (IsFloatTy) {
      Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
    } else if (IsPtrTy) {
      Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
    } else {
      Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
    }
  }
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(SelectFirstArg)  // restored (assumption)
      .addUse(SelectSecondArg) // restored (assumption)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSelectDefaultArgs(Register ResVReg,
                                                       const SPIRVType *ResType,
                                                       MachineInstr &I,
                                                       bool IsSigned) const {
  // Select between all-zeros and one/all-ones based on the bool condition.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg) // restored (assumption)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)  // restored (assumption)
      .addUse(ZeroReg) // restored (assumption)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // Bool sources are first widened to an integer via a select, then
  // converted.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = // restored (assumption)
        GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = // restored (assumption)
          GR.getScalarOrVectorComponentCount(ResType);
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(GR.getRegClass(TmpType)); // (assumption)
    selectSelectDefaultArgs(SrcReg, TmpType, I, false);
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelectDefaultArgs(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildCOPY(ResVReg, SrcReg, I);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I,
                                           bool IsSigned) const {
  MachineIRBuilder MIRBuilder(I); // restored (assumption)
  // ...
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII); // (assumption)
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1) // guard restored (assumption)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  // ...
  // A <= B
  Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  // ...
  GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
  bool Result = // restored (assumption)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(IsSigned ? SPIRV::OpSLessThanEqual
                               : SPIRV::OpULessThanEqual))
          .addDef(IsLessEqReg) // restored (assumption)
          .addUse(BoolTypeReg) // restored (assumption)
          .addUse(I.getOperand(1).getReg())
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
  // A < B
  Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  // ...
  GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
  Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(),
                    TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
                .addDef(IsLessReg) // restored (assumption)
                .addUse(BoolTypeReg)
                .addUse(I.getOperand(1).getReg())
                .addUse(I.getOperand(2).getReg())
                .constrainAllUses(TII, TRI, RBI);
  // -1 if A < B, else 0
  Register ResTypeReg = GR.getSPIRVTypeID(ResType);
  Register NegOneOrZeroReg =
      MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
  unsigned SelectOpcode =
      N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
  Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(),
                    TII.get(SelectOpcode)) // builder head restored (assumption)
                .addDef(NegOneOrZeroReg)
                .addUse(ResTypeReg)
                .addUse(IsLessReg)
                .addUse(buildOnesVal(true, ResType, I)) // -1
                .addUse(buildZerosVal(ResType, I))
                .constrainAllUses(TII, TRI, RBI);
  // Final result: the value above if A <= B, otherwise 1.
  return Result &&
         BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SelectOpcode)) // builder head restored (assumption)
             .addDef(ResVReg)
             .addUse(ResTypeReg)
             .addUse(IsLessEqReg)
             .addUse(NegOneOrZeroReg)
             .addUse(buildOnesVal(false, ResType, I))
             .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // Signature restored from the call in selectTrunc below (assumption).
  // Truncate to bool: mask with 1, then compare against zero.
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  // ...
  Register One = buildOnesVal(false, IntTy, I);
  // ...
  bool Result = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                        TII.get(Opcode)) // builder head restored (assumption)
                    /* ... */
                    .addUse(GR.getSPIRVTypeID(IntTy))
                    .addUse(IntReg) // (assumption)
                    .addUse(One)    // (assumption)
                    .constrainAllUses(TII, TRI, RBI);
  // ...
  return Result && BuildMI(*I.getParent(), I, I.getDebugLoc(),
                           TII.get(SPIRV::OpINotEqual)) // opcode assumed
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(BoolTy))
                       /* ... */
                       .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildCOPY(ResVReg, IntReg, I);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  unsigned Opcode = I.getOpcode();
  unsigned TpOpcode = ResType->getOpcode();
  Register Reg; // restored (assumption)
  if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
    assert(Opcode == TargetOpcode::G_CONSTANT &&
           I.getOperand(1).getCImm()->isZero());
    // ...
    Reg = GR.getOrCreateConstNullPtr(MIRBuilder, ResType);
  } else if (Opcode == TargetOpcode::G_FCONSTANT) {
    Reg = GR.getOrCreateConstFP(I.getOperand(1).getFPImm()->getValue(), I,
                                ResType, TII, !STI.isShader());
  } else {
    Reg = GR.getOrCreateConstInt(I.getOperand(1).getCImm()->getZExtValue(), I,
                                 ResType, TII, !STI.isShader());
  }
  return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to insert.
                 .addUse(I.getOperand(3).getReg())
                 // Composite to insert into.
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  // An immediate index becomes an OpCompositeInsert.
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // An immediate index becomes an OpCompositeExtract.
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();
  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator
  // relies on PtrAccessChain, so we try not to deviate. For Vulkan (logical
  // SPIR-V), however, we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isLogicalSPIRV()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices: the access-chain forms skip the leading pointer-offset
  // index that the pointer-access-chain forms keep.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

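// Illustrative only: a gep such as
//   %p = getelementptr inbounds %S, ptr %base, i32 0, i32 2
// becomes roughly, on a logical-SPIR-V (Vulkan) target,
//   %p = OpInBoundsAccessChain %ptr %base %c0 %c2
// while kernel/physical targets emit OpInBoundsPtrAccessChain, whose first
// index is the element offset applied to the base pointer itself.
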
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    MachineInstr *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is
      // processed by selectAddrSpaceCast().
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper and generate the wrapping
    // instruction.
    WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
    CompositeArgs.push_back(WrapReg);
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(WrapReg)
                   .addUse(GR.getSPIRVTypeID(OpType))
                   .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                   .addUse(OpReg);
    GR.add(OpDefine, MIB);
    Result = MIB.constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}

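// Illustrative only: a non-constant operand feeding spv_const_composite is
// wrapped so that the composite stays a constant expression, roughly
// (hypothetical names):
//   %wrap = OpSpecConstantOp %i32 Bitcast %op
// and %wrap then replaces %op in the OpConstantComposite operand list.
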
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    Register GVarVReg = MI->getOperand(0).getReg();
    bool Res = selectGlobalValue(GVarVReg, *MI, Init);
    // We violate SSA form by inserting OpVariable and still having the gMIR
    // G_GLOBAL_VALUE around; remove it now to keep the MIR valid.
    if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
      GR.invalidateMachineInstr(MI);
      MI->removeFromParent();
    }
    return Res;
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
    if (!IsNull) {
      SmallVector<Register> CompositeArgs;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
      MachineIRBuilder MIR(I);
      SmallVector<MachineInstr *, 4> Instructions =
          createContinuedInstructions(
              MIR, SPIRV::OpConstantComposite, 3,
              SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
              GR.getSPIRVTypeID(ResType));
      for (auto *Instr : Instructions) {
        Instr->setDebugLoc(I.getDebugLoc());
        if (!constrainSelectedInstRegOperands(*Instr, TII, TRI, RBI))
          return false;
      }
      return true;
    }
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_loop_merge: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        MIB.add(I.getOperand(i));
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_selection_merge: {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
    assert(I.getOperand(1).isMBB() &&
           "operand 1 to spv_selection_merge must be a basic block");
    MIB.addMBB(I.getOperand(1).getMBB());
    MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
        .constrainAllUses(TII, TRI, RBI);
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      return BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
    return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
  case Intrinsic::spv_thread_id:
    // The HLSL SV_DispatchThreadID semantic maps to the SPIR-V BuiltIn
    // GlobalInvocationId.
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_thread_id_in_group:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_group_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
                                  I);
  case Intrinsic::spv_flattened_thread_id_in_group:
    return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
                              ResType, I);
  case Intrinsic::spv_workgroup_size:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_global_size:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
                                  I);
  case Intrinsic::spv_global_offset:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_num_workgroups:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_subgroup_size:
    return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
                              I);
  case Intrinsic::spv_num_subgroups:
    return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
                              I);
  case Intrinsic::spv_subgroup_id:
    return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType, I);
  case Intrinsic::spv_subgroup_local_invocation_id:
    return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
                              ResVReg, ResType, I);
  case Intrinsic::spv_subgroup_max_size:
    return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
                              I);
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectIntegerDot(ResVReg, ResType, I,
                              /*Signed=*/IID == Intrinsic::spv_sdot);
    return selectIntegerDotExpansion(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_i8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<true>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<true>(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_u8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<false>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<false>(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_cross:
    return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
  case Intrinsic::spv_distance:
    return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_degrees:
    return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
  case Intrinsic::spv_faceforward:
    return selectExtInst(ResVReg, ResType, I, GL::FaceForward);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_refract:
    return selectExtInst(ResVReg, ResType, I, GL::Refract);
  case Intrinsic::spv_reflect:
    return selectExtInst(ResVReg, ResType, I, GL::Reflect);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_smoothstep:
    return selectExtInst(ResVReg, ResType, I, CL::smoothstep, GL::SmoothStep);
  case Intrinsic::spv_firstbituhigh: // There is no CL equivalent of FindUMsb.
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
  case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb.
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
  case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb.
    return selectFirstBitLow(ResVReg, ResType, I);
  case Intrinsic::spv_group_memory_barrier_with_group_sync: {
    bool Result = true;
    auto MemSemConstant =
        buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
    Register MemSemReg = MemSemConstant.first;
    Result &= MemSemConstant.second;
    auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
    Register ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
               .addUse(ScopeReg)
               .addUse(ScopeReg)
               .addUse(MemSemReg)
               .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_generic_cast_to_ptr_explicit: {
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 1).getReg();
    SPIRV::StorageClass::StorageClass ResSC =
        GR.getPointerStorageClass(ResType);
    if (!isGenericCastablePtr(ResSC))
      report_fatal_error("The target storage class is not castable from the "
                         "Generic storage class");
    return BuildMI(BB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpGenericCastToPtrExplicit))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(PtrReg)
        .addImm(ResSC)
        .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
        .addUse(PtrReg)
        .addImm(Size)
        .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_nclamp:
    return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
  case Intrinsic::spv_uclamp:
    return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
  case Intrinsic::spv_sclamp:
    return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
  case Intrinsic::spv_wave_active_countbits:
    return selectWaveActiveCountBits(ResVReg, ResType, I);
  case Intrinsic::spv_wave_all:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
  case Intrinsic::spv_wave_any:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
  case Intrinsic::spv_wave_is_first_lane:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
  case Intrinsic::spv_wave_reduce_umax:
    return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned=*/true);
  case Intrinsic::spv_wave_reduce_max:
    return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned=*/false);
  case Intrinsic::spv_wave_reduce_sum:
    return selectWaveReduceSum(ResVReg, ResType, I);
  case Intrinsic::spv_wave_readlane:
    return selectWaveOpInst(ResVReg, ResType, I,
                            SPIRV::OpGroupNonUniformShuffle);
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  case Intrinsic::spv_radians:
    return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
  // Discard intrinsics which we do not expect to actually represent code
  // after lowering or which are not implemented but should not crash when
  // found in a customer's LLVM IR input.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  // Discard internal intrinsics.
  case Intrinsic::spv_value_md:
    break;
  case Intrinsic::spv_resource_handlefrombinding: {
    return selectHandleFromBinding(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_store_typedbuffer: {
    return selectImageWriteIntrinsic(I);
  }
  case Intrinsic::spv_resource_load_typedbuffer: {
    return selectReadImageIntrinsic(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_getpointer: {
    return selectResourceGetPointer(ResVReg, ResType, I);
  }
  case Intrinsic::spv_discard: {
    return selectDiscard(ResVReg, ResType, I);
  }
  case Intrinsic::modf: {
    return selectModf(ResVReg, ResType, I);
  }
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}

bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
                                                       const SPIRVType *ResType,
                                                       MachineInstr &I) const {
  // Images must be loaded in the same basic block as their use, so defer the
  // load to the intrinsic that actually uses the image.
  if (ResType->getOpcode() == SPIRV::OpTypeImage)
    return true;

  return loadHandleBeforePosition(ResVReg, GR.getSPIRVTypeForVReg(ResVReg),
                                  *cast<GIntrinsic>(&I), I);
}

bool SPIRVInstructionSelector::selectReadImageIntrinsic(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // If the load of the image is in a different basic block, this will
  // generate invalid code. A proper solution is to move the OpLoad from
  // selectHandleFromBinding here, which requires changing the return type of
  // the intrinsic.
  Register ImageReg = I.getOperand(2).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register IdxReg = I.getOperand(3).getReg();
  DebugLoc Loc = I.getDebugLoc();
  MachineInstr &Pos = I;
  return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
}

bool SPIRVInstructionSelector::generateImageRead(Register &ResVReg,
                                                 const SPIRVType *ResType,
                                                 Register ImageReg,
                                                 Register IdxReg, DebugLoc Loc,
                                                 MachineInstr &Pos) const {
  SPIRVType *ImageType = GR.getSPIRVTypeForVReg(ImageReg);
  assert(ImageType && ImageType->getOpcode() == SPIRV::OpTypeImage &&
         "ImageReg is not an image type.");
  bool IsSignedInteger =
      sampledTypeIsSignedInteger(GR.getTypeForSPIRVType(ImageType));

  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  if (ResultSize == 4) {
    auto BMI = BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(ImageReg)
                   .addUse(IdxReg);
    if (IsSignedInteger)
      BMI.addImm(SPIRV::ImageOperand::SignExtend);
    return BMI.constrainAllUses(TII, TRI, RBI);
  }

  SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
  Register ReadReg = MRI->createVirtualRegister(GR.getRegClass(ReadType));
  auto BMI = BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
                 .addDef(ReadReg)
                 .addUse(GR.getSPIRVTypeID(ReadType))
                 .addUse(ImageReg)
                 .addUse(IdxReg);
  if (IsSignedInteger)
    BMI.addImm(SPIRV::ImageOperand::SignExtend);
  if (!BMI.constrainAllUses(TII, TRI, RBI))
    return false;

  if (ResultSize == 1) {
    return BuildMI(*Pos.getParent(), Pos, Loc,
                   TII.get(SPIRV::OpCompositeExtract))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ReadReg)
        .addImm(0)
        .constrainAllUses(TII, TRI, RBI);
  }
  return extractSubvector(ResVReg, ResType, ReadReg, Pos);
}

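// Illustrative only: reading a single channel from a typed buffer widens the
// read to vec4 and then extracts component 0, roughly (hypothetical names):
//   %v4  = OpImageRead %v4float %image %idx
//   %res = OpCompositeExtract %float %v4 0
// Two- and three-channel results go through extractSubvector instead.
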
bool SPIRVInstructionSelector::selectResourceGetPointer(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  Register ResourcePtr = I.getOperand(2).getReg();
  SPIRVType *RegType = GR.getSPIRVTypeForVReg(ResourcePtr, I.getMF());
  if (RegType->getOpcode() == SPIRV::OpTypeImage) {
    // For texel buffers, the index into the image is part of the OpImageRead
    // or OpImageWrite instruction, so nothing is emitted here; the intrinsic
    // is combined with the load or store when that instruction is selected.
    return true;
  }

  Register IndexReg = I.getOperand(3).getReg();
  Register ZeroReg =
      buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpAccessChain))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(ResourcePtr)
      .addUse(ZeroReg)
      .addUse(IndexReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::extractSubvector(
    Register &ResVReg, const SPIRVType *ResType, Register &ReadReg,
    MachineInstr &InsertionPoint) const {
  SPIRVType *InputType = GR.getResultType(ReadReg);
  [[maybe_unused]] uint64_t InputSize =
      GR.getScalarOrVectorComponentCount(InputType);
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  assert(InputSize > 1 && "The input must be a vector.");
  assert(ResultSize > 1 && "The result must be a vector.");
  assert(ResultSize < InputSize &&
         "Cannot extract more element than there are in the input.");
  SmallVector<Register> ComponentRegisters;
  SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
  const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
  for (uint64_t I = 0; I < ResultSize; I++) {
    Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
    bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                           InsertionPoint.getDebugLoc(),
                           TII.get(SPIRV::OpCompositeExtract))
                       .addDef(ComponentReg)
                       .addUse(GR.getSPIRVTypeID(ScalarType))
                       .addUse(ReadReg)
                       .addImm(I)
                       .constrainAllUses(TII, TRI, RBI);
    if (!Succeed)
      return false;
    ComponentRegisters.emplace_back(ComponentReg);
  }

  auto MIB = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                     InsertionPoint.getDebugLoc(),
                     TII.get(SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (Register ComponentReg : ComponentRegisters)
    MIB.addUse(ComponentReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
    MachineInstr &I) const {
  // If the load of the image is in a different basic block, this will
  // generate invalid code; as above, the handle is reloaded right before the
  // position of the write.
  Register ImageReg = I.getOperand(1).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register CoordinateReg = I.getOperand(2).getReg();
  Register DataReg = I.getOperand(3).getReg();
  assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
  assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpImageWrite))
      .addUse(NewImageReg)
      .addUse(CoordinateReg)
      .addUse(DataReg)
      .constrainAllUses(TII, TRI, RBI);
}

Register SPIRVInstructionSelector::buildPointerToResource(
    const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
    uint32_t Set, uint32_t Binding, uint32_t ArraySize, Register IndexReg,
    bool IsNonUniform, StringRef Name, MachineIRBuilder MIRBuilder) const {
  const Type *ResType = GR.getTypeForSPIRVType(SpirvResType);
  if (ArraySize == 1) {
    SPIRVType *PtrType =
        GR.getOrCreateSPIRVPointerType(ResType, MIRBuilder, SC);
    assert(GR.getPointeeType(PtrType) == SpirvResType &&
           "SpirvResType did not have an explicit layout.");
    return GR.getOrCreateGlobalVariableWithBinding(PtrType, Set, Binding, Name,
                                                   MIRBuilder);
  }

  const Type *VarType = ArrayType::get(const_cast<Type *>(ResType), ArraySize);
  SPIRVType *VarPointerType =
      GR.getOrCreateSPIRVPointerType(VarType, MIRBuilder, SC);
  Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
      VarPointerType, Set, Binding, Name, MIRBuilder);

  SPIRVType *ResPointerType =
      GR.getOrCreateSPIRVPointerType(ResType, MIRBuilder, SC);

  Register AcReg = MRI->createVirtualRegister(GR.getRegClass(ResPointerType));
  if (IsNonUniform) {
    // It is unclear which value needs to be marked as non-uniform, so both
    // the index and the access chain are decorated.
    buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
    buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
  }

  MIRBuilder.buildInstr(SPIRV::OpAccessChain)
      .addDef(AcReg)
      .addUse(GR.getSPIRVTypeID(ResPointerType))
      .addUse(VarReg)
      .addUse(IndexReg);

  return AcReg;
}

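// Illustrative only: with ArraySize > 1 the resource binding becomes an array
// variable plus a per-use access chain, roughly (hypothetical names):
//   %var = OpVariable %ptr_arr UniformConstant ; DescriptorSet/Binding decos
//   %ac  = OpAccessChain %ptr_elem %var %index
// For a single resource (ArraySize == 1) the variable itself is returned.
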
bool SPIRVInstructionSelector::selectFirstBitSet16(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    unsigned ExtendOpcode, unsigned BitSetOpcode) const {
  Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result = selectOpWithSrcs(ExtReg, ResType, I,
                                 {I.getOperand(2).getReg()}, ExtendOpcode);
  return Result &&
         selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
}

bool SPIRVInstructionSelector::selectFirstBitSet32(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(BitSetOpcode)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  // SPIR-V only allows vectors of size 2, 3 or 4. Calling with a larger
  // vector would require param and return registers with an invalid vector
  // size, so larger vectors are split into vec2 chunks here.
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");

  MachineIRBuilder MIRBuilder(I);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
  SPIRVType *I64x2Type =
      GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder, false);
  SPIRVType *Vec2ResType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2, MIRBuilder, false);

  std::vector<Register> PartialRegs;

  // Loops 0, 2, 4, ... but stops one iteration early when ComponentCount is
  // odd.
  unsigned CurrentComponent = 0;
  for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
    // This register holds the firstbitX result for each of the i64x2 vectors
    // extracted from SrcReg.
    Register BitSetResult =
        MRI->createVirtualRegister(GR.getRegClass(I64x2Type));

    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(BitSetResult)
                   .addUse(GR.getSPIRVTypeID(I64x2Type))
                   .addUse(SrcReg)
                   .addUse(SrcReg)
                   .addImm(CurrentComponent)
                   .addImm(CurrentComponent + 1);

    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    Register SubVecBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));

    if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(SubVecBitSetReg);
  }

  // On odd component counts, handle the last component separately.
  if (CurrentComponent != ComponentCount) {
    bool ZeroAsNull = !STI.isShader();
    Register FinalElemReg = MRI->createVirtualRegister(GR.getRegClass(I64Type));
    Register ConstIntLastIdx = GR.getOrCreateConstInt(
        ComponentCount - 1, I, BaseType, TII, ZeroAsNull);

    if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
                          SPIRV::OpVectorExtractDynamic))
      return false;

    Register FinalElemBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(BaseType));

    if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(FinalElemBitSetReg);
  }

  // Join all the resulting registers back into the return type in order
  // (i.e. i32x2, i32x2, i32x1 -> i32x5).
  return selectOpWithSrcs(ResVReg, ResType, I, std::move(PartialRegs),
                          SPIRV::OpCompositeConstruct);
}

bool SPIRVInstructionSelector::selectFirstBitSet64(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  bool ZeroAsNull = !STI.isShader();
  Register ConstIntZero =
      GR.getOrCreateConstInt(0, I, BaseType, TII, ZeroAsNull);
  Register ConstIntOne =
      GR.getOrCreateConstInt(1, I, BaseType, TII, ZeroAsNull);

  // The algorithm converts i64 -> i32x2 (and i64x2 -> i32x4), so it can only
  // operate on vectors with at most 2 components. When larger vectors are
  // seen, split them, recurse, then recombine the results.
  if (ComponentCount > 2) {
    return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
                                       BitSetOpcode, SwapPrimarySide);
  }

  // 1. Split the i64 source into 2 pieces using a bitcast.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *PostCastType = GR.getOrCreateSPIRVVectorType(
      BaseType, 2 * ComponentCount, MIRBuilder, false);
  Register BitcastReg =
      MRI->createVirtualRegister(GR.getRegClass(PostCastType));

  if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
                        SPIRV::OpBitcast))
    return false;

  // 2. Find the first set bit from the primary side for all the pieces in #1.
  Register FBSReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
    return false;

  // 3. Split the result vector into high bits and low bits.
  Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));

  bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
  if (IsScalarRes) {
    // If the result is scalar, do a vector extract.
    if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
                          SPIRV::OpVectorExtractDynamic))
      return false;
    if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
                          SPIRV::OpVectorExtractDynamic))
      return false;
  } else {
    // If the result is a vector, do a shufflevector.
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(HighReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(FBSReg)
                   // Per the spec, repeat the vector if only one is needed.
                   .addUse(FBSReg);
    // High bits are stored in even indexes; extract them from FBSReg.
    for (unsigned J = 0; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                  TII.get(SPIRV::OpVectorShuffle))
              .addDef(LowReg)
              .addUse(GR.getSPIRVTypeID(ResType))
              .addUse(FBSReg)
              .addUse(FBSReg);
    // Low bits are stored in odd indexes; extract them from FBSReg.
    for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  // 4. Check the result: when the primary bits are -1 use the secondary side,
  // otherwise use the primary side.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  Register NegOneReg;
  Register Reg0;
  Register Reg32;
  unsigned SelectOp;
  unsigned AddOp;

  if (IsScalarRes) {
    NegOneReg =
        GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectSISCond;
    AddOp = SPIRV::OpIAddS;
  } else {
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount,
                                             MIRBuilder, false);
    NegOneReg =
        GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectVIVCond;
    AddOp = SPIRV::OpIAddV;
  }

  Register PrimaryReg = HighReg;
  Register SecondaryReg = LowReg;
  Register PrimaryShiftReg = Reg32;
  Register SecondaryShiftReg = Reg0;

  // By default the emitted opcodes check for the set bit from the MSB side.
  // Setting SwapPrimarySide checks the set bit from the LSB side.
  if (SwapPrimarySide) {
    PrimaryReg = LowReg;
    SecondaryReg = HighReg;
    PrimaryShiftReg = Reg0;
    SecondaryShiftReg = Reg32;
  }

  // Check if the primary bits are == -1.
  Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
  if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
                        SPIRV::OpIEqual))
    return false;

  // Select the secondary bits if the primary side is == -1.
  Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
                        SelectOp))
    return false;

  // 5. Add 32 when the high bits are used, otherwise add 0 for the low bits.
  Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(ValReg, ResType, I,
                        {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
    return false;

  return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
}

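// Illustrative only: a 64-bit firstbit is computed on the two 32-bit halves,
// roughly (hypothetical names, FindUMsb case):
//   %v2  = OpBitcast %v2uint %src            ; {high, low} 32-bit words
//   %fbs = OpExtInst %v2uint GLSL FindUMsb %v2
//   %b   = OpIEqual %bool %high_fbs %neg_one ; no bit found in high word?
//   %tmp = OpSelect %uint %b %low_fbs %high_fbs
//   %off = OpSelect %uint %b %zero %c32
//   %res = OpIAdd %uint %off %tmp
// With SwapPrimarySide (the FindILsb path) the low half is searched first.
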
bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
                                                  const SPIRVType *ResType,
                                                  MachineInstr &I,
                                                  bool IsSigned) const {
  // The FindUMsb and FindSMsb instructions only support 32-bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  // 16-bit sources are extended to 32 bits first.
  unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/false);
  default:
    report_fatal_error(
        "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
  }
}

bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // The FindILsb instruction only supports 32-bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  // OpUConvert zero-extends a 16-bit operand to 32 bits; this leaves all the
  // least significant bits unchanged, so the first set bit from the LSB side
  // doesn't change.
  unsigned ExtendOpcode = SPIRV::OpUConvert;
  unsigned BitSetOpcode = GL::FindILsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/true);
  default:
    report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
  }
}

bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation-size parameter to the alloca instruction that is
  // not 1.
  MachineBasicBlock &BB = *I.getParent();
  bool Res = BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpVariableLengthArrayINTEL))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg())
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isShader()) {
    unsigned Alignment = I.getOperand(3).getImm();
    buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // All OpVariable instructions in a function must be the first instructions
  // in the first block, so move the insertion point there if needed.
  auto It = getOpVariableMBBIt(I);
  bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
                     TII.get(SPIRV::OpVariable))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isShader()) {
    unsigned Alignment = I.getOperand(2).getImm();
    buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate the OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // If an OpBranchConditional has already been generated while selecting the
  // matching G_BR, there is nothing left to do.
  const MachineInstr *NextI = I.getNextNode();
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Otherwise LLVM is relying on implicit block fallthrough, so generate an
  // OpBranchConditional with an explicit "false" target pointing to the next
  // block that LLVM would fall through to.
  MachineBasicBlock &MBB = *I.getParent();
  MachineBasicBlock *NextMBB = MBB.getNextNode();
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  bool Res = MIB.constrainAllUses(TII, TRI, RBI);
  MIB->setDesc(TII.get(TargetOpcode::PHI));
  MIB->removeOperand(1);
  return Res;
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getName();
  }
  // ...
  if (isa<Function>(GV)) {
    // A reference to a function becomes a function pointer constant.
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
          GVType, MIRBuilder,
          GVFun ? SPIRV::StorageClass::CodeSectionINTEL
                : addressSpaceToStorageClass(GV->getAddressSpace(), STI));
      if (GVFun) {
        // 'Relocatable' function pointer: emit an OpUndef placeholder of the
        // pointer type, then an OpConstantFunctionPointerINTEL.
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        Register FuncVReg =
            MRI->createGenericVirtualRegister(GR.getRegType(ResType));
        MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
            .addDef(FuncVReg)
            .addUse(ResTypeReg);
        auto MIB2 = BuildMI(BB, I, I.getDebugLoc(),
                            TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                        .addDef(NewReg)
                        .addUse(ResTypeReg)
                        .addUse(FuncVReg);
        GR.add(ConstVal, MIB2);
        // ...
      } else {
        auto MIB3 =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
                .addDef(NewReg)
                .addUse(GR.getSPIRVTypeID(ResType));
        GR.add(ConstVal, MIB3);
      }
    }
    assert(NewReg != ResVReg);
    return BuildCOPY(ResVReg, NewReg, I);
  }

  auto GlobalVar = cast<GlobalVariable>(GV);
  // ...
  SPIRV::LinkageType::LinkageType LnkType =
      GV->isDeclarationForLinker()
          ? SPIRV::LinkageType::Import
          : (GV->hasLinkOnceODRLinkage() &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);
  // ...
  Register Reg = GR.buildGlobalVariable(
      ResVReg, ResType, GlobalIdent, GV, Storage, Init,
      GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(VarReg)
                       .addUse(ScaleReg)
                       .constrainAllUses(TII, TRI, RBI);
}

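// Illustrative only: without OpenCL's native log10, the GLSL path emits
// roughly (hypothetical names):
//   %l2  = OpExtInst %float GLSL Log2 %x
//   %res = OpFMul %float %l2 %c0_30103 ; log10(x) = log2(x) / log2(10)
// Vector results use OpVectorTimesScalar for the final multiply.
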
bool SPIRVInstructionSelector::selectModf(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  // OpenCLLIB::modf takes the value to decompose plus a pointer that receives
  // the integral part, while llvm.modf returns both parts. So we allocate a
  // Function-storage variable, call modf, and load the integral part back.
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    // 1. Create the alloca that receives the integral part.
    MachineIRBuilder MIRBuilder(I);
    const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
        ResType, MIRBuilder, SPIRV::StorageClass::Function);
    Register PtrTyReg =
        MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
    MIRBuilder.getMRI()->setType(
        PtrTyReg,
        LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                     GR.getPointerSize()));
    GR.assignSPIRVTypeToVReg(PtrType, PtrTyReg, MIRBuilder.getMF());
    // All OpVariable instructions must come first in the entry block.
    MachineBasicBlock &EntryBB = I.getMF()->front();
    MachineBasicBlock::iterator VarPos =
        getFirstValidInstructionInsertPoint(EntryBB);
    auto AllocaMIB =
        BuildMI(EntryBB, VarPos, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
            .addDef(PtrTyReg)
            .addUse(GR.getSPIRVTypeID(PtrType))
            .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function));
    Register Variable = AllocaMIB->getOperand(0).getReg();
    // 2. Call OpenCLLIB::modf.
    assert(I.getNumOperands() == 4 &&
           "Expected 4 operands for modf instruction");
    MachineBasicBlock &BB = *I.getParent();
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
            .addImm(CL::modf)
            .add(I.getOperand(3)) // value to decompose
            .addUse(Variable);    // receives the integral part
    bool Result = MIB.constrainAllUses(TII, TRI, RBI);
    // 3. Load the integral part from the variable.
    Register IntegralPartReg = I.getOperand(1).getReg();
    if (IntegralPartReg.isValid()) {
      auto LoadMIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                         .addDef(IntegralPartReg)
                         .addUse(GR.getSPIRVTypeID(ResType))
                         .addUse(Variable);
      Result &= LoadMIB.constrainAllUses(TII, TRI, RBI);
    }
    return Result;
  } else if (STI.canUseExtInstSet(SPIRV::InstructionSet::GLSL_std_450)) {
    assert(false && "GLSL::Modf is deprecated.");
    // FIXME: GL::Modf is deprecated; use GL::ModfStruct once supported.
  }
  return false;
}

// Generate the instructions to load 3-element vector builtin input
// IDs/Indices, like GlobalInvocationId or LocalInvocationId.
bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
    SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
    const SPIRVType *ResType, MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(ResType, 3, MIRBuilder, false);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the input ID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build a global variable with the necessary decorations for the input ID
  // builtin variable.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, false,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  bool Result =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(LoadedRegister)
          .addUse(GR.getSPIRVTypeID(Vec3Ty))
          .addUse(Variable)
          .constrainAllUses(TII, TRI, RBI);

  // Get the input ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);

  // Extract the requested component from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(LoadedRegister)
          .addImm(ThreadId);
  return Result && MIB.constrainAllUses(TII, TRI, RBI);
}

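// Illustrative only: for spv_thread_id the code above materializes roughly
// (hypothetical names):
//   %var = OpVariable %ptr_Input_v3uint Input ; BuiltIn GlobalInvocationId
//   %vec = OpLoad %v3uint %var
//   %id  = OpCompositeExtract %uint %vec <dim> ; dim = 0, 1 or 2
// The builtin decoration is attached to the variable via
// buildGlobalVariable/getLinkStringForBuiltIn.
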
// Generate the instructions to load a scalar builtin input ID/Index, like
// LocalInvocationIndex or SubgroupSize.
bool SPIRVInstructionSelector::loadBuiltinInputID(
    SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
    const SPIRVType *ResType, MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      ResType, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the input ID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(GR.getRegClass(PtrType));
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Input),
                   GR.getPointerSize()));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build a global variable with the necessary decorations for the input ID
  // builtin variable.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, false,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Load the value from the global variable.
  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(Variable);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(SPIRVType *Type,
                                                     MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  if (Type->getOpcode() != SPIRV::OpTypeVector)
    return GR.getOrCreateSPIRVVectorType(Type, 4, MIRBuilder, false);

  uint64_t VectorSize = Type->getOperand(2).getImm();
  if (VectorSize == 4)
    return Type;

  Register ScalarTypeReg = Type->getOperand(1).getReg();
  const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
  return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder, false);
}

bool SPIRVInstructionSelector::loadHandleBeforePosition(
    Register &HandleReg, const SPIRVType *ResType, GIntrinsic &HandleDef,
    MachineInstr &Pos) const {
  assert(HandleDef.getIntrinsicID() ==
         Intrinsic::spv_resource_handlefrombinding);
  uint32_t Set = foldImm(HandleDef.getOperand(2), MRI);
  uint32_t Binding = foldImm(HandleDef.getOperand(3), MRI);
  uint32_t ArraySize = foldImm(HandleDef.getOperand(4), MRI);
  Register IndexReg = HandleDef.getOperand(5).getReg();
  bool IsNonUniform = false;
  std::string Name =
      getStringValueFromReg(HandleDef.getOperand(6).getReg(), *MRI);

  bool IsStructuredBuffer = ResType->getOpcode() == SPIRV::OpTypePointer;
  MachineIRBuilder MIRBuilder(HandleDef);
  SPIRVType *VarType = ResType;
  SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;

  if (IsStructuredBuffer) {
    VarType = GR.getPointeeType(ResType);
    SC = GR.getPointerStorageClass(ResType);
  }

  Register VarReg =
      buildPointerToResource(VarType, SC, Set, Binding, ArraySize, IndexReg,
                             IsNonUniform, Name, MIRBuilder);

  // The handle for a buffer is the pointer to the resource itself, so a
  // structured buffer only needs an OpCopyObject; an image handle is the
  // image object, so it gets an OpLoad from the variable.
  uint32_t LoadOpcode =
      IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
  GR.assignSPIRVTypeToVReg(ResType, HandleReg, *Pos.getMF());
  return BuildMI(*Pos.getParent(), Pos, HandleDef.getDebugLoc(),
                 TII.get(LoadOpcode))
      .addDef(HandleReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(VarReg)
      .constrainAllUses(TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm