LLVM 22.0.0git
RISCVISelLowering.cpp File Reference
#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVConstantPoolValue.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSelectionDAGInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
#include "RISCVGenAsmMatcher.inc"
#include "RISCVGenSearchableTables.inc"


Classes

struct  VIDSequence

Namespaces

namespace  llvm
 This is an optimization pass for GlobalISel generic memory operations.
namespace  llvm::RISCVVIntrinsicsTable

Macros

#define DEBUG_TYPE   "riscv-lower"
#define OP_CASE(NODE)
#define VP_CASE(NODE)
#define CC_VLS_CASE(ABI_VLEN)
#define GET_REGISTER_MATCHER
#define GET_RISCVVIntrinsicsTable_IMPL

Functions

 STATISTIC (NumTailCalls, "Number of tail calls")
static void translateSetCCForBranch (const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue getVLOperand (SDValue Op)
static bool useRVVForFixedLengthVectorVT (MVT VT, const RISCVSubtarget &Subtarget)
static MVT getContainerForFixedLengthVector (const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget)
static MVT getContainerForFixedLengthVector (SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget)
static SDValue convertToScalableVector (EVT VT, SDValue V, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue convertFromScalableVector (EVT VT, SDValue V, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static MVT getMaskTypeFor (MVT VecVT)
 Return the mask type suitable for masking the provided vector type.
static SDValue getAllOnesMask (MVT VecVT, SDValue VL, const SDLoc &DL, SelectionDAG &DAG)
 Creates an all ones mask suitable for masking a vector of type VecVT with vector length VL.
static std::pair< SDValue, SDValue > getDefaultScalableVLOps (MVT VecVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::pair< SDValue, SDValue > getDefaultVLOps (uint64_t NumElts, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::pair< SDValue, SDValue > getDefaultVLOps (MVT VecVT, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerINT_TO_FP (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFP_TO_INT_SAT (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFP_TO_INT (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static RISCVFPRndMode::RoundingMode matchRoundingOp (unsigned Opc)
static SDValue lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFTRUNC_FCEIL_FFLOOR_FROUND (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerVectorXRINT_XROUND (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue getVSlidedown (SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static SDValue getVSlideup (SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static std::optional< APInt > getExactInteger (const APFloat &APF, uint32_t BitWidth)
static std::optional< VIDSequence > isSimpleVIDSequence (SDValue Op, unsigned EltSizeInBits)
static SDValue matchSplatAsGather (SDValue SplatVal, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerBuildVectorViaVID (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerBuildVectorViaDominantValues (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Try and optimize BUILD_VECTORs with "dominant values" - these are values which constitute a large proportion of the elements.
static SDValue lowerBuildVectorOfConstants (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static unsigned getPACKOpcode (unsigned DestBW, const RISCVSubtarget &Subtarget)
static SDValue lowerBuildVectorViaPacking (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Double the element size of the build vector to reduce the number of vslide1down in the build vector chain.
static SDValue lowerBUILD_VECTOR (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue splatPartsI64WithVL (const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Lo, SDValue Hi, SDValue VL, SelectionDAG &DAG)
static SDValue splatSplitI64WithVL (const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Scalar, SDValue VL, SelectionDAG &DAG)
static SDValue lowerScalarSplat (SDValue Passthru, SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerScalarInsert (SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue foldConcatVector (SDValue V1, SDValue V2)
 If concat_vector(V1,V2) could be folded away to some existing vector source, return it.
static SDValue getSingleShuffleSrc (MVT VT, SDValue V1, SDValue V2)
static bool isInterleaveShuffle (ArrayRef< int > Mask, MVT VT, int &EvenSrc, int &OddSrc, const RISCVSubtarget &Subtarget)
 Is this shuffle interleaving contiguous elements from one vector into the even elements and contiguous elements from another vector into the odd elements.
static bool isMaskedSlidePair (ArrayRef< int > Mask, std::array< std::pair< int, int >, 2 > &SrcInfo)
 Is this mask representing a masked combination of two slides?
static bool isElementRotate (const std::array< std::pair< int, int >, 2 > &SrcInfo, unsigned NumElts)
static bool isAlternating (const std::array< std::pair< int, int >, 2 > &SrcInfo, ArrayRef< int > Mask, unsigned Factor, bool RequiredPolarity)
static bool isZipEven (const std::array< std::pair< int, int >, 2 > &SrcInfo, ArrayRef< int > Mask, unsigned &Factor)
 Given a shuffle which can be represented as a pair of two slides, see if it is a zipeven idiom.
static bool isZipOdd (const std::array< std::pair< int, int >, 2 > &SrcInfo, ArrayRef< int > Mask, unsigned &Factor)
 Given a shuffle which can be represented as a pair of two slides, see if it is a zipodd idiom.
static SDValue getDeinterleaveShiftAndTrunc (const SDLoc &DL, MVT VT, SDValue Src, unsigned Factor, unsigned Index, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLEAsVRGatherVX (ShuffleVectorSDNode *SVN, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
 Match a single source shuffle which is an identity except that some particular element is repeated.
static SDValue lowerVECTOR_SHUFFLEAsVSlidedown (const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLEAsVSlideup (const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLEAsVSlide1 (const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
 Match v(f)slide1up/down idioms.
static SDValue lowerVZIP (unsigned Opc, SDValue Op0, SDValue Op1, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue getWideningSpread (SDValue V, unsigned Factor, unsigned Index, const SDLoc &DL, SelectionDAG &DAG)
static SDValue getWideningInterleave (SDValue EvenV, SDValue OddV, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerBitreverseShuffle (ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isLegalBitRotate (ArrayRef< int > Mask, EVT VT, const RISCVSubtarget &Subtarget, MVT &RotateVT, unsigned &RotateAmt)
static SDValue lowerVECTOR_SHUFFLEAsRotate (ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerShuffleViaVRegSplitting (ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isCompressMask (ArrayRef< int > Mask)
static SDValue lowerDisjointIndicesShuffle (ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Given a shuffle where the indices are disjoint between the two sources, e.g.:
static bool isLocalRepeatingShuffle (ArrayRef< int > Mask, int Span)
 Is this mask local (i.e. elements only move within their local span), and repeating (that is, the same rearrangement is being done within each span)?
static bool isLowSourceShuffle (ArrayRef< int > Mask, int Span)
 Is this mask only using elements from the first span of the input?
static bool isSpanSplatShuffle (ArrayRef< int > Mask, int Span)
 Return true for a mask which performs an arbitrary shuffle within the first span, and then repeats that same result across all remaining spans.
static SDValue tryWidenMaskForShuffle (SDValue Op, SelectionDAG &DAG)
 Try to widen element type to get a new mask value for a better permutation sequence.
static SDValue lowerVECTOR_SHUFFLE (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerConstant (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue LowerPREFETCH (SDValue Op, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerATOMIC_FENCE (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFMAXIMUM_FMINIMUM (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFABSorFNEG (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFCOPYSIGN (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static unsigned getRISCVVLOp (SDValue Op)
 Get a RISC-V target specified VL op for a given SDNode.
static bool isPromotedOpNeedingSplit (SDValue Op, const RISCVSubtarget &Subtarget)
static SDValue SplitVectorOp (SDValue Op, SelectionDAG &DAG)
static SDValue SplitVPOp (SDValue Op, SelectionDAG &DAG)
static SDValue SplitVectorReductionOp (SDValue Op, SelectionDAG &DAG)
static SDValue SplitStrictFPVectorOp (SDValue Op, SelectionDAG &DAG)
static SDValue getTargetNode (GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static SDValue getTargetNode (BlockAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static SDValue getTargetNode (ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static SDValue getTargetNode (JumpTableSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static SDValue getLargeGlobalAddress (GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG)
static SDValue getLargeExternalSymbol (ExternalSymbolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG)
static std::optional< bool > matchSetCC (SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static bool isSimm12Constant (SDValue V)
static SDValue lowerSelectToBinOp (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue foldBinOpIntoSelectIfProfitable (SDNode *BO, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::optional< MVT > getSmallestVTForIndex (MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isValidVisniInsertExtractIndex (SDValue Idx)
static SDValue lowerVectorIntrinsicScalars (SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerGetVectorLength (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerCttzElts (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static void promoteVCIXScalar (const SDValue &Op, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static void processVCIXOperands (SDValue &OrigOp, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static bool isValidEGW (int EGS, EVT VT, const RISCVSubtarget &Subtarget)
static SDValue getVCIXISDNodeWCHAIN (SDValue &Op, SelectionDAG &DAG, unsigned Type)
static SDValue getVCIXISDNodeVOID (SDValue &Op, SelectionDAG &DAG, unsigned Type)
static SDValue lowerFixedVectorSegLoadIntrinsics (unsigned IntNo, SDValue Op, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue lowerFixedVectorSegStoreIntrinsics (unsigned IntNo, SDValue Op, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static unsigned getRVVReductionOp (unsigned ISDOpcode)
static bool isNonZeroAVL (SDValue AVL)
static SDValue lowerReductionSeq (unsigned RVVOpcode, MVT ResVT, SDValue StartValue, SDValue Vec, SDValue Mask, SDValue VL, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Helper to lower a reduction sequence of the form: scalar = reduce_op vec, scalar_start.
static std::tuple< unsigned, SDValue, SDValue > getRVVFPReductionOpAndOperands (SDValue Op, SelectionDAG &DAG, EVT EltVT, const RISCVSubtarget &Subtarget)
static SDValue widenVectorOpsToi8 (SDValue N, const SDLoc &DL, SelectionDAG &DAG)
static unsigned getRISCVWOpcode (unsigned Opcode)
static SDValue customLegalizeToWOp (SDNode *N, SelectionDAG &DAG, unsigned ExtOpc=ISD::ANY_EXTEND)
static SDValue customLegalizeToWOpWithSExt (SDNode *N, SelectionDAG &DAG)
static unsigned getVecReduceOpcode (unsigned Opc)
 Given a binary operator, return the associative generic ISD::VECREDUCE_OP which corresponds to it.
static SDValue combineBinOpOfExtractToReduceTree (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Perform two related transforms whose purpose is to incrementally recognize an explode_vector followed by scalar reduction as a vector reduction node.
static SDValue combineBinOpToReduce (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue transformAddShlImm (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineShlAddIAddImpl (SDNode *N, SDValue AddI, SDValue Other, SelectionDAG &DAG)
static SDValue combineShlAddIAdd (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineSelectAndUse (SDNode *N, SDValue Slct, SDValue OtherOp, SelectionDAG &DAG, bool AllOnes, const RISCVSubtarget &Subtarget)
static SDValue combineSelectAndUseCommutative (SDNode *N, SelectionDAG &DAG, bool AllOnes, const RISCVSubtarget &Subtarget)
static SDValue transformAddImmMulImm (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineBinOpOfZExt (SDNode *N, SelectionDAG &DAG)
static SDValue combineAddOfBooleanXor (SDNode *N, SelectionDAG &DAG)
static SDValue performADDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue combineSubOfBoolean (SDNode *N, SelectionDAG &DAG)
static SDValue combineSubShiftToOrcB (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performSUBCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineDeMorganOfBoolean (SDNode *N, SelectionDAG &DAG)
static SDValue combineTruncSelectToSMaxUSat (SDNode *N, SelectionDAG &DAG)
static SDValue performTRUNCATECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue reverseZExtICmpCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue reduceANDOfAtomicLoad (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performANDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue combineOrOfCZERO (SDNode *N, SDValue N0, SDValue N1, SelectionDAG &DAG)
static SDValue combineXorToBitfieldInsert (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performORCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue performXORCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue expandMulToNAFSequence (SDNode *N, SelectionDAG &DAG, uint64_t MulAmt)
static SDValue expandMulToAddOrSubOfShl (SDNode *N, SelectionDAG &DAG, uint64_t MulAmt)
static SDValue expandMul (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue combineVectorMulToSraBitcast (SDNode *N, SelectionDAG &DAG)
static SDValue performMULCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static bool narrowIndex (SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG)
 According to the property that indexed load/store instructions zero-extend their indices, try to narrow the type of the index operand.
static SDValue combineVectorSizedSetCCEquality (EVT VT, SDValue X, SDValue Y, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
 Try to map an integer comparison with size > XLEN to vector instructions before type legalization splits it up into chunks.
static SDValue performSETCCCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue performSIGN_EXTEND_INREGCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue simplifyOp_VL (SDNode *N)
static SDValue combineOp_VLToVWOp_VL (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
 Combine a binary or FMA operation to its equivalent VW or VW_W form.
static SDValue combineVWADDSUBWSelect (SDNode *N, SelectionDAG &DAG)
static SDValue performVWADDSUBW_VLCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue tryMemPairCombine (SelectionDAG &DAG, LSBaseSDNode *LSNode1, LSBaseSDNode *LSNode2, SDValue BasePtr, uint64_t Imm)
static SDValue performMemPairCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performFP_TO_INTCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue performFP_TO_INT_SATCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue performBITREVERSECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performVP_REVERSECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performVP_STORECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performVP_TRUNCATECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static unsigned negateFMAOpcode (unsigned Opcode, bool NegMul, bool NegAcc)
static SDValue combineVFMADD_VLWithVFNEG_VL (SDNode *N, SelectionDAG &DAG)
static SDValue performVFMADD_VLCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue performSRACombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue tryDemorganOfBooleanCondition (SDValue Cond, SelectionDAG &DAG)
static bool combine_CC (SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue tryFoldSelectIntoOp (SDNode *N, SelectionDAG &DAG, SDValue TrueVal, SDValue FalseVal, bool Swapped)
static SDValue foldSelectOfCTTZOrCTLZ (SDNode *N, SelectionDAG &DAG)
static SDValue useInversedSetcc (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool matchSelectAddSub (SDValue TrueVal, SDValue FalseVal, bool &SwapCC)
static SDValue performVSELECTCombine (SDNode *N, SelectionDAG &DAG)
 Convert vselect CC, (add a, b), (sub a, b) to add a, (vselect CC, -b, b).
static SDValue performSELECTCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performBUILD_VECTORCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
 If we have a build_vector where each lane is binop X, C, where C is a constant (but not necessarily the same constant on all lanes), form binop (build_vector x1, x2, ...), (build_vector c1, c2, c3, ..).
static MVT getQDOTXResultType (MVT OpVT)
static SDValue getZeroPaddedAdd (const SDLoc &DL, SDValue A, SDValue B, SelectionDAG &DAG)
 Given fixed length vectors A and B with equal element types, but possibly different number of elements, return A + B where either A or B is zero padded to the larger number of elements.
static SDValue foldReduceOperandViaVQDOT (SDValue InVec, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue performVECREDUCECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue performINSERT_VECTOR_ELTCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue performCONCAT_VECTORSCombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue performVECTOR_SHUFFLECombine (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue combineToVWMACC (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineVqdotAccum (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool legalizeScatterGatherIndexType (SDLoc DL, SDValue &Index, ISD::MemIndexType &IndexType, RISCVTargetLowering::DAGCombinerInfo &DCI)
static bool matchIndexAsShuffle (EVT VT, SDValue Index, SDValue Mask, SmallVector< int > &ShuffleMask)
 Match the index vector of a scatter or gather node as the shuffle mask which performs the rearrangement if possible.
static bool matchIndexAsWiderOp (EVT VT, SDValue Index, SDValue Mask, Align BaseAlign, const RISCVSubtarget &ST)
 Match the index of a gather or scatter operation as an operation with twice the element width and half the number of elements.
static SDValue combineTruncOfSraSext (SDNode *N, SelectionDAG &DAG)
static SDValue combineTruncToVnclip (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineToVCPOP (SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performSHLCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static uint64_t computeGREVOrGORC (uint64_t x, unsigned ShAmt, bool IsGORC)
static MachineBasicBlock * emitReadCounterWidePseudo (MachineInstr &MI, MachineBasicBlock *BB)
static MachineBasicBlock * emitSplitF64Pseudo (MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static MachineBasicBlock * emitBuildPairF64Pseudo (MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static MachineBasicBlock * emitQuietFCMP (MachineInstr &MI, MachineBasicBlock *BB, unsigned RelOpcode, unsigned EqOpcode, const RISCVSubtarget &Subtarget)
static MachineBasicBlock * EmitLoweredCascadedSelect (MachineInstr &First, MachineInstr &Second, MachineBasicBlock *ThisMBB, const RISCVSubtarget &Subtarget)
static MachineBasicBlock * emitSelectPseudo (MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static const RISCV::RISCVMaskedPseudoInfo * lookupMaskedIntrinsic (uint16_t MCOpcode, RISCVVType::VLMUL LMul, unsigned SEW)
static MachineBasicBlock * emitVFROUND_NOEXCEPT_MASK (MachineInstr &MI, MachineBasicBlock *BB, unsigned CVTXOpc)
static MachineBasicBlock * emitFROUND (MachineInstr &MI, MachineBasicBlock *MBB, const RISCVSubtarget &Subtarget)
static SDValue convertLocVTToValVT (SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL, const RISCVSubtarget &Subtarget)
static SDValue unpackFromRegLoc (SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL, const ISD::InputArg &In, const RISCVTargetLowering &TLI)
static SDValue convertValVTToLocVT (SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL, const RISCVSubtarget &Subtarget)
static SDValue unpackFromMemLoc (SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackF64OnRV32DSoftABI (SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static Align getPrefTypeAlign (EVT VT, SelectionDAG &DAG)
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp (unsigned XLen, AtomicRMWInst::BinOp BinOp)
static Value * useTpOffset (IRBuilderBase &IRB, unsigned Offset)

Variables

static cl::opt< unsigned > ExtensionMaxWebSize (DEBUG_TYPE "-ext-max-web-size", cl::Hidden, cl::desc("Give the maximum size (in number of nodes) of the web of " "instructions that we will consider for VW expansion"), cl::init(18))
static cl::opt< bool > AllowSplatInVW_W (DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden, cl::desc("Allow the formation of VW_W operations (e.g., " "VWADD_W) with splat constants"), cl::init(false))
static cl::opt< unsigned > NumRepeatedDivisors (DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden, cl::desc("Set the minimum number of repetitions of a divisor to allow " "transformation to multiplications by the reciprocal"), cl::init(2))
static cl::opt< int > FPImmCost (DEBUG_TYPE "-fpimm-cost", cl::Hidden, cl::desc("Give the maximum number of instructions that we will " "use for creating a floating-point immediate value"), cl::init(2))
static cl::opt< bool > ReassocShlAddiAdd ("reassoc-shl-addi-add", cl::Hidden, cl::desc("Swap add and addi in cases where the add may " "be combined with a shift"), cl::init(true))
const uint64_t ModeMask64 = ~RISCVExceptFlags::ALL
const uint32_t ModeMask32 = ~RISCVExceptFlags::ALL

Macro Definition Documentation

◆ CC_VLS_CASE

#define CC_VLS_CASE ( ABI_VLEN)
Value:
case CallingConv::RISCV_VLSCall_##ABI_VLEN:
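
The macro stamps out one case label per fixed-VLEN vector calling convention. As a hedged illustration only (the helper name isVLSCallingConv and the particular ABI_VLEN values are hypothetical, not taken from this file), a run of invocations can build a fall-through group of cases:

static bool isVLSCallingConv(CallingConv::ID CC) {
  switch (CC) {
  // Each invocation expands to "case CallingConv::RISCV_VLSCall_<VLEN>:".
  CC_VLS_CASE(32)
  CC_VLS_CASE(64)
  CC_VLS_CASE(128)
    return true;
  default:
    return false;
  }
}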

◆ DEBUG_TYPE

#define DEBUG_TYPE   "riscv-lower"

Definition at line 56 of file RISCVISelLowering.cpp.

◆ GET_REGISTER_MATCHER

#define GET_REGISTER_MATCHER

Definition at line 24784 of file RISCVISelLowering.cpp.

◆ GET_RISCVVIntrinsicsTable_IMPL

#define GET_RISCVVIntrinsicsTable_IMPL

Definition at line 25013 of file RISCVISelLowering.cpp.

◆ OP_CASE

#define OP_CASE ( NODE)
Value:
case ISD::NODE: \
return RISCVISD::NODE##_VL;

Referenced by getRISCVVLOp().

◆ VP_CASE

#define VP_CASE ( NODE)
Value:
case ISD::VP_##NODE: \
return RISCVISD::NODE##_VL;

Referenced by getRISCVVLOp().
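
Taken together, OP_CASE and VP_CASE let getRISCVVLOp translate both a plain ISD opcode and its VP_ counterpart to the matching RISCVISD ..._VL opcode with one macro invocation each. A minimal sketch of the expansion (the function name getVLOpcodeSketch and the choice of ADD/SUB are illustrative, not the file's actual switch):

static unsigned getVLOpcodeSketch(unsigned Opcode) {
  switch (Opcode) {
  OP_CASE(ADD) // expands to: case ISD::ADD:    return RISCVISD::ADD_VL;
  OP_CASE(SUB) // expands to: case ISD::SUB:    return RISCVISD::SUB_VL;
  VP_CASE(ADD) // expands to: case ISD::VP_ADD: return RISCVISD::ADD_VL;
  VP_CASE(SUB) // expands to: case ISD::VP_SUB: return RISCVISD::SUB_VL;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}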

Function Documentation

◆ combine_CC()

◆ combineAddOfBooleanXor()

◆ combineBinOpOfExtractToReduceTree()

SDValue combineBinOpOfExtractToReduceTree ( SDNode * N,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

◆ combineBinOpOfZExt()

◆ combineBinOpToReduce()

◆ combineDeMorganOfBoolean()

◆ combineOp_VLToVWOp_VL()

SDValue combineOp_VLToVWOp_VL ( SDNode * N,
TargetLowering::DAGCombinerInfo & DCI,
const RISCVSubtarget & Subtarget )
static

Combine a binary or FMA operation to its equivalent VW or VW_W form.

The supported combines are:

add | add_vl | or disjoint | or_vl disjoint -> vwadd(u) | vwadd(u)_w
sub | sub_vl -> vwsub(u) | vwsub(u)_w
mul | mul_vl -> vwmul(u) | vwmul_su
shl | shl_vl -> vwsll
fadd_vl -> vfwadd | vfwadd_w
fsub_vl -> vfwsub | vfwsub_w
fmul_vl -> vfwmul
vwadd_w(u) -> vwadd(u)
vwsub_w(u) -> vwsub(u)
vfwadd_w -> vfwadd
vfwsub_w -> vfwsub

Definition at line 17718 of file RISCVISelLowering.cpp.

References llvm::TargetLowering::DAGCombinerInfo::AddToWorklist(), assert(), llvm::TargetLowering::DAGCombinerInfo::DAG, llvm::SmallVectorImpl< T >::emplace_back(), llvm::SmallVectorTemplateCommon< T, typename >::empty(), ExtensionMaxWebSize, llvm::Use::getOperandNo(), llvm::Use::getUser(), llvm::TargetLowering::DAGCombinerInfo::isBeforeLegalize(), LHS, N, llvm::SmallVectorImpl< T >::pop_back_val(), llvm::SmallVectorTemplateBase< T, bool >::push_back(), llvm::SelectionDAG::ReplaceAllUsesOfValueWith(), llvm::SmallVectorImpl< T >::reserve(), RHS, SDValue(), llvm::SmallVectorTemplateCommon< T, typename >::size(), and std::swap().

Referenced by llvm::RISCVTargetLowering::PerformDAGCombine(), performSHLCombine(), performVFMADD_VLCombine(), and performVWADDSUBW_VLCombine().
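
As one hedged sketch of the add_vl -> vwadd_w case, written in the same SelectionDAG dump style used elsewhere on this page (the value numbers, types and abbreviated operand lists are illustrative only):

t4:nxv2i64 = add_vl t1:nxv2i64, (vsext_vl t2:nxv2i32, mask, vl), passthru, mask, vl

becomes

t4:nxv2i64 = vwadd_w_vl t1:nxv2i64, t2:nxv2i32, passthru, mask, vl

When both operands are extensions from the narrow type, the non-_w form (vwadd_vl or vwaddu_vl, depending on the extension kind) is produced instead.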

◆ combineOrOfCZERO()

◆ combineSelectAndUse()

◆ combineSelectAndUseCommutative()

SDValue combineSelectAndUseCommutative ( SDNode * N,
SelectionDAG & DAG,
bool AllOnes,
const RISCVSubtarget & Subtarget )
static

Definition at line 15549 of file RISCVISelLowering.cpp.

References llvm::AllOnes, combineSelectAndUse(), N, and SDValue().

◆ combineShlAddIAdd()

◆ combineShlAddIAddImpl()

◆ combineSubOfBoolean()

◆ combineSubShiftToOrcB()

◆ combineToVCPOP()

◆ combineToVWMACC()

◆ combineTruncOfSraSext()

◆ combineTruncSelectToSMaxUSat()

◆ combineTruncToVnclip()

◆ combineVectorMulToSraBitcast()

◆ combineVectorSizedSetCCEquality()

◆ combineVFMADD_VLWithVFNEG_VL()

◆ combineVqdotAccum()

◆ combineVWADDSUBWSelect()

◆ combineXorToBitfieldInsert()

◆ computeGREVOrGORC()

◆ convertFromScalableVector()

SDValue convertFromScalableVector ( EVT VT,
SDValue V,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

◆ convertLocVTToValVT()

◆ convertToScalableVector()

SDValue convertToScalableVector ( EVT VT,
SDValue V,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

◆ convertValVTToLocVT()

◆ customLegalizeToWOp()

SDValue customLegalizeToWOp ( SDNode * N,
SelectionDAG & DAG,
unsigned ExtOpc = ISD::ANY_EXTEND )
static

◆ customLegalizeToWOpWithSExt()

◆ emitBuildPairF64Pseudo()

◆ emitFROUND()

◆ EmitLoweredCascadedSelect()

◆ emitQuietFCMP()

◆ emitReadCounterWidePseudo()

◆ emitSelectPseudo()

◆ emitSplitF64Pseudo()

◆ emitVFROUND_NOEXCEPT_MASK()

◆ expandMul()

◆ expandMulToAddOrSubOfShl()

◆ expandMulToNAFSequence()

◆ foldBinOpIntoSelectIfProfitable()

◆ foldConcatVector()

SDValue foldConcatVector ( SDValue V1,
SDValue V2 )
static

If concat_vector(V1,V2) could be folded away to some existing vector source, return it.

Note that the source may be larger than the requested concat_vector (i.e. an extract_subvector might be required).

Definition at line 4789 of file RISCVISelLowering.cpp.

References assert(), llvm::ISD::EXTRACT_SUBVECTOR, llvm::SDValue::getConstantOperandVal(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::SDValue::getValueType(), llvm::EVT::getVectorMinNumElements(), llvm::EVT::isScalableVector(), and SDValue().

Referenced by getSingleShuffleSrc(), and lowerVECTOR_SHUFFLE().
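
An illustrative instance (value numbers and types are made up for this example): if both halves were extracted from the same wider source,

t1:nxv4i32 = extract_subvector t0:nxv8i32, 0
t2:nxv4i32 = extract_subvector t0:nxv8i32, 4

then concat_vectors(t1, t2) folds away and t0 is returned directly; had t0 been wider than the requested concat type, an extract_subvector of t0 would still be needed, as noted above.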

◆ foldReduceOperandViaVQDOT()

◆ foldSelectOfCTTZOrCTLZ()

◆ getAllOnesMask()

SDValue getAllOnesMask ( MVT VecVT,
SDValue VL,
const SDLoc & DL,
SelectionDAG & DAG )
static

Creates an all ones mask suitable for masking a vector of type VecVT with vector length VL.

Definition at line 2938 of file RISCVISelLowering.cpp.

References DL, getMaskTypeFor(), and llvm::SelectionDAG::getNode().

Referenced by getDefaultScalableVLOps(), getDefaultVLOps(), lowerVectorIntrinsicScalars(), and lowerVZIP().

◆ getContainerForFixedLengthVector() [1/2]

◆ getContainerForFixedLengthVector() [2/2]

MVT getContainerForFixedLengthVector ( SelectionDAG & DAG,
MVT VT,
const RISCVSubtarget & Subtarget )
static

◆ getDefaultScalableVLOps()

◆ getDefaultVLOps() [1/2]

std::pair< SDValue, SDValue > getDefaultVLOps ( MVT VecVT,
MVT ContainerVT,
const SDLoc & DL,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

◆ getDefaultVLOps() [2/2]

◆ getDeinterleaveShiftAndTrunc()

◆ getExactInteger()

std::optional< APInt > getExactInteger ( const APFloat & APF,
uint32_t BitWidth )
static

◆ getIntrinsicForMaskedAtomicRMWBinOp()

◆ getLargeExternalSymbol()

◆ getLargeGlobalAddress()

◆ getMaskTypeFor()

MVT getMaskTypeFor ( MVT VecVT)
static

Return the mask type suitable for masking the provided vector type.

This is simply an i1 element type vector of the same (possibly scalable) length.

Definition at line 2930 of file RISCVISelLowering.cpp.

References assert(), llvm::MVT::getVectorElementCount(), llvm::MVT::getVectorVT(), and llvm::MVT::isVector().
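
For example, this maps nxv4i32 to nxv4i1 and v8i16 to v8i1.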

◆ getPACKOpcode()

unsigned getPACKOpcode ( unsigned DestBW,
const RISCVSubtarget & Subtarget )
static

◆ getPrefTypeAlign()

◆ getQDOTXResultType()

◆ getRISCVVLOp()

◆ getRISCVWOpcode()

◆ getRVVFPReductionOpAndOperands()

std::tuple< unsigned, SDValue, SDValue > getRVVFPReductionOpAndOperands ( SDValue Op,
SelectionDAG & DAG,
EVT EltVT,
const RISCVSubtarget & Subtarget )
static

◆ getRVVReductionOp()

unsigned getRVVReductionOp ( unsigned ISDOpcode)
static

Definition at line 11353 of file RISCVISelLowering.cpp.

References llvm_unreachable.

◆ getSingleShuffleSrc()

◆ getSmallestVTForIndex()

◆ getTargetNode() [1/4]

SDValue getTargetNode ( BlockAddressSDNode * N,
const SDLoc & DL,
EVT Ty,
SelectionDAG & DAG,
unsigned Flags )
static

Definition at line 8810 of file RISCVISelLowering.cpp.

References DL, llvm::SelectionDAG::getTargetBlockAddress(), and N.

◆ getTargetNode() [2/4]

SDValue getTargetNode ( ConstantPoolSDNode * N,
const SDLoc & DL,
EVT Ty,
SelectionDAG & DAG,
unsigned Flags )
static

Definition at line 8816 of file RISCVISelLowering.cpp.

References DL, llvm::SelectionDAG::getTargetConstantPool(), and N.

◆ getTargetNode() [3/4]

SDValue getTargetNode ( GlobalAddressSDNode * N,
const SDLoc & DL,
EVT Ty,
SelectionDAG & DAG,
unsigned Flags )
static

Definition at line 8805 of file RISCVISelLowering.cpp.

References DL, llvm::SelectionDAG::getTargetGlobalAddress(), and N.

◆ getTargetNode() [4/4]

SDValue getTargetNode ( JumpTableSDNode * N,
const SDLoc & DL,
EVT Ty,
SelectionDAG & DAG,
unsigned Flags )
static

Definition at line 8822 of file RISCVISelLowering.cpp.

References DL, llvm::SelectionDAG::getTargetJumpTable(), and N.

◆ getVCIXISDNodeVOID()

SDValue getVCIXISDNodeVOID ( SDValue & Op,
SelectionDAG & DAG,
unsigned Type )
inline static

◆ getVCIXISDNodeWCHAIN()

◆ getVecReduceOpcode()

unsigned getVecReduceOpcode ( unsigned Opc)
static

Given a binary operator, return the associative generic ISD::VECREDUCE_OP which corresponds to it.

Definition at line 15124 of file RISCVISelLowering.cpp.

References llvm::ISD::ADD, llvm::ISD::AND, llvm::ISD::FADD, llvm_unreachable, Opc, llvm::ISD::OR, llvm::ISD::SMAX, llvm::ISD::SMIN, llvm::ISD::UMAX, llvm::ISD::UMIN, and llvm::ISD::XOR.

Referenced by combineBinOpOfExtractToReduceTree().

◆ getVLOperand()

SDValue getVLOperand ( SDValue Op)
static

◆ getVSlidedown()

◆ getVSlideup()

◆ getWideningInterleave()

◆ getWideningSpread()

◆ getZeroPaddedAdd()

SDValue getZeroPaddedAdd ( const SDLoc & DL,
SDValue A,
SDValue B,
SelectionDAG & DAG )
static

Given fixed length vectors A and B with equal element types, but possibly different number of elements, return A + B where either A or B is zero padded to the larger number of elements.

Definition at line 19113 of file RISCVISelLowering.cpp.

References A(), llvm::ISD::ADD, assert(), B(), DL, llvm::SelectionDAG::getExtractSubvector(), llvm::SelectionDAG::getInsertSubvector(), llvm::SelectionDAG::getNode(), llvm::EVT::getVectorElementType(), llvm::EVT::getVectorMinNumElements(), and std::swap().

Referenced by foldReduceOperandViaVQDOT().
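
For instance (an illustrative case), with A of type v8i32 and B of type v4i32, B is inserted into a zero v8i32 vector and the result is the v8i32 addition of A with that zero-padded value.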

◆ isAlternating()

bool isAlternating ( const std::array< std::pair< int, int >, 2 > & SrcInfo,
ArrayRef< int > Mask,
unsigned Factor,
bool RequiredPolarity )
static

Definition at line 4895 of file RISCVISelLowering.cpp.

References assert(), llvm::CallingConv::C, and llvm::enumerate().

Referenced by isZipEven(), and isZipOdd().

◆ isCompressMask()

bool isCompressMask ( ArrayRef< int > Mask)
static

Definition at line 5630 of file RISCVISelLowering.cpp.

References llvm::enumerate(), and llvm::Last.

Referenced by lowerVECTOR_SHUFFLE().

◆ isElementRotate()

bool isElementRotate ( const std::array< std::pair< int, int >, 2 > & SrcInfo,
unsigned NumElts )
static

◆ isInterleaveShuffle()

bool isInterleaveShuffle ( ArrayRef< int > Mask,
MVT VT,
int & EvenSrc,
int & OddSrc,
const RISCVSubtarget & Subtarget )
static

Is this shuffle interleaving contiguous elements from one vector into the even elements and contiguous elements from another vector into the odd elements.

EvenSrc will contain the element that should be in the first even element. OddSrc will contain the element that should be in the first odd element. These can be the first element in a source or the element half way through the source.

Definition at line 4833 of file RISCVISelLowering.cpp.

References assert(), llvm::RISCVSubtarget::getELen(), llvm::MVT::getScalarSizeInBits(), llvm::MVT::getVectorNumElements(), llvm::ShuffleVectorInst::isInterleaveMask(), and Size.

Referenced by llvm::RISCVTargetLowering::isShuffleMaskLegal(), and lowerVECTOR_SHUFFLE().
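
For example (illustrative), shuffling two v4i32 sources with the mask <0, 4, 1, 5> places the low half of the first source into the even result elements and the low half of the second source into the odd result elements, so the match reports EvenSrc = 0 and OddSrc = 4.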

◆ isLegalBitRotate()

◆ isLocalRepeatingShuffle()

bool isLocalRepeatingShuffle ( ArrayRef< int > Mask,
int Span )
static

Is this mask local (i.e. elements only move within their local span), and repeating (that is, the same rearrangement is being done within each span)?

Definition at line 5712 of file RISCVISelLowering.cpp.

References llvm::enumerate(), and I.

Referenced by lowerVECTOR_SHUFFLE().

◆ isLowSourceShuffle()

bool isLowSourceShuffle ( ArrayRef< int > Mask,
int Span )
static

Is this mask only using elements from the first span of the input?

Definition at line 5729 of file RISCVISelLowering.cpp.

References llvm::all_of().

Referenced by lowerVECTOR_SHUFFLE().

◆ isMaskedSlidePair()

bool isMaskedSlidePair ( ArrayRef< int > Mask,
std::array< std::pair< int, int >, 2 > & SrcInfo )
static

Is this mask representing a masked combination of two slides?

Definition at line 4868 of file RISCVISelLowering.cpp.

References assert(), llvm::isMaskedSlidePair(), and std::swap().

◆ isNonZeroAVL()

bool isNonZeroAVL ( SDValue AVL)
static

Definition at line 11476 of file RISCVISelLowering.cpp.

References llvm::dyn_cast().

Referenced by combineBinOpToReduce(), and lowerReductionSeq().

◆ isPromotedOpNeedingSplit()

◆ isSimm12Constant()

bool isSimm12Constant ( SDValue V)
static

Definition at line 9115 of file RISCVISelLowering.cpp.

References llvm::isa().

Referenced by lowerSelectToBinOp().

◆ isSimpleVIDSequence()

◆ isSpanSplatShuffle()

bool isSpanSplatShuffle ( ArrayRef< int > Mask,
int Span )
static

Return true for a mask which performs an arbitrary shuffle within the first span, and then repeats that same result across all remaining spans.

Note that this doesn't check if all the inputs come from a single span!

Definition at line 5736 of file RISCVISelLowering.cpp.

References llvm::enumerate(), and I.

Referenced by lowerVECTOR_SHUFFLE().

◆ isValidEGW()

◆ isValidVisniInsertExtractIndex()

bool isValidVisniInsertExtractIndex ( SDValue Idx)
static

Definition at line 10117 of file RISCVISelLowering.cpp.

References llvm::dyn_cast(), llvm::isNullConstant(), and llvm::isUInt().

◆ isZipEven()

bool isZipEven ( const std::array< std::pair< int, int >, 2 > & SrcInfo,
ArrayRef< int > Mask,
unsigned & Factor )
static

Given a shuffle which can be represented as a pair of two slides, see if it is a zipeven idiom.

Zipeven is:

vs2: a0 a1 a2 a3
vs1: b0 b1 b2 b3
vd:  a0 b0 a2 b2

Definition at line 4918 of file RISCVISelLowering.cpp.

References isAlternating(), and llvm::isPowerOf2_32().

Referenced by lowerVECTOR_SHUFFLE().

◆ isZipOdd()

bool isZipOdd ( const std::array< std::pair< int, int >, 2 > & SrcInfo,
ArrayRef< int > Mask,
unsigned & Factor )
static

Given a shuffle which can be represented as a pair of two slides, see if it is a zipodd idiom.

Zipodd is:

vs2: a0 a1 a2 a3
vs1: b0 b1 b2 b3
vd:  a1 b1 a3 b3

Note that the operand order is swapped due to the way we canonicalize the slides, so SrcInfo[0] is vs1, and SrcInfo[1] is vs2.

Definition at line 4933 of file RISCVISelLowering.cpp.

References isAlternating(), and llvm::isPowerOf2_32().

Referenced by lowerVECTOR_SHUFFLE().

◆ legalizeScatterGatherIndexType()

◆ lookupMaskedIntrinsic()

const RISCV::RISCVMaskedPseudoInfo * lookupMaskedIntrinsic ( uint16_t MCOpcode,
RISCVVType::VLMUL LMul,
unsigned SEW )
static

Definition at line 22294 of file RISCVISelLowering.cpp.

References assert(), and llvm::Masked.

Referenced by emitVFROUND_NOEXCEPT_MASK().

◆ LowerATOMIC_FENCE()

◆ lowerBitreverseShuffle()

◆ lowerBUILD_VECTOR()

SDValue lowerBUILD_VECTOR ( SDValue Op,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

Definition at line 4290 of file RISCVISelLowering.cpp.

References AbstractManglingParser< Derived, Alloc >::Ops, llvm::all_of(), llvm::ISD::AND, llvm::ISD::ANY_EXTEND, llvm::ArrayRef(), assert(), llvm::MVT::bitsLE(), llvm::ISD::BUILD_VECTOR, llvm::CallingConv::C, llvm::cast(), llvm::MVT::changeVectorElementType(), convertFromScalableVector(), convertToScalableVector(), llvm::count_if(), DL, llvm::dyn_cast(), llvm::enumerate(), llvm::SelectionDAG::getBitcast(), llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getConstant(), getContainerForFixedLengthVector(), getDefaultVLOps(), llvm::SelectionDAG::getExtractSubvector(), llvm::MVT::getFixedSizeInBits(), llvm::RISCVSubtarget::getFLen(), llvm::SelectionDAG::getInsertSubvector(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::getKnownMinValue(), llvm::RISCVTargetLowering::getLMUL(), llvm::RISCVTargetLowering::getM1VT(), llvm::SelectionDAG::getNode(), llvm::RISCVSubtarget::getRealVLen(), llvm::SelectionDAG::getSetCC(), llvm::SDValue::getSimpleValueType(), llvm::MVT::getSizeInBits(), llvm::SelectionDAG::getSplatBuildVector(), llvm::getSplatValue(), llvm::SelectionDAG::getSplatVector(), llvm::SelectionDAG::getUNDEF(), llvm::MVT::getVectorElementType(), llvm::MVT::getVectorMinNumElements(), llvm::MVT::getVectorNumElements(), llvm::MVT::getVectorVT(), getVSlidedown(), getVSlideup(), llvm::RISCVSubtarget::getXLenVT(), I, llvm::ISD::isBuildVectorOfConstantFPSDNodes(), llvm::ISD::isBuildVectorOfConstantSDNodes(), llvm::MVT::isFixedLengthVector(), llvm::MVT::isFloatingPoint(), llvm::RISCVVType::LMUL_2, llvm::RISCVVType::LMUL_4, llvm::RISCVVType::LMUL_8, lowerBuildVectorOfConstants(), lowerBuildVectorViaDominantValues(), lowerBuildVectorViaPacking(), llvm::PatternMatch::m_ExtractElt(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_Zero(), llvm::RISCVVType::MASK_AGNOSTIC, matchSplatAsGather(), llvm::Offset, Opc, Operands, llvm::SmallVectorTemplateBase< T, bool >::push_back(), llvm::SmallVectorImpl< T >::reserve(), SDValue(), llvm::ISD::SETNE, llvm::SmallVectorTemplateCommon< T, typename >::size(), llvm::Splat, llvm::RISCVVType::TAIL_AGNOSTIC, and llvm::ISD::VSELECT.

Referenced by llvm::RISCVTargetLowering::LowerOperation().

◆ lowerBuildVectorOfConstants()

SDValue lowerBuildVectorOfConstants ( SDValue Op,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

Definition at line 3973 of file RISCVISelLowering.cpp.

References llvm::ISD::ANY_EXTEND, assert(), llvm::MVT::bitsLE(), llvm::cast(), llvm::MVT::changeVectorElementType(), llvm::SelectionDAG::ComputeMaxSignificantBits(), convertFromScalableVector(), convertToScalableVector(), llvm::divideCeil(), DL, llvm::enumerate(), llvm::SelectionDAG::getBitcast(), llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getConstant(), getContainerForFixedLengthVector(), getDefaultVLOps(), llvm::RISCVSubtarget::getELen(), llvm::SelectionDAG::getExtractSubvector(), llvm::SelectionDAG::getInsertVectorElt(), llvm::MVT::getIntegerVT(), llvm::SelectionDAG::getNode(), llvm::RISCVSubtarget::getRealMinVLen(), llvm::MVT::getScalarSizeInBits(), llvm::SelectionDAG::getSignedConstant(), llvm::MVT::getSizeInBits(), llvm::getSplatValue(), llvm::SelectionDAG::getUNDEF(), llvm::MVT::getVectorElementType(), llvm::MVT::getVectorNumElements(), llvm::MVT::getVectorVT(), llvm::RISCVSubtarget::getXLen(), llvm::RISCVSubtarget::getXLenVT(), I, llvm::RISCVSubtarget::is64Bit(), llvm::ISD::isBuildVectorAllOnes(), llvm::ISD::isBuildVectorAllZeros(), llvm::ISD::isBuildVectorOfConstantSDNodes(), llvm::MVT::isFixedLengthVector(), llvm::MVT::isFloatingPoint(), llvm::isInt(), llvm::MVT::isInteger(), lowerBuildVectorViaDominantValues(), lowerBuildVectorViaVID(), llvm::maskTrailingOnes(), Opc, OpIdx, SDValue(), llvm::SelectionDAG::shouldOptForSize(), llvm::SignExtend64(), and llvm::Splat.

Referenced by lowerBUILD_VECTOR().

◆ lowerBuildVectorViaDominantValues()

SDValue lowerBuildVectorViaDominantValues ( SDValue Op,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

Try and optimize BUILD_VECTORs with "dominant values" - these are values which constitute a large proportion of the elements.

In such cases we can splat a vector with the dominant element and make up the shortfall with INSERT_VECTOR_ELTs. Returns an empty SDValue if not profitable. Note that this includes vectors of 2 elements by association. The upper-most element is the "dominant" one, allowing us to use a splat to "insert" the upper element, and an insert of the lower element at position 0, which improves codegen.

Definition at line 3868 of file RISCVISelLowering.cpp.

References AbstractManglingParser< Derived, Alloc >::Ops, llvm::ISD::ANY_EXTEND, assert(), llvm::MVT::changeVectorElementType(), convertFromScalableVector(), convertToScalableVector(), llvm::Count, llvm::count_if(), DL, llvm::dyn_cast(), llvm::enumerate(), llvm::SelectionDAG::getBuildVector(), getContainerForFixedLengthVector(), getDefaultVLOps(), llvm::SelectionDAG::getInsertVectorElt(), llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getSplatBuildVector(), llvm::SelectionDAG::getUNDEF(), llvm::RISCVSubtarget::getXLenVT(), llvm::detail::DenseSetImpl< ValueT, MapTy, ValueInfoT >::insert(), llvm::ISD::isBuildVectorOfConstantSDNodes(), llvm::MVT::isFixedLengthVector(), llvm::MVT::isFloatingPoint(), llvm::Log2_32(), OpIdx, SDValue(), llvm::SelectionDAG::shouldOptForSize(), llvm::DenseMapBase< DerivedT, KeyT, ValueT, KeyInfoT, BucketT >::size(), llvm::transform(), and llvm::ISD::VSELECT.

Referenced by lowerBUILD_VECTOR(), and lowerBuildVectorOfConstants().

◆ lowerBuildVectorViaPacking()

◆ lowerBuildVectorViaVID()

◆ lowerConstant()

◆ lowerCttzElts()

◆ lowerDisjointIndicesShuffle()

SDValue lowerDisjointIndicesShuffle ( ShuffleVectorSDNode * SVN,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

Given a shuffle where the indices are disjoint between the two sources, e.g.:

t2:v4i8 = vector_shuffle t0:v4i8, t1:v4i8, <2, 7, 1, 4>

Merge the two sources into one and do a single source shuffle:

t2:v4i8 = vselect t1:v4i8, t0:v4i8, <0, 1, 0, 1>
t3:v4i8 = vector_shuffle t2:v4i8, undef, <2, 3, 1, 0>

A vselect will either be merged into a masked instruction or be lowered as a vmerge.vvm, which is cheaper than a vrgather.vv.

Definition at line 5661 of file RISCVISelLowering.cpp.

References llvm::MVT::changeVectorElementType(), DL, llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getConstant(), llvm::ShuffleVectorSDNode::getMask(), llvm::SelectionDAG::getNode(), llvm::SDNode::getOperand(), llvm::SDNode::getSimpleValueType(), llvm::SelectionDAG::getUNDEF(), llvm::SelectionDAG::getVectorShuffle(), llvm::RISCVSubtarget::getXLenVT(), I, llvm::SmallVectorTemplateBase< T, bool >::push_back(), SDValue(), Select, and llvm::ISD::VSELECT.

Referenced by lowerVECTOR_SHUFFLE().

◆ lowerFABSorFNEG()

◆ lowerFCOPYSIGN()

◆ lowerFixedVectorSegLoadIntrinsics()

◆ lowerFixedVectorSegStoreIntrinsics()

◆ lowerFMAXIMUM_FMINIMUM()

◆ lowerFP_TO_INT()

◆ lowerFP_TO_INT_SAT()

◆ lowerFTRUNC_FCEIL_FFLOOR_FROUND()

◆ lowerGetVectorLength()

◆ lowerINT_TO_FP()

◆ LowerPREFETCH()

SDValue LowerPREFETCH ( SDValue Op,
const RISCVSubtarget & Subtarget,
SelectionDAG & DAG )
static

Definition at line 6743 of file RISCVISelLowering.cpp.

◆ lowerReductionSeq()

◆ lowerScalarInsert()

◆ lowerScalarSplat()

◆ lowerSelectToBinOp()

◆ lowerShuffleViaVRegSplitting()

◆ lowerVECTOR_SHUFFLE()

SDValue lowerVECTOR_SHUFFLE ( SDValue Op,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget )
static

Definition at line 5782 of file RISCVISelLowering.cpp.

References AbstractManglingParser< Derived, Alloc >::Ops, llvm::any_of(), assert(), llvm::MVT::bitsGT(), llvm::CallingConv::C, llvm::cast(), llvm::MVT::changeTypeToInteger(), llvm::MVT::changeVectorElementType(), Concat, llvm::ISD::CONCAT_VECTORS, convertFromScalableVector(), convertToScalableVector(), llvm::count_if(), DL, llvm::enumerate(), llvm::ISD::EXTLOAD, foldConcatVector(), llvm::SelectionDAG::getBitcast(), llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getConstant(), getContainerForFixedLengthVector(), getDefaultScalableVLOps(), getDefaultVLOps(), getDeinterleaveShiftAndTrunc(), llvm::MVT::getDoubleNumVectorElementsVT(), llvm::SelectionDAG::getElementCount(), llvm::RISCVSubtarget::getELen(), llvm::SelectionDAG::getExtLoad(), llvm::SelectionDAG::getExtractSubvector(), llvm::TypeSize::getFixed(), llvm::SDNode::getFlags(), llvm::MVT::getHalfNumVectorElementsVT(), llvm::SelectionDAG::getInsertSubvector(), llvm::details::FixedOrScalableQuantity< LeafTy, ValueTy >::getKnownMinValue(), llvm::SelectionDAG::getLoad(), llvm::RISCVTargetLowering::getM1VT(), llvm::SelectionDAG::getMachineFunction(), llvm::MachineFunction::getMachineMemOperand(), llvm::ShuffleVectorSDNode::getMask(), llvm::SelectionDAG::getMemBasePlusOffset(), llvm::SelectionDAG::getMemIntrinsicNode(), llvm::SelectionDAG::getNode(), llvm::RISCVSubtarget::getRealMinVLen(), llvm::RISCVSubtarget::getRealVLen(), llvm::SelectionDAG::getRegister(), llvm::MVT::getScalarSizeInBits(), llvm::MVT::getScalarType(), llvm::SelectionDAG::getSetCC(), getSingleShuffleSrc(), llvm::MVT::getSizeInBits(), llvm::ShuffleVectorSDNode::getSplatIndex(), llvm::MVT::getStoreSize(), llvm::SelectionDAG::getTargetConstant(), llvm::SelectionDAG::getUNDEF(), llvm::MVT::getVectorElementCount(), llvm::MVT::getVectorElementType(), llvm::MVT::getVectorMinNumElements(), llvm::MVT::getVectorNumElements(), llvm::SelectionDAG::getVectorShuffle(), llvm::MVT::getVectorVT(), getVSlidedown(), getVSlideup(), llvm::SelectionDAG::getVTList(), getWideningInterleave(), getWideningSpread(), llvm::RISCVSubtarget::getXLenVT(), llvm::Hi, I, Info, llvm::ISD::INTRINSIC_W_CHAIN, isCompressMask(), llvm::ShuffleVectorInst::isDeInterleaveMaskOfFactor(), isElementRotate(), llvm::MVT::isFloatingPoint(), llvm::ShuffleVectorInst::isIdentityMask(), llvm::MVT::isInteger(), isInterleaveShuffle(), isLegalBitRotate(), isLocalRepeatingShuffle(), isLowSourceShuffle(), llvm::isMaskedSlidePair(), llvm::ISD::isNormalLoad(), llvm::isPowerOf2_32(), llvm::ShuffleVectorInst::isReverseMask(), llvm::ShuffleVectorInst::isSingleSourceMask(), isSpanSplatShuffle(), llvm::ShuffleVectorSDNode::isSplat(), llvm::ShuffleVectorSDNode::isSplatMask(), llvm::SelectionDAG::isSplatValue(), llvm::RISCVTargetLowering::isSpreadMask(), llvm::isUInt(), llvm::SDValue::isUndef(), isZipEven(), isZipOdd(), llvm::Lo, lowerBitreverseShuffle(), lowerDisjointIndicesShuffle(), lowerShuffleViaVRegSplitting(), lowerVECTOR_SHUFFLEAsRotate(), lowerVECTOR_SHUFFLEAsVRGatherVX(), lowerVECTOR_SHUFFLEAsVSlide1(), lowerVECTOR_SHUFFLEAsVSlidedown(), lowerVECTOR_SHUFFLEAsVSlideup(), lowerVZIP(), llvm::SelectionDAG::makeEquivalentMemoryOrdering(), N, llvm::Offset, Opc, llvm::PowerOf2Ceil(), llvm::SmallVectorTemplateBase< T, bool >::push_back(), SDValue(), llvm::ISD::SETNE, Size, llvm::SmallVectorTemplateCommon< T, typename >::size(), llvm::Splat, llvm::RISCVVType::TAIL_AGNOSTIC, tryWidenMaskForShuffle(), llvm::ISD::VECTOR_COMPRESS, llvm::ISD::VECTOR_REVERSE, llvm::ISD::VSELECT, and llvm::ISD::ZERO_EXTEND.

Referenced by llvm::RISCVTargetLowering::LowerOperation(), and llvm::X86TargetLowering::LowerOperation().

◆ lowerVECTOR_SHUFFLEAsRotate()

◆ lowerVECTOR_SHUFFLEAsVRGatherVX()

SDValue lowerVECTOR_SHUFFLEAsVRGatherVX ( ShuffleVectorSDNode * SVN,
const RISCVSubtarget & Subtarget,
SelectionDAG & DAG )
static

◆ lowerVECTOR_SHUFFLEAsVSlide1()

◆ lowerVECTOR_SHUFFLEAsVSlidedown()

◆ lowerVECTOR_SHUFFLEAsVSlideup()

◆ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND()

◆ lowerVectorIntrinsicScalars()

◆ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND()

◆ lowerVectorXRINT_XROUND()

◆ lowerVZIP()

◆ matchIndexAsShuffle()

bool matchIndexAsShuffle ( EVT VT,
SDValue Index,
SDValue Mask,
SmallVector< int > & ShuffleMask )
static

Match the index vector of a scatter or gather node as the shuffle mask which performs the rearrangement if possible.

Will only match if all lanes are touched, and thus replacing the scatter or gather with a unit strided access and shuffle is legal.

Definition at line 19647 of file RISCVISelLowering.cpp.

References llvm::BitVector::all(), assert(), llvm::CallingConv::C, llvm::EVT::getScalarStoreSize(), llvm::EVT::getVectorNumElements(), llvm::ISD::isBuildVectorOfConstantSDNodes(), llvm::ISD::isConstantSplatVectorAllOnes(), and llvm::BitVector::set().

Referenced by llvm::RISCVTargetLowering::PerformDAGCombine().

◆ matchIndexAsWiderOp()

bool matchIndexAsWiderOp ( EVT VT,
SDValue Index,
SDValue Mask,
Align BaseAlign,
const RISCVSubtarget & ST )
static

Match the index of a gather or scatter operation as an operation with twice the element width and half the number of elements.

This is generally profitable (if legal) because these operations are linear in VL, so even if we cause some extra VTYPE/VL toggles, we still come out ahead.

Definition at line 19682 of file RISCVISelLowering.cpp.

References llvm::CallingConv::C, llvm::EVT::getScalarStoreSize(), llvm::EVT::getVectorNumElements(), llvm::ISD::isBuildVectorOfConstantSDNodes(), llvm::ISD::isConstantSplatVectorAllOnes(), and llvm::Last.

Referenced by llvm::RISCVTargetLowering::PerformDAGCombine().

◆ matchRoundingOp()

◆ matchSelectAddSub()

bool matchSelectAddSub ( SDValue TrueVal,
SDValue FalseVal,
bool & SwapCC )
static

◆ matchSetCC()

◆ matchSplatAsGather()

◆ narrowIndex()

◆ negateFMAOpcode()

unsigned negateFMAOpcode ( unsigned Opcode,
bool NegMul,
bool NegAcc )
static

◆ performADDCombine()

◆ performANDCombine()

◆ performBITREVERSECombine()

◆ performBUILD_VECTORCombine()

SDValue performBUILD_VECTORCombine ( SDNode * N,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget,
const RISCVTargetLowering & TLI )
static

If we have a build_vector where each lane is binop X, C, where C is a constant (but not necessarily the same constant on all lanes), form binop (build_vector x1, x2, ...), (build_vector c1, c2, c3, ..).

We assume that materializing a constant build vector will be no more expensive than performing O(n) binops.

Definition at line 19046 of file RISCVISelLowering.cpp.

References assert(), DL, llvm::SelectionDAG::getBuildVector(), llvm::SelectionDAG::getNode(), llvm::EVT::getVectorElementType(), llvm::EVT::getVectorNumElements(), llvm::isa(), llvm::TargetLoweringBase::isBinOp(), llvm::TargetLoweringBase::isOperationLegalOrCustom(), llvm::SelectionDAG::isSafeToSpeculativelyExecute(), llvm::EVT::isScalableVector(), llvm::TargetLoweringBase::isTypeLegal(), N, llvm::SmallVectorTemplateBase< T, bool >::push_back(), and SDValue().

Referenced by llvm::RISCVTargetLowering::PerformDAGCombine().
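
A small illustrative instance of the transform (operands and constants are arbitrary): (build_vector (add x0, 1), (add x1, 2), (add x2, 3), (add x3, 4)) becomes (add (build_vector x0, x1, x2, x3), (build_vector 1, 2, 3, 4)), i.e. one vector add plus a materialized constant vector instead of four scalar adds.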

◆ performCONCAT_VECTORSCombine()

SDValue performCONCAT_VECTORSCombine ( SDNode * N,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget,
const RISCVTargetLowering & TLI )
static

◆ performFP_TO_INT_SATCombine()

◆ performFP_TO_INTCombine()

◆ performINSERT_VECTOR_ELTCombine()

◆ performMemPairCombine()

◆ performMULCombine()

◆ performORCombine()

◆ performSELECTCombine()

◆ performSETCCCombine()

◆ performSHLCombine()

◆ performSIGN_EXTEND_INREGCombine()

◆ performSRACombine()

◆ performSUBCombine()

◆ performTRUNCATECombine()

◆ performVECREDUCECombine()

SDValue performVECREDUCECombine ( SDNode * N,
SelectionDAG & DAG,
const RISCVSubtarget & Subtarget,
const RISCVTargetLowering & TLI )
static

◆ performVECTOR_SHUFFLECombine()

◆ performVFMADD_VLCombine()

◆ performVP_REVERSECombine()

◆ performVP_STORECombine()

◆ performVP_TRUNCATECombine()

◆ performVSELECTCombine()

SDValue performVSELECTCombine ( SDNode * N,
SelectionDAG & DAG )
static

Convert vselect CC, (add a, b), (sub a, b) to add a, (vselect CC, -b, b).

This allows us to match a vadd.vv fed by a masked vrsub, which reduces register pressure over the add followed by masked vsub sequence.

Definition at line 18999 of file RISCVISelLowering.cpp.

References A(), llvm::ISD::ADD, B(), DL, llvm::SelectionDAG::getLogicalNOT(), llvm::SelectionDAG::getNegative(), llvm::SelectionDAG::getNode(), llvm::SDNode::getValueType(), matchSelectAddSub(), N, SDValue(), llvm::Sub, and llvm::ISD::VSELECT.

◆ performVWADDSUBW_VLCombine()

◆ performXORCombine()

◆ processVCIXOperands()

◆ promoteVCIXScalar()

◆ reduceANDOfAtomicLoad()

◆ reverseZExtICmpCombine()

◆ simplifyOp_VL()

◆ splatPartsI64WithVL()

◆ splatSplitI64WithVL()

SDValue splatSplitI64WithVL ( const SDLoc & DL,
MVT VT,
SDValue Passthru,
SDValue Scalar,
SDValue VL,
SelectionDAG & DAG )
static

◆ SplitStrictFPVectorOp()

◆ SplitVectorOp()

◆ SplitVectorReductionOp()

◆ SplitVPOp()

◆ STATISTIC()

STATISTIC ( NumTailCalls ,
"Number of tail calls"  )

◆ transformAddImmMulImm()

◆ transformAddShlImm()

◆ translateSetCCForBranch()

◆ tryDemorganOfBooleanCondition()

◆ tryFoldSelectIntoOp()

◆ tryMemPairCombine()

◆ tryWidenMaskForShuffle()

SDValue tryWidenMaskForShuffle ( SDValue Op,
SelectionDAG & DAG )
static

Try to widen element type to get a new mask value for a better permutation sequence.

This doesn't try to inspect the widened mask for profitability; we speculate the widened form is equal or better. This has the effect of reducing mask constant sizes (allowing cheaper materialization sequences) and index sequence sizes (reducing register pressure and materialization cost), at the cost of (possibly) an extra VTYPE toggle.

Definition at line 5755 of file RISCVISelLowering.cpp.

References llvm::cast(), DL, llvm::SelectionDAG::getBitcast(), llvm::MVT::getFixedSizeInBits(), llvm::MVT::getFloatingPointVT(), llvm::MVT::getIntegerVT(), llvm::SelectionDAG::getTargetLoweringInfo(), llvm::MVT::getVectorElementType(), llvm::MVT::getVectorNumElements(), llvm::SelectionDAG::getVectorShuffle(), llvm::MVT::getVectorVT(), llvm::MVT::isFloatingPoint(), llvm::TargetLoweringBase::isTypeLegal(), SDValue(), and llvm::widenShuffleMaskElts().
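
For example (illustrative), a v8i8 shuffle with mask <0, 1, 4, 5, 2, 3, 6, 7> only moves elements in adjacent pairs, so it can be rewritten through bitcasts as a v4i16 shuffle with the shorter mask <0, 2, 1, 3>, halving the size of any index vector that has to be materialized.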

◆ unpackF64OnRV32DSoftABI()

◆ unpackFromMemLoc()

◆ unpackFromRegLoc()

◆ useInversedSetcc()

◆ useRVVForFixedLengthVectorVT()

◆ useTpOffset()

◆ widenVectorOpsToi8()

Variable Documentation

◆ AllowSplatInVW_W

cl::opt< bool > AllowSplatInVW_W (DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden, cl::desc("Allow the formation of VW_W operations (e.g., " "VWADD_W) with splat constants"), cl::init(false))
static

◆ ExtensionMaxWebSize

cl::opt< unsigned > ExtensionMaxWebSize (DEBUG_TYPE "-ext-max-web-size", cl::Hidden, cl::desc("Give the maximum size (in number of nodes) of the web of " "instructions that we will consider for VW expansion"), cl::init(18))
static

Referenced by combineOp_VLToVWOp_VL().

◆ FPImmCost

cl::opt< int > FPImmCost (DEBUG_TYPE "-fpimm-cost", cl::Hidden, cl::desc("Give the maximum number of instructions that we will " "use for creating a floating-point immediate value"), cl::init(2))
static

◆ ModeMask32

const uint32_t ModeMask32 = ~RISCVExceptFlags::ALL

Definition at line 14276 of file RISCVISelLowering.cpp.

◆ ModeMask64

const uint64_t ModeMask64 = ~RISCVExceptFlags::ALL

Definition at line 14275 of file RISCVISelLowering.cpp.

◆ NumRepeatedDivisors

cl::opt< unsigned > NumRepeatedDivisors (DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden, cl::desc("Set the minimum number of repetitions of a divisor to allow " "transformation to multiplications by the reciprocal"), cl::init(2))
static

◆ ReassocShlAddiAdd

cl::opt< bool > ReassocShlAddiAdd("reassoc-shl-addi-add", cl::Hidden, cl::desc("Swap add and addi in cases where the add may " "be combined with a shift"), cl::init(true)) ( "reassoc-shl-addi-add" ,
cl::Hidden ,
cl::desc("Swap add and addi in cases where the add may " "be combined with a shift") ,
cl::init(true)  )
static

Referenced by combineShlAddIAdd().