//===-- NVPTXISelLowering.h - NVPTX DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXISELLOWERING_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXISELLOWERING_H

#include "NVPTX.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/AtomicOrdering.h"

namespace llvm {
namespace NVPTXISD {
enum NodeType : unsigned {
  // Start the numbering from where ISD NodeType finishes.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  /// These nodes represent a parameter declaration. In PTX this will look like:
  ///   .param .align 16 .b8 param0[1024];
  ///   .param .b32 retval0;
  ///
  /// DeclareArrayParam(Chain, Externalsym, Align, Size, Glue)
  /// DeclareScalarParam(Chain, Externalsym, Size, Glue)
  DeclareArrayParam,
  DeclareScalarParam,

  /// This node represents a PTX call instruction. Its operands are as follows:
  ///
  /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
  ///      NumParams, Callee, Proto)
  CALL,

  /// This node is similar to ISD::BUILD_VECTOR except that the output may be
  /// implicitly bitcast to a scalar. This allows for the representation of
  /// packing move instructions for vector types which are not legal, e.g.
  /// v2i32.
  BUILD_VECTOR,

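  // Purely illustrative (register names are made up): a v2i32 value held in
  // two 32-bit registers can be packed into one 64-bit register with a single
  // PTX move, roughly:
  //   mov.b64 %rd0, {%r0, %r1};
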
  /// This node is the inverse of NVPTXISD::BUILD_VECTOR. It takes a single
  /// value, which may be a scalar, and unpacks it into multiple values by
  /// implicitly converting it to a vector.
  UNPACK_VECTOR,

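  // The inverse direction, again only as an illustration: a 64-bit scalar is
  // unpacked back into the two 32-bit halves of a v2i32 value, roughly:
  //   mov.b64 {%r0, %r1}, %rd0;
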
  /// These nodes are used to lower atomic instructions with i128 type. They are
  /// similar to the generic nodes, but the input and output values are split
  /// into two 64-bit values.
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP_B128(INCHAIN, ptr, cmpLo, cmpHi,
  ///                                               swapLo, swapHi)
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP_B128(INCHAIN, ptr, amtLo, amtHi)
  ATOMIC_CMP_SWAP_B128,
  ATOMIC_SWAP_B128,

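  // Hedged example of the split: an IR-level 128-bit compare-exchange such as
  //   %res = cmpxchg ptr %p, i128 %cmp, i128 %new seq_cst seq_cst
  // has %cmp and %new carried as lo/hi i64 pairs by ATOMIC_CMP_SWAP_B128 so
  // the whole operation can be selected as one 128-bit compare-and-swap
  // (atom.cas.b128 on targets that support it).
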
  LDUV2, // LDU.v2
  LDUV4, // LDU.v4
};
} // namespace NVPTXISD

class NVPTXSubtarget;

//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class NVPTXTargetLowering : public TargetLowering {
public:
  explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM,
                               const NVPTXSubtarget &STI);
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx,
                                     const DataLayout &DL) const;

  /// getFunctionParamOptimizedAlign - since function arguments are passed via
  /// .param space, we may want to increase their alignment in a way that
  /// ensures that we can effectively vectorize their loads & stores. We can
  /// increase alignment only if the function has internal or private linkage,
  /// as callers of functions with other linkage types may already rely on the
  /// default alignment. To allow using 128-bit vectorized loads/stores, this
  /// function ensures that alignment is 16 or greater.
  Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy,
                                       const DataLayout &DL) const;

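  // Illustrative only (symbol names are hypothetical): raising a 16-byte
  // struct parameter's alignment from 4 to 16 lets its .param loads be
  // vectorized, e.g.
  //   ld.param.v4.u32 {%r0, %r1, %r2, %r3}, [foo_param_0];
  // instead of four separate 32-bit loads.
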
  /// Helper for computing alignment of a device function byval parameter.
  Align getFunctionByValParamAlign(const Function *F, Type *ArgTy,
                                   Align InitialAlign,
                                   const DataLayout &DL) const;

  // Helper for getting a function parameter name. The name is composed from
  // the parameter's index and the function name. A negative index corresponds
  // to the special parameter (an unsized array) used for passing variable
  // arguments.
  std::string getParamName(const Function *F, int Idx) const;

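  // Illustrative naming only (the exact spelling comes from the
  // implementation): for a function @foo, index 0 maps to something like
  // "foo_param_0", while a negative index names the vararg array, e.g.
  // "foo_vararg".
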
  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// Used to guide target-specific optimizations, like loop strength
  /// reduction (LoopStrengthReduce.cpp) and memory optimization for
  /// address mode (CodeGenPrepare.cpp).
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

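  // As a rough guide, PTX addresses are just [base + immediate offset], e.g.
  // the illustrative access
  //   ld.global.f32 %f0, [%rd0+16];
  // so modes with a scaled index register are rejected here.
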
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override {
    // Truncating 64-bit to 32-bit is free in SASS.
    if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
      return false;
    return SrcTy->getPrimitiveSizeInBits() == 64 &&
           DstTy->getPrimitiveSizeInBits() == 32;
  }

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                         EVT VT) const override {
    if (VT.isVector())
      return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
    return MVT::i1;
  }

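  // For example, a SETCC on v2f32 yields v2i1 under this hook, while a scalar
  // f32 comparison yields i1.
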
  ConstraintType getConstraintType(StringRef Constraint) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;

  std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
                           const SmallVectorImpl<ISD::OutputArg> &,
                           std::optional<unsigned> FirstVAArg,
                           const CallBase &CB, unsigned UniqueCallSite) const;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
                      SelectionDAG &DAG) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  const NVPTXTargetMachine *nvTM;

  // PTX always uses 32-bit shift amounts
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }

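  // Example: even a 64-bit shift takes a 32-bit amount in PTX, roughly
  //   shl.b64 %rd0, %rd1, %r2;   // %r2 is a .u32 shift amount
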
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  // Get the degree of precision we want from 32-bit floating point division
  // operations.
  NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF,
                                          const SDNode &N) const;

  // Get whether we should use a precise or approximate 32-bit floating point
  // sqrt instruction.
  bool usePrecSqrtF32(const SDNode *N = nullptr) const;

  // Get whether we should use instructions that flush floating-point denormals
  // to sign-preserving zero.
  bool useF32FTZ(const MachineFunction &MF) const;

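  // When FTZ is in effect, the .ftz instruction variants are selected, e.g.
  // (illustrative) add.rn.ftz.f32 instead of add.rn.f32, flushing f32 denormal
  // inputs and results to sign-preserving zero.
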
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;

  unsigned combineRepeatedFPDivisors() const override { return 2; }

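  // Returning 2 means that once at least two fdivs share a divisor (and the
  // usual fast-math constraints allow it), the generic combiner rewrites them
  // with a single reciprocal, roughly:
  //   a / d, b / d  ==>  t = 1.0 / d;  a * t, b * t
  // trading two divisions for one division plus two multiplies.
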
  bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT) const override {
    return true;
  }

  // The default is the same as the pointer type, but brx.idx only accepts i32.
  MVT getJumpTableRegTy(const DataLayout &) const override { return MVT::i32; }

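  // The lowered jump table branch is an indexed branch, roughly of the form
  //   brx.idx %r_idx, targets;   // illustrative; the index operand is .u32
  // which is why the index register type is pinned to i32 here.
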
  unsigned getJumpTableEncoding() const override;

  bool enableAggressiveFMAFusion(EVT VT) const override { return true; }

  // The default is to transform llvm.ctlz(x, false) (where false indicates
  // that the result is defined when x == 0) into a branch that checks whether
  // x is 0 and avoids calling ctlz in that case. We have a dedicated ctlz
  // instruction, so we say that ctlz is cheap to speculate.
  bool isCheapToSpeculateCtlz(Type *Ty) const override { return true; }

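  // PTX's clz is well defined for a zero input (e.g. clz.b32 of 0 yields 32),
  // so speculating the single clz instruction is safe and cheaper than the
  // branchy expansion.
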
  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    // There's rarely any point in packing something into a vector type if we
    // already have the source data.
    return true;
  }

  bool shouldInsertFencesForAtomic(const Instruction *) const override;

  AtomicOrdering
  atomicOperationOrderAfterFenceSplit(const Instruction *I) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
                                     EVT ToVT) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;
  bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits,
                                         const APInt &DemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth = 0) const override;

private:
  const NVPTXSubtarget &STI; // cache the subtarget here
  mutable unsigned GlobalUniqueCallSite;

  SDValue getParamSymbol(SelectionDAG &DAG, int I, EVT T) const;
  SDValue getCallParamSymbol(SelectionDAG &DAG, int I, EVT T) const;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;

  SDValue PromoteBinOpIfF32FTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCopyToReg_128(SDValue Op, SelectionDAG &DAG) const;
  unsigned getNumRegisters(LLVMContext &Context, EVT VT,
                           std::optional<MVT> RegisterVT) const override;
  bool
  splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                              SDValue *Parts, unsigned NumParts, MVT PartVT,
                              std::optional<CallingConv::ID> CC) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx,
                             const DataLayout &DL) const;
};

} // namespace llvm

#endif