NVPTXISelLowering.h
//===-- NVPTXISelLowering.h - NVPTX DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXISELLOWERING_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXISELLOWERING_H

#include "NVPTX.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
namespace NVPTXISD {
enum NodeType : unsigned {
  // Start the numbering from where ISD NodeType finishes.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  /// These nodes represent a parameter declaration. In PTX this will look like:
  /// .param .align 16 .b8 param0[1024];
  /// .param .b32 retval0;
  ///
  /// DeclareArrayParam(Chain, Externalsym, Align, Size, Glue)
  /// DeclareScalarParam(Chain, Externalsym, Size, Glue)
  DeclareArrayParam,
  DeclareScalarParam,

  /// This node represents a PTX call instruction. Its operands are as follows:
  ///
  /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
  ///      NumParams, Callee, Proto)
  CALL,

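  // Illustrative only (not part of this header): a call lowered through this
  // node typically becomes a PTX sequence along these lines, with the .param
  // declarations coming from the Declare*Param nodes above (callee_sym is a
  // hypothetical symbol name):
  //   { .param .b32 param0;
  //     .param .b32 retval0;
  //     st.param.b32 [param0], %r1;
  //     call.uni (retval0), callee_sym, (param0);
  //     ld.param.b32 %r2, [retval0]; }
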
  // ...

  /// This node is similar to ISD::BUILD_VECTOR except that the output may be
  /// implicitly bitcast to a scalar. This allows for the representation of
  /// packing move instructions for vector types which are not legal (e.g.
  /// v2i32).
  BUILD_VECTOR,

  /// This node is the inverse of NVPTX::BUILD_VECTOR. It takes a single value
  /// which may be a scalar and unpacks it into multiple values by implicitly
  /// converting it to a vector.
  UNPACK_VECTOR,

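  // Illustrative only (not part of this header): for a type such as v2i32,
  // these nodes correspond to the pack/unpack forms of the PTX mov
  // instruction, e.g.
  //   mov.b64 %rd0, {%r0, %r1};   // BUILD_VECTOR: pack two .b32 into one .b64
  //   mov.b64 {%r0, %r1}, %rd0;   // UNPACK_VECTOR: split one .b64 into two .b32
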
  // ...

  /// These nodes are used to lower atomic instructions with i128 type. They are
  /// similar to the generic nodes, but the input and output values are split
  /// into two 64-bit values.
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP_B128(INCHAIN, ptr, cmpLo, cmpHi,
  ///                                               swapLo, swapHi)
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP_B128(INCHAIN, ptr, amtLo, amtHi)
  ATOMIC_CMP_SWAP_B128,
  ATOMIC_SWAP_B128,

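  // Illustrative only (not part of this header): an i128 `cmpxchg` in LLVM IR
  // has its compare and swap operands split into 64-bit lo/hi halves, and the
  // two 64-bit results are recombined into the i128 result, allowing selection
  // to a 128-bit PTX atomic compare-and-swap on targets that support it.
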
  LDUV2, // LDU.v2
  LDUV4, // LDU.v4
  // ...
};
}

class NVPTXSubtarget;

//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class NVPTXTargetLowering : public TargetLowering {
public:
  explicit NVPTXTargetLowering(const NVPTXTargetMachine &TM,
                               const NVPTXSubtarget &STI);
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx,
                                     const DataLayout &DL) const;

  /// getFunctionParamOptimizedAlign - since function arguments are passed via
  /// .param space, we may want to increase their alignment in a way that
  /// ensures that we can effectively vectorize their loads & stores. We can
  /// increase alignment only if the function has internal or private linkage,
  /// as for other linkage types callers may already rely on the default
  /// alignment. To allow using 128-bit vectorized loads/stores, this function
  /// ensures that alignment is 16 or greater.
  Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy,
                                       const DataLayout &DL) const;
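  // Illustrative only (not part of this header): with a parameter aligned to
  // 16 bytes, four consecutive 32-bit loads from it can be emitted as one
  // vectorized access, e.g.
  //   ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [foo_param_0];
  // where foo_param_0 stands for a hypothetical parameter symbol.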

  /// Helper for computing alignment of a device function byval parameter.
  Align getFunctionByValParamAlign(const Function *F, Type *ArgTy,
                                   Align InitialAlign,
                                   const DataLayout &DL) const;

  // Helper for getting a function parameter name. The name is composed from
  // its index and the function name. A negative index corresponds to the
  // special parameter (unsized array) used for passing variable arguments.
  std::string getParamName(const Function *F, int Idx) const;
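  // Illustrative only (not part of this header): for a function `foo`,
  // parameter 0 is typically named along the lines of "foo_param_0"; the exact
  // scheme used for the vararg parameter is an implementation detail of this
  // helper.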

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// Used to guide target-specific optimizations, like loop strength
  /// reduction (LoopStrengthReduce.cpp) and memory optimization for
  /// address mode (CodeGenPrepare.cpp).
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
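  // Illustrative only (not part of this header): PTX addressing is limited to
  // a base (register or symbol) plus an immediate offset, e.g.
  //   ld.global.u32 %r1, [%rd1+16];
  // so addressing modes that require a scaled index register are not legal
  // here.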

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override {
    // Truncating 64-bit to 32-bit is free in SASS.
    if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
      return false;
    return SrcTy->getPrimitiveSizeInBits() == 64 &&
           DstTy->getPrimitiveSizeInBits() == 32;
  }

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                         EVT VT) const override {
    if (VT.isVector())
      return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
    return MVT::i1;
  }

  ConstraintType getConstraintType(StringRef Constraint) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;

  std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
                           const SmallVectorImpl<ISD::OutputArg> &,
                           std::optional<unsigned> FirstVAArg,
                           const CallBase &CB, unsigned UniqueCallSite) const;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
                      SelectionDAG &DAG) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  const NVPTXTargetMachine *nvTM;

  // PTX always uses 32-bit shift amounts
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
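  // Illustrative only (not part of this header): even 64-bit shifts take a
  // 32-bit amount operand in PTX, e.g.
  //   shl.b64 %rd1, %rd2, %r3;   // %r3 is a .b32 register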

  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  // Get the degree of precision we want from 32-bit floating point division
  // operations.
  NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF,
                                          const SDNode &N) const;

  // Get whether we should use a precise or approximate 32-bit floating point
  // sqrt instruction.
  bool usePrecSqrtF32(const SDNode *N = nullptr) const;

  // Get whether we should use instructions that flush floating-point denormals
  // to sign-preserving zero.
  bool useF32FTZ(const MachineFunction &MF) const;
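  // Illustrative only (not part of this header): these hooks choose between
  // PTX variants such as div.rn.f32 vs. div.approx.f32 and sqrt.rn.f32 vs.
  // sqrt.approx.f32, and whether the .ftz (flush-to-zero) modifier is applied.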

  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;

  unsigned combineRepeatedFPDivisors() const override { return 2; }

  bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT) const override {
    return true;
  }

  // The default is the same as pointer type, but brx.idx only accepts i32
  MVT getJumpTableRegTy(const DataLayout &) const override { return MVT::i32; }

  unsigned getJumpTableEncoding() const override;
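  // Illustrative only (not part of this header): jump tables are indexed with
  // a 32-bit register via the PTX brx.idx instruction, e.g.
  //   brx.idx %r1, $L_brx_targets;
  // where $L_brx_targets stands for a hypothetical branch-target-list label.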

  bool enableAggressiveFMAFusion(EVT VT) const override { return true; }

  // The default is to transform llvm.ctlz(x, false) (where false indicates that
  // x == 0 is not undefined behavior) into a branch that checks whether x is 0
  // and avoids calling ctlz in that case. We have a dedicated ctlz
  // instruction, so we say that ctlz is cheap to speculate.
  bool isCheapToSpeculateCtlz(Type *Ty) const override { return true; }
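  // Illustrative only (not part of this header): the dedicated instruction
  // referred to above is the PTX clz instruction (clz.b32 / clz.b64).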

  AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override;

  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    // There's rarely any point in packing something into a vector type if we
    // already have the source data.
    return true;
  }

  bool shouldInsertFencesForAtomic(const Instruction *) const override;

  AtomicOrdering
  atomicOperationOrderAfterFenceSplit(const Instruction *I) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
                                     EVT ToVT) const override;

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;
  bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits,
                                         const APInt &DemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth = 0) const override;

private:
  const NVPTXSubtarget &STI; // cache the subtarget here
  mutable unsigned GlobalUniqueCallSite;

  SDValue getParamSymbol(SelectionDAG &DAG, int I, EVT T) const;
  SDValue getCallParamSymbol(SelectionDAG &DAG, int I, EVT T) const;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;

  SDValue PromoteBinOpIfF32FTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCopyToReg_128(SDValue Op, SelectionDAG &DAG) const;
  unsigned getNumRegisters(LLVMContext &Context, EVT VT,
                           std::optional<MVT> RegisterVT) const override;
  bool
  splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                              SDValue *Parts, unsigned NumParts, MVT PartVT,
                              std::optional<CallingConv::ID> CC) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx,
                             const DataLayout &DL) const;
};

} // namespace llvm

#endif