#include "llvm/IR/IntrinsicsNVPTX.h"

#define DEBUG_TYPE "nvptx-lower-args"
  return "Lower pointer arguments of CUDA kernels";
char NVPTXLowerArgsLegacyPass::ID = 1;
183 "Lower arguments (NVPTX)",
false,
false)
static void convertToParamAS(Use *OldUse, Value *Param, bool HasCvtaParam,
                             bool IsGridConstant) {
  assert(I && "OldUse must be in an instruction");
  auto CloneInstInParamAS = [HasCvtaParam,
                             IsGridConstant](const IP &I) -> Value * {
      LI->setOperand(0, I.NewParam);
          GEP->getSourceElementType(), I.NewParam, Indices, GEP->getName(),
      NewGEP->setIsInBounds(GEP->isInBounds());
                                 BC->getName(), BC->getIterator());
      if (MI->getRawSource() == I.OldUse->get()) {
        CallInst *B = Builder.CreateMemTransferInst(
            ID, MI->getRawDest(), MI->getDestAlign(), I.NewParam,
            MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
        for (unsigned I : {0, 1})
          if (uint64_t Bytes = MI->getParamDereferenceableBytes(I))
            B->addDereferenceableParamAttr(I, Bytes);
    auto GetParamAddrCastToGeneric =
    auto *ParamInGenericAS =
        GetParamAddrCastToGeneric(I.NewParam, I.OldInstruction);
      for (auto [Idx, V] : enumerate(PHI->incoming_values())) {
        if (V.get() == I.OldUse->get())
          PHI->setIncomingValue(Idx, ParamInGenericAS);
      if (SI->getTrueValue() == I.OldUse->get())
        SI->setTrueValue(ParamInGenericAS);
      if (SI->getFalseValue() == I.OldUse->get())
        SI->setFalseValue(ParamInGenericAS);
    if (IsGridConstant) {
        I.OldUse->set(ParamInGenericAS);
        if (SI->getValueOperand() == I.OldUse->get())
          SI->setOperand(0, ParamInGenericAS);
        if (PI->getPointerOperand() == I.OldUse->get())
          PI->setOperand(0, ParamInGenericAS);
  while (!ItemsToConvert.empty()) {
    Value *NewInst = CloneInstInParamAS(I);

    if (NewInst && NewInst != I.OldInstruction) {
      for (Use &U : I.OldInstruction->uses())
        ItemsToConvert.push_back({&U, cast<Instruction>(U.getUser()), NewInst});

      InstructionsToDelete.push_back(I.OldInstruction);
    I->eraseFromParent();
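// Illustrative sketch, not taken from this file: for a read-only byval kernel
// argument %s, the conversion above rewrites a chain such as
//   %gep = getelementptr %struct.S, ptr %s, i64 0, i32 1
//   %v   = load i32, ptr %gep
// into the equivalent chain rooted at the addrspace(101) wrapper of %s, so the
// load reads directly from .param space and no local copy of %s is needed.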
  const Align NewArgAlign =
  if (CurArgAlign >= NewArgAlign)
                    << " instead of " << CurArgAlign.value() << " for " << *Arg
  std::queue<LoadContext> Worklist;
  Worklist.push({ArgInParamAS, 0});

  while (!Worklist.empty()) {
    LoadContext Ctx = Worklist.front();
    for (User *CurUser : Ctx.InitialVal->users()) {
        APInt OffsetAccumulated =

        if (!I->accumulateConstantOffset(DL, OffsetAccumulated))
        assert(Offset != OffsetLimit && "Expect Offset less than UINT64_MAX");
        Worklist.push({I, Ctx.Offset + Offset});
  for (Load &CurLoad : Loads) {
    Align NewLoadAlign(std::gcd(NewArgAlign.value(), CurLoad.Offset));
    Align CurLoadAlign = CurLoad.Inst->getAlign();
    CurLoad.Inst->setAlignment(std::max(NewLoadAlign, CurLoadAlign));
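// Worked example of the alignment math above (illustrative numbers only): with
// NewArgAlign = 16 and a load at byte offset 12 inside the byval struct, the
// provable alignment of that load is std::gcd(16, 12) == 4; the std::max()
// against the load's current alignment guarantees an existing, stronger
// alignment is never lowered.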
      &Arg, {}, Arg.getName() + ".param");
  using Base = PtrUseVisitor<ArgUseChecker>;

  SmallPtrSet<Instruction *, 4> Conditionals;
  ArgUseChecker(const DataLayout &DL, bool IsGridConstant)
      : PtrUseVisitor(DL), IsGridConstant(IsGridConstant) {}
  PtrInfo visitArgPtr(Argument &A) {
    assert(A.getType()->isPointerTy());
    IsOffsetKnown = false;
    Conditionals.clear();
    while (!(Worklist.empty() || PI.isAborted())) {
      UseToVisit ToVisit = Worklist.pop_back_val();
      U = ToVisit.UseAndIsOffsetKnown.getPointer();
        Conditionals.insert(I);
      LLVM_DEBUG(dbgs() << "Argument pointer escaped: " << *PI.getEscapingInst()
    else if (PI.isAborted())
      LLVM_DEBUG(dbgs() << "Pointer use needs a copy: " << *PI.getAbortingInst()
                      << " conditionals\n");
  void visitStoreInst(StoreInst &SI) {
    if (U->get() == SI.getValueOperand())
      return PI.setEscapedAndAborted(&SI);
      return PI.setAborted(&SI);
  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
      return PI.setEscapedAndAborted(&ASC);
  void visitPtrToIntInst(PtrToIntInst &I) {
  void visitPHINodeOrSelectInst(Instruction &I) {
  void visitPHINode(PHINode &PN) { enqueueUsers(PN); }
  void visitSelectInst(SelectInst &SI) { enqueueUsers(SI); }
  void visitMemTransferInst(MemTransferInst &II) {
    if (*U == II.getRawDest() && !IsGridConstant)
  void visitMemSetInst(MemSetInst &II) {
  IRB.CreateMemCpy(AllocA, AllocA->getAlign(), ArgInParam, AllocA->getAlign(),
  const bool HasCvtaParam = TM.getSubtargetImpl(*Func)->hasCvtaParam();
  ArgUseChecker AUC(DL, IsGridConstant);
  ArgUseChecker::PtrInfo PI = AUC.visitArgPtr(*Arg);
  bool ArgUseIsReadOnly = !(PI.isEscaped() || PI.isAborted());
  if (ArgUseIsReadOnly && AUC.Conditionals.empty()) {
    for (Use *U : UsesToUpdate)
  if (IsGridConstant || (HasCvtaParam && ArgUseIsReadOnly)) {
    LLVM_DEBUG(dbgs() << "Using non-copy pointer to " << *Arg << "\n");
    ParamSpaceArg->setOperand(0, Arg);
    copyByValParam(*Func, *Arg);
    assert(InsertPt != InsertPt->getParent()->end() &&
           "We don't call this function with Ptr being a terminator.");
                                              Ptr->getName(), InsertPt);
  Ptr->replaceAllUsesWith(PtrInGeneric);
  auto HandleIntToPtr = [](Value &V) {
    if (llvm::all_of(V.users(), [](User *U) { return isa<IntToPtrInst>(U); })) {
      for (User *U : UsersToUpdate)
          if (LI->getType()->isPointerTy() || LI->getType()->isIntegerTy()) {
                if (LI->getType()->isPointerTy())
  LLVM_DEBUG(dbgs() << "Lowering kernel args of " << F.getName() << "\n");
  LLVM_DEBUG(dbgs() << "Lowering function args of " << F.getName() << "\n");
bool NVPTXLowerArgsLegacyPass::runOnFunction(Function &F) {
  auto &TM = getAnalysis<TargetPassConfig>().getTM<NVPTXTargetMachine>();
  return new NVPTXLowerArgsLegacyPass();
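// A minimal sketch of the matching new-pass-manager entry point, assuming the
// pass object keeps a reference to the NVPTXTargetMachine (the member name NTM
// below is illustrative):
//
//   PreservedAnalyses NVPTXLowerArgsPass::run(Function &F,
//                                             FunctionAnalysisManager &AM) {
//     return processFunction(F, NTM) ? PreservedAnalyses::none()
//                                    : PreservedAnalyses::all();
//   }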
  LLVM_DEBUG(dbgs() << "Creating a copy of byval args of " << F.getName()
      copyByValParam(F, Arg);
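// copyFunctionByValArgs() returns whether it changed the IR, so a thin
// new-pass-manager wrapper can translate that into preserved analyses. A
// hedged sketch (the pass class name below is an assumption, not taken from
// this excerpt):
//
//   PreservedAnalyses NVPTXCopyByValArgsPass::run(Function &F,
//                                                 FunctionAnalysisManager &AM) {
//     return copyFunctionByValArgs(F) ? PreservedAnalyses::none()
//                                     : PreservedAnalyses::all();
//   }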