//===- MemoryLocation.cpp - Memory location descriptions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include <optional>
using namespace llvm;

void LocationSize::print(raw_ostream &OS) const {
  OS << "LocationSize::";
  if (*this == beforeOrAfterPointer())
    OS << "beforeOrAfterPointer";
  else if (*this == afterPointer())
    OS << "afterPointer";
  else if (*this == mapEmpty())
    OS << "mapEmpty";
  else if (*this == mapTombstone())
    OS << "mapTombstone";
  else if (isPrecise())
    OS << "precise(" << getValue() << ')';
  else
    OS << "upperBound(" << getValue() << ')';
}

MemoryLocation MemoryLocation::get(const LoadInst *LI) {
  const auto &DL = LI->getDataLayout();

  return MemoryLocation(
      LI->getPointerOperand(),
      LocationSize::precise(DL.getTypeStoreSize(LI->getType())),
      LI->getAAMetadata());
}
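
// Editor's illustration (not part of the original source): for a load such as
// "%v = load i32, ptr %p" on a target where i32 has a 4-byte store size, the
// location produced above is (%p, LocationSize::precise(4)) together with the
// load's AA metadata; the size is always the store size of the loaded type.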

MemoryLocation MemoryLocation::get(const StoreInst *SI) {
  const auto &DL = SI->getDataLayout();

  return MemoryLocation(SI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            SI->getValueOperand()->getType())),
                        SI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const VAArgInst *VI) {
  return MemoryLocation(VI->getPointerOperand(),
                        LocationSize::afterPointer(), VI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
  const auto &DL = CXI->getDataLayout();

  return MemoryLocation(CXI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            CXI->getCompareOperand()->getType())),
                        CXI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
  const auto &DL = RMWI->getDataLayout();

  return MemoryLocation(RMWI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            RMWI->getValOperand()->getType())),
                        RMWI->getAAMetadata());
}

std::optional<MemoryLocation>
MemoryLocation::getOrNone(const Instruction *Inst) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
    return get(cast<LoadInst>(Inst));
  case Instruction::Store:
    return get(cast<StoreInst>(Inst));
  case Instruction::VAArg:
    return get(cast<VAArgInst>(Inst));
  case Instruction::AtomicCmpXchg:
    return get(cast<AtomicCmpXchgInst>(Inst));
  case Instruction::AtomicRMW:
    return get(cast<AtomicRMWInst>(Inst));
  default:
    return std::nullopt;
  }
}
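
// Editor's sketch (assumption, not part of the original file): a typical
// client pairs getOrNone() with AliasAnalysis to ask whether two instructions
// may touch overlapping memory. The helper below is illustrative only; 'AA'
// is assumed to be an AAResults object available to the caller.
//
//   static bool mayOverlap(AAResults &AA, const Instruction *A,
//                          const Instruction *B) {
//     std::optional<MemoryLocation> LocA = MemoryLocation::getOrNone(A);
//     std::optional<MemoryLocation> LocB = MemoryLocation::getOrNone(B);
//     if (!LocA || !LocB)
//       return true; // Not a simple memory access; be conservative.
//     return !AA.isNoAlias(*LocA, *LocB);
//   }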

MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
  assert(MTI->getRawSource() == MTI->getArgOperand(1));
  return getForArgument(MTI, 1, nullptr);
}

MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
  assert(MI->getRawDest() == MI->getArgOperand(0));
  return getForArgument(MI, 0, nullptr);
}

std::optional<MemoryLocation>
MemoryLocation::getForDest(const CallBase *CB, const TargetLibraryInfo &TLI) {
  // Check that the only possible writes are to arguments.
  MemoryEffects WriteME = CB->getMemoryEffects() & MemoryEffects::writeOnly();
  if (!WriteME.onlyAccessesArgPointees())
    return std::nullopt;

  if (CB->hasOperandBundles())
    // TODO: remove implementation restriction
    return std::nullopt;

  Value *UsedV = nullptr;
  std::optional<unsigned> UsedIdx;
  for (unsigned i = 0; i < CB->arg_size(); i++) {
    if (!CB->getArgOperand(i)->getType()->isPointerTy())
      continue;
    if (CB->onlyReadsMemory(i))
      continue;
    if (!UsedV) {
      // First potentially writing parameter
      UsedV = CB->getArgOperand(i);
      UsedIdx = i;
      continue;
    }
    UsedIdx = std::nullopt;
    if (UsedV != CB->getArgOperand(i))
      // Can't describe writing to two distinct locations.
      // TODO: This results in imprecision when two values derived from the
      // same object are passed as arguments to the same function.
      return std::nullopt;
  }
  if (!UsedV)
    // We don't currently have a way to represent a "does not write" result
    // and thus have to be conservative and return unknown.
    return std::nullopt;

  if (UsedIdx)
    return getForArgument(CB, *UsedIdx, &TLI);
  return MemoryLocation::getBeforeOrAfter(UsedV, CB->getAAMetadata());
}
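
// Editor's illustration (assumed example, not from the original file): given a
// call such as
//
//   declare void @init(ptr writeonly, i64) memory(argmem: write)
//   call void @init(ptr %buf, i64 8)
//
// the only argument that may be written is %buf, so the function above returns
// getForArgument(CB, 0, &TLI). If two distinct pointer arguments may be
// written, or no pointer argument may be written, it returns std::nullopt.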

// If the mask for a memory op is a get.active.lane.mask intrinsic, we can
// possibly infer the size of memory written or read.
static std::optional<FixedVectorType *>
getKnownTypeFromMaskedOp(Value *Mask, VectorType *Ty) {
  using namespace llvm::PatternMatch;
  ConstantInt *Op0, *Op1;
  if (!match(Mask, m_Intrinsic<Intrinsic::get_active_lane_mask>(
                       m_ConstantInt(Op0), m_ConstantInt(Op1))))
    return std::nullopt;

  APInt LaneMaskLo = Op0->getValue();
  APInt LaneMaskHi = Op1->getValue();
  if (LaneMaskHi.ule(LaneMaskLo))
    return std::nullopt;

  APInt NumElts = LaneMaskHi - LaneMaskLo;
  if (NumElts.ugt(Ty->getElementCount().getKnownMinValue())) {
    if (isa<ScalableVectorType>(Ty))
      return std::nullopt;
    // Unlike scalable vectors, fixed vector types are guaranteed to have
    // exactly getKnownMinValue() elements, so the count can be clamped to it.
    NumElts = Ty->getElementCount().getKnownMinValue();
  }

  return FixedVectorType::get(Ty->getElementType(), NumElts.getZExtValue());
}
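
// Editor's illustration (assumed example): for a mask defined as
//   %m = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 0, i64 4)
// used with an <8 x i32> masked access, the active lanes are [0, 4), so the
// helper returns <4 x i32> and the access can be sized as 16 bytes rather
// than the 32-byte upper bound implied by the full vector type.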

MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                              unsigned ArgIdx,
                                              const TargetLibraryInfo *TLI) {
  AAMDNodes AATags = Call->getAAMetadata();
  const Value *Arg = Call->getArgOperand(ArgIdx);

  // We may be able to produce an exact size for known intrinsics.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    const DataLayout &DL = II->getDataLayout();

    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);

    case Intrinsic::experimental_memset_pattern:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        return MemoryLocation(
            Arg,
            LocationSize::precise(
                LenCI->getZExtValue() *
                DL.getTypeAllocSize(II->getArgOperand(1)->getType())),
            AATags);
      return MemoryLocation::getAfter(Arg, AATags);

    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end: {
      assert(ArgIdx == 0 && "Invalid argument index");
      auto *AI = dyn_cast<AllocaInst>(Arg);
      if (!AI)
        // lifetime of poison value.
        return MemoryLocation::getAfter(Arg, AATags);

      std::optional<TypeSize> AllocSize =
          AI->getAllocationSize(II->getDataLayout());
      return MemoryLocation(Arg,
                            AllocSize ? LocationSize::precise(*AllocSize)
                                      : LocationSize::afterPointer(),
                            AATags);
    }

    case Intrinsic::invariant_start:
      assert(ArgIdx == 1 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(0))->getZExtValue()),
          AATags);

    case Intrinsic::masked_load: {
      assert(ArgIdx == 0 && "Invalid argument index");

      auto *Ty = cast<VectorType>(II->getType());
      if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(2), Ty))
        return MemoryLocation(Arg, DL.getTypeStoreSize(*KnownType), AATags);

      return MemoryLocation(
          Arg, LocationSize::upperBound(DL.getTypeStoreSize(Ty)), AATags);
    }
    case Intrinsic::masked_store: {
      assert(ArgIdx == 1 && "Invalid argument index");

      auto *Ty = cast<VectorType>(II->getArgOperand(0)->getType());
      if (auto KnownType = getKnownTypeFromMaskedOp(II->getOperand(3), Ty))
        return MemoryLocation(Arg, DL.getTypeStoreSize(*KnownType), AATags);

      return MemoryLocation(
          Arg, LocationSize::upperBound(DL.getTypeStoreSize(Ty)), AATags);
    }

    case Intrinsic::invariant_end:
      // The first argument to an invariant.end is a "descriptor" type (e.g. a
      // pointer to an empty struct) which is never actually dereferenced.
      if (ArgIdx == 0)
        return MemoryLocation(Arg, LocationSize::precise(0), AATags);
      assert(ArgIdx == 2 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(1))->getZExtValue()),
          AATags);

    case Intrinsic::arm_neon_vld1:
      assert(ArgIdx == 0 && "Invalid argument index");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      return MemoryLocation(
          Arg, LocationSize::precise(DL.getTypeStoreSize(II->getType())),
          AATags);

    case Intrinsic::arm_neon_vst1:
      assert(ArgIdx == 0 && "Invalid argument index");
      return MemoryLocation(Arg,
                            LocationSize::precise(DL.getTypeStoreSize(
                                II->getArgOperand(1)->getType())),
                            AATags);
    }

    assert(
        !isa<MemTransferInst>(Call) &&
        "all memory transfer intrinsics should be handled by the switch above");
  }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  LibFunc F;
  if (TLI && TLI->getLibFunc(*Call, F) && TLI->has(F)) {
    switch (F) {
    case LibFunc_strcpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for str function");
      return MemoryLocation::getAfter(Arg, AATags);

    case LibFunc_memset_chk:
      assert(ArgIdx == 0 && "Invalid argument index for memset_chk");
      [[fallthrough]];
    case LibFunc_memcpy_chk: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memcpy_chk");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // memset_chk writes at most Len bytes, memcpy_chk reads/writes at most
        // Len bytes. They may read/write less if Len exceeds the specified max
        // size, in which case the call aborts.
        Size = LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_strncpy: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for strncpy");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // strncpy is guaranteed to write Len bytes, but only reads up to Len
        // bytes.
        Size = ArgIdx == 0 ? LocationSize::precise(Len->getZExtValue())
                           : LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_memset_pattern16:
    case LibFunc_memset_pattern4:
    case LibFunc_memset_pattern8:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memset_pattern16");
      if (ArgIdx == 1) {
        unsigned Size = 16;
        if (F == LibFunc_memset_pattern4)
          Size = 4;
        else if (F == LibFunc_memset_pattern8)
          Size = 8;
        return MemoryLocation(Arg, LocationSize::precise(Size), AATags);
      }
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_bcmp:
    case LibFunc_memcmp:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memcmp/bcmp");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memchr:
      assert((ArgIdx == 0) && "Invalid argument index for memchr");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memccpy:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memccpy");
      // We only know an upper bound on the number of bytes read/written.
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(3)))
        return MemoryLocation(
            Arg, LocationSize::upperBound(LenCI->getZExtValue()), AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    default:
      break;
    };
  }

  return MemoryLocation::getBeforeOrAfter(Call->getArgOperand(ArgIdx), AATags);
}
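
// Usage sketch (editor's addition, hedged): these factory functions are
// normally combined with AliasAnalysis queries. For example, to ask whether a
// memcpy may clobber the bytes read by a load, a pass might do the following,
// where 'LI', 'MCI' and 'AA' (a LoadInst*, a MemCpyInst* and an AAResults&)
// are assumed to exist in the caller:
//
//   MemoryLocation LoadLoc = MemoryLocation::get(LI);         // precise size
//   MemoryLocation DestLoc = MemoryLocation::getForDest(MCI); // dest operand
//   bool MayClobber = isModSet(AA.getModRefInfo(MCI, LoadLoc));
//   bool Overlaps = !AA.isNoAlias(LoadLoc, DestLoc);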