LLVM 22.0.0git
InstCombineLoadStoreAlloca.cpp
1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for load, store and alloca.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/MapVector.h"
15#include "llvm/ADT/SmallString.h"
16#include "llvm/ADT/Statistic.h"
17#include "llvm/Analysis/AliasAnalysis.h"
18#include "llvm/Analysis/Loads.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/IntrinsicInst.h"
21#include "llvm/IR/LLVMContext.h"
22#include "llvm/IR/PatternMatch.h"
23#include "llvm/Transforms/InstCombine/InstCombiner.h"
24#include "llvm/Transforms/Utils/Local.h"
25using namespace llvm;
26using namespace PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
30STATISTIC(NumDeadStore, "Number of dead stores eliminated");
31STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
32
33static cl::opt<unsigned> MaxCopiedFromConstantUsers(
34 "instcombine-max-copied-from-constant-users", cl::init(300),
35 cl::desc("Maximum users to visit in copy from constant transform"),
36 cl::Hidden);
37
38/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
39/// pointer to an alloca. Ignore any reads of the pointer, return false if we
40/// see any stores or other unknown uses. If we see pointer arithmetic, keep
41/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
42/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
43/// the alloca, and if the source pointer is a pointer to a constant memory
44/// location, we can optimize this.
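///
/// A hypothetical IR sketch of the pattern this looks for (names are
/// illustrative only, not taken from the source):
///
///   @g = private constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @g, i64 16, i1 false)
///   %v = load i32, ptr %a
///
/// Here %a is only written by the memcpy from constant memory, so reads of
/// %a can be redirected to @g and the alloca plus the copy removed.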
45static bool
46isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
47 MemTransferInst *&TheCopy,
48 SmallVectorImpl<Instruction *> &ToDelete) {
49 // We track lifetime intrinsics as we encounter them. If we decide to go
50 // ahead and replace the value with the memory location, this lets the caller
51 // quickly eliminate the markers.
52
53 using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
54 SmallPtrSet<ValueAndIsOffset, 32> Visited;
55 SmallVector<ValueAndIsOffset, 32> Worklist;
56 Worklist.emplace_back(V, false);
57 while (!Worklist.empty()) {
58 ValueAndIsOffset Elem = Worklist.pop_back_val();
59 if (!Visited.insert(Elem).second)
60 continue;
61 if (Visited.size() > MaxCopiedFromConstantUsers)
62 return false;
63
64 const auto [Value, IsOffset] = Elem;
65 for (auto &U : Value->uses()) {
66 auto *I = cast<Instruction>(U.getUser());
67
68 if (auto *LI = dyn_cast<LoadInst>(I)) {
69 // Ignore non-volatile loads, they are always ok.
70 if (!LI->isSimple()) return false;
71 continue;
72 }
73
74 if (isa<PHINode, SelectInst>(I)) {
75 // We set IsOffset=true, to forbid the memcpy from occurring after the
76 // phi: If one of the phi operands is not based on the alloca, we
77 // would incorrectly omit a write.
78 Worklist.emplace_back(I, true);
79 continue;
80 }
81 if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
82 // If uses of the bitcast are ok, we are ok.
83 Worklist.emplace_back(I, IsOffset);
84 continue;
85 }
86 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
87 // If the GEP has all zero indices, it doesn't offset the pointer. If it
88 // doesn't, it does.
89 Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
90 continue;
91 }
92
93 if (auto *Call = dyn_cast<CallBase>(I)) {
94 // If this is the function being called then we treat it like a load and
95 // ignore it.
96 if (Call->isCallee(&U))
97 continue;
98
99 unsigned DataOpNo = Call->getDataOperandNo(&U);
100 bool IsArgOperand = Call->isArgOperand(&U);
101
102 // Inalloca arguments are clobbered by the call.
103 if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
104 return false;
105
106 // If this call site doesn't modify the memory, then we know it is just
107 // a load (but one that potentially returns the value itself), so we can
108 // ignore it if we know that the value isn't captured.
109 bool NoCapture = Call->doesNotCapture(DataOpNo);
110 if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
111 (Call->onlyReadsMemory(DataOpNo) && NoCapture))
112 continue;
113 }
114
115 // Lifetime intrinsics can be handled by the caller.
116 if (I->isLifetimeStartOrEnd()) {
117 assert(I->use_empty() && "Lifetime markers have no result to use!");
118 ToDelete.push_back(I);
119 continue;
120 }
121
122 // If this isn't our memcpy/memmove, reject it as something we can't
123 // handle.
124 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
125 if (!MI)
126 return false;
127
128 // If the transfer is volatile, reject it.
129 if (MI->isVolatile())
130 return false;
131
132 // If the transfer is using the alloca as a source of the transfer, then
133 // ignore it since it is a load (unless the transfer is volatile).
134 if (U.getOperandNo() == 1)
135 continue;
136
137 // If we already have seen a copy, reject the second one.
138 if (TheCopy) return false;
139
140 // If the pointer has been offset from the start of the alloca, we can't
141 // safely handle this.
142 if (IsOffset) return false;
143
144 // If the memintrinsic isn't using the alloca as the dest, reject it.
145 if (U.getOperandNo() != 0) return false;
146
147 // If the source of the memcpy/move is not constant, reject it.
148 if (isModSet(AA->getModRefInfoMask(MI->getSource())))
149 return false;
150
151 // Otherwise, the transform is safe. Remember the copy instruction.
152 TheCopy = MI;
153 }
154 }
155 return true;
156}
157
158/// isOnlyCopiedFromConstantMemory - Return true if the specified alloca is only
159/// modified by a copy from a constant memory location. If we can prove this, we
160/// can replace any uses of the alloca with uses of the memory location
161/// directly.
162static MemTransferInst *
163isOnlyCopiedFromConstantMemory(AAResults *AA,
164 AllocaInst *AI,
165 SmallVectorImpl<Instruction *> &ToDelete) {
166 MemTransferInst *TheCopy = nullptr;
167 if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
168 return TheCopy;
169 return nullptr;
170}
171
172/// Returns true if V is dereferenceable for size of alloca.
173static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
174 const DataLayout &DL) {
175 if (AI->isArrayAllocation())
176 return false;
177 uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
178 if (!AllocaSize)
179 return false;
180 return isDereferenceableAndAlignedPointer(V, AI->getAlign(),
181 APInt(64, AllocaSize), DL);
182}
183
184static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
185 AllocaInst &AI, DominatorTree &DT) {
186 // Check for array size of 1 (scalar allocation).
187 if (!AI.isArrayAllocation()) {
188 // i32 1 is the canonical array size for scalar allocations.
189 if (AI.getArraySize()->getType()->isIntegerTy(32))
190 return nullptr;
191
192 // Canonicalize it.
193 return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
194 }
195
196 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
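  // A hypothetical before/after sketch of that rewrite (illustrative names):
  //   %buf = alloca i32, i32 8, align 4
  // becomes
  //   %buf = alloca [8 x i32], align 4
  // so later folds only have to reason about a single fixed-size object.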
197 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
198 if (C->getValue().getActiveBits() <= 64) {
199 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
200 AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
201 nullptr, AI.getName());
202 New->setAlignment(AI.getAlign());
203 New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
204
205 replaceAllDbgUsesWith(AI, *New, *New, DT);
206 return IC.replaceInstUsesWith(AI, New);
207 }
208 }
209
210 if (isa<UndefValue>(AI.getArraySize()))
211 return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
212
213 // Ensure that the alloca array size argument has type equal to the offset
214 // size of the alloca() pointer, which, in the typical case, is intptr_t,
215 // so that any casting is exposed early.
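  // For example (hypothetical, assuming 64-bit pointers):
  //   %a = alloca i32, i8 %n
  // becomes
  //   %n.ext = zext i8 %n to i64
  //   %a = alloca i32, i64 %n.ext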
216 Type *PtrIdxTy = IC.getDataLayout().getIndexType(AI.getType());
217 if (AI.getArraySize()->getType() != PtrIdxTy) {
218 Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), PtrIdxTy, false);
219 return IC.replaceOperand(AI, 0, V);
220 }
221
222 return nullptr;
223}
224
225namespace {
226// If I and V are pointers in different address spaces, it is not allowed to
227// use replaceAllUsesWith since I and V have different types. A
228// non-target-specific transformation should not use addrspacecast on V since
229// the two address spaces may be disjoint depending on the target.
230//
231// This class chases down uses of the old pointer until reaching the load
232// instructions, then replaces the old pointer in the load instructions with
233// the new pointer. If during the chasing it sees bitcast or GEP, it will
234// create new bitcast or GEP with the new pointer and use them in the load
235// instruction.
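//
// A hypothetical sketch of the effect (illustrative names and address
// spaces): if %a is an alloca in addrspace(5) that is only copied from @g in
// addrspace(4), then
//   %p = getelementptr i8, ptr addrspace(5) %a, i64 8
//   %v = load i32, ptr addrspace(5) %p
// is rebuilt as
//   %p.new = getelementptr i8, ptr addrspace(4) @g, i64 8
//   %v     = load i32, ptr addrspace(4) %p.new
// instead of calling replaceAllUsesWith on pointers of different types.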
236class PointerReplacer {
237public:
238 PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
239 : IC(IC), Root(Root), FromAS(SrcAS) {}
240
241 bool collectUsers();
242 void replacePointer(Value *V);
243
244private:
245 void replace(Instruction *I);
246 Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }
247 bool isAvailable(Instruction *I) const {
248 return I == &Root || UsersToReplace.contains(I);
249 }
250
251 bool isEqualOrValidAddrSpaceCast(const Instruction *I,
252 unsigned FromAS) const {
253 const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
254 if (!ASC)
255 return false;
256 unsigned ToAS = ASC->getDestAddressSpace();
257 return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
258 }
259
260 SmallPtrSet<Instruction *, 32> UsersToReplace;
261 MapVector<Value *, Value *> WorkMap;
262 InstCombinerImpl &IC;
263 Instruction &Root;
264 unsigned FromAS;
265};
266} // end anonymous namespace
267
268bool PointerReplacer::collectUsers() {
269 SmallVector<Instruction *> Worklist;
270 SmallSetVector<Instruction *, 32> ValuesToRevisit;
271
272 auto PushUsersToWorklist = [&](Instruction *Inst) {
273 for (auto *U : Inst->users())
274 if (auto *I = dyn_cast<Instruction>(U))
275 if (!isAvailable(I) && !ValuesToRevisit.contains(I))
276 Worklist.emplace_back(I);
277 };
278
279 auto TryPushInstOperand = [&](Instruction *InstOp) {
280 if (!UsersToReplace.contains(InstOp)) {
281 if (!ValuesToRevisit.insert(InstOp))
282 return false;
283 Worklist.emplace_back(InstOp);
284 }
285 return true;
286 };
287
288 PushUsersToWorklist(&Root);
289 while (!Worklist.empty()) {
290 Instruction *Inst = Worklist.pop_back_val();
291 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
292 if (Load->isVolatile())
293 return false;
294 UsersToReplace.insert(Load);
295 } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
296 /// TODO: Handle poison and null pointers for PHI and select.
297 // If all incoming values are available, mark this PHI as
298 // replaceable and push its users into the worklist.
299 bool IsReplaceable = true;
300 if (all_of(PHI->incoming_values(), [&](Value *V) {
301 if (!isa<Instruction>(V))
302 return IsReplaceable = false;
303 return isAvailable(cast<Instruction>(V));
304 })) {
305 UsersToReplace.insert(PHI);
306 PushUsersToWorklist(PHI);
307 continue;
308 }
309
310 // Either an incoming value is not an instruction or not all
311 // incoming values are available. If this PHI was already
312 // visited prior to this iteration, return false.
313 if (!IsReplaceable || !ValuesToRevisit.insert(PHI))
314 return false;
315
316 // Push PHI back into the stack, followed by unavailable
317 // incoming values.
318 Worklist.emplace_back(PHI);
319 for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
320 if (!TryPushInstOperand(cast<Instruction>(PHI->getIncomingValue(Idx))))
321 return false;
322 }
323 } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
324 auto *TrueInst = dyn_cast<Instruction>(SI->getTrueValue());
325 auto *FalseInst = dyn_cast<Instruction>(SI->getFalseValue());
326 if (!TrueInst || !FalseInst)
327 return false;
328
329 if (isAvailable(TrueInst) && isAvailable(FalseInst)) {
330 UsersToReplace.insert(SI);
331 PushUsersToWorklist(SI);
332 continue;
333 }
334
335 // Push select back onto the stack, followed by unavailable true/false
336 // value.
337 Worklist.emplace_back(SI);
338 if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst))
339 return false;
340 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
341 UsersToReplace.insert(GEP);
342 PushUsersToWorklist(GEP);
343 } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
344 if (MI->isVolatile())
345 return false;
346 UsersToReplace.insert(Inst);
347 } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
348 UsersToReplace.insert(Inst);
349 PushUsersToWorklist(Inst);
350 } else if (Inst->isLifetimeStartOrEnd()) {
351 continue;
352 } else {
353 // TODO: For arbitrary uses with address space mismatches, should we check
354 // if we can introduce a valid addrspacecast?
355 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
356 return false;
357 }
358 }
359
360 return true;
361}
362
363void PointerReplacer::replacePointer(Value *V) {
364 assert(cast<PointerType>(Root.getType()) != cast<PointerType>(V->getType()) &&
365 "Invalid usage");
366 WorkMap[&Root] = V;
367 SmallVector<Instruction *> Worklist;
368 SetVector<Instruction *> PostOrderWorklist;
369 SmallPtrSet<Instruction *, 32> Visited;
370
371 // Perform a postorder traversal of the users of Root.
372 Worklist.push_back(&Root);
373 while (!Worklist.empty()) {
374 Instruction *I = Worklist.back();
375
376 // If I has not been processed before, push each of its
377 // replaceable users into the worklist.
378 if (Visited.insert(I).second) {
379 for (auto *U : I->users()) {
380 auto *UserInst = cast<Instruction>(U);
381 if (UsersToReplace.contains(UserInst) && !Visited.contains(UserInst))
382 Worklist.push_back(UserInst);
383 }
384 // Otherwise, users of I have already been pushed into
385 // the PostOrderWorklist. Push I as well.
386 } else {
387 PostOrderWorklist.insert(I);
388 Worklist.pop_back();
389 }
390 }
391
392 // Replace pointers in reverse-postorder.
393 for (Instruction *I : reverse(PostOrderWorklist))
394 replace(I);
395}
396
397void PointerReplacer::replace(Instruction *I) {
398 if (getReplacement(I))
399 return;
400
401 if (auto *LT = dyn_cast<LoadInst>(I)) {
402 auto *V = getReplacement(LT->getPointerOperand());
403 assert(V && "Operand not replaced");
404 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
405 LT->getAlign(), LT->getOrdering(),
406 LT->getSyncScopeID());
407 NewI->takeName(LT);
408 copyMetadataForLoad(*NewI, *LT);
409
410 IC.InsertNewInstWith(NewI, LT->getIterator());
411 IC.replaceInstUsesWith(*LT, NewI);
412 // LT has actually been replaced by NewI. It is useless to insert LT into
413 // the map. Instead, we insert NewI into the map to indicate this is the
414 // replacement (new value).
415 WorkMap[NewI] = NewI;
416 } else if (auto *PHI = dyn_cast<PHINode>(I)) {
417 // Create a new PHI by replacing any incoming value that is a user of the
418 // root pointer and has a replacement.
419 Value *V = WorkMap.lookup(PHI->getIncomingValue(0));
420 PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());
421 for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
422 Value *V = WorkMap.lookup(PHI->getIncomingValue(I));
423 PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));
424 }
425 WorkMap[PHI] = PHI;
426 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
427 auto *V = getReplacement(GEP->getPointerOperand());
428 assert(V && "Operand not replaced");
429 SmallVector<Value *, 8> Indices(GEP->indices());
430 auto *NewI =
431 GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
432 IC.InsertNewInstWith(NewI, GEP->getIterator());
433 NewI->takeName(GEP);
434 NewI->setNoWrapFlags(GEP->getNoWrapFlags());
435 WorkMap[GEP] = NewI;
436 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
437 Value *TrueValue = SI->getTrueValue();
438 Value *FalseValue = SI->getFalseValue();
439 if (Value *Replacement = getReplacement(TrueValue))
440 TrueValue = Replacement;
441 if (Value *Replacement = getReplacement(FalseValue))
442 FalseValue = Replacement;
443 auto *NewSI = SelectInst::Create(SI->getCondition(), TrueValue, FalseValue,
444 SI->getName(), nullptr, SI);
445 IC.InsertNewInstWith(NewSI, SI->getIterator());
446 NewSI->takeName(SI);
447 WorkMap[SI] = NewSI;
448 } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
449 auto *DestV = MemCpy->getRawDest();
450 auto *SrcV = MemCpy->getRawSource();
451
452 if (auto *DestReplace = getReplacement(DestV))
453 DestV = DestReplace;
454 if (auto *SrcReplace = getReplacement(SrcV))
455 SrcV = SrcReplace;
456
457 IC.Builder.SetInsertPoint(MemCpy);
458 auto *NewI = IC.Builder.CreateMemTransferInst(
459 MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,
460 MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());
461 AAMDNodes AAMD = MemCpy->getAAMetadata();
462 if (AAMD)
463 NewI->setAAMetadata(AAMD);
464
465 IC.eraseInstFromFunction(*MemCpy);
466 WorkMap[MemCpy] = NewI;
467 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
468 auto *V = getReplacement(ASC->getPointerOperand());
469 assert(V && "Operand not replaced");
470 assert(isEqualOrValidAddrSpaceCast(
471 ASC, V->getType()->getPointerAddressSpace()) &&
472 "Invalid address space cast!");
473
474 if (V->getType()->getPointerAddressSpace() !=
475 ASC->getType()->getPointerAddressSpace()) {
476 auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
477 NewI->takeName(ASC);
478 IC.InsertNewInstWith(NewI, ASC->getIterator());
479 WorkMap[ASC] = NewI;
480 } else {
481 WorkMap[ASC] = V;
482 }
483
484 } else {
485 llvm_unreachable("should never reach here");
486 }
487}
488
489Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
490 if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
491 return I;
492
493 if (AI.getAllocatedType()->isSized()) {
494 // Move all alloca's of zero byte objects to the entry block and merge them
495 // together. Note that we only do this for alloca's, because malloc should
496 // allocate and return a unique pointer, even for a zero byte allocation.
497 if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {
498 // For a zero sized alloca there is no point in doing an array allocation.
499 // This is helpful if the array size is a complicated expression not used
500 // elsewhere.
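    // A hypothetical example: a zero-sized alloca with a dynamic count
    //   %a = alloca {}, i32 %n
    // is first canonicalized here to
    //   %a = alloca {}, i32 1
    // and then merged with (or moved to) the entry block below.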
501 if (AI.isArrayAllocation())
502 return replaceOperand(AI, 0,
503 ConstantInt::get(AI.getArraySize()->getType(), 1));
504
505 // Get the first instruction in the entry block.
506 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
507 BasicBlock::iterator FirstInst = EntryBlock.getFirstNonPHIOrDbg();
508 if (&*FirstInst != &AI) {
509 // If the entry block doesn't start with a zero-size alloca then move
510 // this one to the start of the entry block. There is no problem with
511 // dominance as the array size was forced to a constant earlier already.
512 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
513 if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
514 DL.getTypeAllocSize(EntryAI->getAllocatedType())
515 .getKnownMinValue() != 0) {
516 AI.moveBefore(FirstInst);
517 return &AI;
518 }
519
520 // Replace this zero-sized alloca with the one at the start of the entry
521 // block after ensuring that the address will be aligned enough for both
522 // types.
523 const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
524 EntryAI->setAlignment(MaxAlign);
525 return replaceInstUsesWith(AI, EntryAI);
526 }
527 }
528 }
529
530 // Check to see if this allocation is only modified by a memcpy/memmove from
531 // a memory location whose alignment is equal to or exceeds that of the
532 // allocation. If this is the case, we can change all users to use the
533 // constant memory location instead. This is commonly produced by the CFE by
534 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
535 // is only subsequently read.
536 SmallVector<Instruction *, 4> ToDelete;
537 if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
538 Value *TheSrc = Copy->getSource();
539 Align AllocaAlign = AI.getAlign();
540 Align SourceAlign = getOrEnforceKnownAlignment(
541 TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
542 if (AllocaAlign <= SourceAlign &&
543 isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
544 !isa<Instruction>(TheSrc)) {
545 // FIXME: Can we sink instructions without violating dominance when TheSrc
546 // is an instruction instead of a constant or argument?
547 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
548 LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
549 unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
550 if (AI.getAddressSpace() == SrcAddrSpace) {
551 for (Instruction *Delete : ToDelete)
552 eraseInstFromFunction(*Delete);
553
554 Instruction *NewI = replaceInstUsesWith(AI, TheSrc);
555 eraseInstFromFunction(*Copy);
556 ++NumGlobalCopies;
557 return NewI;
558 }
559
560 PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
561 if (PtrReplacer.collectUsers()) {
562 for (Instruction *Delete : ToDelete)
563 eraseInstFromFunction(*Delete);
564
565 PtrReplacer.replacePointer(TheSrc);
566 ++NumGlobalCopies;
567 }
568 }
569 }
570
571 // At last, use the generic allocation site handler to aggressively remove
572 // unused allocas.
573 return visitAllocSite(AI);
574}
575
576// Are we allowed to form an atomic load or store of this type?
577static bool isSupportedAtomicType(Type *Ty) {
578 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
579}
580
581/// Helper to combine a load to a new type.
582///
583/// This just does the work of combining a load to a new type. It handles
584/// metadata, etc., and returns the new instruction. The \c NewTy should be the
585/// loaded *value* type. This will convert it to a pointer, cast the operand to
586/// that pointer type, load it, etc.
587///
588/// Note that this will create all of the instructions with whatever insert
589/// point the \c InstCombinerImpl currently is using.
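///
/// A hypothetical use (illustrative types and names): given
///   %x = load i64, ptr %p
/// calling this helper with NewTy = double produces
///   %x2 = load double, ptr %p
/// at the current insert point, with alignment, ordering and load metadata
/// copied from the original instruction.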
590LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
591 const Twine &Suffix) {
592 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
593 "can't fold an atomic load to requested type");
594
595 LoadInst *NewLoad =
596 Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
597 LI.isVolatile(), LI.getName() + Suffix);
598 NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
599 copyMetadataForLoad(*NewLoad, LI);
600 return NewLoad;
601}
602
603/// Combine a store to a new type.
604///
605/// Returns the newly created store instruction.
606static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
607 Value *V) {
608 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
609 "can't fold an atomic store of requested type");
610
611 Value *Ptr = SI.getPointerOperand();
612 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
613 SI.getAllMetadata(MD);
614
615 StoreInst *NewStore =
616 IC.Builder.CreateAlignedStore(V, Ptr, SI.getAlign(), SI.isVolatile());
617 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
618 for (const auto &MDPair : MD) {
619 unsigned ID = MDPair.first;
620 MDNode *N = MDPair.second;
621 // Note, essentially every kind of metadata should be preserved here! This
622 // routine is supposed to clone a store instruction changing *only its
623 // type*. The only metadata it makes sense to drop is metadata which is
624 // invalidated when the pointer type changes. This should essentially
625 // never be the case in LLVM, but we explicitly switch over only known
626 // metadata to be conservatively correct. If you are adding metadata to
627 // LLVM which pertains to stores, you almost certainly want to add it
628 // here.
629 switch (ID) {
630 case LLVMContext::MD_dbg:
631 case LLVMContext::MD_DIAssignID:
632 case LLVMContext::MD_tbaa:
633 case LLVMContext::MD_prof:
634 case LLVMContext::MD_fpmath:
635 case LLVMContext::MD_tbaa_struct:
636 case LLVMContext::MD_alias_scope:
637 case LLVMContext::MD_noalias:
638 case LLVMContext::MD_nontemporal:
639 case LLVMContext::MD_mem_parallel_loop_access:
640 case LLVMContext::MD_access_group:
641 // All of these directly apply.
642 NewStore->setMetadata(ID, N);
643 break;
644 case LLVMContext::MD_invariant_load:
645 case LLVMContext::MD_nonnull:
646 case LLVMContext::MD_noundef:
647 case LLVMContext::MD_range:
648 case LLVMContext::MD_align:
649 case LLVMContext::MD_dereferenceable:
650 case LLVMContext::MD_dereferenceable_or_null:
651 // These don't apply for stores.
652 break;
653 }
654 }
655
656 return NewStore;
657}
658
659/// Combine loads to match the type of their uses' value after looking
660/// through intervening bitcasts.
661///
662/// The core idea here is that if the result of a load is used in an operation,
663/// we should load the type most conducive to that operation. For example, when
664/// loading an integer and converting that immediately to a pointer, we should
665/// instead directly load a pointer.
666///
667/// However, this routine must never change the width of a load or the number of
668/// loads as that would introduce a semantic change. This combine is expected to
669/// be a semantic no-op which just allows loads to more closely model the types
670/// of their consuming operations.
671///
672/// Currently, we also refuse to change the precise type used for an atomic load
673/// or a volatile load. This is debatable, and might be reasonable to change
674/// later. However, it is risky in case some backend or other part of LLVM is
675/// relying on the exact type loaded to select appropriate atomic operations.
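///
/// A hypothetical instance (illustrative names): the pair
///   %v = load i32, ptr %p
///   %f = bitcast i32 %v to float
/// becomes "%f = load float, ptr %p" with the cast erased, while an
/// integer<->pointer pair such as load i64 + inttoptr is deliberately left
/// alone to avoid type punning.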
676static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
677 LoadInst &Load) {
678 // FIXME: We could probably with some care handle both volatile and ordered
679 // atomic loads here but it isn't clear that this is important.
680 if (!Load.isUnordered())
681 return nullptr;
682
683 if (Load.use_empty())
684 return nullptr;
685
686 // swifterror values can't be bitcasted.
687 if (Load.getPointerOperand()->isSwiftError())
688 return nullptr;
689
690 // Fold away bit casts of the loaded value by loading the desired type.
691 // Note that we should not do this for pointer<->integer casts,
692 // because that would result in type punning.
693 if (Load.hasOneUse()) {
694 // Don't transform when the type is x86_amx; this keeps the pass that
695 // lowers the x86_amx type happy.
696 Type *LoadTy = Load.getType();
697 if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
698 assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
699 if (BC->getType()->isX86_AMXTy())
700 return nullptr;
701 }
702
703 if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
704 Type *DestTy = CastUser->getDestTy();
705 if (CastUser->isNoopCast(IC.getDataLayout()) &&
706 LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
707 (!Load.isAtomic() || isSupportedAtomicType(DestTy))) {
708 LoadInst *NewLoad = IC.combineLoadToNewType(Load, DestTy);
709 CastUser->replaceAllUsesWith(NewLoad);
710 IC.eraseInstFromFunction(*CastUser);
711 return &Load;
712 }
713 }
714 }
715
716 // FIXME: We should also canonicalize loads of vectors when their elements are
717 // cast to other types.
718 return nullptr;
719}
720
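// A hypothetical sketch of the unpacking performed below (illustrative
// types): when the struct layout has no padding,
//   %v = load { i32, i32 }, ptr %p
// becomes
//   %e0 = load i32, ptr %p
//   %p1 = getelementptr inbounds i8, ptr %p, i64 4
//   %e1 = load i32, ptr %p1
//   %t  = insertvalue { i32, i32 } poison, i32 %e0, 0
//   %v  = insertvalue { i32, i32 } %t, i32 %e1, 1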
721static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
722 // FIXME: We could probably with some care handle both volatile and atomic
723 // stores here but it isn't clear that this is important.
724 if (!LI.isSimple())
725 return nullptr;
726
727 Type *T = LI.getType();
728 if (!T->isAggregateType())
729 return nullptr;
730
731 StringRef Name = LI.getName();
732
733 if (auto *ST = dyn_cast<StructType>(T)) {
734 // If the struct has only one element, we unpack it.
735 auto NumElements = ST->getNumElements();
736 if (NumElements == 1) {
737 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
738 ".unpack");
739 NewLoad->setAAMetadata(LI.getAAMetadata());
740 // Copy invariant metadata from parent load.
741 NewLoad->copyMetadata(LI, LLVMContext::MD_invariant_load);
742 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
743 PoisonValue::get(T), NewLoad, 0, Name));
744 }
745
746 // We don't want to break loads with padding here as we'd lose
747 // the knowledge that padding exists for the rest of the pipeline.
748 const DataLayout &DL = IC.getDataLayout();
749 auto *SL = DL.getStructLayout(ST);
750
751 if (SL->hasPadding())
752 return nullptr;
753
754 const auto Align = LI.getAlign();
755 auto *Addr = LI.getPointerOperand();
756 auto *IdxType = DL.getIndexType(Addr->getType());
757
758 Value *V = PoisonValue::get(T);
759 for (unsigned i = 0; i < NumElements; i++) {
760 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
761 Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
762 Name + ".elt");
763 auto *L = IC.Builder.CreateAlignedLoad(
764 ST->getElementType(i), Ptr,
765 commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue()),
766 Name + ".unpack");
767 // Propagate AA metadata. It'll still be valid on the narrowed load.
768 L->setAAMetadata(LI.getAAMetadata());
769 // Copy invariant metadata from parent load.
770 L->copyMetadata(LI, LLVMContext::MD_invariant_load);
771 V = IC.Builder.CreateInsertValue(V, L, i);
772 }
773
774 V->setName(Name);
775 return IC.replaceInstUsesWith(LI, V);
776 }
777
778 if (auto *AT = dyn_cast<ArrayType>(T)) {
779 auto *ET = AT->getElementType();
780 auto NumElements = AT->getNumElements();
781 if (NumElements == 1) {
782 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
783 NewLoad->setAAMetadata(LI.getAAMetadata());
784 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
785 PoisonValue::get(T), NewLoad, 0, Name));
786 }
787
788 // Bail out if the array is too large. Ideally we would like to optimize
789 // arrays of arbitrary size but this has a terrible impact on compile time.
790 // The threshold here is chosen arbitrarily, maybe needs a little bit of
791 // tuning.
792 if (NumElements > IC.MaxArraySizeForCombine)
793 return nullptr;
794
795 const DataLayout &DL = IC.getDataLayout();
796 TypeSize EltSize = DL.getTypeAllocSize(ET);
797 const auto Align = LI.getAlign();
798
799 auto *Addr = LI.getPointerOperand();
800 auto *IdxType = Type::getInt64Ty(T->getContext());
801 auto *Zero = ConstantInt::get(IdxType, 0);
802
803 Value *V = PoisonValue::get(T);
804 TypeSize Offset = TypeSize::getZero();
805 for (uint64_t i = 0; i < NumElements; i++) {
806 Value *Indices[2] = {
807 Zero,
808 ConstantInt::get(IdxType, i),
809 };
810 auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices),
811 Name + ".elt");
812 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
813 auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
814 EltAlign, Name + ".unpack");
815 L->setAAMetadata(LI.getAAMetadata());
816 V = IC.Builder.CreateInsertValue(V, L, i);
817 Offset += EltSize;
818 }
819
820 V->setName(Name);
821 return IC.replaceInstUsesWith(LI, V);
822 }
823
824 return nullptr;
825}
826
827// If we can determine that all possible objects pointed to by the provided
828// pointer value are, not only dereferenceable, but also definitively less than
829// or equal to the provided maximum size, then return true. Otherwise, return
830// false (constant globals and allocas are the cases whose size is known here).
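// A hypothetical example: with MaxSize = 16, "%a = alloca [4 x i32]" and a
// constant global "@g = constant [2 x i64] ..." both qualify, whereas an
// alloca with a runtime array size or an interposable alias does not.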
831//
832// FIXME: This should probably live in ValueTracking (or similar).
833static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
834 const DataLayout &DL) {
835 SmallPtrSet<Value *, 4> Visited;
836 SmallVector<Value *, 4> Worklist(1, V);
837
838 do {
839 Value *P = Worklist.pop_back_val();
840 P = P->stripPointerCasts();
841
842 if (!Visited.insert(P).second)
843 continue;
844
845 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
846 Worklist.push_back(SI->getTrueValue());
847 Worklist.push_back(SI->getFalseValue());
848 continue;
849 }
850
851 if (PHINode *PN = dyn_cast<PHINode>(P)) {
852 append_range(Worklist, PN->incoming_values());
853 continue;
854 }
855
856 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
857 if (GA->isInterposable())
858 return false;
859 Worklist.push_back(GA->getAliasee());
860 continue;
861 }
862
863 // If we know how big this object is, and it is less than MaxSize, continue
864 // searching. Otherwise, return false.
865 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
866 if (!AI->getAllocatedType()->isSized())
867 return false;
868
869 ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
870 if (!CS)
871 return false;
872
873 TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
874 if (TS.isScalable())
875 return false;
876 // Make sure that, even if the multiplication below would wrap as an
877 // uint64_t, we still do the right thing.
878 if ((CS->getValue().zext(128) * APInt(128, TS.getFixedValue()))
879 .ugt(MaxSize))
880 return false;
881 continue;
882 }
883
884 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
885 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
886 return false;
887
888 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
889 if (InitSize > MaxSize)
890 return false;
891 continue;
892 }
893
894 return false;
895 } while (!Worklist.empty());
896
897 return true;
898}
899
900// If we're indexing into an object of a known size, and the outer index is
901// not a constant, but having any value but zero would lead to undefined
902// behavior, replace it with zero.
903//
904// For example, if we have:
905// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
906// ...
907// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
908// ... = load i32* %arrayidx, align 4
909// Then we know that we can replace %x in the GEP with i64 0.
910//
911// FIXME: We could fold any GEP index to zero that would cause UB if it were
912// not zero. Currently, we only handle the first such index. Also, we could
913// also search through non-zero constant indices if we kept track of the
914// offsets those indices implied.
915static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
916 GetElementPtrInst *GEPI, Instruction *MemI,
917 unsigned &Idx) {
918 if (GEPI->getNumOperands() < 2)
919 return false;
920
921 // Find the first non-zero index of a GEP. If all indices are zero, return
922 // one past the last index.
923 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
924 unsigned I = 1;
925 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
926 Value *V = GEPI->getOperand(I);
927 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
928 if (CI->isZero())
929 continue;
930
931 break;
932 }
933
934 return I;
935 };
936
937 // Skip through initial 'zero' indices, and find the corresponding pointer
938 // type. See if the next index is not a constant.
939 Idx = FirstNZIdx(GEPI);
940 if (Idx == GEPI->getNumOperands())
941 return false;
942 if (isa<Constant>(GEPI->getOperand(Idx)))
943 return false;
944
945 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
946 Type *SourceElementType = GEPI->getSourceElementType();
947 // Size information about scalable vectors is not available, so we cannot
948 // deduce whether indexing at n is undefined behaviour or not. Bail out.
949 if (SourceElementType->isScalableTy())
950 return false;
951
952 Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
953 if (!AllocTy || !AllocTy->isSized())
954 return false;
955 const DataLayout &DL = IC.getDataLayout();
956 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
957
958 // If there are more indices after the one we might replace with a zero, make
959 // sure they're all non-negative. If any of them are negative, the overall
960 // address being computed might be before the base address determined by the
961 // first non-zero index.
962 auto IsAllNonNegative = [&]() {
963 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
964 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), MemI);
965 if (Known.isNonNegative())
966 continue;
967 return false;
968 }
969
970 return true;
971 };
972
973 // FIXME: If the GEP is not inbounds, and there are extra indices after the
974 // one we'll replace, those could cause the address computation to wrap
975 // (rendering the IsAllNonNegative() check below insufficient). We can do
976 // better, ignoring zero indices (and other indices we can prove small
977 // enough not to wrap).
978 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
979 return false;
980
981 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
982 // also known to be dereferenceable.
983 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
984 IsAllNonNegative();
985}
986
987// If we're indexing into an object with a variable index for the memory
988// access, but the object has only one element, we can assume that the index
989// will always be zero. If we replace the GEP, return it.
990static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
991 Instruction &MemI) {
992 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
993 unsigned Idx;
994 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
995 Instruction *NewGEPI = GEPI->clone();
996 NewGEPI->setOperand(Idx,
997 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
998 IC.InsertNewInstBefore(NewGEPI, GEPI->getIterator());
999 return NewGEPI;
1000 }
1001 }
1002
1003 return nullptr;
1004}
1005
1006static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
1007 if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1008 return false;
1009
1010 auto *Ptr = SI.getPointerOperand();
1011 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
1012 Ptr = GEPI->getOperand(0);
1013 return (isa<ConstantPointerNull>(Ptr) &&
1014 !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
1015}
1016
1017static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1018 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
1019 const Value *GEPI0 = GEPI->getOperand(0);
1020 if (isa<ConstantPointerNull>(GEPI0) &&
1021 !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
1022 return true;
1023 }
1024 if (isa<UndefValue>(Op) ||
1025 (isa<ConstantPointerNull>(Op) &&
1026 !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
1027 return true;
1028 return false;
1029}
1030
1031Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
1032 bool HasDereferenceable,
1033 unsigned Depth) {
1034 if (auto *Sel = dyn_cast<SelectInst>(V)) {
1035 if (isa<ConstantPointerNull>(Sel->getOperand(1)))
1036 return Sel->getOperand(2);
1037
1038 if (isa<ConstantPointerNull>(Sel->getOperand(2)))
1039 return Sel->getOperand(1);
1040 }
1041
1042 if (!V->hasOneUse())
1043 return nullptr;
1044
1045 constexpr unsigned RecursionLimit = 3;
1046 if (Depth == RecursionLimit)
1047 return nullptr;
1048
1049 if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
1050 if (HasDereferenceable || GEP->isInBounds()) {
1051 if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),
1052 HasDereferenceable, Depth + 1)) {
1053 replaceOperand(*GEP, 0, Res);
1054 addToWorklist(GEP);
1055 return nullptr;
1056 }
1057 }
1058 }
1059
1060 if (auto *PHI = dyn_cast<PHINode>(V)) {
1061 bool Changed = false;
1062 for (Use &U : PHI->incoming_values()) {
1063 // We set Depth to RecursionLimit to avoid expensive recursion.
1064 if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable,
1065 RecursionLimit)) {
1066 replaceUse(U, Res);
1067 Changed = true;
1068 }
1069 }
1070 if (Changed)
1071 addToWorklist(PHI);
1072 return nullptr;
1073 }
1074
1075 return nullptr;
1076}
1077
1078Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1079 Value *Op = LI.getOperand(0);
1080 if (Value *Res = simplifyLoadInst(&LI, Op, SQ.getWithInstruction(&LI)))
1081 return replaceInstUsesWith(LI, Res);
1082
1083 // Try to canonicalize the loaded type.
1084 if (Instruction *Res = combineLoadToOperationType(*this, LI))
1085 return Res;
1086
1087 // Replace GEP indices if possible.
1088 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
1089 return replaceOperand(LI, 0, NewGEPI);
1090
1091 if (Instruction *Res = unpackLoadToAggregate(*this, LI))
1092 return Res;
1093
1094 // Do really simple store-to-load forwarding and load CSE, to catch cases
1095 // where there are several consecutive memory accesses to the same location,
1096 // separated by a few arithmetic operations.
1097 bool IsLoadCSE = false;
1098 BatchAAResults BatchAA(*AA);
1099 if (Value *AvailableVal = FindAvailableLoadedValue(&LI, BatchAA, &IsLoadCSE)) {
1100 if (IsLoadCSE)
1101 combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
1102
1103 return replaceInstUsesWith(
1104 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
1105 LI.getName() + ".cast"));
1106 }
1107
1108 // None of the following transforms are legal for volatile/ordered atomic
1109 // loads. Most of them do apply for unordered atomics.
1110 if (!LI.isUnordered()) return nullptr;
1111
1112 // load(gep null, ...) -> unreachable
1113 // load null/undef -> unreachable
1114 // TODO: Consider a target hook for valid address spaces for this xforms.
1115 if (canSimplifyNullLoadOrGEP(LI, Op)) {
1116 CreateNonTerminatorUnreachable(&LI);
1117 return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
1118 }
1119
1120 if (Op->hasOneUse()) {
1121 // Change select and PHI nodes to select values instead of addresses: this
1122 // helps alias analysis out a lot, allows many others simplifications, and
1123 // exposes redundancy in the code.
1124 //
1125 // Note that we cannot do the transformation unless we know that the
1126 // introduced loads cannot trap! Something like this is valid as long as
1127 // the condition is always false: load (select bool %C, int* null, int* %G),
1128 // but it would not be valid if we transformed it to load from null
1129 // unconditionally.
1130 //
1131 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
1132 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1133 Align Alignment = LI.getAlign();
1134 if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
1135 Alignment, DL, SI) &&
1136 isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
1137 Alignment, DL, SI)) {
1138 LoadInst *V1 =
1139 Builder.CreateLoad(LI.getType(), SI->getOperand(1),
1140 SI->getOperand(1)->getName() + ".val");
1141 LoadInst *V2 =
1142 Builder.CreateLoad(LI.getType(), SI->getOperand(2),
1143 SI->getOperand(2)->getName() + ".val");
1144 assert(LI.isUnordered() && "implied by above");
1145 V1->setAlignment(Alignment);
1146 V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1147 V2->setAlignment(Alignment);
1148 V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1149 // It is safe to copy any metadata that does not trigger UB. Copy any
1150 // poison-generating metadata.
1151 V1->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1152 V2->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1153 return SelectInst::Create(SI->getCondition(), V1, V2);
1154 }
1155 }
1156 }
1157
1158 if (!NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace()))
1159 if (Value *V = simplifyNonNullOperand(Op, /*HasDereferenceable=*/true))
1160 return replaceOperand(LI, 0, V);
1161
1162 return nullptr;
1163}
1164
1165/// Look for extractelement/insertvalue sequence that acts like a bitcast.
1166///
1167/// \returns underlying value that was "cast", or nullptr otherwise.
1168///
1169/// For example, if we have:
1170///
1171/// %E0 = extractelement <2 x double> %U, i32 0
1172/// %V0 = insertvalue [2 x double] undef, double %E0, 0
1173/// %E1 = extractelement <2 x double> %U, i32 1
1174/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1175///
1176/// and the layout of a <2 x double> is isomorphic to a [2 x double],
1177/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1178/// Note that %U may contain non-undef values where %V1 has undef.
1179static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1180 Value *U = nullptr;
1181 while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1182 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
1183 if (!E)
1184 return nullptr;
1185 auto *W = E->getVectorOperand();
1186 if (!U)
1187 U = W;
1188 else if (U != W)
1189 return nullptr;
1190 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
1191 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1192 return nullptr;
1193 V = IV->getAggregateOperand();
1194 }
1195 if (!match(V, m_Undef()) || !U)
1196 return nullptr;
1197
1198 auto *UT = cast<VectorType>(U->getType());
1199 auto *VT = V->getType();
1200 // Check that types UT and VT are bitwise isomorphic.
1201 const auto &DL = IC.getDataLayout();
1202 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1203 return nullptr;
1204 }
1205 if (auto *AT = dyn_cast<ArrayType>(VT)) {
1206 if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1207 return nullptr;
1208 } else {
1209 auto *ST = cast<StructType>(VT);
1210 if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1211 return nullptr;
1212 for (const auto *EltT : ST->elements()) {
1213 if (EltT != UT->getElementType())
1214 return nullptr;
1215 }
1216 }
1217 return U;
1218}
1219
1220/// Combine stores to match the type of value being stored.
1221///
1222/// The core idea here is that the memory does not have any intrinsic type and
1223/// where we can we should match the type of a store to the type of value being
1224/// stored.
1225///
1226/// However, this routine must never change the width of a store or the number of
1227/// stores as that would introduce a semantic change. This combine is expected to
1228/// be a semantic no-op which just allows stores to more closely model the types
1229/// of their incoming values.
1230///
1231/// Currently, we also refuse to change the precise type used for an atomic or
1232/// volatile store. This is debatable, and might be reasonable to change later.
1233/// However, it is risky in case some backend or other part of LLVM is relying
1234/// on the exact type stored to select appropriate atomic operations.
1235///
1236/// \returns true if the store was successfully combined away. This indicates
1237/// the caller must erase the store instruction. We have to let the caller erase
1238/// the store instruction as otherwise there is no way to signal whether it was
1239/// combined or not: IC.EraseInstFromFunction returns a null pointer.
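///
/// A hypothetical instance (illustrative names): the pair
///   %f = bitcast i32 %v to float
///   store float %f, ptr %p
/// becomes "store i32 %v, ptr %p", dropping the cast while keeping the store
/// width, ordering and metadata unchanged.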
1240static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1241 // FIXME: We could probably with some care handle both volatile and ordered
1242 // atomic stores here but it isn't clear that this is important.
1243 if (!SI.isUnordered())
1244 return false;
1245
1246 // swifterror values can't be bitcasted.
1247 if (SI.getPointerOperand()->isSwiftError())
1248 return false;
1249
1250 Value *V = SI.getValueOperand();
1251
1252 // Fold away bit casts of the stored value by storing the original type.
1253 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1254 assert(!BC->getType()->isX86_AMXTy() &&
1255 "store to x86_amx* should not happen!");
1256 V = BC->getOperand(0);
1257 // Don't transform when the type is x86_amx; this keeps the pass that
1258 // lowers the x86_amx type happy.
1259 if (V->getType()->isX86_AMXTy())
1260 return false;
1261 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1262 combineStoreToNewValue(IC, SI, V);
1263 return true;
1264 }
1265 }
1266
1267 if (Value *U = likeBitCastFromVector(IC, V))
1268 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1269 combineStoreToNewValue(IC, SI, U);
1270 return true;
1271 }
1272
1273 // FIXME: We should also canonicalize stores of vectors when their elements
1274 // are cast to other types.
1275 return false;
1276}
1277
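// A hypothetical sketch of the store unpacking performed below (illustrative
// types): when the struct layout has no padding,
//   store { i32, i32 } %v, ptr %p
// becomes
//   %e0 = extractvalue { i32, i32 } %v, 0
//   store i32 %e0, ptr %p
//   %p1 = getelementptr inbounds i8, ptr %p, i64 4
//   %e1 = extractvalue { i32, i32 } %v, 1
//   store i32 %e1, ptr %p1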
1278static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1279 // FIXME: We could probably with some care handle both volatile and atomic
1280 // stores here but it isn't clear that this is important.
1281 if (!SI.isSimple())
1282 return false;
1283
1284 Value *V = SI.getValueOperand();
1285 Type *T = V->getType();
1286
1287 if (!T->isAggregateType())
1288 return false;
1289
1290 if (auto *ST = dyn_cast<StructType>(T)) {
1291 // If the struct has only one element, we unpack it.
1292 unsigned Count = ST->getNumElements();
1293 if (Count == 1) {
1294 V = IC.Builder.CreateExtractValue(V, 0);
1295 combineStoreToNewValue(IC, SI, V);
1296 return true;
1297 }
1298
1299 // We don't want to break stores with padding here as we'd lose
1300 // the knowledge that padding exists for the rest of the pipeline.
1301 const DataLayout &DL = IC.getDataLayout();
1302 auto *SL = DL.getStructLayout(ST);
1303
1304 if (SL->hasPadding())
1305 return false;
1306
1307 const auto Align = SI.getAlign();
1308
1309 SmallString<16> EltName = V->getName();
1310 EltName += ".elt";
1311 auto *Addr = SI.getPointerOperand();
1312 SmallString<16> AddrName = Addr->getName();
1313 AddrName += ".repack";
1314
1315 auto *IdxType = DL.getIndexType(Addr->getType());
1316 for (unsigned i = 0; i < Count; i++) {
1317 auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
1318 Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
1319 AddrName);
1320 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1321 auto EltAlign =
1322 commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue());
1323 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1324 NS->setAAMetadata(SI.getAAMetadata());
1325 }
1326
1327 return true;
1328 }
1329
1330 if (auto *AT = dyn_cast<ArrayType>(T)) {
1331 // If the array has only one element, we unpack it.
1332 auto NumElements = AT->getNumElements();
1333 if (NumElements == 1) {
1334 V = IC.Builder.CreateExtractValue(V, 0);
1335 combineStoreToNewValue(IC, SI, V);
1336 return true;
1337 }
1338
1339 // Bail out if the array is too large. Ideally we would like to optimize
1340 // arrays of arbitrary size but this has a terrible impact on compile time.
1341 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1342 // tuning.
1343 if (NumElements > IC.MaxArraySizeForCombine)
1344 return false;
1345
1346 const DataLayout &DL = IC.getDataLayout();
1347 TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
1348 const auto Align = SI.getAlign();
1349
1350 SmallString<16> EltName = V->getName();
1351 EltName += ".elt";
1352 auto *Addr = SI.getPointerOperand();
1353 SmallString<16> AddrName = Addr->getName();
1354 AddrName += ".repack";
1355
1356 auto *IdxType = Type::getInt64Ty(T->getContext());
1357 auto *Zero = ConstantInt::get(IdxType, 0);
1358
1359 TypeSize Offset = TypeSize::getZero();
1360 for (uint64_t i = 0; i < NumElements; i++) {
1361 Value *Indices[2] = {
1362 Zero,
1363 ConstantInt::get(IdxType, i),
1364 };
1365 auto *Ptr =
1366 IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices), AddrName);
1367 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1368 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
1369 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1370 NS->setAAMetadata(SI.getAAMetadata());
1371 Offset += EltSize;
1372 }
1373
1374 return true;
1375 }
1376
1377 return false;
1378}
1379
1380/// equivalentAddressValues - Test if A and B will obviously have the same
1381/// value. This includes recognizing that %t0 and %t1 will have the same
1382/// value in code like this:
1383/// %t0 = getelementptr \@a, 0, 3
1384/// store i32 0, i32* %t0
1385/// %t1 = getelementptr \@a, 0, 3
1386/// %t2 = load i32* %t1
1387///
1388static bool equivalentAddressValues(Value *A, Value *B) {
1389 // Test if the values are trivially equivalent.
1390 if (A == B) return true;
1391
1392 // Test if the values come from identical arithmetic instructions.
1393 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1394 // it's only used to compare two uses within the same basic block, which
1395 // means that they'll always either have the same value or one of them
1396 // will have an undefined value.
1397 if (isa<BinaryOperator>(A) ||
1398 isa<CastInst>(A) ||
1399 isa<PHINode>(A) ||
1400 isa<GetElementPtrInst>(A))
1401 if (Instruction *BI = dyn_cast<Instruction>(B))
1402 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1403 return true;
1404
1405 // Otherwise they may not be equivalent.
1406 return false;
1407}
1408
1409Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1410 Value *Val = SI.getOperand(0);
1411 Value *Ptr = SI.getOperand(1);
1412
1413 // Try to canonicalize the stored type.
1414 if (combineStoreToValueType(*this, SI))
1415 return eraseInstFromFunction(SI);
1416
1417 // Try to canonicalize the stored type.
1418 if (unpackStoreToAggregate(*this, SI))
1419 return eraseInstFromFunction(SI);
1420
1421 // Replace GEP indices if possible.
1422 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI))
1423 return replaceOperand(SI, 1, NewGEPI);
1424
1425 // Don't hack volatile/ordered stores.
1426 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1427 if (!SI.isUnordered()) return nullptr;
1428
1429 // If the RHS is an alloca with a single use, zapify the store, making the
1430 // alloca dead.
1431 if (Ptr->hasOneUse()) {
1432 if (isa<AllocaInst>(Ptr))
1433 return eraseInstFromFunction(SI);
1434 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1435 if (isa<AllocaInst>(GEP->getOperand(0))) {
1436 if (GEP->getOperand(0)->hasOneUse())
1437 return eraseInstFromFunction(SI);
1438 }
1439 }
1440 }
1441
1442 // If we have a store to a location which is known constant, we can conclude
1443 // that the store must be storing the constant value (else the memory
1444 // wouldn't be constant), and this must be a noop.
1445 if (!isModSet(AA->getModRefInfoMask(Ptr)))
1446 return eraseInstFromFunction(SI);
1447
1448 // Do really simple DSE, to catch cases where there are several consecutive
1449 // stores to the same location, separated by a few arithmetic operations. This
1450 // situation often occurs with bitfield accesses.
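  // A hypothetical example of what this catches:
  //   store i32 %a, ptr %p
  //   %b = or i32 %a, 255
  //   store i32 %b, ptr %p
  // The first store is dead and is erased here without running full DSE.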
1451 BasicBlock::iterator BBI(SI);
1452 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1453 --ScanInsts) {
1454 --BBI;
1455 // Don't count debug info directives, lest they affect codegen,
1456 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1457 if (BBI->isDebugOrPseudoInst()) {
1458 ScanInsts++;
1459 continue;
1460 }
1461
1462 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1463 // Prev store isn't volatile, and stores to the same location?
1464 if (PrevSI->isUnordered() &&
1465 equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
1466 PrevSI->getValueOperand()->getType() ==
1467 SI.getValueOperand()->getType()) {
1468 ++NumDeadStore;
1469 // Manually add back the original store to the worklist now, so it will
1470 // be processed after the operands of the removed store, as this may
1471 // expose additional DSE opportunities.
1472 Worklist.push(&SI);
1473 eraseInstFromFunction(*PrevSI);
1474 return nullptr;
1475 }
1476 break;
1477 }
1478
1479 // If this is a load, we have to stop. However, if the loaded value is from
1480 // the pointer we're loading and is producing the pointer we're storing,
1481 // then *this* store is dead (X = load P; store X -> P).
1482 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1483 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1484 assert(SI.isUnordered() && "can't eliminate ordering operation");
1485 return eraseInstFromFunction(SI);
1486 }
1487
1488 // Otherwise, this is a load from some other location. Stores before it
1489 // may not be dead.
1490 break;
1491 }
1492
1493 // Don't skip over loads, throws or things that can modify memory.
1494 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1495 break;
1496 }
1497
1498 // store X, null -> turns into 'unreachable' in SimplifyCFG
1499 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1500 if (canSimplifyNullStoreOrGEP(SI)) {
1501 if (!isa<PoisonValue>(Val))
1502 return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
1503 return nullptr; // Do not modify these!
1504 }
1505
1506 // This is a non-terminator unreachable marker. Don't remove it.
1507 if (isa<UndefValue>(Ptr)) {
1508 // Remove guaranteed-to-transfer instructions before the marker.
1509 if (removeInstructionsBeforeUnreachable(SI))
1510 return &SI;
1511 // Remove all instructions after the marker and handle dead blocks this
1512 // implies.
1513 SmallVector<BasicBlock *> Worklist;
1514 handleUnreachableFrom(SI.getNextNode(), Worklist);
1515 handlePotentiallyDeadBlocks(Worklist);
1516 return nullptr;
1517 }
1518
1519 // store undef, Ptr -> noop
1520 // FIXME: This is technically incorrect because it might overwrite a poison
1521 // value. Change to PoisonValue once #52930 is resolved.
1522 if (isa<UndefValue>(Val))
1523 return eraseInstFromFunction(SI);
1524
1525 if (!NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1526 if (Value *V = simplifyNonNullOperand(Ptr, /*HasDereferenceable=*/true))
1527 return replaceOperand(SI, 1, V);
1528
1529 return nullptr;
1530}
1531
1532/// Try to transform:
1533/// if () { *P = v1; } else { *P = v2 }
1534/// or:
1535/// *P = v1; if () { *P = v2; }
1536/// into a phi node with a store in the successor.
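///
/// A hypothetical CFG sketch (illustrative labels and values): with
///   then: store i32 1, ptr %p; br label %join
///   else: store i32 2, ptr %p; br label %join
/// the two stores are replaced in %join by
///   %m = phi i32 [ 1, %then ], [ 2, %else ]
///   store i32 %m, ptr %p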
1537bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1538 if (!SI.isUnordered())
1539 return false; // This code has not been audited for volatile/ordered case.
1540
1541 // Check if the successor block has exactly 2 incoming edges.
1542 BasicBlock *StoreBB = SI.getParent();
1543 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1544 if (!DestBB->hasNPredecessors(2))
1545 return false;
1546
1547 // Capture the other block (the block that doesn't contain our store).
1548 pred_iterator PredIter = pred_begin(DestBB);
1549 if (*PredIter == StoreBB)
1550 ++PredIter;
1551 BasicBlock *OtherBB = *PredIter;
1552
1553 // Bail out if all of the relevant blocks aren't distinct. This can happen,
1554 // for example, if SI is in an infinite loop.
1555 if (StoreBB == DestBB || OtherBB == DestBB)
1556 return false;
1557
1558 // Verify that the other block ends in a branch and is not otherwise empty.
1559 BasicBlock::iterator BBI(OtherBB->getTerminator());
1560 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1561 if (!OtherBr || BBI == OtherBB->begin())
1562 return false;
1563
1564 auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
1565 if (!OtherStore ||
1566 OtherStore->getPointerOperand() != SI.getPointerOperand())
1567 return false;
1568
1569 auto *SIVTy = SI.getValueOperand()->getType();
1570 auto *OSVTy = OtherStore->getValueOperand()->getType();
1571 return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
1572 SI.hasSameSpecialState(OtherStore);
1573 };
1574
1575 // If the other block ends in an unconditional branch, check for the 'if then
1576 // else' case. There is an instruction before the branch.
1577 StoreInst *OtherStore = nullptr;
1578 if (OtherBr->isUnconditional()) {
1579 --BBI;
1580 // Skip over debugging info and pseudo probes.
1581 while (BBI->isDebugOrPseudoInst()) {
1582 if (BBI==OtherBB->begin())
1583 return false;
1584 --BBI;
1585 }
1586 // If this isn't a store, isn't a store to the same location, or is not the
1587 // right kind of store, bail out.
1588 OtherStore = dyn_cast<StoreInst>(BBI);
1589 if (!OtherStoreIsMergeable(OtherStore))
1590 return false;
1591 } else {
1592 // Otherwise, the other block ended with a conditional branch. If one of the
1593 // destinations is StoreBB, then we have the if/then case.
1594 if (OtherBr->getSuccessor(0) != StoreBB &&
1595 OtherBr->getSuccessor(1) != StoreBB)
1596 return false;
1597
1598 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1599 // if/then triangle. See if there is a store to the same ptr as SI that
1600 // lives in OtherBB.
1601 for (;; --BBI) {
1602 // Check to see if we find the matching store.
1603 OtherStore = dyn_cast<StoreInst>(BBI);
1604 if (OtherStoreIsMergeable(OtherStore))
1605 break;
1606
1607 // If we find something that may be using or overwriting the stored
1608 // value, or if we run out of instructions, we can't do the transform.
1609 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1610 BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1611 return false;
1612 }
1613
1614 // In order to eliminate the store in OtherBr, we have to make sure nothing
1615 // reads or overwrites the stored value in StoreBB.
1616 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1617 // FIXME: This should really be AA driven.
1618 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1619 return false;
1620 }
1621 }
1622
1623 // Insert a PHI node now if we need it.
1624 Value *MergedVal = OtherStore->getValueOperand();
1625 // The debug locations of the original instructions might differ. Merge them.
1626 DebugLoc MergedLoc =
1627 DebugLoc::getMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc());
1628 if (MergedVal != SI.getValueOperand()) {
1629 PHINode *PN =
1630 PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");
1631 PN->addIncoming(SI.getValueOperand(), SI.getParent());
1632 Builder.SetInsertPoint(OtherStore);
1633 PN->addIncoming(Builder.CreateBitOrPointerCast(MergedVal, PN->getType()),
1634 OtherBB);
1635 MergedVal = InsertNewInstBefore(PN, DestBB->begin());
1636 PN->setDebugLoc(MergedLoc);
1637 }
1638
1639 // Advance to a place where it is safe to insert the new store and insert it.
1640 BBI = DestBB->getFirstInsertionPt();
1641 StoreInst *NewSI =
1642 new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
1643 SI.getOrdering(), SI.getSyncScopeID());
1644 InsertNewInstBefore(NewSI, BBI);
1645 NewSI->setDebugLoc(MergedLoc);
1646 NewSI->mergeDIAssignID({&SI, OtherStore});
1647
1648 // If the two stores had AA tags, merge them.
1649 AAMDNodes AATags = SI.getAAMetadata();
1650 if (AATags)
1651 NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
1652
1653 // Nuke the old stores.
1654 eraseInstFromFunction(SI);
1655 eraseInstFromFunction(*OtherStore);
1656 return true;
1657}
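// A minimal sketch of the diamond that mergeStoreIntoSuccessor rewrites; the
// block and value names are hypothetical and not taken from the pass itself:
//
//   then:
//     store i32 %v1, ptr %p
//     br label %merge
//   else:
//     store i32 %v2, ptr %p
//     br label %merge
//
// becomes a single store fed by a phi in the shared successor:
//
//   merge:
//     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//     store i32 %storemerge, ptr %p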