//===- VPlanSLP.cpp - SLP Analysis based on VPlan -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// This file implements SLP analysis based on VPlan. The analysis is based on
/// the ideas described in
///
///   Look-ahead SLP: auto-vectorization in the presence of commutative
///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
///   Luís F. W. Góes
///
//===----------------------------------------------------------------------===//
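
// A high-level sketch of what the analysis does (illustrative only; the
// per-lane values and array names below are made up, not taken from a test):
// given scalar VPInstructions such as
//
//   %l0 = load a[i]        %l1 = load a[i+1]
//   %s0 = add %l0, %x      %s1 = add %l1, %y
//   store %s0, b[i]        store %s1, b[i+1]
//
// buildGraph({store0, store1}) bundles the two stores, recurses into the adds
// (re-ordering their commutative operands with the look-ahead heuristic), ends
// the tree at the consecutive loads, and emits combined SLPLoad/SLPStore
// VPInstructions for the bundles.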

#include "VPlanSLP.h"
#include "VPlan.h"
#include "VPlanCFG.h"
#include "VPlanValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "vplan-slp"

// Number of levels to look ahead when re-ordering multi-node operands.
static unsigned LookaheadMaxDepth = 5;

void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
                                          Old2NewTy &Old2New,
                                          InterleavedAccessInfo &IAI) {
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
      Region->getEntry());
  for (VPBlockBase *Base : RPOT) {
    visitBlock(Base, Old2New, IAI);
  }
}

void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                                         InterleavedAccessInfo &IAI) {
  if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
    for (VPRecipeBase &VPI : *VPBB) {
      if (isa<VPWidenPHIRecipe>(&VPI))
        continue;
      auto *VPInst = cast<VPInstruction>(&VPI);
      auto *Inst = dyn_cast_or_null<Instruction>(VPInst->getUnderlyingValue());
      if (!Inst)
        continue;
      auto *IG = IAI.getInterleaveGroup(Inst);
      if (!IG)
        continue;

      auto NewIGIter = Old2New.find(IG);
      if (NewIGIter == Old2New.end())
        Old2New[IG] = new InterleaveGroup<VPInstruction>(
            IG->getFactor(), IG->isReverse(), IG->getAlign());

      if (Inst == IG->getInsertPos())
        Old2New[IG]->setInsertPos(VPInst);

      InterleaveGroupMap[VPInst] = Old2New[IG];
      InterleaveGroupMap[VPInst]->insertMember(
          VPInst, IG->getIndex(Inst),
          Align(IG->isReverse() ? (-1) * int(IG->getFactor())
                                : IG->getFactor()));
    }
  } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) {
    visitRegion(Region, Old2New, IAI);
  } else {
    llvm_unreachable("Unsupported kind of VPBlock.");
  }
}

VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
                                                 InterleavedAccessInfo &IAI) {
  Old2NewTy Old2New;
  visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
}
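
// Illustrative note (assumed access pattern): if the IR-level analysis formed
// an interleave group {load a[2*i], load a[2*i+1]} with factor 2, visitBlock
// creates a single InterleaveGroup<VPInstruction> and registers the two
// corresponding VPInstructions as members 0 and 1, so getInterleaveGroup() can
// later be queried directly on VPlan values.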

VPInstruction *VPlanSlp::markFailed() {
  // FIXME: Currently this is used to signal we hit instructions we cannot
  // trivially SLP'ize.
  CompletelySLP = false;
  return nullptr;
}

void VPlanSlp::addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New) {
  if (all_of(Operands, [](VPValue *V) {
        return cast<VPInstruction>(V)->getUnderlyingInstr();
      })) {
    unsigned BundleSize = 0;
    for (VPValue *V : Operands) {
      Type *T = cast<VPInstruction>(V)->getUnderlyingInstr()->getType();
      assert(!T->isVectorTy() && "Only scalar types supported for now");
      BundleSize += T->getScalarSizeInBits();
    }
    WidestBundleBits = std::max(WidestBundleBits, BundleSize);
  }

  auto Res = BundleToCombined.try_emplace(to_vector<4>(Operands), New);
  assert(Res.second &&
         "Already created a combined instruction for the operand bundle");
  (void)Res;
}
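
// Worked example (hypothetical bundle): for two scalar i32 adds, BundleSize is
// 32 + 32 = 64 bits, so WidestBundleBits becomes at least 64. The numbers are
// only meant to illustrate the accumulation above.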

bool VPlanSlp::areVectorizable(ArrayRef<VPValue *> Operands) const {
  // Currently we only support VPInstructions.
  if (!all_of(Operands, [](VPValue *Op) {
        return Op && isa<VPInstruction>(Op) &&
               cast<VPInstruction>(Op)->getUnderlyingInstr();
      })) {
    LLVM_DEBUG(dbgs() << "VPSLP: not all operands are VPInstructions\n");
    return false;
  }

  // Check if opcodes and type width agree for all instructions in the bundle.
  // FIXME: Differing widths/opcodes can be handled by inserting additional
  // instructions.
  // FIXME: Deal with non-primitive types.
  const Instruction *OriginalInstr =
      cast<VPInstruction>(Operands[0])->getUnderlyingInstr();
  unsigned Opcode = OriginalInstr->getOpcode();
  unsigned Width = OriginalInstr->getType()->getPrimitiveSizeInBits();
  if (!all_of(Operands, [Opcode, Width](VPValue *Op) {
        const Instruction *I = cast<VPInstruction>(Op)->getUnderlyingInstr();
        return I->getOpcode() == Opcode &&
               I->getType()->getPrimitiveSizeInBits() == Width;
      })) {
    LLVM_DEBUG(dbgs() << "VPSLP: Opcodes do not agree\n");
    return false;
  }

  // For now, all operands must be defined in the same BB.
  if (any_of(Operands, [this](VPValue *Op) {
        return cast<VPInstruction>(Op)->getParent() != &this->BB;
      })) {
    LLVM_DEBUG(dbgs() << "VPSLP: operands in different BBs\n");
    return false;
  }

  if (any_of(Operands,
             [](VPValue *Op) { return Op->hasMoreThanOneUniqueUser(); })) {
    LLVM_DEBUG(dbgs() << "VPSLP: Some operands have multiple users.\n");
    return false;
  }

  // For loads, check that there are no instructions writing to memory in
  // between them.
  // TODO: We only have to forbid instructions writing to memory that could
  // interfere with any of the loads in the bundle.
  if (Opcode == Instruction::Load) {
    unsigned LoadsSeen = 0;
    VPBasicBlock *Parent = cast<VPInstruction>(Operands[0])->getParent();
    for (auto &I : *Parent) {
      auto *VPI = dyn_cast<VPInstruction>(&I);
      if (!VPI)
        break;
      if (VPI->getOpcode() == Instruction::Load &&
          is_contained(Operands, VPI))
        LoadsSeen++;

      if (LoadsSeen == Operands.size())
        break;
      if (LoadsSeen > 0 && VPI->mayWriteToMemory()) {
        LLVM_DEBUG(
            dbgs() << "VPSLP: instruction modifying memory between loads\n");
        return false;
      }
    }

    if (!all_of(Operands, [](VPValue *Op) {
          return cast<LoadInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
              ->isSimple();
        })) {
      LLVM_DEBUG(dbgs() << "VPSLP: only simple loads are supported.\n");
      return false;
    }
  }

  if (Opcode == Instruction::Store)
    if (!all_of(Operands, [](VPValue *Op) {
          return cast<StoreInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
              ->isSimple();
        })) {
      LLVM_DEBUG(dbgs() << "VPSLP: only simple stores are supported.\n");
      return false;
    }

  return true;
}
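
// Illustrative outcomes (assumed bundles): {add i32, add i32} defined in this
// block with single users passes all checks above; {add i32, mul i32} fails
// the opcode check; and a pair of loads with a store between them fails the
// memory check.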

static SmallVector<VPValue *, 4> getOperands(ArrayRef<VPValue *> Values,
                                             unsigned OperandIndex) {
  SmallVector<VPValue *, 4> Operands;
  for (VPValue *V : Values) {
    // Currently we only support VPInstructions.
    auto *U = cast<VPInstruction>(V);
    Operands.push_back(U->getOperand(OperandIndex));
  }
  return Operands;
}

static bool areCommutative(ArrayRef<VPValue *> Values) {
  return Instruction::isCommutative(
      cast<VPInstruction>(Values[0])->getOpcode());
}

static SmallVector<SmallVector<VPValue *, 4>, 4>
getOperands(ArrayRef<VPValue *> Values) {
  SmallVector<SmallVector<VPValue *, 4>, 4> Result;
  auto *VPI = cast<VPInstruction>(Values[0]);

  switch (VPI->getOpcode()) {
  case Instruction::Load:
    llvm_unreachable("Loads terminate a tree, no need to get operands");
  case Instruction::Store:
    Result.push_back(getOperands(Values, 0));
    break;
  default:
    for (unsigned I = 0, NumOps = VPI->getNumOperands(); I < NumOps; ++I)
      Result.push_back(getOperands(Values, I));
    break;
  }

  return Result;
}

/// Returns the opcode of Values or std::nullopt if they do not all agree.
static std::optional<unsigned> getOpcode(ArrayRef<VPValue *> Values) {
  unsigned Opcode = cast<VPInstruction>(Values[0])->getOpcode();
  if (any_of(Values, [Opcode](VPValue *V) {
        return cast<VPInstruction>(V)->getOpcode() != Opcode;
      }))
    return std::nullopt;
  return {Opcode};
}

/// Returns true if A and B access sequential memory if they are loads or
/// stores, or if they have identical opcodes otherwise.
static bool areConsecutiveOrMatch(VPInstruction *A, VPInstruction *B,
                                  VPInterleavedAccessInfo &IAI) {
  if (A->getOpcode() != B->getOpcode())
    return false;

  if (A->getOpcode() != Instruction::Load &&
      A->getOpcode() != Instruction::Store)
    return true;
  auto *GA = IAI.getInterleaveGroup(A);
  auto *GB = IAI.getInterleaveGroup(B);

  return GA && GB && GA == GB && GA->getIndex(A) + 1 == GB->getIndex(B);
}
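
// Example (assumed setup): if A = load a[i] and B = load a[i+1] are members 0
// and 1 of the same interleave group, the function returns true; for two
// non-memory instructions it returns true purely on matching opcodes.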

/// Implements getLAScore from Listing 7 in the paper.
/// Traverses and compares operands of V1 and V2 to MaxLevel.
static unsigned getLAScore(VPValue *V1, VPValue *V2, unsigned MaxLevel,
                           VPInterleavedAccessInfo &IAI) {
  auto *I1 = dyn_cast<VPInstruction>(V1);
  auto *I2 = dyn_cast<VPInstruction>(V2);
  // Currently we only support VPInstructions.
  if (!I1 || !I2)
    return 0;

  if (MaxLevel == 0)
    return (unsigned)areConsecutiveOrMatch(I1, I2, IAI);

  unsigned Score = 0;
  for (unsigned I = 0, EV1 = I1->getNumOperands(); I < EV1; ++I)
    for (unsigned J = 0, EV2 = I2->getNumOperands(); J < EV2; ++J)
      Score +=
          getLAScore(I1->getOperand(I), I2->getOperand(J), MaxLevel - 1, IAI);
  return Score;
}
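
// Worked example (hypothetical operands): with MaxLevel = 1, comparing
// V1 = add(load a[i], %x) and V2 = add(load a[i+1], %y) scores every operand
// pair at level 0; the consecutive loads contribute 1, and the other pairs
// contribute only if they are consecutive accesses or share an opcode, so
// candidates whose operand trees line up score higher.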

std::pair<VPlanSlp::OpMode, VPValue *>
VPlanSlp::getBest(OpMode Mode, VPValue *Last,
                  SmallPtrSetImpl<VPValue *> &Candidates,
                  VPInterleavedAccessInfo &IAI) {
  assert((Mode == OpMode::Load || Mode == OpMode::Opcode) &&
         "Currently we only handle load and commutative opcodes");
  LLVM_DEBUG(dbgs() << " getBest\n");

  SmallVector<VPValue *, 4> BestCandidates;
  LLVM_DEBUG(dbgs() << " Candidates for "
                    << *cast<VPInstruction>(Last)->getUnderlyingInstr() << " ");
  for (auto *Candidate : Candidates) {
    auto *LastI = cast<VPInstruction>(Last);
    auto *CandidateI = cast<VPInstruction>(Candidate);
    if (areConsecutiveOrMatch(LastI, CandidateI, IAI)) {
      LLVM_DEBUG(dbgs() << *cast<VPInstruction>(Candidate)->getUnderlyingInstr()
                        << " ");
      BestCandidates.push_back(Candidate);
    }
  }
  LLVM_DEBUG(dbgs() << "\n");

  if (BestCandidates.empty())
    return {OpMode::Failed, nullptr};

  if (BestCandidates.size() == 1)
    return {Mode, BestCandidates[0]};

  VPValue *Best = nullptr;
  unsigned BestScore = 0;
  for (unsigned Depth = 1; Depth < LookaheadMaxDepth; Depth++) {
    unsigned PrevScore = ~0u;
    bool AllSame = true;

    // FIXME: Avoid visiting the same operands multiple times.
    for (auto *Candidate : BestCandidates) {
      unsigned Score = getLAScore(Last, Candidate, Depth, IAI);
      if (PrevScore == ~0u)
        PrevScore = Score;
      if (PrevScore != Score)
        AllSame = false;
      PrevScore = Score;

      if (Score > BestScore) {
        BestScore = Score;
        Best = Candidate;
      }
    }
    if (!AllSame)
      break;
  }
  LLVM_DEBUG(dbgs() << "Found best "
                    << *cast<VPInstruction>(Best)->getUnderlyingInstr()
                    << "\n");
  Candidates.erase(Best);

  return {Mode, Best};
}
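
// Illustrative use (assumed data): when filling lane 1 for one operand of a
// multinode, Last is the value already chosen for lane 0 of that operand and
// Candidates holds the lane-1 values across all operands; the code above keeps
// only candidates that are consecutive with or match Last and, if several
// remain, deepens the look-ahead until their scores diverge.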

SmallVector<VPlanSlp::MultiNodeOpTy, 4> VPlanSlp::reorderMultiNodeOps() {
  SmallVector<MultiNodeOpTy, 4> FinalOrder;
  SmallVector<OpMode, 4> Mode;
  FinalOrder.reserve(MultiNodeOps.size());
  Mode.reserve(MultiNodeOps.size());

  LLVM_DEBUG(dbgs() << "Reordering multinode\n");

  for (auto &Operands : MultiNodeOps) {
    FinalOrder.push_back({Operands.first, {Operands.second[0]}});
    if (cast<VPInstruction>(Operands.second[0])->getOpcode() ==
        Instruction::Load)
      Mode.push_back(OpMode::Load);
    else
      Mode.push_back(OpMode::Opcode);
  }

  for (unsigned Lane = 1, E = MultiNodeOps[0].second.size(); Lane < E; ++Lane) {
    LLVM_DEBUG(dbgs() << " Finding best value for lane " << Lane << "\n");
    SmallPtrSet<VPValue *, 4> Candidates;
    LLVM_DEBUG(dbgs() << " Candidates ");
    for (auto Ops : MultiNodeOps) {
      LLVM_DEBUG(
          dbgs() << *cast<VPInstruction>(Ops.second[Lane])->getUnderlyingInstr()
                 << " ");
      Candidates.insert(Ops.second[Lane]);
    }
    LLVM_DEBUG(dbgs() << "\n");

    for (unsigned Op = 0, E = MultiNodeOps.size(); Op < E; ++Op) {
      LLVM_DEBUG(dbgs() << " Checking " << Op << "\n");
      if (Mode[Op] == OpMode::Failed)
        continue;

      VPValue *Last = FinalOrder[Op].second[Lane - 1];
      std::pair<OpMode, VPValue *> Res =
          getBest(Mode[Op], Last, Candidates, IAI);
      if (Res.second)
        FinalOrder[Op].second.push_back(Res.second);
      else
        // TODO: Handle this case.
        FinalOrder[Op].second.push_back(markFailed());
    }
  }

  return FinalOrder;
}
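
// Illustrative walk-through (assumed input): for a commutative bundle
// {add(a0, b0), add(b1, a1)} the multinode has two operand rows; lane 0 keeps
// (a0) and (b0), and for lane 1 both a1 and b1 are candidates, so each row is
// extended with the candidate that best matches its lane-0 value, pairing a0
// with a1 and b0 with b1 even though the source operands were swapped.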

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPlanSlp::dumpBundle(ArrayRef<VPValue *> Values) {
  dbgs() << " Ops: ";
  for (auto *Op : Values) {
    if (auto *VPInstr = cast_or_null<VPInstruction>(Op))
      if (auto *Instr = VPInstr->getUnderlyingInstr()) {
        dbgs() << *Instr << " | ";
        continue;
      }
    dbgs() << " nullptr | ";
  }
  dbgs() << "\n";
}
#endif

VPInstruction *VPlanSlp::buildGraph(ArrayRef<VPValue *> Values) {
  assert(!Values.empty() && "Need some operands!");

  // If we already visited this instruction bundle, re-use the existing node.
  auto I = BundleToCombined.find(to_vector<4>(Values));
  if (I != BundleToCombined.end()) {
#ifndef NDEBUG
    // Check that the resulting graph is a tree. If we re-use a node, this
    // means its values have multiple users. We only allow this if all users
    // of each value are the same instruction.
    for (auto *V : Values) {
      auto UI = V->user_begin();
      auto *FirstUser = *UI++;
      while (UI != V->user_end()) {
        assert(*UI == FirstUser && "Currently we only support SLP trees.");
        UI++;
      }
    }
#endif
    return I->second;
  }

  // Dump inputs.
  LLVM_DEBUG({
    dbgs() << "buildGraph: ";
    dumpBundle(Values);
  });

  if (!areVectorizable(Values))
    return markFailed();

  assert(getOpcode(Values) && "Opcodes for all values must match");
  unsigned ValuesOpcode = *getOpcode(Values);

  SmallVector<VPValue *, 4> CombinedOperands;
  if (areCommutative(Values)) {
    bool MultiNodeRoot = !MultiNodeActive;
    MultiNodeActive = true;
    for (auto &Operands : getOperands(Values)) {
      LLVM_DEBUG({
        dbgs() << " Visiting Commutative";
        dumpBundle(Operands);
      });

      auto OperandsOpcode = getOpcode(Operands);
      if (OperandsOpcode && OperandsOpcode == getOpcode(Values)) {
        LLVM_DEBUG(dbgs() << " Same opcode, continue building\n");
        CombinedOperands.push_back(buildGraph(Operands));
      } else {
        LLVM_DEBUG(dbgs() << " Adding multinode Ops\n");
        // Create a dummy VPInstruction, which we will replace later by the
        // re-ordered operand.
        VPInstruction *Op = new VPInstruction(0, {});
        CombinedOperands.push_back(Op);
        MultiNodeOps.emplace_back(Op, Operands);
      }
    }

    if (MultiNodeRoot) {
      LLVM_DEBUG(dbgs() << "Reorder\n");
      MultiNodeActive = false;

      auto FinalOrder = reorderMultiNodeOps();

      MultiNodeOps.clear();
      for (auto &Ops : FinalOrder) {
        VPInstruction *NewOp = buildGraph(Ops.second);
        Ops.first->replaceAllUsesWith(NewOp);
        for (unsigned i = 0; i < CombinedOperands.size(); i++)
          if (CombinedOperands[i] == Ops.first)
            CombinedOperands[i] = NewOp;
        delete Ops.first;
        Ops.first = NewOp;
      }
      LLVM_DEBUG(dbgs() << "Found final order\n");
    }
  } else {
    LLVM_DEBUG(dbgs() << " NonCommutative\n");
    if (ValuesOpcode == Instruction::Load)
      for (VPValue *V : Values)
        CombinedOperands.push_back(cast<VPInstruction>(V)->getOperand(0));
    else
      for (auto &Operands : getOperands(Values))
        CombinedOperands.push_back(buildGraph(Operands));
  }

  unsigned Opcode;
  switch (ValuesOpcode) {
  case Instruction::Load:
    Opcode = VPInstruction::SLPLoad;
    break;
  case Instruction::Store:
    Opcode = VPInstruction::SLPStore;
    break;
  default:
    Opcode = ValuesOpcode;
    break;
  }

  if (!CompletelySLP)
    return markFailed();

  assert(CombinedOperands.size() > 0 && "Need some operands");
  auto *Inst = cast<VPInstruction>(Values[0])->getUnderlyingInstr();
  auto *VPI = new VPInstruction(Opcode, CombinedOperands, Inst->getDebugLoc());

  LLVM_DEBUG(dbgs() << "Create VPInstruction " << *VPI << " "
                    << *cast<VPInstruction>(Values[0]) << "\n");
  addCombined(Values, VPI);
  return VPI;
}
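
// End-to-end sketch (assumed input, simplified): calling buildGraph on
// {store b[i], store b[i+1]} whose stored values are commutative adds collects
// the adds' operands as a multinode, reorders them via reorderMultiNodeOps,
// terminates the tree at consecutive loads, and emits an SLPStore fed by a
// combined add fed by an SLPLoad; BundleToCombined caches each bundle so a
// shared sub-bundle is only built once.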