LLVM 22.0.0git
Instructions.h
Go to the documentation of this file.
1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CmpPredicate.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
45
46namespace llvm {
47
48class APFloat;
49class APInt;
50class BasicBlock;
51class ConstantInt;
52class DataLayout;
53struct KnownBits;
54class StringRef;
55class Type;
56class Value;
57class UnreachableInst;
58
59//===----------------------------------------------------------------------===//
60// AllocaInst Class
61//===----------------------------------------------------------------------===//
62
63/// an instruction to allocate memory on the stack
65 Type *AllocatedType;
66
67 using AlignmentField = AlignmentBitfieldElementT<0>;
68 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
70 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
71 SwiftErrorField>(),
72 "Bitfields must be contiguous");
73
74protected:
75 // Note: Instruction needs to be a friend here to call cloneImpl.
76 friend class Instruction;
77
79
80public:
81 LLVM_ABI explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, InsertPosition InsertBefore);
83
84 LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 InsertPosition InsertBefore);
86
87 LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
88 Align Align, const Twine &Name = "",
89 InsertPosition InsertBefore = nullptr);
90
91 /// Return true if there is an allocation size parameter to the allocation
92 /// instruction that is not 1.
93 LLVM_ABI bool isArrayAllocation() const;
94
95 /// Get the number of elements allocated. For a simple allocation of a single
96 /// element, this will return a constant 1 value.
97 const Value *getArraySize() const { return getOperand(0); }
98 Value *getArraySize() { return getOperand(0); }
99
100 /// Overload to return most specific pointer type.
102 return cast<PointerType>(Instruction::getType());
103 }
104
105 /// Return the address space for the allocation.
106 unsigned getAddressSpace() const {
107 return getType()->getAddressSpace();
108 }
109
110 /// Get allocation size in bytes. Returns std::nullopt if size can't be
111 /// determined, e.g. in case of a VLA.
112 LLVM_ABI std::optional<TypeSize>
113 getAllocationSize(const DataLayout &DL) const;
114
115 /// Get allocation size in bits. Returns std::nullopt if size can't be
116 /// determined, e.g. in case of a VLA.
117 LLVM_ABI std::optional<TypeSize>
119
120 /// Return the type that is being allocated by the instruction.
121 Type *getAllocatedType() const { return AllocatedType; }
122 /// for use only in special circumstances that need to generically
123 /// transform a whole instruction (eg: IR linking and vectorization).
124 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
125
126 /// Return the alignment of the memory that is being allocated by the
127 /// instruction.
128 Align getAlign() const {
129 return Align(1ULL << getSubclassData<AlignmentField>());
130 }
131
133 setSubclassData<AlignmentField>(Log2(Align));
134 }
135
136 /// Return true if this alloca is in the entry block of the function and is a
137 /// constant size. If so, the code generator will fold it into the
138 /// prolog/epilog code, so it is basically free.
139 LLVM_ABI bool isStaticAlloca() const;
140
141 /// Return true if this alloca is used as an inalloca argument to a call. Such
142 /// allocas are never considered static even if they are in the entry block.
143 bool isUsedWithInAlloca() const {
144 return getSubclassData<UsedWithInAllocaField>();
145 }
146
147 /// Specify whether this alloca is used to represent the arguments to a call.
148 void setUsedWithInAlloca(bool V) {
149 setSubclassData<UsedWithInAllocaField>(V);
150 }
151
152 /// Return true if this alloca is used as a swifterror argument to a call.
153 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
154 /// Specify whether this alloca is used to represent a swifterror.
155 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
156
157 // Methods for support type inquiry through isa, cast, and dyn_cast:
158 static bool classof(const Instruction *I) {
159 return (I->getOpcode() == Instruction::Alloca);
160 }
161 static bool classof(const Value *V) {
162 return isa<Instruction>(V) && classof(cast<Instruction>(V));
163 }
164
165private:
166 // Shadow Instruction::setInstructionSubclassData with a private forwarding
167 // method so that subclasses cannot accidentally use it.
168 template <typename Bitfield>
169 void setSubclassData(typename Bitfield::Type Value) {
170 Instruction::setSubclassData<Bitfield>(Value);
171 }
172};
173
174//===----------------------------------------------------------------------===//
175// LoadInst Class
176//===----------------------------------------------------------------------===//
177
178/// An instruction for reading from memory. This uses the SubclassData field in
179/// Value to store whether or not the load is volatile.
181 using VolatileField = BoolBitfieldElementT<0>;
184 static_assert(
185 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
186 "Bitfields must be contiguous");
187
188 void AssertOK();
189
190protected:
191 // Note: Instruction needs to be a friend here to call cloneImpl.
192 friend class Instruction;
193
194 LLVM_ABI LoadInst *cloneImpl() const;
195
196public:
197 LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
198 InsertPosition InsertBefore);
199 LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 InsertPosition InsertBefore);
201 LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, InsertPosition InsertBefore = nullptr);
203 LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
206 InsertPosition InsertBefore = nullptr);
207
208 /// Return true if this is a load from a volatile memory location.
209 bool isVolatile() const { return getSubclassData<VolatileField>(); }
210
211 /// Specify whether this is a volatile load or not.
212 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
213
214 /// Return the alignment of the access that is being performed.
215 Align getAlign() const {
216 return Align(1ULL << (getSubclassData<AlignmentField>()));
217 }
218
220 setSubclassData<AlignmentField>(Log2(Align));
221 }
222
223 /// Returns the ordering constraint of this load instruction.
225 return getSubclassData<OrderingField>();
226 }
227 /// Sets the ordering constraint of this load instruction. May not be Release
228 /// or AcquireRelease.
230 setSubclassData<OrderingField>(Ordering);
231 }
232
233 /// Returns the synchronization scope ID of this load instruction.
235 return SSID;
236 }
237
238 /// Sets the synchronization scope ID of this load instruction.
240 this->SSID = SSID;
241 }
242
243 /// Sets the ordering constraint and the synchronization scope ID of this load
244 /// instruction.
247 setOrdering(Ordering);
248 setSyncScopeID(SSID);
249 }
250
251 bool isSimple() const { return !isAtomic() && !isVolatile(); }
252
253 bool isUnordered() const {
256 !isVolatile();
257 }
258
260 const Value *getPointerOperand() const { return getOperand(0); }
261 static unsigned getPointerOperandIndex() { return 0U; }
263
264 /// Returns the address space of the pointer operand.
265 unsigned getPointerAddressSpace() const {
267 }
268
269 // Methods for support type inquiry through isa, cast, and dyn_cast:
270 static bool classof(const Instruction *I) {
271 return I->getOpcode() == Instruction::Load;
272 }
273 static bool classof(const Value *V) {
274 return isa<Instruction>(V) && classof(cast<Instruction>(V));
275 }
276
277private:
278 // Shadow Instruction::setInstructionSubclassData with a private forwarding
279 // method so that subclasses cannot accidentally use it.
280 template <typename Bitfield>
281 void setSubclassData(typename Bitfield::Type Value) {
282 Instruction::setSubclassData<Bitfield>(Value);
283 }
284
285 /// The synchronization scope ID of this load instruction. Not quite enough
286 /// room in SubClassData for everything, so synchronization scope ID gets its
287 /// own field.
288 SyncScope::ID SSID;
289};
290
291//===----------------------------------------------------------------------===//
292// StoreInst Class
293//===----------------------------------------------------------------------===//
294
295/// An instruction for storing to memory.
296class StoreInst : public Instruction {
297 using VolatileField = BoolBitfieldElementT<0>;
300 static_assert(
301 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
302 "Bitfields must be contiguous");
303
304 void AssertOK();
305
306 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
307
308protected:
309 // Note: Instruction needs to be a friend here to call cloneImpl.
310 friend class Instruction;
311
313
314public:
315 LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore);
317 InsertPosition InsertBefore);
319 InsertPosition InsertBefore = nullptr);
321 AtomicOrdering Order,
323 InsertPosition InsertBefore = nullptr);
324
325 // allocate space for exactly two operands
326 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
327 void operator delete(void *Ptr) { User::operator delete(Ptr); }
328
329 /// Return true if this is a store to a volatile memory location.
330 bool isVolatile() const { return getSubclassData<VolatileField>(); }
331
332 /// Specify whether this is a volatile store or not.
333 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
334
335 /// Transparently provide more efficient getOperand methods.
337
338 Align getAlign() const {
339 return Align(1ULL << (getSubclassData<AlignmentField>()));
340 }
341
343 setSubclassData<AlignmentField>(Log2(Align));
344 }
345
346 /// Returns the ordering constraint of this store instruction.
348 return getSubclassData<OrderingField>();
349 }
350
351 /// Sets the ordering constraint of this store instruction. May not be
352 /// Acquire or AcquireRelease.
354 setSubclassData<OrderingField>(Ordering);
355 }
356
357 /// Returns the synchronization scope ID of this store instruction.
359 return SSID;
360 }
361
362 /// Sets the synchronization scope ID of this store instruction.
364 this->SSID = SSID;
365 }
366
367 /// Sets the ordering constraint and the synchronization scope ID of this
368 /// store instruction.
371 setOrdering(Ordering);
372 setSyncScopeID(SSID);
373 }
374
375 bool isSimple() const { return !isAtomic() && !isVolatile(); }
376
377 bool isUnordered() const {
380 !isVolatile();
381 }
382
384 const Value *getValueOperand() const { return getOperand(0); }
385
387 const Value *getPointerOperand() const { return getOperand(1); }
388 static unsigned getPointerOperandIndex() { return 1U; }
390
391 /// Returns the address space of the pointer operand.
392 unsigned getPointerAddressSpace() const {
394 }
395
396 // Methods for support type inquiry through isa, cast, and dyn_cast:
397 static bool classof(const Instruction *I) {
398 return I->getOpcode() == Instruction::Store;
399 }
400 static bool classof(const Value *V) {
401 return isa<Instruction>(V) && classof(cast<Instruction>(V));
402 }
403
404private:
405 // Shadow Instruction::setInstructionSubclassData with a private forwarding
406 // method so that subclasses cannot accidentally use it.
407 template <typename Bitfield>
408 void setSubclassData(typename Bitfield::Type Value) {
409 Instruction::setSubclassData<Bitfield>(Value);
410 }
411
412 /// The synchronization scope ID of this store instruction. Not quite enough
413 /// room in SubClassData for everything, so synchronization scope ID gets its
414 /// own field.
415 SyncScope::ID SSID;
416};
417
418template <>
419struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
420};
421
423
424//===----------------------------------------------------------------------===//
425// FenceInst Class
426//===----------------------------------------------------------------------===//
427
428/// An instruction for ordering other memory operations.
429class FenceInst : public Instruction {
430 using OrderingField = AtomicOrderingBitfieldElementT<0>;
431
432 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
433
434 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
435
436protected:
437 // Note: Instruction needs to be a friend here to call cloneImpl.
438 friend class Instruction;
439
440 LLVM_ABI FenceInst *cloneImpl() const;
441
442public:
443 // Ordering may only be Acquire, Release, AcquireRelease, or
444 // SequentiallyConsistent.
447 InsertPosition InsertBefore = nullptr);
448
449 // allocate space for exactly zero operands
450 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
451 void operator delete(void *Ptr) { User::operator delete(Ptr); }
452
453 /// Returns the ordering constraint of this fence instruction.
455 return getSubclassData<OrderingField>();
456 }
457
458 /// Sets the ordering constraint of this fence instruction. May only be
459 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
461 setSubclassData<OrderingField>(Ordering);
462 }
463
464 /// Returns the synchronization scope ID of this fence instruction.
466 return SSID;
467 }
468
469 /// Sets the synchronization scope ID of this fence instruction.
471 this->SSID = SSID;
472 }
473
474 // Methods for support type inquiry through isa, cast, and dyn_cast:
475 static bool classof(const Instruction *I) {
476 return I->getOpcode() == Instruction::Fence;
477 }
478 static bool classof(const Value *V) {
479 return isa<Instruction>(V) && classof(cast<Instruction>(V));
480 }
481
482private:
483 // Shadow Instruction::setInstructionSubclassData with a private forwarding
484 // method so that subclasses cannot accidentally use it.
485 template <typename Bitfield>
486 void setSubclassData(typename Bitfield::Type Value) {
487 Instruction::setSubclassData<Bitfield>(Value);
488 }
489
490 /// The synchronization scope ID of this fence instruction. Not quite enough
491 /// room in SubClassData for everything, so synchronization scope ID gets its
492 /// own field.
493 SyncScope::ID SSID;
494};
495
496//===----------------------------------------------------------------------===//
497// AtomicCmpXchgInst Class
498//===----------------------------------------------------------------------===//
499
500/// An instruction that atomically checks whether a
501/// specified value is in a memory location, and, if it is, stores a new value
502/// there. The value returned by this instruction is a pair containing the
503/// original value as first element, and an i1 indicating success (true) or
504/// failure (false) as second element.
505///
507 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
508 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
509 SyncScope::ID SSID);
510
511 template <unsigned Offset>
512 using AtomicOrderingBitfieldElement =
515
516 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
517
518protected:
519 // Note: Instruction needs to be a friend here to call cloneImpl.
520 friend class Instruction;
521
523
524public:
526 Align Alignment, AtomicOrdering SuccessOrdering,
527 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
528 InsertPosition InsertBefore = nullptr);
529
530 // allocate space for exactly three operands
531 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
532 void operator delete(void *Ptr) { User::operator delete(Ptr); }
533
542 static_assert(
545 "Bitfields must be contiguous");
546
547 /// Return the alignment of the memory that is being allocated by the
548 /// instruction.
549 Align getAlign() const {
550 return Align(1ULL << getSubclassData<AlignmentField>());
551 }
552
554 setSubclassData<AlignmentField>(Log2(Align));
555 }
556
557 /// Return true if this is a cmpxchg from a volatile memory
558 /// location.
559 ///
560 bool isVolatile() const { return getSubclassData<VolatileField>(); }
561
562 /// Specify whether this is a volatile cmpxchg.
563 ///
564 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
565
566 /// Return true if this cmpxchg may spuriously fail.
567 bool isWeak() const { return getSubclassData<WeakField>(); }
568
569 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
570
571 /// Transparently provide more efficient getOperand methods.
573
575 return Ordering != AtomicOrdering::NotAtomic &&
576 Ordering != AtomicOrdering::Unordered;
577 }
578
580 return Ordering != AtomicOrdering::NotAtomic &&
581 Ordering != AtomicOrdering::Unordered &&
582 Ordering != AtomicOrdering::AcquireRelease &&
583 Ordering != AtomicOrdering::Release;
584 }
585
586 /// Returns the success ordering constraint of this cmpxchg instruction.
588 return getSubclassData<SuccessOrderingField>();
589 }
590
591 /// Sets the success ordering constraint of this cmpxchg instruction.
593 assert(isValidSuccessOrdering(Ordering) &&
594 "invalid CmpXchg success ordering");
595 setSubclassData<SuccessOrderingField>(Ordering);
596 }
597
598 /// Returns the failure ordering constraint of this cmpxchg instruction.
600 return getSubclassData<FailureOrderingField>();
601 }
602
603 /// Sets the failure ordering constraint of this cmpxchg instruction.
605 assert(isValidFailureOrdering(Ordering) &&
606 "invalid CmpXchg failure ordering");
607 setSubclassData<FailureOrderingField>(Ordering);
608 }
609
610 /// Returns a single ordering which is at least as strong as both the
611 /// success and failure orderings for this cmpxchg.
620 }
621 return getSuccessOrdering();
622 }
623
624 /// Returns the synchronization scope ID of this cmpxchg instruction.
626 return SSID;
627 }
628
629 /// Sets the synchronization scope ID of this cmpxchg instruction.
631 this->SSID = SSID;
632 }
633
635 const Value *getPointerOperand() const { return getOperand(0); }
636 static unsigned getPointerOperandIndex() { return 0U; }
637
639 const Value *getCompareOperand() const { return getOperand(1); }
640
642 const Value *getNewValOperand() const { return getOperand(2); }
643
644 /// Returns the address space of the pointer operand.
645 unsigned getPointerAddressSpace() const {
647 }
648
649 /// Returns the strongest permitted ordering on failure, given the
650 /// desired ordering on success.
651 ///
652 /// If the comparison in a cmpxchg operation fails, there is no atomic store
653 /// so release semantics cannot be provided. So this function drops explicit
654 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
655 /// operation would remain SequentiallyConsistent.
656 static AtomicOrdering
658 switch (SuccessOrdering) {
659 default:
660 llvm_unreachable("invalid cmpxchg success ordering");
669 }
670 }
671
672 // Methods for support type inquiry through isa, cast, and dyn_cast:
673 static bool classof(const Instruction *I) {
674 return I->getOpcode() == Instruction::AtomicCmpXchg;
675 }
676 static bool classof(const Value *V) {
677 return isa<Instruction>(V) && classof(cast<Instruction>(V));
678 }
679
680private:
681 // Shadow Instruction::setInstructionSubclassData with a private forwarding
682 // method so that subclasses cannot accidentally use it.
683 template <typename Bitfield>
684 void setSubclassData(typename Bitfield::Type Value) {
685 Instruction::setSubclassData<Bitfield>(Value);
686 }
687
688 /// The synchronization scope ID of this cmpxchg instruction. Not quite
689 /// enough room in SubClassData for everything, so synchronization scope ID
690 /// gets its own field.
691 SyncScope::ID SSID;
692};
693
694template <>
696 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
697};
698
700
701//===----------------------------------------------------------------------===//
702// AtomicRMWInst Class
703//===----------------------------------------------------------------------===//
704
705/// an instruction that atomically reads a memory location,
706/// combines it with another value, and then stores the result back. Returns
707/// the old value.
708///
710protected:
711 // Note: Instruction needs to be a friend here to call cloneImpl.
712 friend class Instruction;
713
714 LLVM_ABI AtomicRMWInst *cloneImpl() const;
715
716public:
717 /// This enumeration lists the possible modifications atomicrmw can make. In
718 /// the descriptions, 'p' is the pointer to the instruction's memory location,
719 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
720 /// instruction. These instructions always return 'old'.
721 enum BinOp : unsigned {
722 /// *p = v
724 /// *p = old + v
726 /// *p = old - v
728 /// *p = old & v
730 /// *p = ~(old & v)
732 /// *p = old | v
734 /// *p = old ^ v
736 /// *p = old >signed v ? old : v
738 /// *p = old <signed v ? old : v
740 /// *p = old >unsigned v ? old : v
742 /// *p = old <unsigned v ? old : v
744
745 /// *p = old + v
747
748 /// *p = old - v
750
751 /// *p = maxnum(old, v)
752 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
754
755 /// *p = minnum(old, v)
756 /// \p minnum matches the behavior of \p llvm.minnum.*.
758
759 /// *p = maximum(old, v)
760 /// \p maximum matches the behavior of \p llvm.maximum.*.
762
763 /// *p = minimum(old, v)
764 /// \p minimum matches the behavior of \p llvm.minimum.*.
766
767 /// Increment one up to a maximum value.
768 /// *p = (old u>= v) ? 0 : (old + 1)
770
771 /// Decrement one until a minimum value or zero.
772 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
774
775 /// Subtract only if no unsigned overflow.
776 /// *p = (old u>= v) ? old - v : old
778
779 /// *p = usub.sat(old, v)
780 /// \p usub.sat matches the behavior of \p llvm.usub.sat.*.
782
783 FIRST_BINOP = Xchg,
784 LAST_BINOP = USubSat,
785 BAD_BINOP
786 };
787
788private:
789 template <unsigned Offset>
790 using AtomicOrderingBitfieldElement =
793
794 template <unsigned Offset>
795 using BinOpBitfieldElement =
797
798 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
799
800public:
801 LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
802 Align Alignment, AtomicOrdering Ordering,
803 SyncScope::ID SSID,
804 InsertPosition InsertBefore = nullptr);
805
806 // allocate space for exactly two operands
807 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
808 void operator delete(void *Ptr) { User::operator delete(Ptr); }
809
813 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
817 "Bitfields must be contiguous");
818
819 BinOp getOperation() const { return getSubclassData<OperationField>(); }
820
821 LLVM_ABI static StringRef getOperationName(BinOp Op);
822
823 static bool isFPOperation(BinOp Op) {
824 switch (Op) {
831 return true;
832 default:
833 return false;
834 }
835 }
836
838 setSubclassData<OperationField>(Operation);
839 }
840
841 /// Return the alignment of the memory that is being allocated by the
842 /// instruction.
843 Align getAlign() const {
844 return Align(1ULL << getSubclassData<AlignmentField>());
845 }
846
848 setSubclassData<AlignmentField>(Log2(Align));
849 }
850
851 /// Return true if this is a RMW on a volatile memory location.
852 ///
853 bool isVolatile() const { return getSubclassData<VolatileField>(); }
854
855 /// Specify whether this is a volatile RMW or not.
856 ///
857 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
858
859 /// Transparently provide more efficient getOperand methods.
861
862 /// Returns the ordering constraint of this rmw instruction.
864 return getSubclassData<AtomicOrderingField>();
865 }
866
867 /// Sets the ordering constraint of this rmw instruction.
869 assert(Ordering != AtomicOrdering::NotAtomic &&
870 "atomicrmw instructions can only be atomic.");
871 assert(Ordering != AtomicOrdering::Unordered &&
872 "atomicrmw instructions cannot be unordered.");
873 setSubclassData<AtomicOrderingField>(Ordering);
874 }
875
876 /// Returns the synchronization scope ID of this rmw instruction.
878 return SSID;
879 }
880
881 /// Sets the synchronization scope ID of this rmw instruction.
883 this->SSID = SSID;
884 }
885
886 Value *getPointerOperand() { return getOperand(0); }
887 const Value *getPointerOperand() const { return getOperand(0); }
888 static unsigned getPointerOperandIndex() { return 0U; }
889
890 Value *getValOperand() { return getOperand(1); }
891 const Value *getValOperand() const { return getOperand(1); }
892
893 /// Returns the address space of the pointer operand.
894 unsigned getPointerAddressSpace() const {
896 }
897
899 return isFPOperation(getOperation());
900 }
901
902 // Methods for support type inquiry through isa, cast, and dyn_cast:
903 static bool classof(const Instruction *I) {
904 return I->getOpcode() == Instruction::AtomicRMW;
905 }
906 static bool classof(const Value *V) {
907 return isa<Instruction>(V) && classof(cast<Instruction>(V));
908 }
909
910private:
911 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
912 AtomicOrdering Ordering, SyncScope::ID SSID);
913
914 // Shadow Instruction::setInstructionSubclassData with a private forwarding
915 // method so that subclasses cannot accidentally use it.
916 template <typename Bitfield>
917 void setSubclassData(typename Bitfield::Type Value) {
918 Instruction::setSubclassData<Bitfield>(Value);
919 }
920
921 /// The synchronization scope ID of this rmw instruction. Not quite enough
922 /// room in SubClassData for everything, so synchronization scope ID gets its
923 /// own field.
924 SyncScope::ID SSID;
925};
926
927template <>
929 : public FixedNumOperandTraits<AtomicRMWInst,2> {
930};
931
933
934//===----------------------------------------------------------------------===//
935// GetElementPtrInst Class
936//===----------------------------------------------------------------------===//
937
938// checkGEPType - Simple wrapper function to give a better assertion failure
939// message on bad indexes for a gep instruction.
940//
942 assert(Ty && "Invalid GetElementPtrInst indices for type!");
943 return Ty;
944}
945
946/// an instruction for type-safe pointer arithmetic to
947/// access elements of arrays and structs
948///
950 Type *SourceElementType;
951 Type *ResultElementType;
952
954
955 /// Constructors - Create a getelementptr instruction with a base pointer an
956 /// list of indices. The first and second ctor can optionally insert before an
957 /// existing instruction, the third appends the new instruction to the
958 /// specified BasicBlock.
959 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
961 const Twine &NameStr, InsertPosition InsertBefore);
962
963 LLVM_ABI void init(Value *Ptr, ArrayRef<Value *> IdxList,
964 const Twine &NameStr);
965
966protected:
967 // Note: Instruction needs to be a friend here to call cloneImpl.
968 friend class Instruction;
969
971
972public:
973 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr = "",
976 InsertPosition InsertBefore = nullptr) {
977 unsigned Values = 1 + unsigned(IdxList.size());
978 assert(PointeeType && "Must specify element type");
979 IntrusiveOperandsAllocMarker AllocMarker{Values};
980 return new (AllocMarker) GetElementPtrInst(
981 PointeeType, Ptr, IdxList, AllocMarker, NameStr, InsertBefore);
982 }
983
984 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
986 const Twine &NameStr = "",
987 InsertPosition InsertBefore = nullptr) {
989 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
990 GEP->setNoWrapFlags(NW);
991 return GEP;
992 }
993
994 /// Create an "inbounds" getelementptr. See the documentation for the
995 /// "inbounds" flag in LangRef.html for details.
996 static GetElementPtrInst *
998 const Twine &NameStr = "",
999 InsertPosition InsertBefore = nullptr) {
1000 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
1001 NameStr, InsertBefore);
1002 }
1003
1004 /// Transparently provide more efficient getOperand methods.
1006
1007 Type *getSourceElementType() const { return SourceElementType; }
1008
1009 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1010 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1011
1013 return ResultElementType;
1014 }
1015
1016 /// Returns the address space of this instruction's pointer type.
1017 unsigned getAddressSpace() const {
1018 // Note that this is always the same as the pointer operand's address space
1019 // and that is cheaper to compute, so cheat here.
1020 return getPointerAddressSpace();
1021 }
1022
1023 /// Returns the result type of a getelementptr with the given source
1024 /// element type and indexes.
1025 ///
1026 /// Null is returned if the indices are invalid for the specified
1027 /// source element type.
1028 LLVM_ABI static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1030 LLVM_ABI static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1031
1032 /// Return the type of the element at the given index of an indexable
1033 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1034 ///
1035 /// Returns null if the type can't be indexed, or the given index is not
1036 /// legal for the given type.
1037 LLVM_ABI static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1039
1040 inline op_iterator idx_begin() { return op_begin()+1; }
1041 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1042 inline op_iterator idx_end() { return op_end(); }
1043 inline const_op_iterator idx_end() const { return op_end(); }
1044
1046 return make_range(idx_begin(), idx_end());
1047 }
1048
1050 return make_range(idx_begin(), idx_end());
1051 }
1052
1054 return getOperand(0);
1055 }
1056 const Value *getPointerOperand() const {
1057 return getOperand(0);
1058 }
1059 static unsigned getPointerOperandIndex() {
1060 return 0U; // get index for modifying correct operand.
1061 }
1062
1063 /// Method to return the pointer operand as a
1064 /// PointerType.
1066 return getPointerOperand()->getType();
1067 }
1068
1069 /// Returns the address space of the pointer operand.
1070 unsigned getPointerAddressSpace() const {
1072 }
1073
1074 /// Returns the pointer type returned by the GEP
1075 /// instruction, which may be a vector of pointers.
1077 // Vector GEP
1078 Type *Ty = Ptr->getType();
1079 if (Ty->isVectorTy())
1080 return Ty;
1081
1082 for (Value *Index : IdxList)
1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084 ElementCount EltCount = IndexVTy->getElementCount();
1085 return VectorType::get(Ty, EltCount);
1086 }
1087 // Scalar GEP
1088 return Ty;
1089 }
1090
1091 unsigned getNumIndices() const { // Note: always non-negative
1092 return getNumOperands() - 1;
1093 }
1094
1095 bool hasIndices() const {
1096 return getNumOperands() > 1;
1097 }
1098
1099 /// Return true if all of the indices of this GEP are
1100 /// zeros. If so, the result pointer and the first operand have the same
1101 /// value, just potentially different types.
1102 LLVM_ABI bool hasAllZeroIndices() const;
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// constant integers. If so, the result pointer and the first operand have
1106 /// a constant offset between them.
1107 LLVM_ABI bool hasAllConstantIndices() const;
1108
1109 /// Set nowrap flags for GEP instruction.
1111
1112 /// Set or clear the inbounds flag on this GEP instruction.
1113 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1114 /// TODO: Remove this method in favor of setNoWrapFlags().
1115 LLVM_ABI void setIsInBounds(bool b = true);
1116
1117 /// Get the nowrap flags for the GEP instruction.
1119
1120 /// Determine whether the GEP has the inbounds flag.
1121 LLVM_ABI bool isInBounds() const;
1122
1123 /// Determine whether the GEP has the nusw flag.
1124 LLVM_ABI bool hasNoUnsignedSignedWrap() const;
1125
1126 /// Determine whether the GEP has the nuw flag.
1127 LLVM_ABI bool hasNoUnsignedWrap() const;
1128
1129 /// Accumulate the constant address offset of this GEP if possible.
1130 ///
1131 /// This routine accepts an APInt into which it will accumulate the constant
1132 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1133 /// all-constant, it returns false and the value of the offset APInt is
1134 /// undefined (it is *not* preserved!). The APInt passed into this routine
1135 /// must be at least as wide as the IntPtr type for the address space of
1136 /// the base GEP pointer.
1138 APInt &Offset) const;
1139 LLVM_ABI bool
1140 collectOffset(const DataLayout &DL, unsigned BitWidth,
1141 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1142 APInt &ConstantOffset) const;
1143 // Methods for support type inquiry through isa, cast, and dyn_cast:
1144 static bool classof(const Instruction *I) {
1145 return (I->getOpcode() == Instruction::GetElementPtr);
1146 }
1147 static bool classof(const Value *V) {
1148 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1149 }
1150};
1151
1152template <>
1154 : public VariadicOperandTraits<GetElementPtrInst> {};
1155
// Out-of-line definition of the GetElementPtrInst constructor. The result
// type is computed by getGEPReturnType (a vector of pointers when the base
// pointer or any index is a vector, see its definition above); the pointee
// type and the indexed result element type are stashed in the corresponding
// members before init() wires up the operands and name.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList,
                                     AllocInfo AllocInfo, const Twine &NameStr,
                                     InsertPosition InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, AllocInfo,
                  InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}
1166
1167DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1168
1169//===----------------------------------------------------------------------===//
1170// ICmpInst Class
1171//===----------------------------------------------------------------------===//
1172
1173/// This instruction compares its operands according to the predicate given
1174/// to the constructor. It only operates on integers or pointers. The operands
1175/// must be identical types.
1176/// Represent an integer comparison operator.
1177class ICmpInst: public CmpInst {
1178 void AssertOK() {
1179 assert(isIntPredicate() &&
1180 "Invalid ICmp predicate value");
1181 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1182 "Both operands to ICmp instruction are not of the same type!");
1183 // Check that the operands are the right type
1184 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1185 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1186 "Invalid operand types for ICmp instruction");
1187 }
1188
1189 enum { SameSign = (1 << 0) };
1190
1191protected:
1192 // Note: Instruction needs to be a friend here to call cloneImpl.
1193 friend class Instruction;
1194
1195 /// Clone an identical ICmpInst
1196 LLVM_ABI ICmpInst *cloneImpl() const;
1197
1198public:
1199 /// Constructor with insertion semantics.
1200 ICmpInst(InsertPosition InsertBefore, ///< Where to insert
1201 Predicate pred, ///< The predicate to use for the comparison
1202 Value *LHS, ///< The left-hand-side of the expression
1203 Value *RHS, ///< The right-hand-side of the expression
1204 const Twine &NameStr = "" ///< Name of the instruction
1205 )
1206 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::ICmp, pred, LHS,
1207 RHS, NameStr, InsertBefore) {
1208#ifndef NDEBUG
1209 AssertOK();
1210#endif
1211 }
1212
1213 /// Constructor with no-insertion semantics
1215 Predicate pred, ///< The predicate to use for the comparison
1216 Value *LHS, ///< The left-hand-side of the expression
1217 Value *RHS, ///< The right-hand-side of the expression
1218 const Twine &NameStr = "" ///< Name of the instruction
1219 ) : CmpInst(makeCmpResultType(LHS->getType()),
1220 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1221#ifndef NDEBUG
1222 AssertOK();
1223#endif
1224 }
1225
1226 /// @returns the predicate along with samesign information.
1228 return {getPredicate(), hasSameSign()};
1229 }
1230
1231 /// @returns the inverse predicate along with samesign information: static
1232 /// variant.
1234 return {getInversePredicate(Pred), Pred.hasSameSign()};
1235 }
1236
1237 /// @returns the inverse predicate along with samesign information.
1239 return getInverseCmpPredicate(getCmpPredicate());
1240 }
1241
1242 /// @returns the swapped predicate along with samesign information: static
1243 /// variant.
1245 return {getSwappedPredicate(Pred), Pred.hasSameSign()};
1246 }
1247
1248 /// @returns the swapped predicate along with samesign information.
1250 return getSwappedCmpPredicate(getCmpPredicate());
1251 }
1252
1253 /// @returns the non-strict predicate along with samesign information: static
1254 /// variant.
1256 return {getNonStrictPredicate(Pred), Pred.hasSameSign()};
1257 }
1258
1259 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
1260 /// @returns the non-strict predicate along with samesign information.
1262 return getNonStrictCmpPredicate(getCmpPredicate());
1263 }
1264
1265 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1266 /// @returns the predicate that would be the result if the operand were
1267 /// regarded as signed.
1268 /// Return the signed version of the predicate.
1270 return getSignedPredicate(getPredicate());
1271 }
1272
1273 /// Return the signed version of the predicate: static variant.
1274 LLVM_ABI static Predicate getSignedPredicate(Predicate Pred);
1275
1276 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1277 /// @returns the predicate that would be the result if the operand were
1278 /// regarded as unsigned.
1279 /// Return the unsigned version of the predicate.
1281 return getUnsignedPredicate(getPredicate());
1282 }
1283
1284 /// Return the unsigned version of the predicate: static variant.
1285 LLVM_ABI static Predicate getUnsignedPredicate(Predicate Pred);
1286
1287 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1288 /// @returns the unsigned version of the signed predicate pred or
1289 /// the signed version of the signed predicate pred.
1290 /// Static variant.
1291 LLVM_ABI static Predicate getFlippedSignednessPredicate(Predicate Pred);
1292
1293 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1294 /// @returns the unsigned version of the signed predicate pred or
1295 /// the signed version of the signed predicate pred.
1297 return getFlippedSignednessPredicate(getPredicate());
1298 }
1299
1300 /// Determine if Pred1 implies Pred2 is true, false, or if nothing can be
1301 /// inferred about the implication, when two compares have matching operands.
1302 LLVM_ABI static std::optional<bool>
1303 isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2);
1304
1305 void setSameSign(bool B = true) {
1306 SubclassOptionalData = (SubclassOptionalData & ~SameSign) | (B * SameSign);
1307 }
1308
1309 /// An icmp instruction, which can be marked as "samesign", indicating that
1310 /// the two operands have the same sign. This means that we can convert
1311 /// "slt" to "ult" and vice versa, which enables more optimizations.
1312 bool hasSameSign() const { return SubclassOptionalData & SameSign; }
1313
1314 /// Return true if this predicate is either EQ or NE. This also
1315 /// tests for commutativity.
1316 static bool isEquality(Predicate P) {
1317 return P == ICMP_EQ || P == ICMP_NE;
1318 }
1319
1320 /// Return true if this predicate is either EQ or NE. This also
1321 /// tests for commutativity.
1322 bool isEquality() const {
1323 return isEquality(getPredicate());
1324 }
1325
1326 /// @returns true if the predicate is commutative
1327 /// Determine if this relation is commutative.
1328 static bool isCommutative(Predicate P) { return isEquality(P); }
1329
1330 /// @returns true if the predicate of this ICmpInst is commutative
1331 /// Determine if this relation is commutative.
1332 bool isCommutative() const { return isCommutative(getPredicate()); }
1333
1334 /// Return true if the predicate is relational (not EQ or NE).
1335 ///
1336 bool isRelational() const {
1337 return !isEquality();
1338 }
1339
1340 /// Return true if the predicate is relational (not EQ or NE).
1341 ///
1342 static bool isRelational(Predicate P) {
1343 return !isEquality(P);
1344 }
1345
1346 /// Return true if the predicate is SGT or UGT.
1347 ///
1348 static bool isGT(Predicate P) {
1349 return P == ICMP_SGT || P == ICMP_UGT;
1350 }
1351
1352 /// Return true if the predicate is SLT or ULT.
1353 ///
1354 static bool isLT(Predicate P) {
1355 return P == ICMP_SLT || P == ICMP_ULT;
1356 }
1357
1358 /// Return true if the predicate is SGE or UGE.
1359 ///
1360 static bool isGE(Predicate P) {
1361 return P == ICMP_SGE || P == ICMP_UGE;
1362 }
1363
1364 /// Return true if the predicate is SLE or ULE.
1365 ///
1366 static bool isLE(Predicate P) {
1367 return P == ICMP_SLE || P == ICMP_ULE;
1368 }
1369
1370 /// Returns the sequence of all ICmp predicates.
1371 ///
1372 static auto predicates() { return ICmpPredicates(); }
1373
1374 /// Exchange the two operands to this instruction in such a way that it does
1375 /// not modify the semantics of the instruction. The predicate value may be
1376 /// changed to retain the same result if the predicate is order dependent
1377 /// (e.g. ult).
1378 /// Swap operands and adjust predicate.
1380 setPredicate(getSwappedPredicate());
1381 Op<0>().swap(Op<1>());
1382 }
1383
1384 /// Return result of `LHS Pred RHS` comparison.
1385 LLVM_ABI static bool compare(const APInt &LHS, const APInt &RHS,
1386 ICmpInst::Predicate Pred);
1387
1388 /// Return result of `LHS Pred RHS`, if it can be determined from the
1389 /// KnownBits. Otherwise return nullopt.
1390 LLVM_ABI static std::optional<bool>
1391 compare(const KnownBits &LHS, const KnownBits &RHS, ICmpInst::Predicate Pred);
1392
1393 // Methods for support type inquiry through isa, cast, and dyn_cast:
1394 static bool classof(const Instruction *I) {
1395 return I->getOpcode() == Instruction::ICmp;
1396 }
1397 static bool classof(const Value *V) {
1398 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1399 }
1400};
1401
1402//===----------------------------------------------------------------------===//
1403// FCmpInst Class
1404//===----------------------------------------------------------------------===//
1405
1406/// This instruction compares its operands according to the predicate given
1407/// to the constructor. It only operates on floating point values or packed
1408/// vectors of floating point values. The operands must be identical types.
1409/// Represents a floating point comparison operator.
1410class FCmpInst: public CmpInst {
1411 void AssertOK() {
1412 assert(isFPPredicate() && "Invalid FCmp predicate value");
1413 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1414 "Both operands to FCmp instruction are not of the same type!");
1415 // Check that the operands are the right type
1416 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1417 "Invalid operand types for FCmp instruction");
1418 }
1419
1420protected:
1421 // Note: Instruction needs to be a friend here to call cloneImpl.
1422 friend class Instruction;
1423
1424 /// Clone an identical FCmpInst
1425 LLVM_ABI FCmpInst *cloneImpl() const;
1426
1427public:
1428 /// Constructor with insertion semantics.
1429 FCmpInst(InsertPosition InsertBefore, ///< Where to insert
1430 Predicate pred, ///< The predicate to use for the comparison
1431 Value *LHS, ///< The left-hand-side of the expression
1432 Value *RHS, ///< The right-hand-side of the expression
1433 const Twine &NameStr = "" ///< Name of the instruction
1434 )
1435 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, pred, LHS,
1436 RHS, NameStr, InsertBefore) {
1437 AssertOK();
1438 }
1439
1440 /// Constructor with no-insertion semantics
1441 FCmpInst(Predicate Pred, ///< The predicate to use for the comparison
1442 Value *LHS, ///< The left-hand-side of the expression
1443 Value *RHS, ///< The right-hand-side of the expression
1444 const Twine &NameStr = "", ///< Name of the instruction
1445 Instruction *FlagsSource = nullptr)
1446 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1447 RHS, NameStr, nullptr, FlagsSource) {
1448 AssertOK();
1449 }
1450
1451 /// @returns true if the predicate is EQ or NE.
1452 /// Determine if this is an equality predicate.
1453 static bool isEquality(Predicate Pred) {
1454 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1455 Pred == FCMP_UNE;
1456 }
1457
1458 /// @returns true if the predicate of this instruction is EQ or NE.
1459 /// Determine if this is an equality predicate.
1460 bool isEquality() const { return isEquality(getPredicate()); }
1461
1462 /// @returns true if the predicate is commutative.
1463 /// Determine if this is a commutative predicate.
1464 static bool isCommutative(Predicate Pred) {
1465 return isEquality(Pred) || Pred == FCMP_FALSE || Pred == FCMP_TRUE ||
1466 Pred == FCMP_ORD || Pred == FCMP_UNO;
1467 }
1468
1469 /// @returns true if the predicate of this instruction is commutative.
1470 /// Determine if this is a commutative predicate.
1471 bool isCommutative() const { return isCommutative(getPredicate()); }
1472
1473 /// @returns true if the predicate is relational (not EQ or NE).
1474 /// Determine if this a relational predicate.
1475 bool isRelational() const { return !isEquality(); }
1476
1477 /// Exchange the two operands to this instruction in such a way that it does
1478 /// not modify the semantics of the instruction. The predicate value may be
1479 /// changed to retain the same result if the predicate is order dependent
1480 /// (e.g. ult).
1481 /// Swap operands and adjust predicate.
1484 Op<0>().swap(Op<1>());
1485 }
1486
1487 /// Returns the sequence of all FCmp predicates.
1488 ///
1489 static auto predicates() { return FCmpPredicates(); }
1490
1491 /// Return result of `LHS Pred RHS` comparison.
1492 LLVM_ABI static bool compare(const APFloat &LHS, const APFloat &RHS,
1493 FCmpInst::Predicate Pred);
1494
1495 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1496 static bool classof(const Instruction *I) {
1497 return I->getOpcode() == Instruction::FCmp;
1498 }
1499 static bool classof(const Value *V) {
1500 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1501 }
1502};
1503
1504//===----------------------------------------------------------------------===//
1505/// This class represents a function call, abstracting a target
1506/// machine's calling convention. This class uses low bit of the SubClassData
1507/// field to indicate whether or not this is a tail call. The rest of the bits
1508/// hold the calling convention of the call.
1509///
1510class CallInst : public CallBase {
1512
1513 /// Construct a CallInst from a range of arguments
1514 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1515 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1516 AllocInfo AllocInfo, InsertPosition InsertBefore);
1517
1518 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1519 const Twine &NameStr, AllocInfo AllocInfo,
1520 InsertPosition InsertBefore)
1521 : CallInst(Ty, Func, Args, {}, NameStr, AllocInfo, InsertBefore) {}
1522
1523 LLVM_ABI explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1524 AllocInfo AllocInfo, InsertPosition InsertBefore);
1525
1526 LLVM_ABI void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1527 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1528 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1529
1530 /// Compute the number of operands to allocate.
1531 static unsigned ComputeNumOperands(unsigned NumArgs,
1532 unsigned NumBundleInputs = 0) {
1533 // We need one operand for the called function, plus the input operand
1534 // counts provided.
1535 return 1 + NumArgs + NumBundleInputs;
1536 }
1537
1538protected:
1539 // Note: Instruction needs to be a friend here to call cloneImpl.
1540 friend class Instruction;
1541
1542 LLVM_ABI CallInst *cloneImpl() const;
1543
1544public:
1545 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1546 InsertPosition InsertBefore = nullptr) {
1547 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(0)};
1548 return new (AllocMarker)
1549 CallInst(Ty, F, NameStr, AllocMarker, InsertBefore);
1550 }
1551
1553 const Twine &NameStr,
1554 InsertPosition InsertBefore = nullptr) {
1555 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(Args.size())};
1556 return new (AllocMarker)
1557 CallInst(Ty, Func, Args, {}, NameStr, AllocMarker, InsertBefore);
1558 }
1559
1561 ArrayRef<OperandBundleDef> Bundles = {},
1562 const Twine &NameStr = "",
1563 InsertPosition InsertBefore = nullptr) {
1564 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
1565 ComputeNumOperands(unsigned(Args.size()), CountBundleInputs(Bundles)),
1566 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
1567
1568 return new (AllocMarker)
1569 CallInst(Ty, Func, Args, Bundles, NameStr, AllocMarker, InsertBefore);
1570 }
1571
1572 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1573 InsertPosition InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1575 InsertBefore);
1576 }
1577
1579 ArrayRef<OperandBundleDef> Bundles = {},
1580 const Twine &NameStr = "",
1581 InsertPosition InsertBefore = nullptr) {
1582 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1583 NameStr, InsertBefore);
1584 }
1585
1587 const Twine &NameStr,
1588 InsertPosition InsertBefore = nullptr) {
1589 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1590 InsertBefore);
1591 }
1592
1593 /// Create a clone of \p CI with a different set of operand bundles and
1594 /// insert it before \p InsertBefore.
1595 ///
1596 /// The returned call instruction is identical \p CI in every way except that
1597 /// the operand bundles for the new instruction are set to the operand bundles
1598 /// in \p Bundles.
1599 LLVM_ABI static CallInst *Create(CallInst *CI,
1601 InsertPosition InsertPt = nullptr);
1602
1603 // Note that 'musttail' implies 'tail'.
1604 enum TailCallKind : unsigned {
1611
1613 static_assert(
1614 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1615 "Bitfields must be contiguous");
1616
1618 return getSubclassData<TailCallKindField>();
1619 }
1620
1621 bool isTailCall() const {
1623 return Kind == TCK_Tail || Kind == TCK_MustTail;
1624 }
1625
1626 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1627
1628 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1629
1631 setSubclassData<TailCallKindField>(TCK);
1632 }
1633
1634 void setTailCall(bool IsTc = true) {
1636 }
1637
1638 /// Return true if the call can return twice
1639 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1640 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1641
1642 /// Return true if the call is for a noreturn trap intrinsic.
1644 switch (getIntrinsicID()) {
1645 case Intrinsic::trap:
1646 case Intrinsic::ubsantrap:
1647 return !hasFnAttr("trap-func-name");
1648 default:
1649 return false;
1650 }
1651 }
1652
1653 // Methods for support type inquiry through isa, cast, and dyn_cast:
1654 static bool classof(const Instruction *I) {
1655 return I->getOpcode() == Instruction::Call;
1656 }
1657 static bool classof(const Value *V) {
1658 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1659 }
1660
1661 /// Updates profile metadata by scaling it by \p S / \p T.
1663
1664private:
1665 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1666 // method so that subclasses cannot accidentally use it.
1667 template <typename Bitfield>
1668 void setSubclassData(typename Bitfield::Type Value) {
1669 Instruction::setSubclassData<Bitfield>(Value);
1670 }
1671};
1672
// Out-of-line definition of the variadic CallInst constructor. Checks (in
// asserts builds) that the pre-computed operand allocation covers the callee
// slot plus all argument and bundle-input operands, then defers operand and
// name setup to init().
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  assert(AllocInfo.NumOps ==
         unsigned(Args.size() + CountBundleInputs(Bundles) + 1));
  init(Ty, Func, Args, Bundles, NameStr);
}
1682
1683//===----------------------------------------------------------------------===//
1684// SelectInst Class
1685//===----------------------------------------------------------------------===//
1686
1687/// This class represents the LLVM 'select' instruction.
1688///
1689class SelectInst : public Instruction {
1690 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1691
1692 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1693 InsertPosition InsertBefore)
1694 : Instruction(S1->getType(), Instruction::Select, AllocMarker,
1695 InsertBefore) {
1696 init(C, S1, S2);
1697 setName(NameStr);
1698 }
1699
1700 void init(Value *C, Value *S1, Value *S2) {
1701 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1702 Op<0>() = C;
1703 Op<1>() = S1;
1704 Op<2>() = S2;
1705 }
1706
1707protected:
1708 // Note: Instruction needs to be a friend here to call cloneImpl.
1709 friend class Instruction;
1710
1711 LLVM_ABI SelectInst *cloneImpl() const;
1712
1713public:
1715 const Twine &NameStr = "",
1716 InsertPosition InsertBefore = nullptr,
1717 Instruction *MDFrom = nullptr) {
1718 SelectInst *Sel =
1719 new (AllocMarker) SelectInst(C, S1, S2, NameStr, InsertBefore);
1720 if (MDFrom)
1721 Sel->copyMetadata(*MDFrom);
1722 return Sel;
1723 }
1724
1725 const Value *getCondition() const { return Op<0>(); }
1726 const Value *getTrueValue() const { return Op<1>(); }
1727 const Value *getFalseValue() const { return Op<2>(); }
1728 Value *getCondition() { return Op<0>(); }
1729 Value *getTrueValue() { return Op<1>(); }
1730 Value *getFalseValue() { return Op<2>(); }
1731
1732 void setCondition(Value *V) { Op<0>() = V; }
1733 void setTrueValue(Value *V) { Op<1>() = V; }
1734 void setFalseValue(Value *V) { Op<2>() = V; }
1735
1736 /// Swap the true and false values of the select instruction.
1737 /// This doesn't swap prof metadata.
1738 void swapValues() { Op<1>().swap(Op<2>()); }
1739
1740 /// Return a string if the specified operands are invalid
1741 /// for a select operation, otherwise return null.
1742 LLVM_ABI static const char *areInvalidOperands(Value *Cond, Value *True,
1743 Value *False);
1744
1745 /// Transparently provide more efficient getOperand methods.
1747
1749 return static_cast<OtherOps>(Instruction::getOpcode());
1750 }
1751
1752 // Methods for support type inquiry through isa, cast, and dyn_cast:
1753 static bool classof(const Instruction *I) {
1754 return I->getOpcode() == Instruction::Select;
1755 }
1756 static bool classof(const Value *V) {
1757 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1758 }
1759};
1760
1761template <>
1762struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1763};
1764
1766
1767//===----------------------------------------------------------------------===//
1768// VAArgInst Class
1769//===----------------------------------------------------------------------===//
1770
1771/// This class represents the va_arg llvm instruction, which returns
1772/// an argument of the specified type given a va_list and increments that list
1773///
1775protected:
1776 // Note: Instruction needs to be a friend here to call cloneImpl.
1777 friend class Instruction;
1778
1779 LLVM_ABI VAArgInst *cloneImpl() const;
1780
1781public:
1782 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1783 InsertPosition InsertBefore = nullptr)
1784 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1785 setName(NameStr);
1786 }
1787
1788 Value *getPointerOperand() { return getOperand(0); }
1789 const Value *getPointerOperand() const { return getOperand(0); }
1790 static unsigned getPointerOperandIndex() { return 0U; }
1791
1792 // Methods for support type inquiry through isa, cast, and dyn_cast:
1793 static bool classof(const Instruction *I) {
1794 return I->getOpcode() == VAArg;
1795 }
1796 static bool classof(const Value *V) {
1797 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1798 }
1799};
1800
1801//===----------------------------------------------------------------------===//
1802// ExtractElementInst Class
1803//===----------------------------------------------------------------------===//
1804
1805/// This instruction extracts a single (scalar)
1806/// element from a VectorType value
1807///
1809 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1810
1811 LLVM_ABI ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1812 InsertPosition InsertBefore = nullptr);
1813
1814protected:
1815 // Note: Instruction needs to be a friend here to call cloneImpl.
1816 friend class Instruction;
1817
1819
1820public:
1822 const Twine &NameStr = "",
1823 InsertPosition InsertBefore = nullptr) {
1824 return new (AllocMarker)
1825 ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1826 }
1827
1828 /// Return true if an extractelement instruction can be
1829 /// formed with the specified operands.
1830 LLVM_ABI static bool isValidOperands(const Value *Vec, const Value *Idx);
1831
1833 Value *getIndexOperand() { return Op<1>(); }
1834 const Value *getVectorOperand() const { return Op<0>(); }
1835 const Value *getIndexOperand() const { return Op<1>(); }
1836
1838 return cast<VectorType>(getVectorOperand()->getType());
1839 }
1840
1841 /// Transparently provide more efficient getOperand methods.
1843
1844 // Methods for support type inquiry through isa, cast, and dyn_cast:
1845 static bool classof(const Instruction *I) {
1846 return I->getOpcode() == Instruction::ExtractElement;
1847 }
1848 static bool classof(const Value *V) {
1849 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1850 }
1851};
1852
1853template <>
1855 public FixedNumOperandTraits<ExtractElementInst, 2> {
1856};
1857
1859
1860//===----------------------------------------------------------------------===//
1861// InsertElementInst Class
1862//===----------------------------------------------------------------------===//
1863
1864/// This instruction inserts a single (scalar)
1865/// element into a VectorType value
1866///
1868 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1869
1871 const Twine &NameStr = "",
1872 InsertPosition InsertBefore = nullptr);
1873
1874protected:
1875 // Note: Instruction needs to be a friend here to call cloneImpl.
1876 friend class Instruction;
1877
1878 LLVM_ABI InsertElementInst *cloneImpl() const;
1879
1880public:
1881 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1882 const Twine &NameStr = "",
1883 InsertPosition InsertBefore = nullptr) {
1884 return new (AllocMarker)
1885 InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1886 }
1887
1888 /// Return true if an insertelement instruction can be
1889 /// formed with the specified operands.
1890 LLVM_ABI static bool isValidOperands(const Value *Vec, const Value *NewElt,
1891 const Value *Idx);
1892
1893 /// Overload to return most specific vector type.
1894 ///
1896 return cast<VectorType>(Instruction::getType());
1897 }
1898
1899 /// Transparently provide more efficient getOperand methods.
1901
1902 // Methods for support type inquiry through isa, cast, and dyn_cast:
1903 static bool classof(const Instruction *I) {
1904 return I->getOpcode() == Instruction::InsertElement;
1905 }
1906 static bool classof(const Value *V) {
1907 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1908 }
1909};
1910
1911template <>
1913 public FixedNumOperandTraits<InsertElementInst, 3> {
1914};
1915
1917
1918//===----------------------------------------------------------------------===//
1919// ShuffleVectorInst Class
1920//===----------------------------------------------------------------------===//
1921
/// Shuffle-mask sentinel: a mask element of -1 means the corresponding result
/// element is poison (see the ShuffleVectorInst description below).
constexpr int PoisonMaskElem = -1;
1923
1924/// This instruction constructs a fixed permutation of two
1925/// input vectors.
1926///
1927/// For each element of the result vector, the shuffle mask selects an element
1928/// from one of the input vectors to copy to the result. Non-negative elements
1929/// in the mask represent an index into the concatenated pair of input vectors.
1930/// PoisonMaskElem (-1) specifies that the result element is poison.
1931///
1932/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1933/// requirement may be relaxed in the future.
1935 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1936
1937 SmallVector<int, 4> ShuffleMask;
1938 Constant *ShuffleMaskForBitcode;
1939
1940protected:
1941 // Note: Instruction needs to be a friend here to call cloneImpl.
1942 friend class Instruction;
1943
1944 LLVM_ABI ShuffleVectorInst *cloneImpl() const;
1945
1946public:
1947 LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1948 InsertPosition InsertBefore = nullptr);
1950 const Twine &NameStr = "",
1951 InsertPosition InsertBefore = nullptr);
1952 LLVM_ABI ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1953 const Twine &NameStr = "",
1954 InsertPosition InsertBefore = nullptr);
1956 const Twine &NameStr = "",
1957 InsertPosition InsertBefore = nullptr);
1958
1959 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
1960 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
1961
1962 /// Swap the operands and adjust the mask to preserve the semantics
1963 /// of the instruction.
1964 LLVM_ABI void commute();
1965
1966 /// Return true if a shufflevector instruction can be
1967 /// formed with the specified operands.
1968 LLVM_ABI static bool isValidOperands(const Value *V1, const Value *V2,
1969 const Value *Mask);
1970 LLVM_ABI static bool isValidOperands(const Value *V1, const Value *V2,
1971 ArrayRef<int> Mask);
1972
1973 /// Overload to return most specific vector type.
1974 ///
1976 return cast<VectorType>(Instruction::getType());
1977 }
1978
1979 /// Transparently provide more efficient getOperand methods.
1981
1982 /// Return the shuffle mask value of this instruction for the given element
1983 /// index. Return PoisonMaskElem if the element is undef.
1984 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
1985
1986 /// Convert the input shuffle mask operand to a vector of integers. Undefined
1987 /// elements of the mask are returned as PoisonMaskElem.
1988 LLVM_ABI static void getShuffleMask(const Constant *Mask,
1989 SmallVectorImpl<int> &Result);
1990
1991 /// Return the mask for this instruction as a vector of integers. Undefined
1992 /// elements of the mask are returned as PoisonMaskElem.
1994 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
1995 }
1996
  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  /// Convert an integer shuffle mask to the Constant form used by the bitcode
  /// representation (see getShuffleMaskForBitcode).
  LLVM_ABI static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                         Type *ResultTy);

  /// Set the shuffle mask of this instruction to \p Mask.
  LLVM_ABI void setShuffleMask(ArrayRef<int> Mask);

  /// Return the shuffle mask as a flat integer array.
  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2009
2010 /// Return true if this shuffle returns a vector with a different number of
2011 /// elements than its source vectors.
2012 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2013 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2014 bool changesLength() const {
2015 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2016 ->getElementCount()
2017 .getKnownMinValue();
2018 unsigned NumMaskElts = ShuffleMask.size();
2019 return NumSourceElts != NumMaskElts;
2020 }
2021
2022 /// Return true if this shuffle returns a vector with a greater number of
2023 /// elements than its source vectors.
2024 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2025 bool increasesLength() const {
2026 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2027 ->getElementCount()
2028 .getKnownMinValue();
2029 unsigned NumMaskElts = ShuffleMask.size();
2030 return NumSourceElts < NumMaskElts;
2031 }
2032
2033 /// Return true if this shuffle mask chooses elements from exactly one source
2034 /// vector.
2035 /// Example: <7,5,undef,7>
2036 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2037 /// length as the mask.
2038 LLVM_ABI static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2039 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2040 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2041 SmallVector<int, 16> MaskAsInts;
2042 getShuffleMask(Mask, MaskAsInts);
2043 return isSingleSourceMask(MaskAsInts, NumSrcElts);
2044 }
2045
2046 /// Return true if this shuffle chooses elements from exactly one source
2047 /// vector without changing the length of that vector.
2048 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2049 /// TODO: Optionally allow length-changing shuffles.
2050 bool isSingleSource() const {
2051 return !changesLength() &&
2052 isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2053 }
2054
2055 /// Return true if this shuffle mask chooses elements from exactly one source
2056 /// vector without lane crossings. A shuffle using this mask is not
2057 /// necessarily a no-op because it may change the number of elements from its
2058 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2059 /// Example: <undef,undef,2,3>
2060 LLVM_ABI static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2061 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2062 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2063
2064 // Not possible to express a shuffle mask for a scalable vector for this
2065 // case.
2066 if (isa<ScalableVectorType>(Mask->getType()))
2067 return false;
2068
2069 SmallVector<int, 16> MaskAsInts;
2070 getShuffleMask(Mask, MaskAsInts);
2071 return isIdentityMask(MaskAsInts, NumSrcElts);
2072 }
2073
2074 /// Return true if this shuffle chooses elements from exactly one source
2075 /// vector without lane crossings and does not change the number of elements
2076 /// from its input vectors.
2077 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2078 bool isIdentity() const {
2079 // Not possible to express a shuffle mask for a scalable vector for this
2080 // case.
2081 if (isa<ScalableVectorType>(getType()))
2082 return false;
2083
2084 return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2085 }
2086
  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  LLVM_ABI bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  LLVM_ABI bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  LLVM_ABI bool isConcat() const;
2099
2100 /// Return true if this shuffle mask chooses elements from its source vectors
2101 /// without lane crossings. A shuffle using this mask would be
2102 /// equivalent to a vector select with a constant condition operand.
2103 /// Example: <4,1,6,undef>
2104 /// This returns false if the mask does not choose from both input vectors.
2105 /// In that case, the shuffle is better classified as an identity shuffle.
2106 /// This assumes that vector operands are the same length as the mask
2107 /// (a length-changing shuffle can never be equivalent to a vector select).
2108 LLVM_ABI static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2109 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2110 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2111 SmallVector<int, 16> MaskAsInts;
2112 getShuffleMask(Mask, MaskAsInts);
2113 return isSelectMask(MaskAsInts, NumSrcElts);
2114 }
2115
2116 /// Return true if this shuffle chooses elements from its source vectors
2117 /// without lane crossings and all operands have the same number of elements.
2118 /// In other words, this shuffle is equivalent to a vector select with a
2119 /// constant condition operand.
2120 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2121 /// This returns false if the mask does not choose from both input vectors.
2122 /// In that case, the shuffle is better classified as an identity shuffle.
2123 /// TODO: Optionally allow length-changing shuffles.
2124 bool isSelect() const {
2125 return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2126 }
2127
2128 /// Return true if this shuffle mask swaps the order of elements from exactly
2129 /// one source vector.
2130 /// Example: <7,6,undef,4>
2131 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2132 /// length as the mask.
2133 LLVM_ABI static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2134 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2135 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2136 SmallVector<int, 16> MaskAsInts;
2137 getShuffleMask(Mask, MaskAsInts);
2138 return isReverseMask(MaskAsInts, NumSrcElts);
2139 }
2140
2141 /// Return true if this shuffle swaps the order of elements from exactly
2142 /// one source vector.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2144 /// TODO: Optionally allow length-changing shuffles.
2145 bool isReverse() const {
2146 return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2147 }
2148
2149 /// Return true if this shuffle mask chooses all elements with the same value
2150 /// as the first element of exactly one source vector.
2151 /// Example: <4,undef,undef,4>
2152 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2153 /// length as the mask.
2154 LLVM_ABI static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2155 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2156 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2157 SmallVector<int, 16> MaskAsInts;
2158 getShuffleMask(Mask, MaskAsInts);
2159 return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2160 }
2161
2162 /// Return true if all elements of this shuffle are the same value as the
2163 /// first element of exactly one source vector without changing the length
2164 /// of that vector.
2165 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2166 /// TODO: Optionally allow length-changing shuffles.
2167 /// TODO: Optionally allow splats from other elements.
2168 bool isZeroEltSplat() const {
2169 return !changesLength() &&
2170 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2171 }
2172
2173 /// Return true if this shuffle mask is a transpose mask.
2174 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2175 /// even- or odd-numbered vector elements from two n-dimensional source
2176 /// vectors and write each result into consecutive elements of an
2177 /// n-dimensional destination vector. Two shuffles are necessary to complete
2178 /// the transpose, one for the even elements and another for the odd elements.
2179 /// This description closely follows how the TRN1 and TRN2 AArch64
2180 /// instructions operate.
2181 ///
2182 /// For example, a simple 2x2 matrix can be transposed with:
2183 ///
2184 /// ; Original matrix
2185 /// m0 = < a, b >
2186 /// m1 = < c, d >
2187 ///
2188 /// ; Transposed matrix
2189 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2190 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2191 ///
2192 /// For matrices having greater than n columns, the resulting nx2 transposed
2193 /// matrix is stored in two result vectors such that one vector contains
2194 /// interleaved elements from all the even-numbered rows and the other vector
2195 /// contains interleaved elements from all the odd-numbered rows. For example,
2196 /// a 2x4 matrix can be transposed with:
2197 ///
2198 /// ; Original matrix
2199 /// m0 = < a, b, c, d >
2200 /// m1 = < e, f, g, h >
2201 ///
2202 /// ; Transposed matrix
2203 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2204 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2205 LLVM_ABI static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2206 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2207 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2208 SmallVector<int, 16> MaskAsInts;
2209 getShuffleMask(Mask, MaskAsInts);
2210 return isTransposeMask(MaskAsInts, NumSrcElts);
2211 }
2212
2213 /// Return true if this shuffle transposes the elements of its inputs without
2214 /// changing the length of the vectors. This operation may also be known as a
2215 /// merge or interleave. See the description for isTransposeMask() for the
2216 /// exact specification.
2217 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2218 bool isTranspose() const {
2219 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2220 }
2221
2222 /// Return true if this shuffle mask is a splice mask, concatenating the two
2223 /// inputs together and then extracts an original width vector starting from
2224 /// the splice index.
2225 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2226 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2227 /// length as the mask.
2228 LLVM_ABI static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2229 int &Index);
2230 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2231 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2232 SmallVector<int, 16> MaskAsInts;
2233 getShuffleMask(Mask, MaskAsInts);
2234 return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2235 }
2236
2237 /// Return true if this shuffle splices two inputs without changing the length
2238 /// of the vectors. This operation concatenates the two inputs together and
2239 /// then extracts an original width vector starting from the splice index.
2240 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2241 bool isSplice(int &Index) const {
2242 return !changesLength() &&
2243 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2244 }
2245
2246 /// Return true if this shuffle mask is an extract subvector mask.
2247 /// A valid extract subvector mask returns a smaller vector from a single
2248 /// source operand. The base extraction index is returned as well.
2249 LLVM_ABI static bool isExtractSubvectorMask(ArrayRef<int> Mask,
2250 int NumSrcElts, int &Index);
2251 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2252 int &Index) {
2253 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2254 // Not possible to express a shuffle mask for a scalable vector for this
2255 // case.
2256 if (isa<ScalableVectorType>(Mask->getType()))
2257 return false;
2258 SmallVector<int, 16> MaskAsInts;
2259 getShuffleMask(Mask, MaskAsInts);
2260 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2261 }
2262
2263 /// Return true if this shuffle mask is an extract subvector mask.
2265 // Not possible to express a shuffle mask for a scalable vector for this
2266 // case.
2267 if (isa<ScalableVectorType>(getType()))
2268 return false;
2269
2270 int NumSrcElts =
2271 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2272 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2273 }
2274
2275 /// Return true if this shuffle mask is an insert subvector mask.
2276 /// A valid insert subvector mask inserts the lowest elements of a second
2277 /// source operand into an in-place first source operand.
2278 /// Both the sub vector width and the insertion index is returned.
2279 LLVM_ABI static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2280 int &NumSubElts, int &Index);
2281 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2282 int &NumSubElts, int &Index) {
2283 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2284 // Not possible to express a shuffle mask for a scalable vector for this
2285 // case.
2286 if (isa<ScalableVectorType>(Mask->getType()))
2287 return false;
2288 SmallVector<int, 16> MaskAsInts;
2289 getShuffleMask(Mask, MaskAsInts);
2290 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2291 }
2292
2293 /// Return true if this shuffle mask is an insert subvector mask.
2294 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2295 // Not possible to express a shuffle mask for a scalable vector for this
2296 // case.
2297 if (isa<ScalableVectorType>(getType()))
2298 return false;
2299
2300 int NumSrcElts =
2301 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2302 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2303 }
2304
2305 /// Return true if this shuffle mask replicates each of the \p VF elements
2306 /// in a vector \p ReplicationFactor times.
2307 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2308 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2309 LLVM_ABI static bool isReplicationMask(ArrayRef<int> Mask,
2310 int &ReplicationFactor, int &VF);
2311 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2312 int &VF) {
2313 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2314 // Not possible to express a shuffle mask for a scalable vector for this
2315 // case.
2316 if (isa<ScalableVectorType>(Mask->getType()))
2317 return false;
2318 SmallVector<int, 16> MaskAsInts;
2319 getShuffleMask(Mask, MaskAsInts);
2320 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2321 }
2322
2323 /// Return true if this shuffle mask is a replication mask.
2324 LLVM_ABI bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2325
  /// Return true if this shuffle mask represents "clustered" mask of size VF,
  /// i.e. each index between [0..VF) is used exactly once in each submask of
  /// size VF.
  /// For example, the mask for \p VF=4 is:
  /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
  /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
  /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
  ///                          element 3 is used twice in the second submask
  ///                          (3,3,1,0) and index 2 is not used at all.
  LLVM_ABI static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);

  /// Return true if this shuffle mask is a one-use-single-source("clustered")
  /// mask.
  LLVM_ABI bool isOneUseSingleSourceMask(int VF) const;
2340
2341 /// Change values in a shuffle permute mask assuming the two vector operands
2342 /// of length InVecNumElts have swapped position.
2344 unsigned InVecNumElts) {
2345 for (int &Idx : Mask) {
2346 if (Idx == -1)
2347 continue;
2348 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2349 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2350 "shufflevector mask index out of range");
2351 }
2352 }
2353
2354 /// Return if this shuffle interleaves its two input vectors together.
2355 LLVM_ABI bool isInterleave(unsigned Factor);
2356
2357 /// Return true if the mask interleaves one or more input vectors together.
2358 ///
2359 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2360 /// E.g. For a Factor of 2 (LaneLen=4):
2361 /// <0, 4, 1, 5, 2, 6, 3, 7>
2362 /// E.g. For a Factor of 3 (LaneLen=4):
2363 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2364 /// E.g. For a Factor of 4 (LaneLen=2):
2365 /// <0, 2, 6, 4, 1, 3, 7, 5>
2366 ///
2367 /// NumInputElts is the total number of elements in the input vectors.
2368 ///
2369 /// StartIndexes are the first indexes of each vector being interleaved,
2370 /// substituting any indexes that were undef
2371 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2372 ///
2373 /// Note that this does not check if the input vectors are consecutive:
2374 /// It will return true for masks such as
2375 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2376 LLVM_ABI static bool
2377 isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2378 SmallVectorImpl<unsigned> &StartIndexes);
2379 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2380 unsigned NumInputElts) {
2381 SmallVector<unsigned, 8> StartIndexes;
2382 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2383 }
2384
2385 /// Check if the mask is a DE-interleave mask of the given factor
2386 /// \p Factor like:
2387 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2388 LLVM_ABI static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2389 unsigned Factor,
2390 unsigned &Index);
2391 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2392 unsigned Unused;
2393 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
2394 }
2395
  /// Checks if the shuffle is a bit rotation of the first operand across
  /// multiple subelements, e.g.:
  ///
  /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
  ///
  /// could be expressed as
  ///
  /// rotl <4 x i16> %a, 8
  ///
  /// If it can be expressed as a rotation, returns the number of subelements to
  /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
  LLVM_ABI static bool isBitRotateMask(ArrayRef<int> Mask,
                                       unsigned EltSizeInBits,
                                       unsigned MinSubElts, unsigned MaxSubElts,
                                       unsigned &NumSubElts,
                                       unsigned &RotateAmt);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2421
2422template <>
2424 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2425
2427
2428//===----------------------------------------------------------------------===//
2429// ExtractValueInst Class
2430//===----------------------------------------------------------------------===//
2431
2432/// This instruction extracts a struct member or array
2433/// element value from an aggregate value.
2434///
2437
2439
2440 /// Constructors - Create a extractvalue instruction with a base aggregate
2441 /// value and a list of indices. The first and second ctor can optionally
2442 /// insert before an existing instruction, the third appends the new
2443 /// instruction to the specified BasicBlock.
2444 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2445 const Twine &NameStr, InsertPosition InsertBefore);
2446
2447 LLVM_ABI void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2448
2449protected:
2450 // Note: Instruction needs to be a friend here to call cloneImpl.
2451 friend class Instruction;
2452
2453 LLVM_ABI ExtractValueInst *cloneImpl() const;
2454
2455public:
2457 const Twine &NameStr = "",
2458 InsertPosition InsertBefore = nullptr) {
2459 return new
2460 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2461 }
2462
2463 /// Returns the type of the element that would be extracted
2464 /// with an extractvalue instruction with the specified parameters.
2465 ///
2466 /// Null is returned if the indices are invalid for the specified type.
2467 LLVM_ABI static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2468
2469 using idx_iterator = const unsigned*;
2470
2471 inline idx_iterator idx_begin() const { return Indices.begin(); }
2472 inline idx_iterator idx_end() const { return Indices.end(); }
2474 return make_range(idx_begin(), idx_end());
2475 }
2476
2478 return getOperand(0);
2479 }
2481 return getOperand(0);
2482 }
2483 static unsigned getAggregateOperandIndex() {
2484 return 0U; // get index for modifying correct operand
2485 }
2486
2488 return Indices;
2489 }
2490
2491 unsigned getNumIndices() const {
2492 return (unsigned)Indices.size();
2493 }
2494
2495 bool hasIndices() const {
2496 return true;
2497 }
2498
2499 // Methods for support type inquiry through isa, cast, and dyn_cast:
2500 static bool classof(const Instruction *I) {
2501 return I->getOpcode() == Instruction::ExtractValue;
2502 }
2503 static bool classof(const Value *V) {
2504 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2505 }
2506};
2507
// Out-of-class definition: the result type is computed from (Agg, Idxs) via
// getIndexedType and validated by checkGEPType before the base
// UnaryInstruction is constructed.
ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   InsertPosition InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}
2515
2516//===----------------------------------------------------------------------===//
2517// InsertValueInst Class
2518//===----------------------------------------------------------------------===//
2519
2520/// This instruction inserts a struct field of array element
2521/// value into an aggregate value.
2522///
2524 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
2525
2527
2528 InsertValueInst(const InsertValueInst &IVI);
2529
2530 /// Constructors - Create a insertvalue instruction with a base aggregate
2531 /// value, a value to insert, and a list of indices. The first and second ctor
2532 /// can optionally insert before an existing instruction, the third appends
2533 /// the new instruction to the specified BasicBlock.
2534 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2535 const Twine &NameStr, InsertPosition InsertBefore);
2536
2537 /// Constructors - These three constructors are convenience methods because
2538 /// one and two index insertvalue instructions are so common.
2539 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2540 const Twine &NameStr = "",
2541 InsertPosition InsertBefore = nullptr);
2542
2543 LLVM_ABI void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2544 const Twine &NameStr);
2545
2546protected:
2547 // Note: Instruction needs to be a friend here to call cloneImpl.
2548 friend class Instruction;
2549
2551
2552public:
2553 // allocate space for exactly two operands
2554 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2555 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2556
2557 static InsertValueInst *Create(Value *Agg, Value *Val,
2558 ArrayRef<unsigned> Idxs,
2559 const Twine &NameStr = "",
2560 InsertPosition InsertBefore = nullptr) {
2561 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2562 }
2563
2564 /// Transparently provide more efficient getOperand methods.
2566
2567 using idx_iterator = const unsigned*;
2568
2569 inline idx_iterator idx_begin() const { return Indices.begin(); }
2570 inline idx_iterator idx_end() const { return Indices.end(); }
2572 return make_range(idx_begin(), idx_end());
2573 }
2574
2576 return getOperand(0);
2577 }
2579 return getOperand(0);
2580 }
2581 static unsigned getAggregateOperandIndex() {
2582 return 0U; // get index for modifying correct operand
2583 }
2584
2586 return getOperand(1);
2587 }
2589 return getOperand(1);
2590 }
2592 return 1U; // get index for modifying correct operand
2593 }
2594
2596 return Indices;
2597 }
2598
2599 unsigned getNumIndices() const {
2600 return (unsigned)Indices.size();
2601 }
2602
2603 bool hasIndices() const {
2604 return true;
2605 }
2606
2607 // Methods for support type inquiry through isa, cast, and dyn_cast:
2608 static bool classof(const Instruction *I) {
2609 return I->getOpcode() == Instruction::InsertValue;
2610 }
2611 static bool classof(const Value *V) {
2612 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2613 }
2614};
2615
2616template <>
2618 public FixedNumOperandTraits<InsertValueInst, 2> {
2619};
2620
2621InsertValueInst::InsertValueInst(Value *Agg, Value *Val,
2622 ArrayRef<unsigned> Idxs, const Twine &NameStr,
2623 InsertPosition InsertBefore)
2624 : Instruction(Agg->getType(), InsertValue, AllocMarker, InsertBefore) {
2625 init(Agg, Val, Idxs, NameStr);
2626}
2627
2628DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2629
2630//===----------------------------------------------------------------------===//
2631// PHINode Class
2632//===----------------------------------------------------------------------===//
2633
2634// PHINode - The PHINode class is used to represent the magical mystical PHI
2635// node, that can not exist in nature, but can be synthesized in a computer
2636// scientist's overactive imagination.
2637//
2638class PHINode : public Instruction {
2639 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2640
2641 /// The number of operands actually allocated. NumOperands is
2642 /// the number actually in use.
2643 unsigned ReservedSpace;
2644
2645 PHINode(const PHINode &PN);
2646
2647 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2648 const Twine &NameStr = "",
2649 InsertPosition InsertBefore = nullptr)
2650 : Instruction(Ty, Instruction::PHI, AllocMarker, InsertBefore),
2651 ReservedSpace(NumReservedValues) {
2652 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2653 setName(NameStr);
2654 allocHungoffUses(ReservedSpace);
2655 }
2656
2657protected:
2658 // Note: Instruction needs to be a friend here to call cloneImpl.
2659 friend class Instruction;
2660
2661 LLVM_ABI PHINode *cloneImpl() const;
2662
2663 // allocHungoffUses - this is more complicated than the generic
2664 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2665 // values and pointers to the incoming blocks, all in one allocation.
2666 void allocHungoffUses(unsigned N) {
2667 User::allocHungoffUses(N, /* IsPhi */ true);
2668 }
2669
2670public:
2671 /// Constructors - NumReservedValues is a hint for the number of incoming
2672 /// edges that this phi node will have (use 0 if you really have no idea).
2673 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2674 const Twine &NameStr = "",
2675 InsertPosition InsertBefore = nullptr) {
2676 return new (AllocMarker)
2677 PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2678 }
2679
2680 /// Provide fast operand accessors
2682
2683 // Block iterator interface. This provides access to the list of incoming
2684 // basic blocks, which parallels the list of incoming values.
2685 // Please note that we are not providing non-const iterators for blocks to
2686 // force all updates go through an interface function.
2687
2690
2692 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2693 }
2694
2696 return block_begin() + getNumOperands();
2697 }
2698
2700 return make_range(block_begin(), block_end());
2701 }
2702
2703 op_range incoming_values() { return operands(); }
2704
2705 const_op_range incoming_values() const { return operands(); }
2706
2707 /// Return the number of incoming edges
2708 ///
2709 unsigned getNumIncomingValues() const { return getNumOperands(); }
2710
2711 /// Return incoming value number x
2712 ///
2713 Value *getIncomingValue(unsigned i) const {
2714 return getOperand(i);
2715 }
2716 void setIncomingValue(unsigned i, Value *V) {
2717 assert(V && "PHI node got a null value!");
2718 assert(getType() == V->getType() &&
2719 "All operands to PHI node must be the same type as the PHI node!");
2720 setOperand(i, V);
2721 }
2722
2723 static unsigned getOperandNumForIncomingValue(unsigned i) {
2724 return i;
2725 }
2726
2727 static unsigned getIncomingValueNumForOperand(unsigned i) {
2728 return i;
2729 }
2730
2731 /// Return incoming basic block number @p i.
2732 ///
2733 BasicBlock *getIncomingBlock(unsigned i) const {
2734 return block_begin()[i];
2735 }
2736
2737 /// Return incoming basic block corresponding
2738 /// to an operand of the PHI.
2739 ///
2741 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2742 return getIncomingBlock(unsigned(&U - op_begin()));
2743 }
2744
2745 /// Return incoming basic block corresponding
2746 /// to value use iterator.
2747 ///
2749 return getIncomingBlock(I.getUse());
2750 }
2751
2752 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2753 const_cast<block_iterator>(block_begin())[i] = BB;
2754 }
2755
2756 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2757 /// of this PHINode, starting at \p ToIdx.
2759 uint32_t ToIdx = 0) {
2760 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2761 }
2762
2763 /// Replace every incoming basic block \p Old to basic block \p New.
2765 assert(New && Old && "PHI node got a null basic block!");
2766 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2767 if (getIncomingBlock(Op) == Old)
2768 setIncomingBlock(Op, New);
2769 }
2770
2771 /// Add an incoming value to the end of the PHI list
2772 ///
2774 if (getNumOperands() == ReservedSpace)
2775 growOperands(); // Get more space!
2776 // Initialize some new operands.
2777 setNumHungOffUseOperands(getNumOperands() + 1);
2778 setIncomingValue(getNumOperands() - 1, V);
2779 setIncomingBlock(getNumOperands() - 1, BB);
2780 }
2781
2782 /// Remove an incoming value. This is useful if a
2783 /// predecessor basic block is deleted. The value removed is returned.
2784 ///
2785 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2786 /// is true), the PHI node is destroyed and any uses of it are replaced with
2787 /// dummy values. The only time there should be zero incoming values to a PHI
2788 /// node is when the block is dead, so this strategy is sound.
2789 ///
2790 LLVM_ABI Value *removeIncomingValue(unsigned Idx,
2791 bool DeletePHIIfEmpty = true);
2792
2793 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2794 int Idx = getBasicBlockIndex(BB);
2795 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2796 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2797 }
2798
2799 /// Remove all incoming values for which the predicate returns true.
2800 /// The predicate accepts the incoming value index.
2801 LLVM_ABI void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2802 bool DeletePHIIfEmpty = true);
2803
2804 /// Return the first index of the specified basic
2805 /// block in the value list for this PHI. Returns -1 if no instance.
2806 ///
2807 int getBasicBlockIndex(const BasicBlock *BB) const {
2808 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2809 if (block_begin()[i] == BB)
2810 return i;
2811 return -1;
2812 }
2813
2815 int Idx = getBasicBlockIndex(BB);
2816 assert(Idx >= 0 && "Invalid basic block argument!");
2817 return getIncomingValue(Idx);
2818 }
2819
2820 /// Set every incoming value(s) for block \p BB to \p V.
2822 assert(BB && "PHI node got a null basic block!");
2823 bool Found = false;
2824 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2825 if (getIncomingBlock(Op) == BB) {
2826 Found = true;
2827 setIncomingValue(Op, V);
2828 }
2829 (void)Found;
2830 assert(Found && "Invalid basic block argument to set!");
2831 }
2832
2833 /// If the specified PHI node always merges together the
2834 /// same value, return the value, otherwise return null.
2835 LLVM_ABI Value *hasConstantValue() const;
2836
2837 /// Whether the specified PHI node always merges
2838 /// together the same value, assuming undefs are equal to a unique
2839 /// non-undef value.
2840 LLVM_ABI bool hasConstantOrUndefValue() const;
2841
2842 /// If the PHI node is complete which means all of its parent's predecessors
2843 /// have incoming value in this PHI, return true, otherwise return false.
2844 bool isComplete() const {
2846 [this](const BasicBlock *Pred) {
2847 return getBasicBlockIndex(Pred) >= 0;
2848 });
2849 }
2850
2851 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2852 static bool classof(const Instruction *I) {
2853 return I->getOpcode() == Instruction::PHI;
2854 }
2855 static bool classof(const Value *V) {
2856 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2857 }
2858
2859private:
2860 LLVM_ABI void growOperands();
2861};
2862
2863template <> struct OperandTraits<PHINode> : public HungoffOperandTraits {};
2864
2866
2867//===----------------------------------------------------------------------===//
2868// LandingPadInst Class
2869//===----------------------------------------------------------------------===//
2870
2871//===---------------------------------------------------------------------------
2872/// The landingpad instruction holds all of the information
2873/// necessary to generate correct exception handling. The landingpad instruction
2874/// cannot be moved from the top of a landing pad block, which itself is
2875/// accessible only from the 'unwind' edge of an invoke. This uses the
2876/// SubclassData field in Value to store whether or not the landingpad is a
2877/// cleanup.
2878///
2880 using CleanupField = BoolBitfieldElementT<0>;
2881
2882 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2883
2884 /// The number of operands actually allocated. NumOperands is
2885 /// the number actually in use.
2886 unsigned ReservedSpace;
2887
2888 LandingPadInst(const LandingPadInst &LP);
2889
2890public:
2892
2893private:
2894 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2895 const Twine &NameStr, InsertPosition InsertBefore);
2896
2897 // Allocate space for exactly zero operands.
2898 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2899
2900 LLVM_ABI void growOperands(unsigned Size);
2901 void init(unsigned NumReservedValues, const Twine &NameStr);
2902
2903protected:
2904 // Note: Instruction needs to be a friend here to call cloneImpl.
2905 friend class Instruction;
2906
2907 LLVM_ABI LandingPadInst *cloneImpl() const;
2908
2909public:
2910 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2911
2912 /// Constructors - NumReservedClauses is a hint for the number of incoming
2913 /// clauses that this landingpad will have (use 0 if you really have no idea).
2914 LLVM_ABI static LandingPadInst *Create(Type *RetTy,
2915 unsigned NumReservedClauses,
2916 const Twine &NameStr = "",
2917 InsertPosition InsertBefore = nullptr);
2918
2919 /// Provide fast operand accessors
2921
2922 /// Return 'true' if this landingpad instruction is a
2923 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2924 /// doesn't catch the exception.
2925 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2926
2927 /// Indicate that this landingpad instruction is a cleanup.
2928 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2929
2930 /// Add a catch or filter clause to the landing pad.
2931 LLVM_ABI void addClause(Constant *ClauseVal);
2932
2933 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2934 /// determine what type of clause this is.
2935 Constant *getClause(unsigned Idx) const {
2936 return cast<Constant>(getOperandList()[Idx]);
2937 }
2938
2939 /// Return 'true' if the clause and index Idx is a catch clause.
2940 bool isCatch(unsigned Idx) const {
2941 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2942 }
2943
2944 /// Return 'true' if the clause and index Idx is a filter clause.
2945 bool isFilter(unsigned Idx) const {
2946 return isa<ArrayType>(getOperandList()[Idx]->getType());
2947 }
2948
2949 /// Get the number of clauses for this landing pad.
2950 unsigned getNumClauses() const { return getNumOperands(); }
2951
2952 /// Grow the size of the operand list to accommodate the new
2953 /// number of clauses.
2954 void reserveClauses(unsigned Size) { growOperands(Size); }
2955
2956 // Methods for support type inquiry through isa, cast, and dyn_cast:
2957 static bool classof(const Instruction *I) {
2958 return I->getOpcode() == Instruction::LandingPad;
2959 }
2960 static bool classof(const Value *V) {
2961 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2962 }
2963};
2964
2965template <>
2967
2969
2970//===----------------------------------------------------------------------===//
2971// ReturnInst Class
2972//===----------------------------------------------------------------------===//
2973
2974//===---------------------------------------------------------------------------
2975/// Return a value (possibly void), from a function. Execution
2976/// does not continue in this function any longer.
2977///
2978class ReturnInst : public Instruction {
2980
2981private:
2982 // ReturnInst constructors:
2983 // ReturnInst() - 'ret void' instruction
2984 // ReturnInst( null) - 'ret void' instruction
2985 // ReturnInst(Value* X) - 'ret X' instruction
2986 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
2987 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
2988 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2989 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2990 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2991 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2992 //
2993 // NOTE: If the Value* passed is of type void then the constructor behaves as
2994 // if it was passed NULL.
2995 LLVM_ABI explicit ReturnInst(LLVMContext &C, Value *retVal,
2997 InsertPosition InsertBefore);
2998
2999protected:
3000 // Note: Instruction needs to be a friend here to call cloneImpl.
3001 friend class Instruction;
3002
3003 LLVM_ABI ReturnInst *cloneImpl() const;
3004
3005public:
3006 static ReturnInst *Create(LLVMContext &C, Value *retVal = nullptr,
3007 InsertPosition InsertBefore = nullptr) {
3008 IntrusiveOperandsAllocMarker AllocMarker{retVal ? 1U : 0U};
3009 return new (AllocMarker) ReturnInst(C, retVal, AllocMarker, InsertBefore);
3010 }
3011
3012 static ReturnInst *Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3013 IntrusiveOperandsAllocMarker AllocMarker{0};
3014 return new (AllocMarker) ReturnInst(C, nullptr, AllocMarker, InsertAtEnd);
3015 }
3016
3017 /// Provide fast operand accessors
3019
3020 /// Convenience accessor. Returns null if there is no return value.
3022 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3023 }
3024
3025 unsigned getNumSuccessors() const { return 0; }
3026
3027 // Methods for support type inquiry through isa, cast, and dyn_cast:
3028 static bool classof(const Instruction *I) {
3029 return (I->getOpcode() == Instruction::Ret);
3030 }
3031 static bool classof(const Value *V) {
3032 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3033 }
3034
3035private:
3036 BasicBlock *getSuccessor(unsigned idx) const {
3037 llvm_unreachable("ReturnInst has no successors!");
3038 }
3039
3040 void setSuccessor(unsigned idx, BasicBlock *B) {
3041 llvm_unreachable("ReturnInst has no successors!");
3042 }
3043};
3044
3045template <>
3046struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {};
3047
3049
3050//===----------------------------------------------------------------------===//
3051// BranchInst Class
3052//===----------------------------------------------------------------------===//
3053
3054//===---------------------------------------------------------------------------
3055/// Conditional or Unconditional Branch instruction.
3056///
3057class BranchInst : public Instruction {
3058 /// Ops list - Branches are strange. The operands are ordered:
3059 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3060 /// they don't have to check for cond/uncond branchness. These are mostly
3061 /// accessed relative from op_end().
3063 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3064 // BranchInst(BB *B) - 'br B'
3065 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3066 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3067 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3068 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3069 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3070 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3071 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3073 InsertPosition InsertBefore);
3074 LLVM_ABI BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3075 AllocInfo AllocInfo, InsertPosition InsertBefore);
3076
3077 void AssertOK();
3078
3079protected:
3080 // Note: Instruction needs to be a friend here to call cloneImpl.
3081 friend class Instruction;
3082
3083 LLVM_ABI BranchInst *cloneImpl() const;
3084
3085public:
3086 /// Iterator type that casts an operand to a basic block.
3087 ///
3088 /// This only makes sense because the successors are stored as adjacent
3089 /// operands for branch instructions.
3091 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3092 std::random_access_iterator_tag, BasicBlock *,
3093 ptrdiff_t, BasicBlock *, BasicBlock *> {
3095
3096 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3097 BasicBlock *operator->() const { return operator*(); }
3098 };
3099
3100 /// The const version of `succ_op_iterator`.
3102 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3103 std::random_access_iterator_tag,
3104 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3105 const BasicBlock *> {
3108
3109 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3110 const BasicBlock *operator->() const { return operator*(); }
3111 };
3112
3114 InsertPosition InsertBefore = nullptr) {
3115 IntrusiveOperandsAllocMarker AllocMarker{1};
3116 return new (AllocMarker) BranchInst(IfTrue, AllocMarker, InsertBefore);
3117 }
3118
3119 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3120 Value *Cond,
3121 InsertPosition InsertBefore = nullptr) {
3122 IntrusiveOperandsAllocMarker AllocMarker{3};
3123 return new (AllocMarker)
3124 BranchInst(IfTrue, IfFalse, Cond, AllocMarker, InsertBefore);
3125 }
3126
3127 /// Transparently provide more efficient getOperand methods.
3129
3130 bool isUnconditional() const { return getNumOperands() == 1; }
3131 bool isConditional() const { return getNumOperands() == 3; }
3132
3134 assert(isConditional() && "Cannot get condition of an uncond branch!");
3135 return Op<-3>();
3136 }
3137
3139 assert(isConditional() && "Cannot set condition of unconditional branch!");
3140 Op<-3>() = V;
3141 }
3142
3143 unsigned getNumSuccessors() const { return 1+isConditional(); }
3144
3145 BasicBlock *getSuccessor(unsigned i) const {
3146 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3147 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3148 }
3149
3150 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3151 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3152 *(&Op<-1>() - idx) = NewSucc;
3153 }
3154
3155 /// Swap the successors of this branch instruction.
3156 ///
3157 /// Swaps the successors of the branch instruction. This also swaps any
3158 /// branch weight metadata associated with the instruction so that it
3159 /// continues to map correctly to each operand.
3160 LLVM_ABI void swapSuccessors();
3161
3163 return make_range(
3164 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3165 succ_op_iterator(value_op_end()));
3166 }
3167
3170 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3171 const_succ_op_iterator(value_op_end()));
3172 }
3173
3174 // Methods for support type inquiry through isa, cast, and dyn_cast:
3175 static bool classof(const Instruction *I) {
3176 return (I->getOpcode() == Instruction::Br);
3177 }
3178 static bool classof(const Value *V) {
3179 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3180 }
3181};
3182
3183template <>
3184struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst> {};
3185
3187
3188//===----------------------------------------------------------------------===//
3189// SwitchInst Class
3190//===----------------------------------------------------------------------===//
3191
3192//===---------------------------------------------------------------------------
3193/// Multiway switch
3194///
3195class SwitchInst : public Instruction {
3196 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3197
3198 unsigned ReservedSpace;
3199
3200 // Operand[0] = Value to switch on
3201 // Operand[1] = Default basic block destination
3202 // Operand[2n ] = Value to match
3203 // Operand[2n+1] = BasicBlock to go to on match
3204 SwitchInst(const SwitchInst &SI);
3205
3206 /// Create a new switch instruction, specifying a value to switch on and a
3207 /// default destination. The number of additional cases can be specified here
3208 /// to make memory allocation more efficient. This constructor can also
3209 /// auto-insert before another instruction.
3210 LLVM_ABI SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3211 InsertPosition InsertBefore);
3212
3213 // allocate space for exactly zero operands
3214 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3215
3216 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3217 void growOperands();
3218
3219protected:
3220 // Note: Instruction needs to be a friend here to call cloneImpl.
3221 friend class Instruction;
3222
3223 LLVM_ABI SwitchInst *cloneImpl() const;
3224
3225public:
3226 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3227
3228 // -2
3229 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3230
3231 template <typename CaseHandleT> class CaseIteratorImpl;
3232
3233 /// A handle to a particular switch case. It exposes a convenient interface
3234 /// to both the case value and the successor block.
3235 ///
3236 /// We define this as a template and instantiate it to form both a const and
3237 /// non-const handle.
3238 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3240 // Directly befriend both const and non-const iterators.
3241 friend class SwitchInst::CaseIteratorImpl<
3242 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3243
3244 protected:
3245 // Expose the switch type we're parameterized with to the iterator.
3246 using SwitchInstType = SwitchInstT;
3247
3248 SwitchInstT *SI;
3250
3251 CaseHandleImpl() = default;
3253
3254 public:
3255 /// Resolves case value for current case.
3256 ConstantIntT *getCaseValue() const {
3257 assert((unsigned)Index < SI->getNumCases() &&
3258 "Index out the number of cases.");
3259 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3260 }
3261
3262 /// Resolves successor for current case.
3263 BasicBlockT *getCaseSuccessor() const {
3264 assert(((unsigned)Index < SI->getNumCases() ||
3265 (unsigned)Index == DefaultPseudoIndex) &&
3266 "Index out the number of cases.");
3267 return SI->getSuccessor(getSuccessorIndex());
3268 }
3269
3270 /// Returns number of current case.
3271 unsigned getCaseIndex() const { return Index; }
3272
3273 /// Returns successor index for current case successor.
3274 unsigned getSuccessorIndex() const {
3275 assert(((unsigned)Index == DefaultPseudoIndex ||
3276 (unsigned)Index < SI->getNumCases()) &&
3277 "Index out the number of cases.");
3278 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3279 }
3280
3281 bool operator==(const CaseHandleImpl &RHS) const {
3282 assert(SI == RHS.SI && "Incompatible operators.");
3283 return Index == RHS.Index;
3284 }
3285 };
3286
3289
3291 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3293
3294 public:
3296
3297 /// Sets the new value for current case.
3298 void setValue(ConstantInt *V) const {
3299 assert((unsigned)Index < SI->getNumCases() &&
3300 "Index out the number of cases.");
3301 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3302 }
3303
3304 /// Sets the new successor for current case.
3305 void setSuccessor(BasicBlock *S) const {
3306 SI->setSuccessor(getSuccessorIndex(), S);
3307 }
3308 };
3309
3310 template <typename CaseHandleT>
3312 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3313 std::random_access_iterator_tag,
3314 const CaseHandleT> {
3315 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3316
3317 CaseHandleT Case;
3318
3319 public:
3320 /// Default constructed iterator is in an invalid state until assigned to
3321 /// a case for a particular switch.
3322 CaseIteratorImpl() = default;
3323
3324 /// Initializes case iterator for given SwitchInst and for given
3325 /// case number.
3326 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3327
3328 /// Initializes case iterator for given SwitchInst and for given
3329 /// successor index.
3331 unsigned SuccessorIndex) {
3332 assert(SuccessorIndex < SI->getNumSuccessors() &&
3333 "Successor index # out of range!");
3334 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3335 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3336 }
3337
3338 /// Support converting to the const variant. This will be a no-op for const
3339 /// variant.
3341 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3342 }
3343
3345 // Check index correctness after addition.
3346 // Note: Index == getNumCases() means end().
3347 assert(Case.Index + N >= 0 &&
3348 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3349 "Case.Index out the number of cases.");
3350 Case.Index += N;
3351 return *this;
3352 }
3354 // Check index correctness after subtraction.
3355 // Note: Case.Index == getNumCases() means end().
3356 assert(Case.Index - N >= 0 &&
3357 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3358 "Case.Index out the number of cases.");
3359 Case.Index -= N;
3360 return *this;
3361 }
3363 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3364 return Case.Index - RHS.Case.Index;
3365 }
3366 bool operator==(const CaseIteratorImpl &RHS) const {
3367 return Case == RHS.Case;
3368 }
3369 bool operator<(const CaseIteratorImpl &RHS) const {
3370 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3371 return Case.Index < RHS.Case.Index;
3372 }
3373 const CaseHandleT &operator*() const { return Case; }
3374 };
3375
3378
3380 unsigned NumCases,
3381 InsertPosition InsertBefore = nullptr) {
3382 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3383 }
3384
3385 /// Provide fast operand accessors
3387
3388 // Accessor Methods for Switch stmt
3389 Value *getCondition() const { return getOperand(0); }
3390 void setCondition(Value *V) { setOperand(0, V); }
3391
3393 return cast<BasicBlock>(getOperand(1));
3394 }
3395
3396 /// Returns true if the default branch must result in immediate undefined
3397 /// behavior, false otherwise.
3399 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3400 }
3401
3402 void setDefaultDest(BasicBlock *DefaultCase) {
3403 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3404 }
3405
3406 /// Return the number of 'cases' in this switch instruction, excluding the
3407 /// default case.
3408 unsigned getNumCases() const {
3409 return getNumOperands()/2 - 1;
3410 }
3411
3412 /// Returns a read/write iterator that points to the first case in the
3413 /// SwitchInst.
3415 return CaseIt(this, 0);
3416 }
3417
3418 /// Returns a read-only iterator that points to the first case in the
3419 /// SwitchInst.
3421 return ConstCaseIt(this, 0);
3422 }
3423
3424 /// Returns a read/write iterator that points one past the last in the
3425 /// SwitchInst.
3427 return CaseIt(this, getNumCases());
3428 }
3429
3430 /// Returns a read-only iterator that points one past the last in the
3431 /// SwitchInst.
3433 return ConstCaseIt(this, getNumCases());
3434 }
3435
3436 /// Iteration adapter for range-for loops.
3438 return make_range(case_begin(), case_end());
3439 }
3440
3441 /// Constant iteration adapter for range-for loops.
3443 return make_range(case_begin(), case_end());
3444 }
3445
3446 /// Returns an iterator that points to the default case.
3447 /// Note: this iterator allows to resolve successor only. Attempt
3448 /// to resolve case value causes an assertion.
3449 /// Also note, that increment and decrement also causes an assertion and
3450 /// makes iterator invalid.
3452 return CaseIt(this, DefaultPseudoIndex);
3453 }
3455 return ConstCaseIt(this, DefaultPseudoIndex);
3456 }
3457
3458 /// Search all of the case values for the specified constant. If it is
3459 /// explicitly handled, return the case iterator of it, otherwise return
3460 /// default case iterator to indicate that it is handled by the default
3461 /// handler.
3463 return CaseIt(
3464 this,
3465 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3466 }
3468 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3469 return Case.getCaseValue() == C;
3470 });
3471 if (I != case_end())
3472 return I;
3473
3474 return case_default();
3475 }
3476
3477 /// Finds the unique case value for a given successor. Returns null if the
3478 /// successor is not found, not unique, or is the default case.
3480 if (BB == getDefaultDest())
3481 return nullptr;
3482
3483 ConstantInt *CI = nullptr;
3484 for (auto Case : cases()) {
3485 if (Case.getCaseSuccessor() != BB)
3486 continue;
3487
3488 if (CI)
3489 return nullptr; // Multiple cases lead to BB.
3490
3491 CI = Case.getCaseValue();
3492 }
3493
3494 return CI;
3495 }
3496
3497 /// Add an entry to the switch instruction.
3498 /// Note:
3499 /// This action invalidates case_end(). Old case_end() iterator will
3500 /// point to the added case.
3501 LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3502
3503 /// This method removes the specified case and its successor from the switch
3504 /// instruction. Note that this operation may reorder the remaining cases at
3505 /// index idx and above.
3506 /// Note:
3507 /// This action invalidates iterators for all cases following the one removed,
3508 /// including the case_end() iterator. It returns an iterator for the next
3509 /// case.
3510 LLVM_ABI CaseIt removeCase(CaseIt I);
3511
3512 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3513 BasicBlock *getSuccessor(unsigned idx) const {
3514 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3515 return cast<BasicBlock>(getOperand(idx*2+1));
3516 }
3517 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3518 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3519 setOperand(idx * 2 + 1, NewSucc);
3520 }
3521
3522 // Methods for support type inquiry through isa, cast, and dyn_cast:
3523 static bool classof(const Instruction *I) {
3524 return I->getOpcode() == Instruction::Switch;
3525 }
3526 static bool classof(const Value *V) {
3527 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3528 }
3529};
3530
3531/// A wrapper class to simplify modification of SwitchInst cases along with
3532/// their prof branch_weights metadata.
3534 SwitchInst &SI;
3535 std::optional<SmallVector<uint32_t, 8>> Weights;
3536 bool Changed = false;
3537
3538protected:
3540
3541 LLVM_ABI void init();
3542
3543public:
3544 using CaseWeightOpt = std::optional<uint32_t>;
3545 SwitchInst *operator->() { return &SI; }
3546 SwitchInst &operator*() { return SI; }
3547 operator SwitchInst *() { return &SI; }
3548
3550
3552 if (Changed)
3553 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3554 }
3555
3556 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3557 /// correspondent branch weight.
3559
3560 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3561 /// specified branch weight for the added case.
3562 LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3563
3564 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3565 /// this object to not touch the underlying SwitchInst in destructor.
3567
3568 LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3570
3572 unsigned idx);
3573};
3574
3575template <> struct OperandTraits<SwitchInst> : public HungoffOperandTraits {};
3576
3578
3579//===----------------------------------------------------------------------===//
3580// IndirectBrInst Class
3581//===----------------------------------------------------------------------===//
3582
3583//===---------------------------------------------------------------------------
3584/// Indirect Branch Instruction.
3585///
3587 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3588
3589 unsigned ReservedSpace;
3590
3591 // Operand[0] = Address to jump to
3592 // Operand[n+1] = n-th destination
3593 IndirectBrInst(const IndirectBrInst &IBI);
3594
3595 /// Create a new indirectbr instruction, specifying an
3596 /// Address to jump to. The number of expected destinations can be specified
3597 /// here to make memory allocation more efficient. This constructor can also
3598 /// autoinsert before another instruction.
3599 LLVM_ABI IndirectBrInst(Value *Address, unsigned NumDests,
3600 InsertPosition InsertBefore);
3601
3602 // allocate space for exactly zero operands
3603 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3604
3605 void init(Value *Address, unsigned NumDests);
3606 void growOperands();
3607
3608protected:
3609 // Note: Instruction needs to be a friend here to call cloneImpl.
3610 friend class Instruction;
3611
3612 LLVM_ABI IndirectBrInst *cloneImpl() const;
3613
3614public:
3615 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3616
3617 /// Iterator type that casts an operand to a basic block.
3618 ///
3619 /// This only makes sense because the successors are stored as adjacent
3620 /// operands for indirectbr instructions.
3622 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3623 std::random_access_iterator_tag, BasicBlock *,
3624 ptrdiff_t, BasicBlock *, BasicBlock *> {
3626
3627 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3628 BasicBlock *operator->() const { return operator*(); }
3629 };
3630
3631 /// The const version of `succ_op_iterator`.
3633 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3634 std::random_access_iterator_tag,
3635 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3636 const BasicBlock *> {
3639
3640 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3641 const BasicBlock *operator->() const { return operator*(); }
3642 };
3643
3644 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3645 InsertPosition InsertBefore = nullptr) {
3646 return new IndirectBrInst(Address, NumDests, InsertBefore);
3647 }
3648
3649 /// Provide fast operand accessors.
3651
3652 // Accessor Methods for IndirectBrInst instruction.
3653 Value *getAddress() { return getOperand(0); }
3654 const Value *getAddress() const { return getOperand(0); }
3655 void setAddress(Value *V) { setOperand(0, V); }
3656
3657 /// return the number of possible destinations in this
3658 /// indirectbr instruction.
3659 unsigned getNumDestinations() const { return getNumOperands()-1; }
3660
3661 /// Return the specified destination.
3662 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3663 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3664
3665 /// Add a destination.
3666 ///
3667 LLVM_ABI void addDestination(BasicBlock *Dest);
3668
3669 /// This method removes the specified successor from the
3670 /// indirectbr instruction.
3671 LLVM_ABI void removeDestination(unsigned i);
3672
3673 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3674 BasicBlock *getSuccessor(unsigned i) const {
3675 return cast<BasicBlock>(getOperand(i+1));
3676 }
3677 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3678 setOperand(i + 1, NewSucc);
3679 }
3680
3682 return make_range(succ_op_iterator(std::next(value_op_begin())),
3683 succ_op_iterator(value_op_end()));
3684 }
3685
3687 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3688 const_succ_op_iterator(value_op_end()));
3689 }
3690
3691 // Methods for support type inquiry through isa, cast, and dyn_cast:
3692 static bool classof(const Instruction *I) {
3693 return I->getOpcode() == Instruction::IndirectBr;
3694 }
3695 static bool classof(const Value *V) {
3696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3697 }
3698};
3699
3700template <>
3702
3704
3705//===----------------------------------------------------------------------===//
3706// InvokeInst Class
3707//===----------------------------------------------------------------------===//
3708
3709/// Invoke instruction. The SubclassData field is used to hold the
3710/// calling convention of the call.
3711///
3712class InvokeInst : public CallBase {
3713 /// The number of operands for this call beyond the called function,
3714 /// arguments, and operand bundles.
3715 static constexpr int NumExtraOperands = 2;
3716
3717 /// The index from the end of the operand array to the normal destination.
3718 static constexpr int NormalDestOpEndIdx = -3;
3719
3720 /// The index from the end of the operand array to the unwind destination.
3721 static constexpr int UnwindDestOpEndIdx = -2;
3722
 // NOTE(review): a private declaration (presumably the copy constructor used
 // by cloneImpl) appears to have been lost in extraction here — TODO confirm
 // against upstream.
3724
3725 /// Construct an InvokeInst given a range of arguments.
3726 ///
3727 /// Construct an InvokeInst from a range of arguments
3728 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3729 BasicBlock *IfException, ArrayRef<Value *> Args,
 // NOTE(review): a parameter line is missing from this declaration; the
 // out-of-line definition below also takes the bundle list and an AllocInfo.
3731 const Twine &NameStr, InsertPosition InsertBefore);
3732
3733 LLVM_ABI void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3734 BasicBlock *IfException, ArrayRef<Value *> Args,
3735 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3736
3737 /// Compute the number of operands to allocate.
3738 static unsigned ComputeNumOperands(unsigned NumArgs,
3739 size_t NumBundleInputs = 0) {
3740 // We need one operand for the called function, plus our extra operands and
3741 // the input operand counts provided.
3742 return 1 + NumExtraOperands + NumArgs + unsigned(NumBundleInputs);
3743 }
3744
3745protected:
3746 // Note: Instruction needs to be a friend here to call cloneImpl.
3747 friend class Instruction;
3748
3749 LLVM_ABI InvokeInst *cloneImpl() const;
3750
3751public:
 // Create an invoke with no operand bundles; operand storage is allocated
 // inline with the instruction (placement new on the alloc marker).
3752 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3753 BasicBlock *IfException, ArrayRef<Value *> Args,
3754 const Twine &NameStr,
3755 InsertPosition InsertBefore = nullptr) {
3756 IntrusiveOperandsAllocMarker AllocMarker{
3757 ComputeNumOperands(unsigned(Args.size()))};
3758 return new (AllocMarker) InvokeInst(Ty, Func, IfNormal, IfException, Args,
3759 {}, AllocMarker, NameStr, InsertBefore);
3760 }
3761
 // Create an invoke with operand bundles; additionally reserves descriptor
 // space for one BundleOpInfo per bundle.
3762 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3763 BasicBlock *IfException, ArrayRef<Value *> Args,
3764 ArrayRef<OperandBundleDef> Bundles = {},
3765 const Twine &NameStr = "",
3766 InsertPosition InsertBefore = nullptr) {
3767 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3768 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)),
3769 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3770
3771 return new (AllocMarker)
3772 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, AllocMarker,
3773 NameStr, InsertBefore);
3774 }
3775
 // NOTE(review): the first line of the two FunctionCallee-based Create
 // overloads was lost in extraction; each forwards to the FunctionType
 // overloads above via Func.getFunctionType()/Func.getCallee().
3777 BasicBlock *IfException, ArrayRef<Value *> Args,
3778 const Twine &NameStr,
3779 InsertPosition InsertBefore = nullptr) {
3780 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3781 IfException, Args, {}, NameStr, InsertBefore);
3782 }
3783
3785 BasicBlock *IfException, ArrayRef<Value *> Args,
3786 ArrayRef<OperandBundleDef> Bundles = {},
3787 const Twine &NameStr = "",
3788 InsertPosition InsertBefore = nullptr) {
3789 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3790 IfException, Args, Bundles, NameStr, InsertBefore);
3791 }
3792
3793 /// Create a clone of \p II with a different set of operand bundles and
3794 /// insert it before \p InsertBefore.
3795 ///
3796 /// The returned invoke instruction is identical to \p II in every way except
3797 /// that the operand bundles for the new instruction are set to the operand
3798 /// bundles in \p Bundles.
3799 LLVM_ABI static InvokeInst *Create(InvokeInst *II,
3800 ArrayRef<OperandBundleDef> Bundles,
3801 InsertPosition InsertPt = nullptr);
3802
3803 // get*Dest - Return the destination basic blocks...
 // NOTE(review): the four accessor signatures below (getNormalDest,
 // getUnwindDest, setNormalDest, setUnwindDest — the names are fixed by the
 // calls in getSuccessor/setSuccessor) were lost in extraction; the bodies
 // read/write the operand slots at NormalDestOpEndIdx/UnwindDestOpEndIdx.
3805 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3806 }
3808 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3809 }
3811 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3812 }
3814 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3815 }
3816
3817 /// Get the landingpad instruction from the landing pad
3818 /// block (the unwind destination).
3819 LLVM_ABI LandingPadInst *getLandingPadInst() const;
3820
 // Successor 0 is the normal destination, successor 1 the unwind destination.
3821 BasicBlock *getSuccessor(unsigned i) const {
3822 assert(i < 2 && "Successor # out of range for invoke!");
3823 return i == 0 ? getNormalDest() : getUnwindDest();
3824 }
3825
3826 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3827 assert(i < 2 && "Successor # out of range for invoke!");
3828 if (i == 0)
3829 setNormalDest(NewSucc);
3830 else
3831 setUnwindDest(NewSucc);
3832 }
3833
3834 unsigned getNumSuccessors() const { return 2; }
3835
3836 /// Updates profile metadata by scaling it by \p S / \p T.
3837 LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T);
3838
3839 // Methods for support type inquiry through isa, cast, and dyn_cast:
3840 static bool classof(const Instruction *I) {
3841 return (I->getOpcode() == Instruction::Invoke);
3842 }
3843 static bool classof(const Value *V) {
3844 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3845 }
3846
3847private:
3848 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3849 // method so that subclasses cannot accidentally use it.
3850 template <typename Bitfield>
3851 void setSubclassData(typename Bitfield::Type Value) {
3852 Instruction::setSubclassData<Bitfield>(Value);
3853 }
3854};
3855
3856InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3857 BasicBlock *IfException, ArrayRef<Value *> Args,
3858 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3859 const Twine &NameStr, InsertPosition InsertBefore)
3860 : CallBase(Ty->getReturnType(), Instruction::Invoke, AllocInfo,
3861 InsertBefore) {
3862 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3863}
3864
3865//===----------------------------------------------------------------------===//
3866// CallBrInst Class
3867//===----------------------------------------------------------------------===//
3868
3869/// CallBr instruction, tracking function calls that may not return control but
3870/// instead transfer it to a third location. The SubclassData field is used to
3871/// hold the calling convention of the call.
3872///
3873class CallBrInst : public CallBase {
3874
 // Number of indirect (label) destinations; cached here because the operand
 // layout alone cannot recover it.
3875 unsigned NumIndirectDests;
3876
3878
3879 /// Construct a CallBrInst given a range of arguments.
3880 ///
3881 /// Construct a CallBrInst from a range of arguments
3882 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3883 ArrayRef<BasicBlock *> IndirectDests,
 // NOTE(review): the Args/Bundles parameter line of this declaration was
 // lost in extraction — compare the out-of-line definition below.
3885 AllocInfo AllocInfo, const Twine &NameStr,
3886 InsertPosition InsertBefore);
3887
3888 LLVM_ABI void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3889 ArrayRef<BasicBlock *> IndirectDests,
3891 const Twine &NameStr);
3892
3893 /// Compute the number of operands to allocate.
3894 static unsigned ComputeNumOperands(int NumArgs, int NumIndirectDests,
3895 int NumBundleInputs = 0) {
3896 // We need one operand for the called function, plus our extra operands and
3897 // the input operand counts provided.
 // The "2" covers the callee and the default destination.
3898 return unsigned(2 + NumIndirectDests + NumArgs + NumBundleInputs);
3899 }
3900
3901protected:
3902 // Note: Instruction needs to be a friend here to call cloneImpl.
3903 friend class Instruction;
3904
3905 LLVM_ABI CallBrInst *cloneImpl() const;
3906
3907public:
 // NOTE(review): the head of this Create overload ("static CallBrInst
 // *Create(FunctionType *Ty, Value *Func," per the forwarding call below)
 // was lost in extraction.
3909 BasicBlock *DefaultDest,
3910 ArrayRef<BasicBlock *> IndirectDests,
3911 ArrayRef<Value *> Args, const Twine &NameStr,
3912 InsertPosition InsertBefore = nullptr) {
3913 IntrusiveOperandsAllocMarker AllocMarker{
3914 ComputeNumOperands(Args.size(), IndirectDests.size())};
3915 return new (AllocMarker)
3916 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, {}, AllocMarker,
3917 NameStr, InsertBefore);
3918 }
3919
3920 static CallBrInst *
3921 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3922 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3923 ArrayRef<OperandBundleDef> Bundles = {}, const Twine &NameStr = "",
3924 InsertPosition InsertBefore = nullptr) {
3925 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3926 ComputeNumOperands(Args.size(), IndirectDests.size(),
3927 CountBundleInputs(Bundles)),
3928 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3929
3930 return new (AllocMarker)
3931 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3932 AllocMarker, NameStr, InsertBefore);
3933 }
3934
3935 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3936 ArrayRef<BasicBlock *> IndirectDests,
3937 ArrayRef<Value *> Args, const Twine &NameStr,
3938 InsertPosition InsertBefore = nullptr) {
3939 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3940 IndirectDests, Args, NameStr, InsertBefore);
3941 }
3942
3943 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3944 ArrayRef<BasicBlock *> IndirectDests,
3945 ArrayRef<Value *> Args,
3946 ArrayRef<OperandBundleDef> Bundles = {},
3947 const Twine &NameStr = "",
3948 InsertPosition InsertBefore = nullptr) {
3949 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3950 IndirectDests, Args, Bundles, NameStr, InsertBefore);
3951 }
3952
3953 /// Create a clone of \p CBI with a different set of operand bundles and
3954 /// insert it before \p InsertBefore.
3955 ///
3956 /// The returned callbr instruction is identical to \p CBI in every way
3957 /// except that the operand bundles for the new instruction are set to the
3958 /// operand bundles in \p Bundles.
3959 LLVM_ABI static CallBrInst *Create(CallBrInst *CBI,
3960 ArrayRef<OperandBundleDef> Bundles,
3961 InsertPosition InsertBefore = nullptr);
3962
3963 /// Return the number of callbr indirect dest labels.
3964 ///
3965 unsigned getNumIndirectDests() const { return NumIndirectDests; }
3966
3967 /// getIndirectDestLabel - Return the i-th indirect dest label.
3968 ///
 // Operand layout (front to back): args, bundle operands, default dest,
 // indirect dests, callee — the "+ 1" skips the default destination.
3969 Value *getIndirectDestLabel(unsigned i) const {
3970 assert(i < getNumIndirectDests() && "Out of bounds!");
3971 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
3972 }
3973
3974 Value *getIndirectDestLabelUse(unsigned i) const {
3975 assert(i < getNumIndirectDests() && "Out of bounds!");
3976 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
3977 }
3978
3979 // Return the destination basic blocks...
 // NOTE(review): the signatures of getDefaultDest / getIndirectDests /
 // setDefaultDest were lost in extraction (the names are fixed by the calls
 // in getSuccessor/setSuccessor below). Op<-1> is the callee; the default
 // dest sits just below the indirect dests at the end of the operand array.
3981 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
3982 }
3983 BasicBlock *getIndirectDest(unsigned i) const {
3984 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
3985 }
3987 SmallVector<BasicBlock *, 16> IndirectDests;
3988 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
3989 IndirectDests.push_back(getIndirectDest(i));
3990 return IndirectDests;
3991 }
3993 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
3994 }
3995 void setIndirectDest(unsigned i, BasicBlock *B) {
3996 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
3997 }
3998
3999 BasicBlock *getSuccessor(unsigned i) const {
 // NOTE(review): this bound is one looser than setSuccessor's below
 // (getNumSuccessors() + 1 vs. getNumIndirectDests() + 1 ==
 // getNumSuccessors()), so i == getNumSuccessors() slips past the assert
 // here. Looks like an off-by-one — confirm against upstream before
 // tightening.
4000 assert(i < getNumSuccessors() + 1 &&
4001 "Successor # out of range for callbr!");
4002 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4003 }
4004
4005 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4006 assert(i < getNumIndirectDests() + 1 &&
4007 "Successor # out of range for callbr!");
4008 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4009 }
4010
 // Successor 0 is the default (fallthrough) destination.
4011 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4012
4013 // Methods for support type inquiry through isa, cast, and dyn_cast:
4014 static bool classof(const Instruction *I) {
4015 return (I->getOpcode() == Instruction::CallBr);
4016 }
4017 static bool classof(const Value *V) {
4018 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4019 }
4020
4021private:
4022 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4023 // method so that subclasses cannot accidentally use it.
4024 template <typename Bitfield>
4025 void setSubclassData(typename Bitfield::Type Value) {
4026 Instruction::setSubclassData<Bitfield>(Value);
4027 }
4028};
4029
4030CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4031 ArrayRef<BasicBlock *> IndirectDests,
4032 ArrayRef<Value *> Args,
4033 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
4034 const Twine &NameStr, InsertPosition InsertBefore)
4035 : CallBase(Ty->getReturnType(), Instruction::CallBr, AllocInfo,
4036 InsertBefore) {
4037 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4038}
4039
4040//===----------------------------------------------------------------------===//
4041// ResumeInst Class
4042//===----------------------------------------------------------------------===//
4043
4044//===---------------------------------------------------------------------------
4045/// Resume the propagation of an exception.
4046///
4047class ResumeInst : public Instruction {
 // Exactly one operand: the exception value being propagated.
4048 constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};
4049
4050 ResumeInst(const ResumeInst &RI);
4051
4052 LLVM_ABI explicit ResumeInst(Value *Exn,
4053 InsertPosition InsertBefore = nullptr);
4054
4055protected:
4056 // Note: Instruction needs to be a friend here to call cloneImpl.
4057 friend class Instruction;
4058
4059 LLVM_ABI ResumeInst *cloneImpl() const;
4060
4061public:
4062 static ResumeInst *Create(Value *Exn, InsertPosition InsertBefore = nullptr) {
4063 return new (AllocMarker) ResumeInst(Exn, InsertBefore);
4064 }
4065
4066 /// Provide fast operand accessors
 // NOTE(review): the accessor-declaring macro line that normally follows
 // this comment was lost in extraction — TODO confirm against upstream.
4068
4069 /// Convenience accessor.
4070 Value *getValue() const { return Op<0>(); }
4071
 // resume is a terminator with no successors (control unwinds out).
4072 unsigned getNumSuccessors() const { return 0; }
4073
4074 // Methods for support type inquiry through isa, cast, and dyn_cast:
4075 static bool classof(const Instruction *I) {
4076 return I->getOpcode() == Instruction::Resume;
4077 }
4078 static bool classof(const Value *V) {
4079 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4080 }
4081
4082private:
 // Private + unreachable: a ResumeInst never has indexable successors; these
 // exist only to satisfy the successor interface pattern.
4083 BasicBlock *getSuccessor(unsigned idx) const {
4084 llvm_unreachable("ResumeInst has no successors!");
4085 }
4086
4087 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4088 llvm_unreachable("ResumeInst has no successors!");
4089 }
4090};
4091
 // OperandTraits specialization: ResumeInst has exactly one operand.
 // NOTE(review): the "struct OperandTraits<ResumeInst> :" header line and the
 // transparent-operand-accessor macro invocation were lost in extraction —
 // TODO restore from upstream.
4092template <>
4094 public FixedNumOperandTraits<ResumeInst, 1> {
4095};
4096
4098
4099//===----------------------------------------------------------------------===//
4100// CatchSwitchInst Class
4101//===----------------------------------------------------------------------===//
 // NOTE(review): the "class CatchSwitchInst ... {" header line was lost in
 // extraction (the opcode checked in classof below is
 // Instruction::CatchSwitch) — TODO confirm the base class against upstream.
 // Whether an unwind destination exists is stored in subclass data, bit 0.
4103 using UnwindDestField = BoolBitfieldElementT<0>;
4104
 // Operands are "hung off" (allocated separately) so the handler list can
 // grow after construction; see growOperands().
4105 constexpr static HungOffOperandsAllocMarker AllocMarker{};
4106
4107 /// The number of operands actually allocated. NumOperands is
4108 /// the number actually in use.
4109 unsigned ReservedSpace;
4110
4111 // Operand[0] = Outer scope
4112 // Operand[1] = Unwind block destination
4113 // Operand[n] = BasicBlock to go to on match
4114 CatchSwitchInst(const CatchSwitchInst &CSI);
4115
4116 /// Create a new switch instruction, specifying a
4117 /// default destination. The number of additional handlers can be specified
4118 /// here to make memory allocation more efficient.
4119 /// This constructor can also autoinsert before another instruction.
4120 LLVM_ABI CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4121 unsigned NumHandlers, const Twine &NameStr,
4122 InsertPosition InsertBefore);
4123
4124 // allocate space for exactly zero operands
4125 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4126
4127 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4128 void growOperands(unsigned Size);
4129
4130protected:
4131 // Note: Instruction needs to be a friend here to call cloneImpl.
4132 friend class Instruction;
4133
4134 LLVM_ABI CatchSwitchInst *cloneImpl() const;
4135
4136public:
4137 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4138
4139 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4140 unsigned NumHandlers,
4141 const Twine &NameStr = "",
4142 InsertPosition InsertBefore = nullptr) {
4143 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4144 InsertBefore);
4145 }
4146
4147 /// Provide fast operand accessors
 // NOTE(review): the accessor-declaring macro line was lost in extraction.
4149
4150 // Accessor Methods for CatchSwitch stmt
4151 Value *getParentPad() const { return getOperand(0); }
4152 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4153
4154 // Accessor Methods for CatchSwitch stmt
4155 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4156 bool unwindsToCaller() const { return !hasUnwindDest(); }
 // NOTE(review): the getUnwindDest signature line was lost in extraction;
 // the body returns operand 1 when an unwind dest exists, else nullptr.
4158 if (hasUnwindDest())
4159 return cast<BasicBlock>(getOperand(1))
4160 return nullptr;
4161 }
4162 void setUnwindDest(BasicBlock *UnwindDest) {
4163 assert(UnwindDest);
4164 assert(hasUnwindDest());
4165 setOperand(1, UnwindDest);
4166 }
4167
4168 /// return the number of 'handlers' in this catchswitch
4169 /// instruction, except the default handler
 // Handlers are all operands past the parent pad (and the unwind dest, when
 // present).
4170 unsigned getNumHandlers() const {
4171 if (hasUnwindDest())
4172 return getNumOperands() - 2;
4173 return getNumOperands() - 1;
4174 }
4175
4176private:
4177 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4178 static const BasicBlock *handler_helper(const Value *V) {
4179 return cast<BasicBlock>(V);
4180 }
4181
4182public:
4183 using DerefFnTy = BasicBlock *(*)(Value *);
 // NOTE(review): the handler_iterator / handler_range alias lines (and their
 // const counterparts, used throughout the methods below) were lost in
 // extraction — TODO restore from upstream.
4186 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4190
4191 /// Returns an iterator that points to the first handler in CatchSwitchInst.
 // NOTE(review): signature line (handler_begin) lost in extraction. Skips
 // the parent pad, plus the unwind dest when present.
4193 op_iterator It = op_begin() + 1;
4194 if (hasUnwindDest())
4195 ++It;
4196 return handler_iterator(It, DerefFnTy(handler_helper));
4197 }
4198
4199 /// Returns an iterator that points to the first handler in the
4200 /// CatchSwitchInst.
4202 const_op_iterator It = op_begin() + 1;
4203 if (hasUnwindDest())
4204 ++It;
4205 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4206 }
4207
4208 /// Returns a read-only iterator that points one past the last
4209 /// handler in the CatchSwitchInst.
4211 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4212 }
4213
4214 /// Returns an iterator that points one past the last handler in the
4215 /// CatchSwitchInst.
4217 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4218 }
4219
4220 /// iteration adapter for range-for loops.
4222 return make_range(handler_begin(), handler_end());
4223 }
4224
4225 /// iteration adapter for range-for loops.
4227 return make_range(handler_begin(), handler_end());
4228 }
4229
4230 /// Add an entry to the switch instruction...
4231 /// Note:
4232 /// This action invalidates handler_end(). Old handler_end() iterator will
4233 /// point to the added handler.
4234 LLVM_ABI void addHandler(BasicBlock *Dest);
4235
4236 LLVM_ABI void removeHandler(handler_iterator HI);
4237
 // Successors are every operand but the parent pad: the unwind dest (if
 // present) followed by the handlers.
4238 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4239 BasicBlock *getSuccessor(unsigned Idx) const {
4240 assert(Idx < getNumSuccessors() &&
4241 "Successor # out of range for catchswitch!");
4242 return cast<BasicBlock>(getOperand(Idx + 1));
4243 }
4244 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4245 assert(Idx < getNumSuccessors() &&
4246 "Successor # out of range for catchswitch!");
4247 setOperand(Idx + 1, NewSucc);
4248 }
4249
4250 // Methods for support type inquiry through isa, cast, and dyn_cast:
4251 static bool classof(const Instruction *I) {
4252 return I->getOpcode() == Instruction::CatchSwitch;
4253 }
4254 static bool classof(const Value *V) {
4255 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4256 }
4257};
4258
 // NOTE(review): this template specialization (presumably the OperandTraits
 // for CatchSwitchInst, matching the pattern used for the other instruction
 // classes in this file) was almost entirely lost in extraction — TODO
 // restore from upstream.
4259template <>
4261
4263
4264//===----------------------------------------------------------------------===//
4265// CleanupPadInst Class
4266//===----------------------------------------------------------------------===//
 // NOTE(review): the "class CleanupPadInst ... {" header line was lost in
 // extraction; the constructor below forwards to FuncletPadInst with opcode
 // Instruction::CleanupPad — TODO confirm the base-class line upstream.
4268private:
4269 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4270 AllocInfo AllocInfo, const Twine &NameStr,
4271 InsertPosition InsertBefore)
4272 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, AllocInfo,
4273 NameStr, InsertBefore) {}
4274
4275public:
 // One operand per arg plus one for the parent pad, hence 1 + Args.size().
4276 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = {},
4277 const Twine &NameStr = "",
4278 InsertPosition InsertBefore = nullptr) {
4279 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4280 return new (AllocMarker)
4281 CleanupPadInst(ParentPad, Args, AllocMarker, NameStr, InsertBefore);
4282 }
4283
4284 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4285 static bool classof(const Instruction *I) {
4286 return I->getOpcode() == Instruction::CleanupPad;
4287 }
4288 static bool classof(const Value *V) {
4289 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4290 }
4291};
4292
4293//===----------------------------------------------------------------------===//
4294// CatchPadInst Class
4295//===----------------------------------------------------------------------===//
 // NOTE(review): the "class CatchPadInst ... {" header line was lost in
 // extraction; the constructor below forwards to FuncletPadInst with opcode
 // Instruction::CatchPad — TODO confirm the base-class line upstream.
4297private:
4298 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4299 AllocInfo AllocInfo, const Twine &NameStr,
4300 InsertPosition InsertBefore)
4301 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, AllocInfo,
4302 NameStr, InsertBefore) {}
4303
4304public:
 // One operand per arg plus one for the owning catchswitch.
4305 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4306 const Twine &NameStr = "",
4307 InsertPosition InsertBefore = nullptr) {
4308 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4309 return new (AllocMarker)
4310 CatchPadInst(CatchSwitch, Args, AllocMarker, NameStr, InsertBefore);
4311 }
4312
4313 /// Convenience accessors
 // NOTE(review): the getCatchSwitch signature line was lost in extraction
 // (the name is fixed by setCatchSwitch below); the owning catchswitch is
 // the last operand, Op<-1>.
4315 return cast<CatchSwitchInst>(Op<-1>());
4316 }
4317 void setCatchSwitch(Value *CatchSwitch) {
4318 assert(CatchSwitch);
4319 Op<-1>() = CatchSwitch;
4320 }
4321
4322 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4323 static bool classof(const Instruction *I) {
4324 return I->getOpcode() == Instruction::CatchPad;
4325 }
4326 static bool classof(const Value *V) {
4327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4328 }
4329};
4330
4331//===----------------------------------------------------------------------===//
4332// CatchReturnInst Class
4333//===----------------------------------------------------------------------===//
4334
 // NOTE(review): the "class CatchReturnInst ... {" header line was lost in
 // extraction (classof below checks Instruction::CatchRet) — TODO confirm
 // the base-class line upstream.
 // Two operands: the catchpad (Op<0>) and the successor block (Op<1>).
4336 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
4337
 // NOTE(review): the private constructor declaration lines preceding this
 // parameter were lost in extraction; Create below invokes
 // CatchReturnInst(CatchPad, BB, InsertBefore).
4340 InsertPosition InsertBefore);
4341
4342 void init(Value *CatchPad, BasicBlock *BB);
4343
4344protected:
4345 // Note: Instruction needs to be a friend here to call cloneImpl.
4346 friend class Instruction;
4347
 // NOTE(review): the cloneImpl declaration was lost in extraction.
4349
4350public:
4351 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4352 InsertPosition InsertBefore = nullptr) {
4353 assert(CatchPad);
4354 assert(BB);
4355 return new (AllocMarker) CatchReturnInst(CatchPad, BB, InsertBefore);
4356 }
4357
4358 /// Provide fast operand accessors
 // NOTE(review): the accessor-declaring macro line was lost in extraction.
4360
4361 /// Convenience accessors.
4362 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4363 void setCatchPad(CatchPadInst *CatchPad) {
4364 assert(CatchPad);
4365 Op<0>() = CatchPad;
4366 }
4367
4368 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4369 void setSuccessor(BasicBlock *NewSucc) {
4370 assert(NewSucc);
4371 Op<1>() = NewSucc;
4372 }
4373 unsigned getNumSuccessors() const { return 1; }
4374
4375 /// Get the parentPad of this catchret's catchpad's catchswitch.
4376 /// The successor block is implicitly a member of this funclet.
 // NOTE(review): the signature and body of this accessor were lost in
 // extraction — TODO restore from upstream.
4379 }
4380
4381 // Methods for support type inquiry through isa, cast, and dyn_cast:
4382 static bool classof(const Instruction *I) {
4383 return (I->getOpcode() == Instruction::CatchRet);
4384 }
4385 static bool classof(const Value *V) {
4386 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4387 }
4388
4389private:
 // Indexed forms forward to the single-successor accessors above.
4390 BasicBlock *getSuccessor(unsigned Idx) const {
4391 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4392 return getSuccessor();
4393 }
4394
4395 void setSuccessor(unsigned Idx, BasicBlock *B) {
4396 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4397 setSuccessor(B);
4398 }
4399};
4400
 // OperandTraits specialization: CatchReturnInst has exactly two operands.
 // NOTE(review): the "struct OperandTraits<CatchReturnInst>" line fragment
 // and the accessor macro invocation were lost in extraction.
4401template <>
4403 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4404
4406
4407//===----------------------------------------------------------------------===//
4408// CleanupReturnInst Class
4409//===----------------------------------------------------------------------===//
4410
 // NOTE(review): the "class CleanupReturnInst ... {" header line was lost in
 // extraction (classof below checks Instruction::CleanupRet) — TODO confirm
 // the base-class line upstream.
 // Presence of an unwind destination is stored in subclass data, bit 0.
4412 using UnwindDestField = BoolBitfieldElementT<0>;
4413
4414private:
 // NOTE(review): the private copy-constructor declaration and the AllocInfo
 // parameter line of this constructor were lost in extraction; Create below
 // invokes CleanupReturnInst(CleanupPad, UnwindBB, AllocMarker,
 // InsertBefore).
4416 LLVM_ABI CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
4418 InsertPosition InsertBefore = nullptr);
4419
4420 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4421
4422protected:
4423 // Note: Instruction needs to be a friend here to call cloneImpl.
4424 friend class Instruction;
4425
4426 LLVM_ABI CleanupReturnInst *cloneImpl() const;
4427
4428public:
 // Operand count is 1 (the cleanuppad) plus 1 more when an unwind
 // destination is supplied.
4429 static CleanupReturnInst *Create(Value *CleanupPad,
4430 BasicBlock *UnwindBB = nullptr,
4431 InsertPosition InsertBefore = nullptr) {
4432 assert(CleanupPad);
4433 unsigned Values = 1;
4434 if (UnwindBB)
4435 ++Values;
4436 IntrusiveOperandsAllocMarker AllocMarker{Values};
4437 return new (AllocMarker)
4438 CleanupReturnInst(CleanupPad, UnwindBB, AllocMarker, InsertBefore);
4439 }
4440
4441 /// Provide fast operand accessors
 // NOTE(review): the accessor-declaring macro line was lost in extraction.
4443
4444 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4445 bool unwindsToCaller() const { return !hasUnwindDest(); }
4446
4447 /// Convenience accessor.
 // NOTE(review): the getCleanupPad signature line was lost in extraction
 // (the name is fixed by setCleanupPad below).
4449 return cast<CleanupPadInst>(Op<0>());
4450 }
4451 void setCleanupPad(CleanupPadInst *CleanupPad) {
4452 assert(CleanupPad);
4453 Op<0>() = CleanupPad;
4454 }
4455
4456 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4457
 // NOTE(review): the getUnwindDest signature line was lost in extraction.
4459 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4460 }
4461 void setUnwindDest(BasicBlock *NewDest) {
4462 assert(NewDest);
4463 assert(hasUnwindDest());
4464 Op<1>() = NewDest;
4465 }
4466
4467 // Methods for support type inquiry through isa, cast, and dyn_cast:
4468 static bool classof(const Instruction *I) {
4469 return (I->getOpcode() == Instruction::CleanupRet);
4470 }
4471 static bool classof(const Value *V) {
4472 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4473 }
4474
4475private:
 // Indexed successor interface; only index 0 (the unwind dest) is valid.
4476 BasicBlock *getSuccessor(unsigned Idx) const {
4477 assert(Idx == 0);
4478 return getUnwindDest();
4479 }
4480
4481 void setSuccessor(unsigned Idx, BasicBlock *B) {
4482 assert(Idx == 0);
4483 setUnwindDest(B);
4484 }
4485
4486 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4487 // method so that subclasses cannot accidentally use it.
4488 template <typename Bitfield>
4489 void setSubclassData(typename Bitfield::Type Value) {
4490 Instruction::setSubclassData<Bitfield>(Value);
4491 }
4492};
4493
 // OperandTraits specialization: CleanupReturnInst has a variable operand
 // count (1 or 2 — see Create above). NOTE(review): the "struct
 // OperandTraits<CleanupReturnInst>" line fragment and the accessor macro
 // invocation were lost in extraction.
4494template <>
4496 : public VariadicOperandTraits<CleanupReturnInst> {};
4497
4499
4500//===----------------------------------------------------------------------===//
4501// UnreachableInst Class
4502//===----------------------------------------------------------------------===//
4503
4504//===---------------------------------------------------------------------------
4505/// This function has undefined behavior. In particular, the
4506/// presence of this instruction indicates some higher level knowledge that the
4507/// end of the block cannot be reached.
4508///
 // NOTE(review): the "class UnreachableInst ... {" header line was lost in
 // extraction (classof below checks Instruction::Unreachable) — TODO confirm
 // the base-class line upstream.
 // unreachable takes no operands.
4510 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
4511
4512protected:
4513 // Note: Instruction needs to be a friend here to call cloneImpl.
4514 friend class Instruction;
4515
4516 LLVM_ABI UnreachableInst *cloneImpl() const;
4517
4518public:
 // NOTE(review): the first line of the constructor declaration was lost in
 // extraction; only its trailing defaulted InsertPosition parameter remains.
4520 InsertPosition InsertBefore = nullptr);
4521
4522 // allocate space for exactly zero operands
4523 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4524 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4525
4526 unsigned getNumSuccessors() const { return 0; }
4527
4528 // Methods for support type inquiry through isa, cast, and dyn_cast:
4529 static bool classof(const Instruction *I) {
4530 return I->getOpcode() == Instruction::Unreachable;
4531 }
4532 static bool classof(const Value *V) {
4533 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4534 }
4535
4536 // Whether to do target lowering in SelectionDAG.
4537 LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable,
4538 bool NoTrapAfterNoreturn) const;
4539
4540private:
 // Private + unreachable: an UnreachableInst never has successors; these
 // exist only to satisfy the successor interface pattern.
4541 BasicBlock *getSuccessor(unsigned idx) const {
4542 llvm_unreachable("UnreachableInst has no successors!");
4543 }
4544
4545 void setSuccessor(unsigned idx, BasicBlock *B) {
4546 llvm_unreachable("UnreachableInst has no successors!");
4547 }
4548};
4549
4550//===----------------------------------------------------------------------===//
4551// TruncInst Class
4552//===----------------------------------------------------------------------===//
4553
4554/// This class represents a truncation of integer types.
4555class TruncInst : public CastInst {
4556protected:
4557 // Note: Instruction needs to be a friend here to call cloneImpl.
4558 friend class Instruction;
4559
4560 /// Clone an identical TruncInst
4561 LLVM_ABI TruncInst *cloneImpl() const;
4562
4563public:
 // No-wrap flag bits stored in SubclassOptionalData (nuw = bit 0, nsw =
 // bit 1).
4564 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
4565
4566 /// Constructor with insert-before-instruction semantics
4567 LLVM_ABI
4568 TruncInst(Value *S, ///< The value to be truncated
4569 Type *Ty, ///< The (smaller) type to truncate to
4570 const Twine &NameStr = "", ///< A name for the new instruction
4571 InsertPosition InsertBefore =
4572 nullptr ///< Where to insert the new instruction
4573 );
4574
4575 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4576 static bool classof(const Instruction *I) {
4577 return I->getOpcode() == Trunc;
4578 }
4579 static bool classof(const Value *V) {
4580 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4581 }
4582
 // NOTE(review): the signatures of the two flag setters below
 // (setHasNoUnsignedWrap / setHasNoSignedWrap, taking a bool B) were lost in
 // extraction; each body clears its bit then ORs it back in when B is true
 // (B * flag).
4585 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
4586 }
4589 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
4590 }
4591
4592 /// Test whether this operation is known to never
4593 /// undergo unsigned overflow, aka the nuw property.
4594 bool hasNoUnsignedWrap() const {
 // NOTE(review): the return statement was lost in extraction; by symmetry
 // with hasNoSignedWrap below it tests the NoUnsignedWrap bit — TODO
 // confirm upstream.
4596 }
4597
4598 /// Test whether this operation is known to never
4599 /// undergo signed overflow, aka the nsw property.
4600 bool hasNoSignedWrap() const {
4601 return (SubclassOptionalData & NoSignedWrap) != 0;
4602 }
4603
4604 /// Returns the no-wrap kind of the operation.
 // Combines the nuw/nsw flags into a single bitmask (AnyWrap when neither
 // is set).
4605 unsigned getNoWrapKind() const {
4606 unsigned NoWrapKind = 0;
4607 if (hasNoUnsignedWrap())
4608 NoWrapKind |= NoUnsignedWrap;
4609
4610 if (hasNoSignedWrap())
4611 NoWrapKind |= NoSignedWrap;
4612
4613 return NoWrapKind;
4614 }
4615};
4616
4617//===----------------------------------------------------------------------===//
4618// ZExtInst Class
4619//===----------------------------------------------------------------------===//
4620
4621/// This class represents zero extension of integer types.
4622class ZExtInst : public CastInst {
4623protected:
4624 // Note: Instruction needs to be a friend here to call cloneImpl.
4625 friend class Instruction;
4626
4627 /// Clone an identical ZExtInst
4628 LLVM_ABI ZExtInst *cloneImpl() const;
4629
4630public:
4631 /// Constructor with insert-before-instruction semantics
4632 LLVM_ABI
4633 ZExtInst(Value *S, ///< The value to be zero extended
4634 Type *Ty, ///< The type to zero extend to
4635 const Twine &NameStr = "", ///< A name for the new instruction
4636 InsertPosition InsertBefore =
4637 nullptr ///< Where to insert the new instruction
4638 );
4639
4640 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4641 static bool classof(const Instruction *I) {
4642 return I->getOpcode() == ZExt;
4643 }
4644 static bool classof(const Value *V) {
4645 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4646 }
4647};
4648
4649//===----------------------------------------------------------------------===//
4650// SExtInst Class
4651//===----------------------------------------------------------------------===//
4652
4653/// This class represents a sign extension of integer types.
4654class SExtInst : public CastInst {
4655protected:
4656 // Note: Instruction needs to be a friend here to call cloneImpl.
4657 friend class Instruction;
4658
4659 /// Clone an identical SExtInst
4660 LLVM_ABI SExtInst *cloneImpl() const;
4661
4662public:
4663 /// Constructor with insert-before-instruction semantics
4664 LLVM_ABI
4665 SExtInst(Value *S, ///< The value to be sign extended
4666 Type *Ty, ///< The type to sign extend to
4667 const Twine &NameStr = "", ///< A name for the new instruction
4668 InsertPosition InsertBefore =
4669 nullptr ///< Where to insert the new instruction
4670 );
4671
4672 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4673 static bool classof(const Instruction *I) {
4674 return I->getOpcode() == SExt;
4675 }
4676 static bool classof(const Value *V) {
4677 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4678 }
4679};
4680
4681//===----------------------------------------------------------------------===//
4682// FPTruncInst Class
4683//===----------------------------------------------------------------------===//
4684
4685/// This class represents a truncation of floating point types.
4686class FPTruncInst : public CastInst {
4687protected:
4688 // Note: Instruction needs to be a friend here to call cloneImpl.
4689 friend class Instruction;
4690
4691 /// Clone an identical FPTruncInst
 // NOTE(review): the cloneImpl declaration was lost in extraction — TODO
 // restore from upstream (compare FPExtInst below).
4693
 // NOTE(review): extraction fused the access specifier and the following
 // doc comment onto one line here.
4694public: /// Constructor with insert-before-instruction semantics
4695 LLVM_ABI
4696 FPTruncInst(Value *S, ///< The value to be truncated
4697 Type *Ty, ///< The type to truncate to
4698 const Twine &NameStr = "", ///< A name for the new instruction
4699 InsertPosition InsertBefore =
4700 nullptr ///< Where to insert the new instruction
4701 );
4702
4703 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4704 static bool classof(const Instruction *I) {
4705 return I->getOpcode() == FPTrunc;
4706 }
4707 static bool classof(const Value *V) {
4708 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4709 }
4710};
4711
4712//===----------------------------------------------------------------------===//
4713// FPExtInst Class
4714//===----------------------------------------------------------------------===//
4715
4716/// This class represents an extension of floating point types.
4717class FPExtInst : public CastInst {
4718protected:
4719 // Note: Instruction needs to be a friend here to call cloneImpl.
4720 friend class Instruction;
4721
4722 /// Clone an identical FPExtInst
4723 LLVM_ABI FPExtInst *cloneImpl() const;
4724
4725public:
4726 /// Constructor with insert-before-instruction semantics
4727 LLVM_ABI
4728 FPExtInst(Value *S, ///< The value to be extended
4729 Type *Ty, ///< The type to extend to
4730 const Twine &NameStr = "", ///< A name for the new instruction
4731 InsertPosition InsertBefore =
4732 nullptr ///< Where to insert the new instruction
4733 );
4734
4735 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4736 static bool classof(const Instruction *I) {
4737 return I->getOpcode() == FPExt;
4738 }
4739 static bool classof(const Value *V) {
4740 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4741 }
4742};
4743
4744//===----------------------------------------------------------------------===//
4745// UIToFPInst Class
4746//===----------------------------------------------------------------------===//
4747
4748/// This class represents a cast unsigned integer to floating point.
4749class UIToFPInst : public CastInst {
4750protected:
4751 // Note: Instruction needs to be a friend here to call cloneImpl.
4752 friend class Instruction;
4753
4754 /// Clone an identical UIToFPInst
4755 LLVM_ABI UIToFPInst *cloneImpl() const;
4756
4757public:
4758 /// Constructor with insert-before-instruction semantics
4759 LLVM_ABI
4760 UIToFPInst(Value *S, ///< The value to be converted
4761 Type *Ty, ///< The type to convert to
4762 const Twine &NameStr = "", ///< A name for the new instruction
4763 InsertPosition InsertBefore =
4764 nullptr ///< Where to insert the new instruction
4765 );
4766
4767 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4768 static bool classof(const Instruction *I) {
4769 return I->getOpcode() == UIToFP;
4770 }
4771 static bool classof(const Value *V) {
4772 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4773 }
4774};
4775
4776//===----------------------------------------------------------------------===//
4777// SIToFPInst Class
4778//===----------------------------------------------------------------------===//
4779
4780/// This class represents a cast from signed integer to floating point.
4781class SIToFPInst : public CastInst {
4782protected:
4783 // Note: Instruction needs to be a friend here to call cloneImpl.
4784 friend class Instruction;
4785
4786 /// Clone an identical SIToFPInst
4787 LLVM_ABI SIToFPInst *cloneImpl() const;
4788
4789public:
4790 /// Constructor with insert-before-instruction semantics
4791 LLVM_ABI
4792 SIToFPInst(Value *S, ///< The value to be converted
4793 Type *Ty, ///< The type to convert to
4794 const Twine &NameStr = "", ///< A name for the new instruction
4795 InsertPosition InsertBefore =
4796 nullptr ///< Where to insert the new instruction
4797 );
4798
4799 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4800 static bool classof(const Instruction *I) {
4801 return I->getOpcode() == SIToFP;
4802 }
4803 static bool classof(const Value *V) {
4804 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4805 }
4806};
4807
4808//===----------------------------------------------------------------------===//
4809// FPToUIInst Class
4810//===----------------------------------------------------------------------===//
4811
4812/// This class represents a cast from floating point to unsigned integer
4813class FPToUIInst : public CastInst {
4814protected:
4815 // Note: Instruction needs to be a friend here to call cloneImpl.
4816 friend class Instruction;
4817
4818 /// Clone an identical FPToUIInst
4819 LLVM_ABI FPToUIInst *cloneImpl() const;
4820
4821public:
4822 /// Constructor with insert-before-instruction semantics
4823 LLVM_ABI
4824 FPToUIInst(Value *S, ///< The value to be converted
4825 Type *Ty, ///< The type to convert to
4826 const Twine &NameStr = "", ///< A name for the new instruction
4827 InsertPosition InsertBefore =
4828 nullptr ///< Where to insert the new instruction
4829 );
4830
4831 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4832 static bool classof(const Instruction *I) {
4833 return I->getOpcode() == FPToUI;
4834 }
4835 static bool classof(const Value *V) {
4836 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4837 }
4838};
4839
4840//===----------------------------------------------------------------------===//
4841// FPToSIInst Class
4842//===----------------------------------------------------------------------===//
4843
4844/// This class represents a cast from floating point to signed integer.
4845class FPToSIInst : public CastInst {
4846protected:
4847 // Note: Instruction needs to be a friend here to call cloneImpl.
4848 friend class Instruction;
4849
4850 /// Clone an identical FPToSIInst
4851 LLVM_ABI FPToSIInst *cloneImpl() const;
4852
4853public:
4854 /// Constructor with insert-before-instruction semantics
4855 LLVM_ABI
4856 FPToSIInst(Value *S, ///< The value to be converted
4857 Type *Ty, ///< The type to convert to
4858 const Twine &NameStr = "", ///< A name for the new instruction
4859 InsertPosition InsertBefore =
4860 nullptr ///< Where to insert the new instruction
4861 );
4862
4863 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4864 static bool classof(const Instruction *I) {
4865 return I->getOpcode() == FPToSI;
4866 }
4867 static bool classof(const Value *V) {
4868 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4869 }
4870};
4871
4872//===----------------------------------------------------------------------===//
4873// IntToPtrInst Class
4874//===----------------------------------------------------------------------===//
4875
4876/// This class represents a cast from an integer to a pointer.
4877class IntToPtrInst : public CastInst {
4878public:
4879 // Note: Instruction needs to be a friend here to call cloneImpl.
4880 friend class Instruction;
4881
4882 /// Constructor with insert-before-instruction semantics
4883 LLVM_ABI
4884 IntToPtrInst(Value *S, ///< The value to be converted
4885 Type *Ty, ///< The type to convert to
4886 const Twine &NameStr = "", ///< A name for the new instruction
4887 InsertPosition InsertBefore =
4888 nullptr ///< Where to insert the new instruction
4889 );
4890
4891 /// Clone an identical IntToPtrInst.
4893
4894 /// Returns the address space of this instruction's pointer type.
4895 unsigned getAddressSpace() const {
4896 return getType()->getPointerAddressSpace();
4897 }
4898
4899 // Methods for support type inquiry through isa, cast, and dyn_cast:
4900 static bool classof(const Instruction *I) {
4901 return I->getOpcode() == IntToPtr;
4902 }
4903 static bool classof(const Value *V) {
4904 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4905 }
4906};
4907
4908//===----------------------------------------------------------------------===//
4909// PtrToIntInst Class
4910//===----------------------------------------------------------------------===//
4911
4912/// This class represents a cast from a pointer to an integer.
4913class PtrToIntInst : public CastInst {
4914protected:
4915 // Note: Instruction needs to be a friend here to call cloneImpl.
4916 friend class Instruction;
4917
4918 /// Clone an identical PtrToIntInst.
4920
4921public:
4922 /// Constructor with insert-before-instruction semantics
4923 LLVM_ABI
4924 PtrToIntInst(Value *S, ///< The value to be converted
4925 Type *Ty, ///< The type to convert to
4926 const Twine &NameStr = "", ///< A name for the new instruction
4927 InsertPosition InsertBefore =
4928 nullptr ///< Where to insert the new instruction
4929 );
4930
4931 /// Gets the pointer operand.
4933 /// Gets the pointer operand.
4934 const Value *getPointerOperand() const { return getOperand(0); }
4935 /// Gets the operand index of the pointer operand.
4936 static unsigned getPointerOperandIndex() { return 0U; }
4937
4938 /// Returns the address space of the pointer operand.
4939 unsigned getPointerAddressSpace() const {
4941 }
4942
4943 // Methods for support type inquiry through isa, cast, and dyn_cast:
4944 static bool classof(const Instruction *I) {
4945 return I->getOpcode() == PtrToInt;
4946 }
4947 static bool classof(const Value *V) {
4948 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4949 }
4950};
4951
4952/// This class represents a cast from a pointer to an address (non-capturing
4953/// ptrtoint).
4954class PtrToAddrInst : public CastInst {
4955protected:
4956 // Note: Instruction needs to be a friend here to call cloneImpl.
4957 friend class Instruction;
4958
4959 /// Clone an identical PtrToAddrInst.
4960 PtrToAddrInst *cloneImpl() const;
4961
4962public:
4963 /// Constructor with insert-before-instruction semantics
4964 PtrToAddrInst(Value *S, ///< The value to be converted
4965 Type *Ty, ///< The type to convert to
4966 const Twine &NameStr = "", ///< A name for the new instruction
4967 InsertPosition InsertBefore =
4968 nullptr ///< Where to insert the new instruction
4969 );
4970
4971 /// Gets the pointer operand.
4973 /// Gets the pointer operand.
4974 const Value *getPointerOperand() const { return getOperand(0); }
4975 /// Gets the operand index of the pointer operand.
4976 static unsigned getPointerOperandIndex() { return 0U; }
4977
4978 /// Returns the address space of the pointer operand.
4979 unsigned getPointerAddressSpace() const {
4981 }
4982
4983 // Methods for support type inquiry through isa, cast, and dyn_cast:
4984 static bool classof(const Instruction *I) {
4985 return I->getOpcode() == PtrToAddr;
4986 }
4987 static bool classof(const Value *V) {
4988 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4989 }
4990};
4991
4992//===----------------------------------------------------------------------===//
4993// BitCastInst Class
4994//===----------------------------------------------------------------------===//
4995
4996/// This class represents a no-op cast from one type to another.
4997class BitCastInst : public CastInst {
4998protected:
4999 // Note: Instruction needs to be a friend here to call cloneImpl.
5000 friend class Instruction;
5001
5002 /// Clone an identical BitCastInst.
5004
5005public:
5006 /// Constructor with insert-before-instruction semantics
5007 LLVM_ABI
5008 BitCastInst(Value *S, ///< The value to be casted
5009 Type *Ty, ///< The type to casted to
5010 const Twine &NameStr = "", ///< A name for the new instruction
5011 InsertPosition InsertBefore =
5012 nullptr ///< Where to insert the new instruction
5013 );
5014
5015 // Methods for support type inquiry through isa, cast, and dyn_cast:
5016 static bool classof(const Instruction *I) {
5017 return I->getOpcode() == BitCast;
5018 }
5019 static bool classof(const Value *V) {
5020 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5021 }
5022};
5023
5024//===----------------------------------------------------------------------===//
5025// AddrSpaceCastInst Class
5026//===----------------------------------------------------------------------===//
5027
5028/// This class represents a conversion between pointers from one address space
5029/// to another.
5031protected:
5032 // Note: Instruction needs to be a friend here to call cloneImpl.
5033 friend class Instruction;
5034
5035 /// Clone an identical AddrSpaceCastInst.
5037
5038public:
5039 /// Constructor with insert-before-instruction semantics
5041 Value *S, ///< The value to be casted
5042 Type *Ty, ///< The type to casted to
5043 const Twine &NameStr = "", ///< A name for the new instruction
5044 InsertPosition InsertBefore =
5045 nullptr ///< Where to insert the new instruction
5046 );
5047
5048 // Methods for support type inquiry through isa, cast, and dyn_cast:
5049 static bool classof(const Instruction *I) {
5050 return I->getOpcode() == AddrSpaceCast;
5051 }
5052 static bool classof(const Value *V) {
5053 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5054 }
5055
5056 /// Gets the pointer operand.
5058 return getOperand(0);
5059 }
5060
5061 /// Gets the pointer operand.
5062 const Value *getPointerOperand() const {
5063 return getOperand(0);
5064 }
5065
5066 /// Gets the operand index of the pointer operand.
5067 static unsigned getPointerOperandIndex() {
5068 return 0U;
5069 }
5070
5071 /// Returns the address space of the pointer operand.
5072 unsigned getSrcAddressSpace() const {
5074 }
5075
5076 /// Returns the address space of the result.
5077 unsigned getDestAddressSpace() const {
5078 return getType()->getPointerAddressSpace();
5079 }
5080};
5081
5082//===----------------------------------------------------------------------===//
5083// Helper functions
5084//===----------------------------------------------------------------------===//
5085
5086/// A helper function that returns the pointer operand of a load or store
5087/// instruction. Returns nullptr if not load or store.
5088inline const Value *getLoadStorePointerOperand(const Value *V) {
5089 if (auto *Load = dyn_cast<LoadInst>(V))
5090 return Load->getPointerOperand();
5091 if (auto *Store = dyn_cast<StoreInst>(V))
5092 return Store->getPointerOperand();
5093 return nullptr;
5094}
5096 return const_cast<Value *>(
5097 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5098}
5099
5100/// A helper function that returns the pointer operand of a load, store
5101/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5102inline const Value *getPointerOperand(const Value *V) {
5103 if (auto *Ptr = getLoadStorePointerOperand(V))
5104 return Ptr;
5105 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5106 return Gep->getPointerOperand();
5107 return nullptr;
5108}
5110 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5111}
5112
5113/// A helper function that returns the alignment of load or store instruction.
5115 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5116 "Expected Load or Store instruction");
5117 if (auto *LI = dyn_cast<LoadInst>(I))
5118 return LI->getAlign();
5119 return cast<StoreInst>(I)->getAlign();
5120}
5121
5122/// A helper function that set the alignment of load or store instruction.
5123inline void setLoadStoreAlignment(Value *I, Align NewAlign) {
5124 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5125 "Expected Load or Store instruction");
5126 if (auto *LI = dyn_cast<LoadInst>(I))
5127 LI->setAlignment(NewAlign);
5128 else
5129 cast<StoreInst>(I)->setAlignment(NewAlign);
5130}
5131
5132/// A helper function that returns the address space of the pointer operand of
5133/// load or store instruction.
5134inline unsigned getLoadStoreAddressSpace(const Value *I) {
5135 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5136 "Expected Load or Store instruction");
5137 if (auto *LI = dyn_cast<LoadInst>(I))
5138 return LI->getPointerAddressSpace();
5139 return cast<StoreInst>(I)->getPointerAddressSpace();
5140}
5141
5142/// A helper function that returns the type of a load or store instruction.
5143inline Type *getLoadStoreType(const Value *I) {
5144 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5145 "Expected Load or Store instruction");
5146 if (auto *LI = dyn_cast<LoadInst>(I))
5147 return LI->getType();
5148 return cast<StoreInst>(I)->getValueOperand()->getType();
5149}
5150
5151/// A helper function that returns an atomic operation's sync scope; returns
5152/// std::nullopt if it is not an atomic operation.
5153inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5154 if (!I->isAtomic())
5155 return std::nullopt;
5156 if (auto *AI = dyn_cast<LoadInst>(I))
5157 return AI->getSyncScopeID();
5158 if (auto *AI = dyn_cast<StoreInst>(I))
5159 return AI->getSyncScopeID();
5160 if (auto *AI = dyn_cast<FenceInst>(I))
5161 return AI->getSyncScopeID();
5162 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5163 return AI->getSyncScopeID();
5164 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5165 return AI->getSyncScopeID();
5166 llvm_unreachable("unhandled atomic operation");
5167}
5168
5169/// A helper function that sets an atomic operation's sync scope.
5171 assert(I->isAtomic());
5172 if (auto *AI = dyn_cast<LoadInst>(I))
5173 AI->setSyncScopeID(SSID);
5174 else if (auto *AI = dyn_cast<StoreInst>(I))
5175 AI->setSyncScopeID(SSID);
5176 else if (auto *AI = dyn_cast<FenceInst>(I))
5177 AI->setSyncScopeID(SSID);
5178 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5179 AI->setSyncScopeID(SSID);
5180 else if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5181 AI->setSyncScopeID(SSID);
5182 else
5183 llvm_unreachable("unhandled atomic operation");
5184}
5185
5186//===----------------------------------------------------------------------===//
5187// FreezeInst Class
5188//===----------------------------------------------------------------------===//
5189
5190/// This class represents a freeze function that returns random concrete
5191/// value if an operand is either a poison value or an undef value
5193protected:
5194 // Note: Instruction needs to be a friend here to call cloneImpl.
5195 friend class Instruction;
5196
5197 /// Clone an identical FreezeInst
5198 LLVM_ABI FreezeInst *cloneImpl() const;
5199
5200public:
5201 LLVM_ABI explicit FreezeInst(Value *S, const Twine &NameStr = "",
5202 InsertPosition InsertBefore = nullptr);
5203
5204 // Methods for support type inquiry through isa, cast, and dyn_cast:
5205 static inline bool classof(const Instruction *I) {
5206 return I->getOpcode() == Freeze;
5207 }
5208 static inline bool classof(const Value *V) {
5209 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5210 }
5211};
5212
5213} // end namespace llvm
5214
5215#endif // LLVM_IR_INSTRUCTIONS_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
static bool isReverseMask(ArrayRef< int > M, EVT VT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Atomic ordering constants.
static const Function * getParent(const Value *V)
This file implements methods to test, set and extract typed bits from packed unsigned integers.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
RelocType Type
Definition: COFFYAML.cpp:410
#define LLVM_ABI
Definition: Compiler.h:213
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Align
std::string Name
uint32_t Index
uint64_t Size
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
#define P(N)
PowerPC Reduce CR logical Operation
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents a conversion between pointers from one address space to another.
const Value * getPointerOperand() const
Gets the pointer operand.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
Value * getPointerOperand()
Gets the pointer operand.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
unsigned getSrcAddressSpace() const
Returns the address space of the pointer operand.
unsigned getDestAddressSpace() const
Returns the address space of the result.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
an instruction to allocate memory on the stack
Definition: Instructions.h:64
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
static bool classof(const Value *V)
Definition: Instructions.h:161
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:153
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:155
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:128
void setAllocatedType(Type *Ty)
for use only in special circumstances that need to generically transform a whole instruction (eg: IR ...
Definition: Instructions.h:124
static bool classof(const Instruction *I)
Definition: Instructions.h:158
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:101
void setUsedWithInAlloca(bool V)
Specify whether this alloca is used to represent the arguments to a call.
Definition: Instructions.h:148
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:143
Value * getArraySize()
Definition: Instructions.h:98
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:106
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:132
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:534
const Value * getCompareOperand() const
Definition: Instructions.h:639
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:630
AtomicOrdering getMergedOrdering() const
Returns a single ordering which is at least as strong as both the success and failure orderings for t...
Definition: Instructions.h:612
void setWeak(bool IsWeak)
Definition: Instructions.h:569
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:560
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:645
BoolBitfieldElementT< VolatileField::NextBit > WeakField
Definition: Instructions.h:535
AtomicOrderingBitfieldElementT< SuccessOrderingField::NextBit > FailureOrderingField
Definition: Instructions.h:539
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:604
static bool isValidFailureOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:579
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:592
AlignmentBitfieldElementT< FailureOrderingField::NextBit > AlignmentField
Definition: Instructions.h:541
static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering)
Returns the strongest permitted ordering on failure, given the desired ordering on success.
Definition: Instructions.h:657
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:549
const Value * getPointerOperand() const
Definition: Instructions.h:635
static bool classof(const Value *V)
Definition: Instructions.h:676
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:567
void setAlignment(Align Align)
Definition: Instructions.h:553
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
Definition: Instructions.h:564
static bool isValidSuccessOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:574
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
AtomicOrderingBitfieldElementT< WeakField::NextBit > SuccessOrderingField
Definition: Instructions.h:537
static unsigned getPointerOperandIndex()
Definition: Instructions.h:636
const Value * getNewValOperand() const
Definition: Instructions.h:642
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
static bool classof(const Instruction *I)
Definition: Instructions.h:673
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:843
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:823
static unsigned getPointerOperandIndex()
Definition: Instructions.h:888
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:853
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:857
BinOpBitfieldElement< AtomicOrderingField::NextBit > OperationField
Definition: Instructions.h:813
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:721
@ Add
*p = old + v
Definition: Instructions.h:725
@ FAdd
*p = old + v
Definition: Instructions.h:746
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:777
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
Definition: Instructions.h:765
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:739
@ Or
*p = old | v
Definition: Instructions.h:733
@ Sub
*p = old - v
Definition: Instructions.h:727
@ And
*p = old & v
Definition: Instructions.h:729
@ Xor
*p = old ^ v
Definition: Instructions.h:735
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:781
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
Definition: Instructions.h:761
@ FSub
*p = old - v
Definition: Instructions.h:749
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:769
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:737
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:743
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:757
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:741
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:753
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:773
@ Nand
*p = ~(old & v)
Definition: Instructions.h:731
AtomicOrderingBitfieldElementT< VolatileField::NextBit > AtomicOrderingField
Definition: Instructions.h:812
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:882
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
Value * getPointerOperand()
Definition: Instructions.h:886
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:868
bool isFloatingPointOperation() const
Definition: Instructions.h:898
static bool classof(const Instruction *I)
Definition: Instructions.h:903
const Value * getPointerOperand() const
Definition: Instructions.h:887
void setOperation(BinOp Operation)
Definition: Instructions.h:837
static bool classof(const Value *V)
Definition: Instructions.h:906
BinOp getOperation() const
Definition: Instructions.h:819
const Value * getValOperand() const
Definition: Instructions.h:891
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:877
void setAlignment(Align Align)
Definition: Instructions.h:847
Value * getValOperand()
Definition: Instructions.h:890
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:863
AlignmentBitfieldElementT< OperationField::NextBit > AlignmentField
Definition: Instructions.h:814
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:810
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:894
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
This class represents a no-op cast from one type to another.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
Conditional or Unconditional Branch instruction.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
iterator_range< succ_op_iterator > successors()
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
static bool classof(const Instruction *I)
bool isConditional() const
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
Value * getCondition() const
iterator_range< const_succ_op_iterator > successors() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1481
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
Definition: InstrTypes.h:1458
FunctionType * FTy
Definition: InstrTypes.h:1131
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2313
unsigned arg_size() const
Definition: InstrTypes.h:1290
unsigned getNumTotalBundleOperands() const
Return the total number operands (not operand bundles) used by every operand bundle in this OperandBu...
Definition: InstrTypes.h:2040
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static bool classof(const Value *V)
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i) const
Value * getIndirectDestLabelUse(unsigned i) const
BasicBlock * getIndirectDest(unsigned i) const
void setDefaultDest(BasicBlock *B)
unsigned getNumSuccessors() const
void setIndirectDest(unsigned i, BasicBlock *B)
Value * getIndirectDestLabel(unsigned i) const
getIndirectDestLabel - Return the i-th indirect dest label.
BasicBlock * getDefaultDest() const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static bool classof(const Value *V)
bool isTailCall() const
void setCanReturnTwice()
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
bool canReturnTwice() const
Return true if the call can return twice.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
bool isMustTailCall() const
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
bool isNonContinuableTrap() const
Return true if the call is for a noreturn trap intrinsic.
static CallInst * Create(FunctionCallee Func, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:448
CatchSwitchInst * getCatchSwitch() const
Convenience accessors.
void setCatchSwitch(Value *CatchSwitch)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CatchPadInst * Create(Value *CatchSwitch, ArrayRef< Value * > Args, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Value *V)
static bool classof(const Instruction *I)
BasicBlock * getSuccessor() const
CatchPadInst * getCatchPad() const
Convenience accessors.
void setSuccessor(BasicBlock *NewSucc)
static bool classof(const Value *V)
static CatchReturnInst * Create(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
void setCatchPad(CatchPadInst *CatchPad)
LLVM_ABI CatchReturnInst * cloneImpl() const
Value * getCatchSwitchParentPad() const
Get the parentPad of this catchret's catchpad's catchswitch.
void setUnwindDest(BasicBlock *UnwindDest)
static bool classof(const Instruction *I)
BasicBlock *(*)(Value *) DerefFnTy
const BasicBlock *(*)(const Value *) ConstDerefFnTy
unsigned getNumSuccessors() const
const_handler_iterator handler_begin() const
Returns an iterator that points to the first handler in the CatchSwitchInst.
unsigned getNumHandlers() const
Return the number of 'handlers' in this catchswitch instruction, except the default handler
void setSuccessor(unsigned Idx, BasicBlock *NewSucc)
Value * getParentPad() const
void setParentPad(Value *ParentPad)
bool unwindsToCaller() const
static bool classof(const Value *V)
handler_iterator handler_end()
Returns an iterator that points one past the last handler in the CatchSwitchInst.
BasicBlock * getUnwindDest() const
BasicBlock * getSuccessor(unsigned Idx) const
const_handler_iterator handler_end() const
Returns an iterator that points one past the last handler in the CatchSwitchInst.
bool hasUnwindDest() const
handler_iterator handler_begin()
Returns an iterator that points to the first handler in the CatchSwitchInst.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
handler_range handlers()
iteration adapter for range-for loops.
const_handler_range handlers() const
iteration adapter for range-for loops.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
CleanupPadInst * getCleanupPad() const
Convenience accessor.
unsigned getNumSuccessors() const
BasicBlock * getUnwindDest() const
bool unwindsToCaller() const
void setCleanupPad(CleanupPadInst *CleanupPad)
static bool classof(const Value *V)
void setUnwindDest(BasicBlock *NewDest)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
bool hasUnwindDest() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:666
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:984
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:770
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:681
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:695
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:689
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:687
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:694
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:680
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:688
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:829
static auto FCmpPredicates()
Returns the sequence of all FCmp predicates.
Definition: InstrTypes.h:717
bool isFPPredicate() const
Definition: InstrTypes.h:784
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:767
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:23
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:43
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This instruction extracts a single (scalar) element from a VectorType value.
const Value * getVectorOperand() const
LLVM_ABI ExtractElementInst * cloneImpl() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool classof(const Value *V)
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getIndexOperand() const
static bool classof(const Instruction *I)
VectorType * getVectorOperandType() const
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getAggregateOperand() const
static unsigned getAggregateOperandIndex()
idx_iterator idx_begin() const
This instruction compares its operands according to the predicate given to the constructor.
bool isRelational() const
FCmpInst(Predicate Pred, Value *LHS, Value *RHS, const Twine &NameStr="", Instruction *FlagsSource=nullptr)
Constructor with no-insertion semantics.
bool isEquality() const
static bool classof(const Value *V)
bool isCommutative() const
static bool isCommutative(Predicate Pred)
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isEquality(Predicate Pred)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static auto predicates()
Returns the sequence of all FCmp predicates.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
static bool classof(const Value *V)
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to signed integer.
static bool classof(const Value *V)
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to unsigned integer.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:429
static bool classof(const Value *V)
Definition: Instructions.h:478
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:470
static bool classof(const Instruction *I)
Definition: Instructions.h:475
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:460
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:454
This class represents a freeze function that returns a random concrete value if an operand is either a ...
static bool classof(const Value *V)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
static bool classof(const Instruction *I)
friend class CatchPadInst
Definition: InstrTypes.h:2372
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
Class to represent function types.
Definition: DerivedTypes.h:105
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
static Type * getGEPReturnType(Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setResultElementType(Type *Ty)
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
iterator_range< const_op_iterator > indices() const
Type * getResultElementType() const
static bool classof(const Instruction *I)
static bool classof(const Value *V)
iterator_range< op_iterator > indices()
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:973
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
void setSourceElementType(Type *Ty)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Type * getSourceElementType() const
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:997
Type * getPointerOperandType() const
Method to return the pointer operand as a PointerType.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, GEPNoWrapFlags NW, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:984
static unsigned getPointerOperandIndex()
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
const_op_iterator idx_begin() const
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
unsigned getNumIndices() const
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
const_op_iterator idx_end() const
const Value * getPointerOperand() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This instruction compares its operands according to the predicate given to the constructor.
bool hasSameSign() const
An icmp instruction, which can be marked as "samesign", indicating that the two operands have the sam...
static bool classof(const Value *V)
void setSameSign(bool B=true)
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static bool isCommutative(Predicate P)
static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred)
CmpPredicate getCmpPredicate() const
bool isCommutative() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
static bool classof(const Instruction *I)
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getNonStrictCmpPredicate(CmpPredicate Pred)
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
static auto predicates()
Returns the sequence of all ICmp predicates.
ICmpInst(Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with no-insertion semantics.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Indirect Branch Instruction.
static IndirectBrInst * Create(Value *Address, unsigned NumDests, InsertPosition InsertBefore=nullptr)
BasicBlock * getDestination(unsigned i)
Return the specified destination.
static bool classof(const Value *V)
const Value * getAddress() const
static bool classof(const Instruction *I)
BasicBlock * getSuccessor(unsigned i) const
iterator_range< const_succ_op_iterator > successors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumDestinations() const
Return the number of possible destinations in this indirectbr instruction.
const BasicBlock * getDestination(unsigned i) const
void setSuccessor(unsigned i, BasicBlock *NewSucc)
void setAddress(Value *V)
unsigned getNumSuccessors() const
iterator_range< succ_op_iterator > successors()
This instruction inserts a single (scalar) element into a VectorType value.
static bool classof(const Value *V)
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
VectorType * getType() const
Overload to return most specific vector type.
static bool classof(const Instruction *I)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
This instruction inserts a struct field or array element value into an aggregate value.
Value * getInsertedValueOperand()
static bool classof(const Instruction *I)
static unsigned getAggregateOperandIndex()
Value * getAggregateOperand()
static bool classof(const Value *V)
unsigned getNumIndices() const
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
static unsigned getInsertedValueOperandIndex()
LLVM_ABI InsertValueInst * cloneImpl() const
idx_iterator idx_end() const
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
const Value * getAggregateOperand() const
bool hasIndices() const
const Value * getInsertedValueOperand() const
idx_iterator idx_begin() const
typename Bitfield::Element< AtomicOrdering, Offset, 3, AtomicOrdering::LAST > AtomicOrderingBitfieldElementT
Definition: Instruction.h:155
typename Bitfield::Element< bool, Offset, 1 > BoolBitfieldElementT
Definition: Instruction.h:150
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
typename Bitfield::Element< unsigned, Offset, 6, Value::MaxAlignmentExponent > AlignmentBitfieldElementT
Definition: Instruction.h:147
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:312
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
friend class BasicBlock
Various leaf nodes.
Definition: Instruction.h:1036
This class represents a cast from an integer to a pointer.
static bool classof(const Instruction *I)
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static bool classof(const Value *V)
Invoke instruction.
static bool classof(const Instruction *I)
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
static bool classof(const Value *V)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
void setUnwindDest(BasicBlock *B)
BasicBlock * getNormalDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
static bool classof(const Value *V)
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void reserveClauses(unsigned Size)
Grow the size of the operand list to accommodate the new number of clauses.
static bool classof(const Instruction *I)
An instruction for reading from memory.
Definition: Instructions.h:180
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:265
const Value * getPointerOperand() const
Definition: Instructions.h:260
void setAlignment(Align Align)
Definition: Instructions.h:219
Value * getPointerOperand()
Definition: Instructions.h:259
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:209
static bool classof(const Instruction *I)
Definition: Instructions.h:270
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:229
static bool classof(const Value *V)
Definition: Instructions.h:273
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this load instruction.
Definition: Instructions.h:239
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:245
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:224
Type * getPointerOperandType() const
Definition: Instructions.h:262
static unsigned getPointerOperandIndex()
Definition: Instructions.h:261
bool isUnordered() const
Definition: Instructions.h:253
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:212
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:234
bool isSimple() const
Definition: Instructions.h:251
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:215
Metadata node.
Definition: Metadata.h:1077
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:303
BasicBlock * getIncomingBlock(Value::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static bool classof(const Value *V)
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock *const * const_block_iterator
void setIncomingValue(unsigned i, Value *V)
static unsigned getOperandNumForIncomingValue(unsigned i)
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
static unsigned getIncomingValueNumForOperand(unsigned i)
const_op_range incoming_values() const
Value * removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true)
void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New)
Replace every incoming basic block Old to basic block New.
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Class to represent pointers.
Definition: DerivedTypes.h:700
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:740
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
static bool classof(const Instruction *I)
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
static bool classof(const Value *V)
const Value * getPointerOperand() const
Gets the pointer operand.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static bool classof(const Value *V)
const Value * getPointerOperand() const
Gets the pointer operand.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
static bool classof(const Instruction *I)
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
static ResumeInst * Create(Value *Exn, InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getValue() const
Convenience accessor.
static bool classof(const Value *V)
unsigned getNumSuccessors() const
LLVM_ABI ResumeInst * cloneImpl() const
static bool classof(const Instruction *I)
Return a value (possibly void), from a function.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
static ReturnInst * Create(LLVMContext &C, BasicBlock *InsertAtEnd)
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a sign extension of integer types.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
void setFalseValue(Value *V)
const Value * getFalseValue() const
void setTrueValue(Value *V)
OtherOps getOpcode() const
Value * getCondition()
Value * getTrueValue()
void swapValues()
Swap the true and false values of the select instruction.
Value * getFalseValue()
const Value * getCondition() const
LLVM_ABI SelectInst * cloneImpl() const
friend class Instruction
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static bool classof(const Value *V)
void setCondition(Value *V)
const Value * getTrueValue() const
static bool classof(const Instruction *I)
This instruction constructs a fixed permutation of two input vectors.
static bool classof(const Value *V)
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts)
Constant * getShuffleMaskForBitcode() const
Return the mask for this instruction, for use in bitcode.
bool isSingleSource() const
Return true if this shuffle chooses elements from exactly one source vector without changing the leng...
bool changesLength() const
Return true if this shuffle returns a vector with a different number of elements than its source vect...
bool isExtractSubvectorMask(int &Index) const
Return true if this shuffle mask is an extract subvector mask.
ArrayRef< int > getShuffleMask() const
static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, int &NumSubElts, int &Index)
static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts)
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
void getShuffleMask(SmallVectorImpl< int > &Result) const
Return the mask for this instruction as a vector of integers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor)
VectorType * getType() const
Overload to return most specific vector type.
bool isInsertSubvectorMask(int &NumSubElts, int &Index) const
Return true if this shuffle mask is an insert subvector mask.
bool increasesLength() const
Return true if this shuffle returns a vector with a greater number of elements than its source vector...
bool isZeroEltSplat() const
Return true if all elements of this shuffle are the same value as the first element of exactly one so...
static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isSelect() const
Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...
static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isTranspose() const
Return true if this shuffle transposes the elements of its inputs without changing the length of the ...
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
bool isSplice(int &Index) const
Return true if this shuffle splices two inputs without changing the length of the vectors.
static bool isReverseMask(const Constant *Mask, int NumSrcElts)
static bool isSelectMask(const Constant *Mask, int NumSrcElts)
static bool classof(const Instruction *I)
static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts)
bool isIdentity() const
Return true if this shuffle chooses elements from exactly one source vector without lane crossings an...
static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, int &VF)
static bool isIdentityMask(const Constant *Mask, int NumSrcElts)
static bool isTransposeMask(const Constant *Mask, int NumSrcElts)
bool isReverse() const
Return true if this shuffle swaps the order of elements from exactly one source vector.
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:574
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
static bool classof(const Instruction *I)
Definition: Instructions.h:397
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:347
const Value * getPointerOperand() const
Definition: Instructions.h:387
Align getAlign() const
Definition: Instructions.h:338
Type * getPointerOperandType() const
Definition: Instructions.h:389
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:333
void setAlignment(Align Align)
Definition: Instructions.h:342
bool isSimple() const
Definition: Instructions.h:375
const Value * getValueOperand() const
Definition: Instructions.h:384
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:353
Value * getValueOperand()
Definition: Instructions.h:383
static bool classof(const Value *V)
Definition: Instructions.h:400
bool isUnordered() const
Definition: Instructions.h:377
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this store instruction.
Definition: Instructions.h:363
LLVM_ABI StoreInst * cloneImpl() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:392
static unsigned getPointerOperandIndex()
Definition: Instructions.h:388
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:358
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:330
Value * getPointerOperand()
Definition: Instructions.h:386
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:369
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
SwitchInstProfUpdateWrapper(SwitchInst &SI)
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
A handle to a particular switch case.
unsigned getCaseIndex() const
Returns number of current case.
unsigned getSuccessorIndex() const
Returns successor index for current case successor.
BasicBlockT * getCaseSuccessor() const
Resolves successor for current case.
CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index)
bool operator==(const CaseHandleImpl &RHS) const
ConstantIntT * getCaseValue() const
Resolves case value for current case.
CaseHandle(SwitchInst *SI, ptrdiff_t Index)
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
const CaseHandleT & operator*() const
CaseIteratorImpl()=default
Default constructed iterator is in an invalid state until assigned to a case for a particular switch.
CaseIteratorImpl & operator-=(ptrdiff_t N)
bool operator==(const CaseIteratorImpl &RHS) const
CaseIteratorImpl & operator+=(ptrdiff_t N)
ptrdiff_t operator-(const CaseIteratorImpl &RHS) const
bool operator<(const CaseIteratorImpl &RHS) const
CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum)
Initializes case iterator for given SwitchInst and for given case number.
static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, unsigned SuccessorIndex)
Initializes case iterator for given SwitchInst and for given successor index.
Multiway switch.
BasicBlock * getDefaultDest() const
CaseIt case_end()
Returns a read/write iterator that points one past the last in the SwitchInst.
BasicBlock * getSuccessor(unsigned idx) const
ConstCaseIt findCaseValue(const ConstantInt *C) const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static SwitchInst * Create(Value *Value, BasicBlock *Default, unsigned NumCases, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
bool defaultDestUnreachable() const
Returns true if the default branch must result in immediate undefined behavior, false otherwise.
ConstCaseIt case_begin() const
Returns a read-only iterator that points to the first case in the SwitchInst.
iterator_range< ConstCaseIt > cases() const
Constant iteration adapter for range-for loops.
ConstantInt * findCaseDest(BasicBlock *BB)
Finds the unique case value for a given successor.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
static bool classof(const Value *V)
unsigned getNumSuccessors() const
CaseIt case_default()
Returns an iterator that points to the default case.
void setDefaultDest(BasicBlock *DefaultCase)
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt findCaseValue(const ConstantInt *C)
Search all of the case values for the specified constant.
Value * getCondition() const
ConstCaseIt case_default() const
CaseIt case_begin()
Returns a read/write iterator that points to the first case in the SwitchInst.
static bool classof(const Instruction *I)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
ConstCaseIt case_end() const
Returns a read-only iterator that points one past the last in the SwitchInst.
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
void setHasNoUnsignedWrap(bool B)
unsigned getNoWrapKind() const
Returns the no-wrap kind of the operation.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
static bool classof(const Value *V)
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
This class represents a cast unsigned integer to floating point.
static bool classof(const Value *V)
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This function has undefined behavior.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
LLVM_ABI void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:284
const Use & getOperandUse(unsigned i) const
Definition: User.h:245
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
op_iterator op_end()
Definition: User.h:286
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
static bool classof(const Instruction *I)
Value * getPointerOperand()
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getPointerOperand() const
static bool classof(const Value *V)
static unsigned getPointerOperandIndex()
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:392
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:390
Base class of all SIMD vector types.
Definition: DerivedTypes.h:430
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
CRTP base class for adapting an iterator to a different type.
Definition: iterator.h:237
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:80
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:81
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:58
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
Type * checkGEPType(Type *Ty)
Definition: Instructions.h:941
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
APInt operator*(APInt a, uint64_t RHS)
Definition: APInt.h:2235
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID)
A helper function that sets an atomic operation's sync scope.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an...
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1854
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1777
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
void setLoadStoreAlignment(Value *I, Align NewAlign)
A helper function that set the alignment of load or store instruction.
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
static constexpr bool areContiguous()
Definition: Bitfields.h:280
The const version of succ_op_iterator.
const BasicBlock * operator->() const
const_succ_op_iterator(const_value_op_iterator I)
const BasicBlock * operator*() const
Iterator type that casts an operand to a basic block.
succ_op_iterator(value_op_iterator I)
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:30
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to th...
Definition: OperandTraits.h:93
The const version of succ_op_iterator.
const_succ_op_iterator(const_value_op_iterator I)
Iterator type that casts an operand to a basic block.
Compile-time customization of User operands.
Definition: User.h:42
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:249
Information about how a User object was allocated, to be passed into the User constructor.
Definition: User.h:79
Indicates this User has operands "hung off" in another allocation.
Definition: User.h:57
Indicates this User has operands co-allocated.
Definition: User.h:60
Iterator for directly iterating over the operand Values.
Definition: User.h:303
VariadicOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:67