LLVM 21.0.0git
Instructions.h
Go to the documentation of this file.
1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
26#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/Use.h"
36#include "llvm/IR/User.h"
39#include <cassert>
40#include <cstddef>
41#include <cstdint>
42#include <iterator>
43#include <optional>
44
45namespace llvm {
46
47class APFloat;
48class APInt;
49class BasicBlock;
50class ConstantInt;
51class DataLayout;
52struct KnownBits;
53class StringRef;
54class Type;
55class Value;
56class UnreachableInst;
57
58//===----------------------------------------------------------------------===//
59// AllocaInst Class
60//===----------------------------------------------------------------------===//
61
62/// an instruction to allocate memory on the stack
64 Type *AllocatedType;
65
66 using AlignmentField = AlignmentBitfieldElementT<0>;
67 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
69 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
70 SwiftErrorField>(),
71 "Bitfields must be contiguous");
72
73protected:
74 // Note: Instruction needs to be a friend here to call cloneImpl.
75 friend class Instruction;
76
77 AllocaInst *cloneImpl() const;
78
79public:
80 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, InsertPosition InsertBefore);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 InsertPosition InsertBefore);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", InsertPosition InsertBefore = nullptr);
88
89 /// Return true if there is an allocation size parameter to the allocation
90 /// instruction that is not 1.
91 bool isArrayAllocation() const;
92
93 /// Get the number of elements allocated. For a simple allocation of a single
94 /// element, this will return a constant 1 value.
95 const Value *getArraySize() const { return getOperand(0); }
96 Value *getArraySize() { return getOperand(0); }
97
98 /// Overload to return most specific pointer type.
100 return cast<PointerType>(Instruction::getType());
101 }
102
103 /// Return the address space for the allocation.
104 unsigned getAddressSpace() const {
105 return getType()->getAddressSpace();
106 }
107
108 /// Get allocation size in bytes. Returns std::nullopt if size can't be
109 /// determined, e.g. in case of a VLA.
110 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
111
112 /// Get allocation size in bits. Returns std::nullopt if size can't be
113 /// determined, e.g. in case of a VLA.
114 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
115
116 /// Return the type that is being allocated by the instruction.
117 Type *getAllocatedType() const { return AllocatedType; }
118 /// for use only in special circumstances that need to generically
119 /// transform a whole instruction (eg: IR linking and vectorization).
120 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
121
122 /// Return the alignment of the memory that is being allocated by the
123 /// instruction.
124 Align getAlign() const {
125 return Align(1ULL << getSubclassData<AlignmentField>());
126 }
127
129 setSubclassData<AlignmentField>(Log2(Align));
130 }
131
132 /// Return true if this alloca is in the entry block of the function and is a
133 /// constant size. If so, the code generator will fold it into the
134 /// prolog/epilog code, so it is basically free.
135 bool isStaticAlloca() const;
136
137 /// Return true if this alloca is used as an inalloca argument to a call. Such
138 /// allocas are never considered static even if they are in the entry block.
139 bool isUsedWithInAlloca() const {
140 return getSubclassData<UsedWithInAllocaField>();
141 }
142
143 /// Specify whether this alloca is used to represent the arguments to a call.
144 void setUsedWithInAlloca(bool V) {
145 setSubclassData<UsedWithInAllocaField>(V);
146 }
147
148 /// Return true if this alloca is used as a swifterror argument to a call.
149 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
150 /// Specify whether this alloca is used to represent a swifterror.
151 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
152
153 // Methods for support type inquiry through isa, cast, and dyn_cast:
154 static bool classof(const Instruction *I) {
155 return (I->getOpcode() == Instruction::Alloca);
156 }
157 static bool classof(const Value *V) {
158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
159 }
160
161private:
162 // Shadow Instruction::setInstructionSubclassData with a private forwarding
163 // method so that subclasses cannot accidentally use it.
164 template <typename Bitfield>
165 void setSubclassData(typename Bitfield::Type Value) {
166 Instruction::setSubclassData<Bitfield>(Value);
167 }
168};
169
170//===----------------------------------------------------------------------===//
171// LoadInst Class
172//===----------------------------------------------------------------------===//
173
174/// An instruction for reading from memory. This uses the SubclassData field in
175/// Value to store whether or not the load is volatile.
177 using VolatileField = BoolBitfieldElementT<0>;
180 static_assert(
181 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
182 "Bitfields must be contiguous");
183
184 void AssertOK();
185
186protected:
187 // Note: Instruction needs to be a friend here to call cloneImpl.
188 friend class Instruction;
189
190 LoadInst *cloneImpl() const;
191
192public:
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
194 InsertPosition InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 InsertPosition InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Align Align, InsertPosition InsertBefore = nullptr);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 InsertPosition InsertBefore = nullptr);
203
204 /// Return true if this is a load from a volatile memory location.
205 bool isVolatile() const { return getSubclassData<VolatileField>(); }
206
207 /// Specify whether this is a volatile load or not.
208 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
209
210 /// Return the alignment of the access that is being performed.
211 Align getAlign() const {
212 return Align(1ULL << (getSubclassData<AlignmentField>()));
213 }
214
216 setSubclassData<AlignmentField>(Log2(Align));
217 }
218
219 /// Returns the ordering constraint of this load instruction.
221 return getSubclassData<OrderingField>();
222 }
223 /// Sets the ordering constraint of this load instruction. May not be Release
224 /// or AcquireRelease.
226 setSubclassData<OrderingField>(Ordering);
227 }
228
229 /// Returns the synchronization scope ID of this load instruction.
231 return SSID;
232 }
233
234 /// Sets the synchronization scope ID of this load instruction.
236 this->SSID = SSID;
237 }
238
239 /// Sets the ordering constraint and the synchronization scope ID of this load
240 /// instruction.
243 setOrdering(Ordering);
244 setSyncScopeID(SSID);
245 }
246
247 bool isSimple() const { return !isAtomic() && !isVolatile(); }
248
249 bool isUnordered() const {
252 !isVolatile();
253 }
254
256 const Value *getPointerOperand() const { return getOperand(0); }
257 static unsigned getPointerOperandIndex() { return 0U; }
259
260 /// Returns the address space of the pointer operand.
261 unsigned getPointerAddressSpace() const {
263 }
264
265 // Methods for support type inquiry through isa, cast, and dyn_cast:
266 static bool classof(const Instruction *I) {
267 return I->getOpcode() == Instruction::Load;
268 }
269 static bool classof(const Value *V) {
270 return isa<Instruction>(V) && classof(cast<Instruction>(V));
271 }
272
273private:
274 // Shadow Instruction::setInstructionSubclassData with a private forwarding
275 // method so that subclasses cannot accidentally use it.
276 template <typename Bitfield>
277 void setSubclassData(typename Bitfield::Type Value) {
278 Instruction::setSubclassData<Bitfield>(Value);
279 }
280
281 /// The synchronization scope ID of this load instruction. Not quite enough
282 /// room in SubClassData for everything, so synchronization scope ID gets its
283 /// own field.
284 SyncScope::ID SSID;
285};
286
287//===----------------------------------------------------------------------===//
288// StoreInst Class
289//===----------------------------------------------------------------------===//
290
291/// An instruction for storing to memory.
292class StoreInst : public Instruction {
293 using VolatileField = BoolBitfieldElementT<0>;
296 static_assert(
297 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
298 "Bitfields must be contiguous");
299
300 void AssertOK();
301
302 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
303
304protected:
305 // Note: Instruction needs to be a friend here to call cloneImpl.
306 friend class Instruction;
307
308 StoreInst *cloneImpl() const;
309
310public:
311 StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore);
312 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
313 InsertPosition InsertBefore);
315 InsertPosition InsertBefore = nullptr);
318 InsertPosition InsertBefore = nullptr);
319
320 // allocate space for exactly two operands
321 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
322 void operator delete(void *Ptr) { User::operator delete(Ptr); }
323
324 /// Return true if this is a store to a volatile memory location.
325 bool isVolatile() const { return getSubclassData<VolatileField>(); }
326
327 /// Specify whether this is a volatile store or not.
328 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
329
330 /// Transparently provide more efficient getOperand methods.
332
333 Align getAlign() const {
334 return Align(1ULL << (getSubclassData<AlignmentField>()));
335 }
336
338 setSubclassData<AlignmentField>(Log2(Align));
339 }
340
341 /// Returns the ordering constraint of this store instruction.
343 return getSubclassData<OrderingField>();
344 }
345
346 /// Sets the ordering constraint of this store instruction. May not be
347 /// Acquire or AcquireRelease.
349 setSubclassData<OrderingField>(Ordering);
350 }
351
352 /// Returns the synchronization scope ID of this store instruction.
354 return SSID;
355 }
356
357 /// Sets the synchronization scope ID of this store instruction.
359 this->SSID = SSID;
360 }
361
362 /// Sets the ordering constraint and the synchronization scope ID of this
363 /// store instruction.
366 setOrdering(Ordering);
367 setSyncScopeID(SSID);
368 }
369
370 bool isSimple() const { return !isAtomic() && !isVolatile(); }
371
372 bool isUnordered() const {
375 !isVolatile();
376 }
377
379 const Value *getValueOperand() const { return getOperand(0); }
380
382 const Value *getPointerOperand() const { return getOperand(1); }
383 static unsigned getPointerOperandIndex() { return 1U; }
385
386 /// Returns the address space of the pointer operand.
387 unsigned getPointerAddressSpace() const {
389 }
390
391 // Methods for support type inquiry through isa, cast, and dyn_cast:
392 static bool classof(const Instruction *I) {
393 return I->getOpcode() == Instruction::Store;
394 }
395 static bool classof(const Value *V) {
396 return isa<Instruction>(V) && classof(cast<Instruction>(V));
397 }
398
399private:
400 // Shadow Instruction::setInstructionSubclassData with a private forwarding
401 // method so that subclasses cannot accidentally use it.
402 template <typename Bitfield>
403 void setSubclassData(typename Bitfield::Type Value) {
404 Instruction::setSubclassData<Bitfield>(Value);
405 }
406
407 /// The synchronization scope ID of this store instruction. Not quite enough
408 /// room in SubClassData for everything, so synchronization scope ID gets its
409 /// own field.
410 SyncScope::ID SSID;
411};
412
413template <>
414struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
415};
416
418
419//===----------------------------------------------------------------------===//
420// FenceInst Class
421//===----------------------------------------------------------------------===//
422
423/// An instruction for ordering other memory operations.
424class FenceInst : public Instruction {
425 using OrderingField = AtomicOrderingBitfieldElementT<0>;
426
427 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
428
429 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
430
431protected:
432 // Note: Instruction needs to be a friend here to call cloneImpl.
433 friend class Instruction;
434
435 FenceInst *cloneImpl() const;
436
437public:
438 // Ordering may only be Acquire, Release, AcquireRelease, or
439 // SequentiallyConsistent.
442 InsertPosition InsertBefore = nullptr);
443
444 // allocate space for exactly zero operands
445 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
446 void operator delete(void *Ptr) { User::operator delete(Ptr); }
447
448 /// Returns the ordering constraint of this fence instruction.
450 return getSubclassData<OrderingField>();
451 }
452
453 /// Sets the ordering constraint of this fence instruction. May only be
454 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
456 setSubclassData<OrderingField>(Ordering);
457 }
458
459 /// Returns the synchronization scope ID of this fence instruction.
461 return SSID;
462 }
463
464 /// Sets the synchronization scope ID of this fence instruction.
466 this->SSID = SSID;
467 }
468
469 // Methods for support type inquiry through isa, cast, and dyn_cast:
470 static bool classof(const Instruction *I) {
471 return I->getOpcode() == Instruction::Fence;
472 }
473 static bool classof(const Value *V) {
474 return isa<Instruction>(V) && classof(cast<Instruction>(V));
475 }
476
477private:
478 // Shadow Instruction::setInstructionSubclassData with a private forwarding
479 // method so that subclasses cannot accidentally use it.
480 template <typename Bitfield>
481 void setSubclassData(typename Bitfield::Type Value) {
482 Instruction::setSubclassData<Bitfield>(Value);
483 }
484
485 /// The synchronization scope ID of this fence instruction. Not quite enough
486 /// room in SubClassData for everything, so synchronization scope ID gets its
487 /// own field.
488 SyncScope::ID SSID;
489};
490
491//===----------------------------------------------------------------------===//
492// AtomicCmpXchgInst Class
493//===----------------------------------------------------------------------===//
494
495/// An instruction that atomically checks whether a
496/// specified value is in a memory location, and, if it is, stores a new value
497/// there. The value returned by this instruction is a pair containing the
498/// original value as first element, and an i1 indicating success (true) or
499/// failure (false) as second element.
500///
502 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
503 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
504 SyncScope::ID SSID);
505
506 template <unsigned Offset>
507 using AtomicOrderingBitfieldElement =
510
511 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
512
513protected:
514 // Note: Instruction needs to be a friend here to call cloneImpl.
515 friend class Instruction;
516
518
519public:
520 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
521 AtomicOrdering SuccessOrdering,
522 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
523 InsertPosition InsertBefore = nullptr);
524
525 // allocate space for exactly three operands
526 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
527 void operator delete(void *Ptr) { User::operator delete(Ptr); }
528
537 static_assert(
540 "Bitfields must be contiguous");
541
542 /// Return the alignment of the memory that is being allocated by the
543 /// instruction.
544 Align getAlign() const {
545 return Align(1ULL << getSubclassData<AlignmentField>());
546 }
547
549 setSubclassData<AlignmentField>(Log2(Align));
550 }
551
552 /// Return true if this is a cmpxchg from a volatile memory
553 /// location.
554 ///
555 bool isVolatile() const { return getSubclassData<VolatileField>(); }
556
557 /// Specify whether this is a volatile cmpxchg.
558 ///
559 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
560
561 /// Return true if this cmpxchg may spuriously fail.
562 bool isWeak() const { return getSubclassData<WeakField>(); }
563
564 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
565
566 /// Transparently provide more efficient getOperand methods.
568
570 return Ordering != AtomicOrdering::NotAtomic &&
571 Ordering != AtomicOrdering::Unordered;
572 }
573
575 return Ordering != AtomicOrdering::NotAtomic &&
576 Ordering != AtomicOrdering::Unordered &&
577 Ordering != AtomicOrdering::AcquireRelease &&
578 Ordering != AtomicOrdering::Release;
579 }
580
581 /// Returns the success ordering constraint of this cmpxchg instruction.
583 return getSubclassData<SuccessOrderingField>();
584 }
585
586 /// Sets the success ordering constraint of this cmpxchg instruction.
588 assert(isValidSuccessOrdering(Ordering) &&
589 "invalid CmpXchg success ordering");
590 setSubclassData<SuccessOrderingField>(Ordering);
591 }
592
593 /// Returns the failure ordering constraint of this cmpxchg instruction.
595 return getSubclassData<FailureOrderingField>();
596 }
597
598 /// Sets the failure ordering constraint of this cmpxchg instruction.
600 assert(isValidFailureOrdering(Ordering) &&
601 "invalid CmpXchg failure ordering");
602 setSubclassData<FailureOrderingField>(Ordering);
603 }
604
605 /// Returns a single ordering which is at least as strong as both the
606 /// success and failure orderings for this cmpxchg.
615 }
616 return getSuccessOrdering();
617 }
618
619 /// Returns the synchronization scope ID of this cmpxchg instruction.
621 return SSID;
622 }
623
624 /// Sets the synchronization scope ID of this cmpxchg instruction.
626 this->SSID = SSID;
627 }
628
630 const Value *getPointerOperand() const { return getOperand(0); }
631 static unsigned getPointerOperandIndex() { return 0U; }
632
634 const Value *getCompareOperand() const { return getOperand(1); }
635
637 const Value *getNewValOperand() const { return getOperand(2); }
638
639 /// Returns the address space of the pointer operand.
640 unsigned getPointerAddressSpace() const {
642 }
643
644 /// Returns the strongest permitted ordering on failure, given the
645 /// desired ordering on success.
646 ///
647 /// If the comparison in a cmpxchg operation fails, there is no atomic store
648 /// so release semantics cannot be provided. So this function drops explicit
649 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
650 /// operation would remain SequentiallyConsistent.
651 static AtomicOrdering
653 switch (SuccessOrdering) {
654 default:
655 llvm_unreachable("invalid cmpxchg success ordering");
664 }
665 }
666
667 // Methods for support type inquiry through isa, cast, and dyn_cast:
668 static bool classof(const Instruction *I) {
669 return I->getOpcode() == Instruction::AtomicCmpXchg;
670 }
671 static bool classof(const Value *V) {
672 return isa<Instruction>(V) && classof(cast<Instruction>(V));
673 }
674
675private:
676 // Shadow Instruction::setInstructionSubclassData with a private forwarding
677 // method so that subclasses cannot accidentally use it.
678 template <typename Bitfield>
679 void setSubclassData(typename Bitfield::Type Value) {
680 Instruction::setSubclassData<Bitfield>(Value);
681 }
682
683 /// The synchronization scope ID of this cmpxchg instruction. Not quite
684 /// enough room in SubClassData for everything, so synchronization scope ID
685 /// gets its own field.
686 SyncScope::ID SSID;
687};
688
689template <>
691 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
692};
693
695
696//===----------------------------------------------------------------------===//
697// AtomicRMWInst Class
698//===----------------------------------------------------------------------===//
699
700/// an instruction that atomically reads a memory location,
701/// combines it with another value, and then stores the result back. Returns
702/// the old value.
703///
705protected:
706 // Note: Instruction needs to be a friend here to call cloneImpl.
707 friend class Instruction;
708
709 AtomicRMWInst *cloneImpl() const;
710
711public:
712 /// This enumeration lists the possible modifications atomicrmw can make. In
713 /// the descriptions, 'p' is the pointer to the instruction's memory location,
714 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
715 /// instruction. These instructions always return 'old'.
716 enum BinOp : unsigned {
717 /// *p = v
719 /// *p = old + v
721 /// *p = old - v
723 /// *p = old & v
725 /// *p = ~(old & v)
727 /// *p = old | v
729 /// *p = old ^ v
731 /// *p = old >signed v ? old : v
733 /// *p = old <signed v ? old : v
735 /// *p = old >unsigned v ? old : v
737 /// *p = old <unsigned v ? old : v
739
740 /// *p = old + v
742
743 /// *p = old - v
745
746 /// *p = maxnum(old, v)
747 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
749
750 /// *p = minnum(old, v)
751 /// \p minnum matches the behavior of \p llvm.minnum.*.
753
754 /// Increment one up to a maximum value.
755 /// *p = (old u>= v) ? 0 : (old + 1)
757
758 /// Decrement one until a minimum value or zero.
759 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
761
762 /// Subtract only if no unsigned overflow.
763 /// *p = (old u>= v) ? old - v : old
765
766 /// *p = usub.sat(old, v)
767 /// \p usub.sat matches the behavior of \p llvm.usub.sat.*.
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = USubSat,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
780
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
784
785 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
786
787public:
788 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
789 AtomicOrdering Ordering, SyncScope::ID SSID,
790 InsertPosition InsertBefore = nullptr);
791
792 // allocate space for exactly two operands
793 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
794 void operator delete(void *Ptr) { User::operator delete(Ptr); }
795
799 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
803 "Bitfields must be contiguous");
804
805 BinOp getOperation() const { return getSubclassData<OperationField>(); }
806
807 static StringRef getOperationName(BinOp Op);
808
809 static bool isFPOperation(BinOp Op) {
810 switch (Op) {
815 return true;
816 default:
817 return false;
818 }
819 }
820
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
845
846 /// Returns the ordering constraint of this rmw instruction.
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
853 assert(Ordering != AtomicOrdering::NotAtomic &&
854 "atomicrmw instructions can only be atomic.");
855 assert(Ordering != AtomicOrdering::Unordered &&
856 "atomicrmw instructions cannot be unordered.");
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
880 }
881
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
910
911template <>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
917
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
926 assert(Ty && "Invalid GetElementPtrInst indices for type!");
927 return Ty;
928}
929
930/// an instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs
932///
934 Type *SourceElementType;
935 Type *ResultElementType;
936
938
939 /// Constructors - Create a getelementptr instruction with a base pointer an
940 /// list of indices. The first and second ctor can optionally insert before an
941 /// existing instruction, the third appends the new instruction to the
942 /// specified BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
945 const Twine &NameStr, InsertPosition InsertBefore);
946
947 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
948
949protected:
950 // Note: Instruction needs to be a friend here to call cloneImpl.
951 friend class Instruction;
952
954
955public:
956 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
957 ArrayRef<Value *> IdxList,
958 const Twine &NameStr = "",
959 InsertPosition InsertBefore = nullptr) {
960 unsigned Values = 1 + unsigned(IdxList.size());
961 assert(PointeeType && "Must specify element type");
962 IntrusiveOperandsAllocMarker AllocMarker{Values};
963 return new (AllocMarker) GetElementPtrInst(
964 PointeeType, Ptr, IdxList, AllocMarker, NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
969 const Twine &NameStr = "",
970 InsertPosition InsertBefore = nullptr) {
972 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
973 GEP->setNoWrapFlags(NW);
974 return GEP;
975 }
976
977 /// Create an "inbounds" getelementptr. See the documentation for the
978 /// "inbounds" flag in LangRef.html for details.
979 static GetElementPtrInst *
981 const Twine &NameStr = "",
982 InsertPosition InsertBefore = nullptr) {
983 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
984 NameStr, InsertBefore);
985 }
986
987 /// Transparently provide more efficient getOperand methods.
989
990 Type *getSourceElementType() const { return SourceElementType; }
991
992 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
993 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
994
996 return ResultElementType;
997 }
998
999 /// Returns the address space of this instruction's pointer type.
1000 unsigned getAddressSpace() const {
1001 // Note that this is always the same as the pointer operand's address space
1002 // and that is cheaper to compute, so cheat here.
1003 return getPointerAddressSpace();
1004 }
1005
1006 /// Returns the result type of a getelementptr with the given source
1007 /// element type and indexes.
1008 ///
1009 /// Null is returned if the indices are invalid for the specified
1010 /// source element type.
1011 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1012 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1013 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1014
// NOTE(review): this extract dropped several original declaration lines
// (visible as gaps in the embedded line numbers). Code below is verbatim;
// verify gaps against upstream llvm/IR/Instructions.h before editing.
1015 /// Return the type of the element at the given index of an indexable
1016 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1017 ///
1018 /// Returns null if the type can't be indexed, or the given index is not
1019 /// legal for the given type.
1020 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1021 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1022
 // Iterators over the index operands: operand 0 is the base pointer, so the
 // GEP indices occupy operands 1..N (see getNumIndices() below).
1023 inline op_iterator idx_begin() { return op_begin()+1; }
1024 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1025 inline op_iterator idx_end() { return op_end(); }
1026 inline const_op_iterator idx_end() const { return op_end(); }
1027
 // NOTE(review): the signatures of the two indices() range accessors
 // (original lines 1028/1032) were lost in extraction; both surviving
 // bodies return the [idx_begin, idx_end) operand range.
1029 return make_range(idx_begin(), idx_end());
1030 }
1031
1033 return make_range(idx_begin(), idx_end());
1034 }
1035
 // NOTE(review): the non-const getPointerOperand() signature (line 1036)
 // was lost in extraction; both overloads return operand 0, the base pointer.
1037 return getOperand(0);
1038 }
1039 const Value *getPointerOperand() const {
1040 return getOperand(0);
1041 }
1042 static unsigned getPointerOperandIndex() {
1043 return 0U; // get index for modifying correct operand.
1044 }
1045
1046 /// Method to return the pointer operand as a
1047 /// PointerType.
 // NOTE(review): signature line (1048) lost in extraction.
1049 return getPointerOperand()->getType();
1050 }
1051
1052 /// Returns the address space of the pointer operand.
1053 unsigned getPointerAddressSpace() const {
 // NOTE(review): the body line (1054) was lost in extraction — TODO restore
 // from upstream.
1055 }
1056
1057 /// Returns the pointer type returned by the GEP
1058 /// instruction, which may be a vector of pointers.
 // NOTE(review): signature line (1059) lost in extraction — presumably the
 // static getGEPReturnType(Ptr, IdxList) helper used by the constructor
 // below; the result is a vector type if the base pointer or any index is a
 // vector, otherwise the scalar pointer type.
1060 // Vector GEP
1061 Type *Ty = Ptr->getType();
1062 if (Ty->isVectorTy())
1063 return Ty;
1064
1065 for (Value *Index : IdxList)
1066 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1067 ElementCount EltCount = IndexVTy->getElementCount();
1068 return VectorType::get(Ty, EltCount);
1069 }
1070 // Scalar GEP
1071 return Ty;
1072 }
1073
 // Number of index operands: total operands minus the base pointer.
1074 unsigned getNumIndices() const { // Note: always non-negative
1075 return getNumOperands() - 1;
1076 }
1077
1078 bool hasIndices() const {
1079 return getNumOperands() > 1;
1080 }
1081
1082 /// Return true if all of the indices of this GEP are
1083 /// zeros. If so, the result pointer and the first operand have the same
1084 /// value, just potentially different types.
1085 bool hasAllZeroIndices() const;
1086
1087 /// Return true if all of the indices of this GEP are
1088 /// constant integers. If so, the result pointer and the first operand have
1089 /// a constant offset between them.
1090 bool hasAllConstantIndices() const;
1091
1092 /// Set nowrap flags for GEP instruction.
 // NOTE(review): the setNoWrapFlags declaration (line 1093) was lost in
 // extraction.
1094
1095 /// Set or clear the inbounds flag on this GEP instruction.
1096 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1097 /// TODO: Remove this method in favor of setNoWrapFlags().
1098 void setIsInBounds(bool b = true);
1099
1100 /// Get the nowrap flags for the GEP instruction.
 // NOTE(review): the getNoWrapFlags declaration (line 1101) was lost in
 // extraction.
1102
1103 /// Determine whether the GEP has the inbounds flag.
1104 bool isInBounds() const;
1105
1106 /// Determine whether the GEP has the nusw flag.
1107 bool hasNoUnsignedSignedWrap() const;
1108
1109 /// Determine whether the GEP has the nuw flag.
1110 bool hasNoUnsignedWrap() const;
1111
1112 /// Accumulate the constant address offset of this GEP if possible.
1113 ///
1114 /// This routine accepts an APInt into which it will accumulate the constant
1115 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1116 /// all-constant, it returns false and the value of the offset APInt is
1117 /// undefined (it is *not* preserved!). The APInt passed into this routine
1118 /// must be at least as wide as the IntPtr type for the address space of
1119 /// the base GEP pointer.
1120 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1121 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1122 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1123 APInt &ConstantOffset) const;
1124 // Methods for support type inquiry through isa, cast, and dyn_cast:
1125 static bool classof(const Instruction *I) {
1126 return (I->getOpcode() == Instruction::GetElementPtr);
1127 }
1128 static bool classof(const Value *V) {
1129 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1130 }
1131};
1132
// OperandTraits specialization: GEP has a variable number of operands
// (pointer + indices), so it uses the variadic operand-trait scheme.
// NOTE(review): the `struct OperandTraits<GetElementPtrInst>` declaration
// line (1134) was lost in extraction; only the base-clause line survives.
1133template <>
1135 : public VariadicOperandTraits<GetElementPtrInst> {};
1136
// Out-of-line inline definition of the GEP constructor. Computes the result
// type from the base pointer and index list (scalar or vector of pointers),
// caches the source element type and the indexed result element type, then
// fills in the operands and name via init().
1137GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1138 ArrayRef<Value *> IdxList,
1139 AllocInfo AllocInfo, const Twine &NameStr,
1140 InsertPosition InsertBefore)
1141 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, AllocInfo,
1142 InsertBefore),
1143 SourceElementType(PointeeType),
1144 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1145 init(Ptr, IdxList, NameStr);
1146}
1147
1148DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1149
1150//===----------------------------------------------------------------------===//
1151// ICmpInst Class
1152//===----------------------------------------------------------------------===//
1153
1154/// This instruction compares its operands according to the predicate given
1155/// to the constructor. It only operates on integers or pointers. The operands
1156/// must be identical types.
1157/// Represent an integer comparison operator.
1158class ICmpInst: public CmpInst {
 // NOTE(review): this extract dropped several declaration lines (gaps in the
 // embedded numbering, e.g. 1195, 1208, 1214, 1219, 1225, 1230, 1236, 1242,
 // 1250, 1261, 1277, 1360). Code below is otherwise verbatim; verify gaps
 // against upstream llvm/IR/Instructions.h before editing.
 // Debug-only sanity checks run from the constructors: predicate must be an
 // integer predicate and both operands must be int/int-vector or
 // ptr/ptr-vector of the same type.
1159 void AssertOK() {
1160 assert(isIntPredicate() &&
1161 "Invalid ICmp predicate value");
1162 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1163 "Both operands to ICmp instruction are not of the same type!");
1164 // Check that the operands are the right type
1165 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1166 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1167 "Invalid operand types for ICmp instruction");
1168 }
1169
 // Bit within SubclassOptionalData used to store the `samesign` flag
 // (see setSameSign / hasSameSign below).
1170 enum { SameSign = (1 << 0) };
1171
1172protected:
1173 // Note: Instruction needs to be a friend here to call cloneImpl.
1174 friend class Instruction;
1175
1176 /// Clone an identical ICmpInst
1177 ICmpInst *cloneImpl() const;
1178
1179public:
1180 /// Constructor with insertion semantics.
1181 ICmpInst(InsertPosition InsertBefore, ///< Where to insert
1182 Predicate pred, ///< The predicate to use for the comparison
1183 Value *LHS, ///< The left-hand-side of the expression
1184 Value *RHS, ///< The right-hand-side of the expression
1185 const Twine &NameStr = "" ///< Name of the instruction
1186 )
1187 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::ICmp, pred, LHS,
1188 RHS, NameStr, InsertBefore) {
1189#ifndef NDEBUG
1190 AssertOK();
1191#endif
1192 }
1193
1194 /// Constructor with no-insertion semantics
 // NOTE(review): the first line of this constructor's signature (line 1195)
 // was lost in extraction.
1196 Predicate pred, ///< The predicate to use for the comparison
1197 Value *LHS, ///< The left-hand-side of the expression
1198 Value *RHS, ///< The right-hand-side of the expression
1199 const Twine &NameStr = "" ///< Name of the instruction
1200 ) : CmpInst(makeCmpResultType(LHS->getType()),
1201 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1202#ifndef NDEBUG
1203 AssertOK();
1204#endif
1205 }
1206
1207 /// @returns the predicate along with samesign information.
 // NOTE(review): the signatures of the CmpPredicate-returning accessors in
 // this group (getCmpPredicate and the inverse/swapped/non-strict variants)
 // were lost in extraction; only their bodies survive.
1209 return {getPredicate(), hasSameSign()};
1210 }
1211
1212 /// @returns the inverse predicate along with samesign information: static
1213 /// variant.
1215 return {getInversePredicate(Pred), Pred.hasSameSign()};
1216 }
1217
1218 /// @returns the inverse predicate along with samesign information.
1220 return getInverseCmpPredicate(getCmpPredicate());
1221 }
1222
1223 /// @returns the swapped predicate along with samesign information: static
1224 /// variant.
1226 return {getSwappedPredicate(Pred), Pred.hasSameSign()};
1227 }
1228
1229 /// @returns the swapped predicate along with samesign information.
1231 return getSwappedCmpPredicate(getCmpPredicate());
1232 }
1233
1234 /// @returns the non-strict predicate along with samesign information: static
1235 /// variant.
1237 return {getNonStrictPredicate(Pred), Pred.hasSameSign()};
1238 }
1239
1240 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
1241 /// @returns the non-strict predicate along with samesign information.
1243 return getNonStrictCmpPredicate(getCmpPredicate());
1244 }
1245
1246 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1247 /// @returns the predicate that would be the result if the operand were
1248 /// regarded as signed.
1249 /// Return the signed version of the predicate.
1251 return getSignedPredicate(getPredicate());
1252 }
1253
1254 /// Return the signed version of the predicate: static variant.
1255 static Predicate getSignedPredicate(Predicate Pred);
1256
1257 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1258 /// @returns the predicate that would be the result if the operand were
1259 /// regarded as unsigned.
1260 /// Return the unsigned version of the predicate.
1262 return getUnsignedPredicate(getPredicate());
1263 }
1264
1265 /// Return the unsigned version of the predicate: static variant.
1266 static Predicate getUnsignedPredicate(Predicate Pred);
1267
1268 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1269 /// @returns the unsigned version of the signed predicate pred or
1270 /// the signed version of the signed predicate pred.
1271 /// Static variant.
1272 static Predicate getFlippedSignednessPredicate(Predicate Pred);
1273
1274 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1275 /// @returns the unsigned version of the signed predicate pred or
1276 /// the signed version of the signed predicate pred.
1278 return getFlippedSignednessPredicate(getPredicate());
1279 }
1280
1281 /// Determine if Pred1 implies Pred2 is true, false, or if nothing can be
1282 /// inferred about the implication, when two compares have matching operands.
1283 static std::optional<bool> isImpliedByMatchingCmp(CmpPredicate Pred1,
1284 CmpPredicate Pred2);
1285
 // Branch-free update of the SameSign bit: clear it, then OR it back in
 // iff B is true (B * SameSign is SameSign when B, 0 otherwise).
1286 void setSameSign(bool B = true) {
1287 SubclassOptionalData = (SubclassOptionalData & ~SameSign) | (B * SameSign);
1288 }
1289
1290 /// An icmp instruction, which can be marked as "samesign", indicating that
1291 /// the two operands have the same sign. This means that we can convert
1292 /// "slt" to "ult" and vice versa, which enables more optimizations.
1293 bool hasSameSign() const { return SubclassOptionalData & SameSign; }
1294
1295 /// Return true if this predicate is either EQ or NE. This also
1296 /// tests for commutativity.
1297 static bool isEquality(Predicate P) {
1298 return P == ICMP_EQ || P == ICMP_NE;
1299 }
1300
1301 /// Return true if this predicate is either EQ or NE. This also
1302 /// tests for commutativity.
1303 bool isEquality() const {
1304 return isEquality(getPredicate());
1305 }
1306
1307 /// @returns true if the predicate is commutative
1308 /// Determine if this relation is commutative.
1309 static bool isCommutative(Predicate P) { return isEquality(P); }
1310
1311 /// @returns true if the predicate of this ICmpInst is commutative
1312 /// Determine if this relation is commutative.
1313 bool isCommutative() const { return isCommutative(getPredicate()); }
1314
1315 /// Return true if the predicate is relational (not EQ or NE).
1316 ///
1317 bool isRelational() const {
1318 return !isEquality();
1319 }
1320
1321 /// Return true if the predicate is relational (not EQ or NE).
1322 ///
1323 static bool isRelational(Predicate P) {
1324 return !isEquality(P);
1325 }
1326
1327 /// Return true if the predicate is SGT or UGT.
1328 ///
1329 static bool isGT(Predicate P) {
1330 return P == ICMP_SGT || P == ICMP_UGT;
1331 }
1332
1333 /// Return true if the predicate is SLT or ULT.
1334 ///
1335 static bool isLT(Predicate P) {
1336 return P == ICMP_SLT || P == ICMP_ULT;
1337 }
1338
1339 /// Return true if the predicate is SGE or UGE.
1340 ///
1341 static bool isGE(Predicate P) {
1342 return P == ICMP_SGE || P == ICMP_UGE;
1343 }
1344
1345 /// Return true if the predicate is SLE or ULE.
1346 ///
1347 static bool isLE(Predicate P) {
1348 return P == ICMP_SLE || P == ICMP_ULE;
1349 }
1350
1351 /// Returns the sequence of all ICmp predicates.
1352 ///
1353 static auto predicates() { return ICmpPredicates(); }
1354
1355 /// Exchange the two operands to this instruction in such a way that it does
1356 /// not modify the semantics of the instruction. The predicate value may be
1357 /// changed to retain the same result if the predicate is order dependent
1358 /// (e.g. ult).
1359 /// Swap operands and adjust predicate.
 // NOTE(review): the swapOperands() signature (line 1360) was lost in
 // extraction; the body swaps Op<0>/Op<1> and installs the swapped predicate.
1361 setPredicate(getSwappedPredicate());
1362 Op<0>().swap(Op<1>());
1363 }
1364
1365 /// Return result of `LHS Pred RHS` comparison.
1366 static bool compare(const APInt &LHS, const APInt &RHS,
1367 ICmpInst::Predicate Pred);
1368
1369 /// Return result of `LHS Pred RHS`, if it can be determined from the
1370 /// KnownBits. Otherwise return nullopt.
1371 static std::optional<bool> compare(const KnownBits &LHS, const KnownBits &RHS,
1372 ICmpInst::Predicate Pred);
1373
1374 // Methods for support type inquiry through isa, cast, and dyn_cast:
1375 static bool classof(const Instruction *I) {
1376 return I->getOpcode() == Instruction::ICmp;
1377 }
1378 static bool classof(const Value *V) {
1379 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1380 }
1381};
1382
1383//===----------------------------------------------------------------------===//
1384// FCmpInst Class
1385//===----------------------------------------------------------------------===//
1386
1387/// This instruction compares its operands according to the predicate given
1388/// to the constructor. It only operates on floating point values or packed
1389/// vectors of floating point values. The operands must be identical types.
1390/// Represents a floating point comparison operator.
1217 is replaced as follows:
1484
1485//===----------------------------------------------------------------------===//
1486/// This class represents a function call, abstracting a target
1487/// machine's calling convention. This class uses the low bit of the
1488/// SubClassData field to indicate whether or not this is a tail call. The
1489/// rest of the bits hold the calling convention of the call.
1490///
1491class CallInst : public CallBase {
 // NOTE(review): this extract dropped several declaration lines (gaps in the
 // embedded numbering, e.g. 1492, 1533, 1541, 1559, 1567, 1580, 1585-1590,
 // 1592, 1597, 1602, 1610, 1615, 1623, 1642). Code below is otherwise
 // verbatim; verify gaps against upstream llvm/IR/Instructions.h.
1493
1494 /// Construct a CallInst from a range of arguments
1495 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1496 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1497 AllocInfo AllocInfo, InsertPosition InsertBefore);
1498
 // Delegating constructor: no operand bundles.
1499 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1500 const Twine &NameStr, AllocInfo AllocInfo,
1501 InsertPosition InsertBefore)
1502 : CallInst(Ty, Func, Args, {}, NameStr, AllocInfo, InsertBefore) {}
1503
1504 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1505 AllocInfo AllocInfo, InsertPosition InsertBefore);
1506
1507 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1508 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1509 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1510
1511 /// Compute the number of operands to allocate.
1512 static unsigned ComputeNumOperands(unsigned NumArgs,
1513 unsigned NumBundleInputs = 0) {
1514 // We need one operand for the called function, plus the input operand
1515 // counts provided.
1516 return 1 + NumArgs + NumBundleInputs;
1517 }
1518
1519protected:
1520 // Note: Instruction needs to be a friend here to call cloneImpl.
1521 friend class Instruction;
1522
1523 CallInst *cloneImpl() const;
1524
1525public:
 // Factory: call with no arguments.
1526 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1527 InsertPosition InsertBefore = nullptr) {
1528 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(0)};
1529 return new (AllocMarker)
1530 CallInst(Ty, F, NameStr, AllocMarker, InsertBefore);
1531 }
1532
 // Factory: call with arguments, no bundles.
 // NOTE(review): the Create(...) signature line (1533) was lost in extraction.
1534 const Twine &NameStr,
1535 InsertPosition InsertBefore = nullptr) {
1536 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(Args.size())};
1537 return new (AllocMarker)
1538 CallInst(Ty, Func, Args, {}, NameStr, AllocMarker, InsertBefore);
1539 }
1540
 // Factory: call with arguments and operand bundles. The descriptor marker
 // also reserves space for the per-bundle BundleOpInfo records.
 // NOTE(review): the Create(...) signature line (1541) was lost in extraction.
1542 ArrayRef<OperandBundleDef> Bundles = {},
1543 const Twine &NameStr = "",
1544 InsertPosition InsertBefore = nullptr) {
1545 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
1546 ComputeNumOperands(unsigned(Args.size()), CountBundleInputs(Bundles)),
1547 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
1548
1549 return new (AllocMarker)
1550 CallInst(Ty, Func, Args, Bundles, NameStr, AllocMarker, InsertBefore);
1551 }
1552
 // FunctionCallee convenience overloads: unpack type + callee and forward.
1553 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1554 InsertPosition InsertBefore = nullptr) {
1555 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1556 InsertBefore);
1557 }
1558
 // NOTE(review): signature line (1559) lost in extraction.
1560 ArrayRef<OperandBundleDef> Bundles = {},
1561 const Twine &NameStr = "",
1562 InsertPosition InsertBefore = nullptr) {
1563 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1564 NameStr, InsertBefore);
1565 }
1566
 // NOTE(review): signature line (1567) lost in extraction.
1568 const Twine &NameStr,
1569 InsertPosition InsertBefore = nullptr) {
1570 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1571 InsertBefore);
1572 }
1573
1574 /// Create a clone of \p CI with a different set of operand bundles and
1575 /// insert it before \p InsertBefore.
1576 ///
1577 /// The returned call instruction is identical \p CI in every way except that
1578 /// the operand bundles for the new instruction are set to the operand bundles
1579 /// in \p Bundles.
 // NOTE(review): the Create(CallInst *CI, ...) signature line (1580) was lost
 // in extraction.
1581 InsertPosition InsertPt = nullptr);
1582
1583 // Note that 'musttail' implies 'tail'.
 // NOTE(review): the enumerator lines of TailCallKind (1585-1590) were lost
 // in extraction; the methods below reference TCK_NoTail, TCK_Tail and
 // TCK_MustTail.
1584 enum TailCallKind : unsigned {
1591
1593 static_assert(
1594 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1595 "Bitfields must be contiguous");
1596
 // NOTE(review): the getTailCallKind() signature line (1597) was lost in
 // extraction; the tail-call kind is stored in the subclass-data bitfield.
1598 return getSubclassData<TailCallKindField>();
1599 }
1600
1601 bool isTailCall() const {
 // NOTE(review): the local `Kind` initialization line (1602) was lost in
 // extraction — presumably Kind = getTailCallKind().
1603 return Kind == TCK_Tail || Kind == TCK_MustTail;
1604 }
1605
1606 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1607
1608 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1609
 // NOTE(review): the setTailCallKind(TailCallKind TCK) signature line (1610)
 // was lost in extraction.
1611 setSubclassData<TailCallKindField>(TCK);
1612 }
1613
1614 void setTailCall(bool IsTc = true) {
 // NOTE(review): the body line (1615) was lost in extraction.
1616 }
1617
1618 /// Return true if the call can return twice
1619 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1620 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1621
1622 /// Return true if the call is for a noreturn trap intrinsic.
 // NOTE(review): signature line (1623) lost in extraction. A trap-func-name
 // attribute turns the trap into an ordinary call, hence the negation.
1624 switch (getIntrinsicID()) {
1625 case Intrinsic::trap:
1626 case Intrinsic::ubsantrap:
1627 return !hasFnAttr("trap-func-name");
1628 default:
1629 return false;
1630 }
1631 }
1632
1633 // Methods for support type inquiry through isa, cast, and dyn_cast:
1634 static bool classof(const Instruction *I) {
1635 return I->getOpcode() == Instruction::Call;
1636 }
1637 static bool classof(const Value *V) {
1638 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1639 }
1640
1641 /// Updates profile metadata by scaling it by \p S / \p T.
 // NOTE(review): the updateProfWeight declaration (line 1642) was lost in
 // extraction.
1643
1644private:
1645 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1646 // method so that subclasses cannot accidentally use it.
1647 template <typename Bitfield>
1648 void setSubclassData(typename Bitfield::Type Value) {
1649 Instruction::setSubclassData<Bitfield>(Value);
1650 }
1651};
1652
// Out-of-line inline definition of the primary CallInst constructor.
// Verifies that the caller allocated exactly one operand slot per argument
// and bundle input plus one for the callee, then delegates operand/bundle
// setup to init().
1653CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1654 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1655 AllocInfo AllocInfo, InsertPosition InsertBefore)
1656 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
1657 InsertBefore) {
1658 assert(AllocInfo.NumOps ==
1659 unsigned(Args.size() + CountBundleInputs(Bundles) + 1));
1660 init(Ty, Func, Args, Bundles, NameStr);
1661}
1662
1663//===----------------------------------------------------------------------===//
1664// SelectInst Class
1665//===----------------------------------------------------------------------===//
1666
1667/// This class represents the LLVM 'select' instruction.
1668///
1669class SelectInst : public Instruction {
 // NOTE(review): this extract dropped a few declaration lines (1694, 1725,
 // 1727). Code below is otherwise verbatim; verify gaps against upstream
 // llvm/IR/Instructions.h.
 // Fixed operand layout: Op<0> = condition, Op<1> = true value,
 // Op<2> = false value.
1670 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1671
 // Result type is the type of the true value (init() asserts that the
 // operand combination is valid).
1672 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1673 InsertPosition InsertBefore)
1674 : Instruction(S1->getType(), Instruction::Select, AllocMarker,
1675 InsertBefore) {
1676 init(C, S1, S2);
1677 setName(NameStr);
1678 }
1679
1680 void init(Value *C, Value *S1, Value *S2) {
1681 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1682 Op<0>() = C;
1683 Op<1>() = S1;
1684 Op<2>() = S2;
1685 }
1686
1687protected:
1688 // Note: Instruction needs to be a friend here to call cloneImpl.
1689 friend class Instruction;
1690
1691 SelectInst *cloneImpl() const;
1692
1693public:
 // Factory; optionally copies metadata from MDFrom onto the new select.
 // NOTE(review): the Create(...) signature line (1694) was lost in
 // extraction.
1695 const Twine &NameStr = "",
1696 InsertPosition InsertBefore = nullptr,
1697 Instruction *MDFrom = nullptr) {
1698 SelectInst *Sel =
1699 new (AllocMarker) SelectInst(C, S1, S2, NameStr, InsertBefore);
1700 if (MDFrom)
1701 Sel->copyMetadata(*MDFrom);
1702 return Sel;
1703 }
1704
1705 const Value *getCondition() const { return Op<0>(); }
1706 const Value *getTrueValue() const { return Op<1>(); }
1707 const Value *getFalseValue() const { return Op<2>(); }
1708 Value *getCondition() { return Op<0>(); }
1709 Value *getTrueValue() { return Op<1>(); }
1710 Value *getFalseValue() { return Op<2>(); }
1711
1712 void setCondition(Value *V) { Op<0>() = V; }
1713 void setTrueValue(Value *V) { Op<1>() = V; }
1714 void setFalseValue(Value *V) { Op<2>() = V; }
1715
1716 /// Swap the true and false values of the select instruction.
1717 /// This doesn't swap prof metadata.
1718 void swapValues() { Op<1>().swap(Op<2>()); }
1719
1720 /// Return a string if the specified operands are invalid
1721 /// for a select operation, otherwise return null.
1722 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1723
1724 /// Transparently provide more efficient getOperand methods.
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS line (1725) and
 // the getOpcode() signature (1727) were lost in extraction.
1726
1728 return static_cast<OtherOps>(Instruction::getOpcode());
1729 }
1730
1731 // Methods for support type inquiry through isa, cast, and dyn_cast:
1732 static bool classof(const Instruction *I) {
1733 return I->getOpcode() == Instruction::Select;
1734 }
1735 static bool classof(const Value *V) {
1736 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1737 }
1738};
1739
// OperandTraits specialization: SelectInst always has exactly three operands
// (condition, true value, false value).
1740template <>
1741struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1742};
1743
1745
1746//===----------------------------------------------------------------------===//
1747// VAArgInst Class
1748//===----------------------------------------------------------------------===//
1749
1750/// This class represents the va_arg llvm instruction, which returns
1751/// an argument of the specified type given a va_list and increments that list
1752///
// NOTE(review): the class declaration line (1753) was lost in extraction —
// presumably `class VAArgInst : public UnaryInstruction {` given the
// UnaryInstruction constructor call below; verify against upstream.
1754protected:
1755 // Note: Instruction needs to be a friend here to call cloneImpl.
1756 friend class Instruction;
1757
1758 VAArgInst *cloneImpl() const;
1759
1760public:
 // Result type is the explicitly supplied Ty; the single operand is the
 // va_list pointer.
1761 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1762 InsertPosition InsertBefore = nullptr)
1763 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1764 setName(NameStr);
1765 }
1766
1767 Value *getPointerOperand() { return getOperand(0); }
1768 const Value *getPointerOperand() const { return getOperand(0); }
1769 static unsigned getPointerOperandIndex() { return 0U; }
1770
1771 // Methods for support type inquiry through isa, cast, and dyn_cast:
1772 static bool classof(const Instruction *I) {
1773 return I->getOpcode() == VAArg;
1774 }
1775 static bool classof(const Value *V) {
1776 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1777 }
1778};
1779
1780//===----------------------------------------------------------------------===//
1781// ExtractElementInst Class
1782//===----------------------------------------------------------------------===//
1783
1784/// This instruction extracts a single (scalar)
1785/// element from a VectorType value
1786///
// NOTE(review): the ExtractElementInst class declaration line (1787) and a
// few member declaration lines (1797, 1800, 1811, 1816, 1821, 1833) were
// lost in extraction. Code below is otherwise verbatim; verify against
// upstream llvm/IR/Instructions.h.
// Fixed operand layout: Op<0> = vector, Op<1> = index.
1788 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1789
1790 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1791 InsertPosition InsertBefore = nullptr);
1792
1793protected:
1794 // Note: Instruction needs to be a friend here to call cloneImpl.
1795 friend class Instruction;
1796
 // NOTE(review): the cloneImpl() declaration (line 1797) was lost in
 // extraction.
1798
1799public:
 // NOTE(review): the Create(...) signature line (1800) was lost in
 // extraction.
1801 const Twine &NameStr = "",
1802 InsertPosition InsertBefore = nullptr) {
1803 return new (AllocMarker)
1804 ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1805 }
1806
1807 /// Return true if an extractelement instruction can be
1808 /// formed with the specified operands.
1809 static bool isValidOperands(const Value *Vec, const Value *Idx);
1810
 // NOTE(review): the non-const getVectorOperand() line (1811) was lost in
 // extraction.
1812 Value *getIndexOperand() { return Op<1>(); }
1813 const Value *getVectorOperand() const { return Op<0>(); }
1814 const Value *getIndexOperand() const { return Op<1>(); }
1815
 // NOTE(review): the getVectorOperandType() signature (line 1816) was lost
 // in extraction.
1817 return cast<VectorType>(getVectorOperand()->getType());
1818 }
1819
1820 /// Transparently provide more efficient getOperand methods.
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS line (1821) was
 // lost in extraction.
1822
1823 // Methods for support type inquiry through isa, cast, and dyn_cast:
1824 static bool classof(const Instruction *I) {
1825 return I->getOpcode() == Instruction::ExtractElement;
1826 }
1827 static bool classof(const Value *V) {
1828 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1829 }
1830};
1831
// OperandTraits specialization: ExtractElementInst always has exactly two
// operands (vector, index). The struct declaration line (1833) was lost in
// extraction.
1832template <>
1834 public FixedNumOperandTraits<ExtractElementInst, 2> {
1835};
1836
1838
1839//===----------------------------------------------------------------------===//
1840// InsertElementInst Class
1841//===----------------------------------------------------------------------===//
1842
1843/// This instruction inserts a single (scalar)
1844/// element into a VectorType value
1845///
// NOTE(review): the InsertElementInst class declaration line (1846) and a
// few member lines (1874, 1879, 1891) were lost in extraction. Code below is
// otherwise verbatim; verify against upstream llvm/IR/Instructions.h.
// Fixed operand layout: Op<0> = vector, Op<1> = new element, Op<2> = index.
1847 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1848
1849 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1850 const Twine &NameStr = "",
1851 InsertPosition InsertBefore = nullptr);
1852
1853protected:
1854 // Note: Instruction needs to be a friend here to call cloneImpl.
1855 friend class Instruction;
1856
1857 InsertElementInst *cloneImpl() const;
1858
1859public:
1860 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1861 const Twine &NameStr = "",
1862 InsertPosition InsertBefore = nullptr) {
1863 return new (AllocMarker)
1864 InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1865 }
1866
1867 /// Return true if an insertelement instruction can be
1868 /// formed with the specified operands.
1869 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1870 const Value *Idx);
1871
1872 /// Overload to return most specific vector type.
1873 ///
 // NOTE(review): the getType() signature line (1874) was lost in extraction.
1875 return cast<VectorType>(Instruction::getType());
1876 }
1877
1878 /// Transparently provide more efficient getOperand methods.
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS line (1879) was
 // lost in extraction.
1880
1881 // Methods for support type inquiry through isa, cast, and dyn_cast:
1882 static bool classof(const Instruction *I) {
1883 return I->getOpcode() == Instruction::InsertElement;
1884 }
1885 static bool classof(const Value *V) {
1886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1887 }
1888};
1889
// OperandTraits specialization: InsertElementInst always has exactly three
// operands. The struct declaration line (1891) was lost in extraction.
1890template <>
1892 public FixedNumOperandTraits<InsertElementInst, 3> {
1893};
1894
1896
1897//===----------------------------------------------------------------------===//
1898// ShuffleVectorInst Class
1899//===----------------------------------------------------------------------===//
1900
1901constexpr int PoisonMaskElem = -1;
1902
1903/// This instruction constructs a fixed permutation of two
1904/// input vectors.
1905///
1906/// For each element of the result vector, the shuffle mask selects an element
1907/// from one of the input vectors to copy to the result. Non-negative elements
1908/// in the mask represent an index into the concatenated pair of input vectors.
1909/// PoisonMaskElem (-1) specifies that the result element is poison.
1910///
1911/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1912/// requirement may be relaxed in the future.
1914 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1915
1916 SmallVector<int, 4> ShuffleMask;
1917 Constant *ShuffleMaskForBitcode;
1918
1919protected:
1920 // Note: Instruction needs to be a friend here to call cloneImpl.
1921 friend class Instruction;
1922
1923 ShuffleVectorInst *cloneImpl() const;
1924
1925public:
1926 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1927 InsertPosition InsertBefore = nullptr);
1928 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1929 InsertPosition InsertBefore = nullptr);
1930 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1931 const Twine &NameStr = "",
1932 InsertPosition InsertBefore = nullptr);
1934 const Twine &NameStr = "",
1935 InsertPosition InsertBefore = nullptr);
1936
1937 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
1938 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
1939
1940 /// Swap the operands and adjust the mask to preserve the semantics
1941 /// of the instruction.
1942 void commute();
1943
1944 /// Return true if a shufflevector instruction can be
1945 /// formed with the specified operands.
1946 static bool isValidOperands(const Value *V1, const Value *V2,
1947 const Value *Mask);
1948 static bool isValidOperands(const Value *V1, const Value *V2,
1949 ArrayRef<int> Mask);
1950
1951 /// Overload to return most specific vector type.
1952 ///
1954 return cast<VectorType>(Instruction::getType());
1955 }
1956
1957 /// Transparently provide more efficient getOperand methods.
1959
1960 /// Return the shuffle mask value of this instruction for the given element
1961 /// index. Return PoisonMaskElem if the element is undef.
1962 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
1963
1964 /// Convert the input shuffle mask operand to a vector of integers. Undefined
1965 /// elements of the mask are returned as PoisonMaskElem.
1966 static void getShuffleMask(const Constant *Mask,
1967 SmallVectorImpl<int> &Result);
1968
1969 /// Return the mask for this instruction as a vector of integers. Undefined
1970 /// elements of the mask are returned as PoisonMaskElem.
1972 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
1973 }
1974
1975 /// Return the mask for this instruction, for use in bitcode.
1976 ///
1977 /// TODO: This is temporary until we decide a new bitcode encoding for
1978 /// shufflevector.
1979 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
1980
1981 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1982 Type *ResultTy);
1983
1984 void setShuffleMask(ArrayRef<int> Mask);
1985
1986 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
1987
1988 /// Return true if this shuffle returns a vector with a different number of
1989 /// elements than its source vectors.
1990 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
1991 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
1992 bool changesLength() const {
1993 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
1994 ->getElementCount()
1995 .getKnownMinValue();
1996 unsigned NumMaskElts = ShuffleMask.size();
1997 return NumSourceElts != NumMaskElts;
1998 }
1999
2000 /// Return true if this shuffle returns a vector with a greater number of
2001 /// elements than its source vectors.
2002 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2003 bool increasesLength() const {
2004 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2005 ->getElementCount()
2006 .getKnownMinValue();
2007 unsigned NumMaskElts = ShuffleMask.size();
2008 return NumSourceElts < NumMaskElts;
2009 }
2010
2011 /// Return true if this shuffle mask chooses elements from exactly one source
2012 /// vector.
2013 /// Example: <7,5,undef,7>
2014 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2015 /// length as the mask.
2016 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2017 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2018 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2019 SmallVector<int, 16> MaskAsInts;
2020 getShuffleMask(Mask, MaskAsInts);
2021 return isSingleSourceMask(MaskAsInts, NumSrcElts);
2022 }
2023
2024 /// Return true if this shuffle chooses elements from exactly one source
2025 /// vector without changing the length of that vector.
2026 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2027 /// TODO: Optionally allow length-changing shuffles.
2028 bool isSingleSource() const {
2029 return !changesLength() &&
2030 isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2031 }
2032
2033 /// Return true if this shuffle mask chooses elements from exactly one source
2034 /// vector without lane crossings. A shuffle using this mask is not
2035 /// necessarily a no-op because it may change the number of elements from its
2036 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2037 /// Example: <undef,undef,2,3>
2038 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2039 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2040 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2041
2042 // Not possible to express a shuffle mask for a scalable vector for this
2043 // case.
2044 if (isa<ScalableVectorType>(Mask->getType()))
2045 return false;
2046
2047 SmallVector<int, 16> MaskAsInts;
2048 getShuffleMask(Mask, MaskAsInts);
2049 return isIdentityMask(MaskAsInts, NumSrcElts);
2050 }
2051
2052 /// Return true if this shuffle chooses elements from exactly one source
2053 /// vector without lane crossings and does not change the number of elements
2054 /// from its input vectors.
2055 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2056 bool isIdentity() const {
2057 // Not possible to express a shuffle mask for a scalable vector for this
2058 // case.
2059 if (isa<ScalableVectorType>(getType()))
2060 return false;
2061
2062 return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2063 }
2064
2065 /// Return true if this shuffle lengthens exactly one source vector with
2066 /// undefs in the high elements.
2067 bool isIdentityWithPadding() const;
2068
2069 /// Return true if this shuffle extracts the first N elements of exactly one
2070 /// source vector.
2071 bool isIdentityWithExtract() const;
2072
2073 /// Return true if this shuffle concatenates its 2 source vectors. This
/// returns false if either input is undefined. In that case, the shuffle is
/// better classified as an identity with padding operation.
2076 bool isConcat() const;
2077
2078 /// Return true if this shuffle mask chooses elements from its source vectors
2079 /// without lane crossings. A shuffle using this mask would be
2080 /// equivalent to a vector select with a constant condition operand.
2081 /// Example: <4,1,6,undef>
2082 /// This returns false if the mask does not choose from both input vectors.
2083 /// In that case, the shuffle is better classified as an identity shuffle.
2084 /// This assumes that vector operands are the same length as the mask
2085 /// (a length-changing shuffle can never be equivalent to a vector select).
2086 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2087 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2088 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2089 SmallVector<int, 16> MaskAsInts;
2090 getShuffleMask(Mask, MaskAsInts);
2091 return isSelectMask(MaskAsInts, NumSrcElts);
2092 }
2093
2094 /// Return true if this shuffle chooses elements from its source vectors
2095 /// without lane crossings and all operands have the same number of elements.
2096 /// In other words, this shuffle is equivalent to a vector select with a
2097 /// constant condition operand.
2098 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2099 /// This returns false if the mask does not choose from both input vectors.
2100 /// In that case, the shuffle is better classified as an identity shuffle.
2101 /// TODO: Optionally allow length-changing shuffles.
2102 bool isSelect() const {
2103 return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2104 }
2105
2106 /// Return true if this shuffle mask swaps the order of elements from exactly
2107 /// one source vector.
2108 /// Example: <7,6,undef,4>
2109 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2110 /// length as the mask.
2111 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2112 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isReverseMask(MaskAsInts, NumSrcElts);
2117 }
2118
2119 /// Return true if this shuffle swaps the order of elements from exactly
2120 /// one source vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isReverse() const {
2124 return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2125 }
2126
2127 /// Return true if this shuffle mask chooses all elements with the same value
2128 /// as the first element of exactly one source vector.
2129 /// Example: <4,undef,undef,4>
2130 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2131 /// length as the mask.
2132 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2133 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2138 }
2139
2140 /// Return true if all elements of this shuffle are the same value as the
2141 /// first element of exactly one source vector without changing the length
2142 /// of that vector.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2144 /// TODO: Optionally allow length-changing shuffles.
2145 /// TODO: Optionally allow splats from other elements.
2146 bool isZeroEltSplat() const {
2147 return !changesLength() &&
2148 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2149 }
2150
2151 /// Return true if this shuffle mask is a transpose mask.
2152 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2153 /// even- or odd-numbered vector elements from two n-dimensional source
2154 /// vectors and write each result into consecutive elements of an
2155 /// n-dimensional destination vector. Two shuffles are necessary to complete
2156 /// the transpose, one for the even elements and another for the odd elements.
2157 /// This description closely follows how the TRN1 and TRN2 AArch64
2158 /// instructions operate.
2159 ///
2160 /// For example, a simple 2x2 matrix can be transposed with:
2161 ///
2162 /// ; Original matrix
2163 /// m0 = < a, b >
2164 /// m1 = < c, d >
2165 ///
2166 /// ; Transposed matrix
2167 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2168 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2169 ///
2170 /// For matrices having greater than n columns, the resulting nx2 transposed
2171 /// matrix is stored in two result vectors such that one vector contains
2172 /// interleaved elements from all the even-numbered rows and the other vector
2173 /// contains interleaved elements from all the odd-numbered rows. For example,
2174 /// a 2x4 matrix can be transposed with:
2175 ///
2176 /// ; Original matrix
2177 /// m0 = < a, b, c, d >
2178 /// m1 = < e, f, g, h >
2179 ///
2180 /// ; Transposed matrix
2181 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2182 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2183 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2184 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2185 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2186 SmallVector<int, 16> MaskAsInts;
2187 getShuffleMask(Mask, MaskAsInts);
2188 return isTransposeMask(MaskAsInts, NumSrcElts);
2189 }
2190
2191 /// Return true if this shuffle transposes the elements of its inputs without
2192 /// changing the length of the vectors. This operation may also be known as a
2193 /// merge or interleave. See the description for isTransposeMask() for the
2194 /// exact specification.
2195 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2196 bool isTranspose() const {
2197 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2198 }
2199
2200 /// Return true if this shuffle mask is a splice mask, concatenating the two
2201 /// inputs together and then extracts an original width vector starting from
2202 /// the splice index.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2204 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2205 /// length as the mask.
2206 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2207 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2208 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2209 SmallVector<int, 16> MaskAsInts;
2210 getShuffleMask(Mask, MaskAsInts);
2211 return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2212 }
2213
2214 /// Return true if this shuffle splices two inputs without changing the length
2215 /// of the vectors. This operation concatenates the two inputs together and
2216 /// then extracts an original width vector starting from the splice index.
2217 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2218 bool isSplice(int &Index) const {
2219 return !changesLength() &&
2220 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2221 }
2222
2223 /// Return true if this shuffle mask is an extract subvector mask.
2224 /// A valid extract subvector mask returns a smaller vector from a single
2225 /// source operand. The base extraction index is returned as well.
2226 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2227 int &Index);
2228 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2229 int &Index) {
2230 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2231 // Not possible to express a shuffle mask for a scalable vector for this
2232 // case.
2233 if (isa<ScalableVectorType>(Mask->getType()))
2234 return false;
2235 SmallVector<int, 16> MaskAsInts;
2236 getShuffleMask(Mask, MaskAsInts);
2237 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2238 }
2239
2240 /// Return true if this shuffle mask is an extract subvector mask.
2242 // Not possible to express a shuffle mask for a scalable vector for this
2243 // case.
2244 if (isa<ScalableVectorType>(getType()))
2245 return false;
2246
2247 int NumSrcElts =
2248 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2249 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2250 }
2251
2252 /// Return true if this shuffle mask is an insert subvector mask.
2253 /// A valid insert subvector mask inserts the lowest elements of a second
2254 /// source operand into an in-place first source operand.
2255 /// Both the sub vector width and the insertion index is returned.
2256 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2257 int &NumSubElts, int &Index);
2258 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2259 int &NumSubElts, int &Index) {
2260 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2261 // Not possible to express a shuffle mask for a scalable vector for this
2262 // case.
2263 if (isa<ScalableVectorType>(Mask->getType()))
2264 return false;
2265 SmallVector<int, 16> MaskAsInts;
2266 getShuffleMask(Mask, MaskAsInts);
2267 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2268 }
2269
2270 /// Return true if this shuffle mask is an insert subvector mask.
2271 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2272 // Not possible to express a shuffle mask for a scalable vector for this
2273 // case.
2274 if (isa<ScalableVectorType>(getType()))
2275 return false;
2276
2277 int NumSrcElts =
2278 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2279 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2280 }
2281
2282 /// Return true if this shuffle mask replicates each of the \p VF elements
2283 /// in a vector \p ReplicationFactor times.
2284 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2285 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2286 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2287 int &VF);
2288 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2289 int &VF) {
2290 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2291 // Not possible to express a shuffle mask for a scalable vector for this
2292 // case.
2293 if (isa<ScalableVectorType>(Mask->getType()))
2294 return false;
2295 SmallVector<int, 16> MaskAsInts;
2296 getShuffleMask(Mask, MaskAsInts);
2297 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2298 }
2299
2300 /// Return true if this shuffle mask is a replication mask.
2301 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2302
2303 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2304 /// i.e. each index between [0..VF) is used exactly once in each submask of
2305 /// size VF.
2306 /// For example, the mask for \p VF=4 is:
2307 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2308 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2309 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2310 /// element 3 is used twice in the second submask
2311 /// (3,3,1,0) and index 2 is not used at all.
2312 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2313
2314 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2315 /// mask.
2316 bool isOneUseSingleSourceMask(int VF) const;
2317
2318 /// Change values in a shuffle permute mask assuming the two vector operands
2319 /// of length InVecNumElts have swapped position.
2321 unsigned InVecNumElts) {
2322 for (int &Idx : Mask) {
2323 if (Idx == -1)
2324 continue;
2325 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2326 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2327 "shufflevector mask index out of range");
2328 }
2329 }
2330
2331 /// Return if this shuffle interleaves its two input vectors together.
2332 bool isInterleave(unsigned Factor);
2333
2334 /// Return true if the mask interleaves one or more input vectors together.
2335 ///
2336 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2337 /// E.g. For a Factor of 2 (LaneLen=4):
2338 /// <0, 4, 1, 5, 2, 6, 3, 7>
2339 /// E.g. For a Factor of 3 (LaneLen=4):
2340 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2341 /// E.g. For a Factor of 4 (LaneLen=2):
2342 /// <0, 2, 6, 4, 1, 3, 7, 5>
2343 ///
2344 /// NumInputElts is the total number of elements in the input vectors.
2345 ///
2346 /// StartIndexes are the first indexes of each vector being interleaved,
2347 /// substituting any indexes that were undef
2348 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2349 ///
2350 /// Note that this does not check if the input vectors are consecutive:
2351 /// It will return true for masks such as
2352 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2353 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2354 unsigned NumInputElts,
2355 SmallVectorImpl<unsigned> &StartIndexes);
2356 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2357 unsigned NumInputElts) {
2358 SmallVector<unsigned, 8> StartIndexes;
2359 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2360 }
2361
2362 /// Check if the mask is a DE-interleave mask of the given factor
2363 /// \p Factor like:
2364 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2365 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
2366 unsigned &Index);
2367 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2368 unsigned Unused;
2369 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
2370 }
2371
2372 /// Checks if the shuffle is a bit rotation of the first operand across
2373 /// multiple subelements, e.g:
2374 ///
2375 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2376 ///
2377 /// could be expressed as
2378 ///
2379 /// rotl <4 x i16> %a, 8
2380 ///
2381 /// If it can be expressed as a rotation, returns the number of subelements to
2382 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2383 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2384 unsigned MinSubElts, unsigned MaxSubElts,
2385 unsigned &NumSubElts, unsigned &RotateAmt);
2386
2387 // Methods for support type inquiry through isa, cast, and dyn_cast:
2388 static bool classof(const Instruction *I) {
2389 return I->getOpcode() == Instruction::ShuffleVector;
2390 }
2391 static bool classof(const Value *V) {
2392 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2393 }
2394};
2395
2396template <>
2398 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2399
2401
2402//===----------------------------------------------------------------------===//
2403// ExtractValueInst Class
2404//===----------------------------------------------------------------------===//
2405
2406/// This instruction extracts a struct member or array
2407/// element value from an aggregate value.
2408///
2411
2413
 /// Constructors - Create an extractvalue instruction with a base aggregate
2415 /// value and a list of indices. The first and second ctor can optionally
2416 /// insert before an existing instruction, the third appends the new
2417 /// instruction to the specified BasicBlock.
2418 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2419 const Twine &NameStr, InsertPosition InsertBefore);
2420
2421 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2422
2423protected:
2424 // Note: Instruction needs to be a friend here to call cloneImpl.
2425 friend class Instruction;
2426
2427 ExtractValueInst *cloneImpl() const;
2428
2429public:
2431 const Twine &NameStr = "",
2432 InsertPosition InsertBefore = nullptr) {
2433 return new
2434 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2435 }
2436
2437 /// Returns the type of the element that would be extracted
2438 /// with an extractvalue instruction with the specified parameters.
2439 ///
2440 /// Null is returned if the indices are invalid for the specified type.
2441 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2442
2443 using idx_iterator = const unsigned*;
2444
  /// Iterators over the constant index list of this extractvalue.
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
2448 return make_range(idx_begin(), idx_end());
2449 }
2450
2452 return getOperand(0);
2453 }
2455 return getOperand(0);
2456 }
2457 static unsigned getAggregateOperandIndex() {
2458 return 0U; // get index for modifying correct operand
2459 }
2460
2462 return Indices;
2463 }
2464
2465 unsigned getNumIndices() const {
2466 return (unsigned)Indices.size();
2467 }
2468
2469 bool hasIndices() const {
2470 return true;
2471 }
2472
2473 // Methods for support type inquiry through isa, cast, and dyn_cast:
2474 static bool classof(const Instruction *I) {
2475 return I->getOpcode() == Instruction::ExtractValue;
2476 }
2477 static bool classof(const Value *V) {
2478 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2479 }
2480};
2481
// Out-of-line constructor: the result type is computed from the aggregate
// type and the index list via getIndexedType(); checkGEPType wraps the
// result (declared elsewhere — presumably validating it is non-null).
ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   InsertPosition InsertBefore)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}
2489
2490//===----------------------------------------------------------------------===//
2491// InsertValueInst Class
2492//===----------------------------------------------------------------------===//
2493
/// This instruction inserts a struct field or array element
/// value into an aggregate value.
2496///
2498 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
2499
2501
2502 InsertValueInst(const InsertValueInst &IVI);
2503
 /// Constructors - Create an insertvalue instruction with a base aggregate
2505 /// value, a value to insert, and a list of indices. The first and second ctor
2506 /// can optionally insert before an existing instruction, the third appends
2507 /// the new instruction to the specified BasicBlock.
2508 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2509 const Twine &NameStr, InsertPosition InsertBefore);
2510
2511 /// Constructors - These three constructors are convenience methods because
2512 /// one and two index insertvalue instructions are so common.
2513 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2514 const Twine &NameStr = "",
2515 InsertPosition InsertBefore = nullptr);
2516
2517 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2518 const Twine &NameStr);
2519
2520protected:
2521 // Note: Instruction needs to be a friend here to call cloneImpl.
2522 friend class Instruction;
2523
2524 InsertValueInst *cloneImpl() const;
2525
2526public:
  // Allocate space for exactly two operands (aggregate and inserted value).
  void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2530
  /// Create a new insertvalue instruction, optionally inserting it at
  /// \p InsertBefore.
  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 InsertPosition InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }
2537
2538 /// Transparently provide more efficient getOperand methods.
2540
2541 using idx_iterator = const unsigned*;
2542
  /// Iterators over the constant index list of this insertvalue.
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
2546 return make_range(idx_begin(), idx_end());
2547 }
2548
2550 return getOperand(0);
2551 }
2553 return getOperand(0);
2554 }
2555 static unsigned getAggregateOperandIndex() {
2556 return 0U; // get index for modifying correct operand
2557 }
2558
2560 return getOperand(1);
2561 }
2563 return getOperand(1);
2564 }
2566 return 1U; // get index for modifying correct operand
2567 }
2568
2570 return Indices;
2571 }
2572
2573 unsigned getNumIndices() const {
2574 return (unsigned)Indices.size();
2575 }
2576
2577 bool hasIndices() const {
2578 return true;
2579 }
2580
2581 // Methods for support type inquiry through isa, cast, and dyn_cast:
2582 static bool classof(const Instruction *I) {
2583 return I->getOpcode() == Instruction::InsertValue;
2584 }
2585 static bool classof(const Value *V) {
2586 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2587 }
2588};
2589
2590template <>
2592 public FixedNumOperandTraits<InsertValueInst, 2> {
2593};
2594
// Out-of-line constructor. The result type of an insertvalue is simply the
// type of the aggregate operand itself.
InsertValueInst::InsertValueInst(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs, const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(Agg->getType(), InsertValue, AllocMarker, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}
2601
2602DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2603
2604//===----------------------------------------------------------------------===//
2605// PHINode Class
2606//===----------------------------------------------------------------------===//
2607
2608// PHINode - The PHINode class is used to represent the magical mystical PHI
2609// node, that can not exist in nature, but can be synthesized in a computer
2610// scientist's overactive imagination.
2611//
2612class PHINode : public Instruction {
2613 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2614
2615 /// The number of operands actually allocated. NumOperands is
2616 /// the number actually in use.
2617 unsigned ReservedSpace;
2618
2619 PHINode(const PHINode &PN);
2620
  /// Construct a PHI of type \p Ty, reserving space for \p NumReservedValues
  /// incoming (value, block) pairs up front.
  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   InsertPosition InsertBefore = nullptr)
      : Instruction(Ty, Instruction::PHI, AllocMarker, InsertBefore),
        ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    // Hung-off uses hold both the incoming values and their blocks.
    allocHungoffUses(ReservedSpace);
  }
2630
2631protected:
2632 // Note: Instruction needs to be a friend here to call cloneImpl.
2633 friend class Instruction;
2634
2635 PHINode *cloneImpl() const;
2636
2637 // allocHungoffUses - this is more complicated than the generic
2638 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2639 // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    // IsPhi = true makes User allocate the incoming-block pointers alongside
    // the N Uses, all in one allocation.
    User::allocHungoffUses(N, /* IsPhi */ true);
  }
2643
2644public:
2645 /// Constructors - NumReservedValues is a hint for the number of incoming
2646 /// edges that this phi node will have (use 0 if you really have no idea).
2647 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2648 const Twine &NameStr = "",
2649 InsertPosition InsertBefore = nullptr) {
2650 return new (AllocMarker)
2651 PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2652 }
2653
2654 /// Provide fast operand accessors
2656
2657 // Block iterator interface. This provides access to the list of incoming
2658 // basic blocks, which parallels the list of incoming values.
2659 // Please note that we are not providing non-const iterators for blocks to
2660 // force all updates go through an interface function.
2661
2664
2666 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2667 }
2668
2670 return block_begin() + getNumOperands();
2671 }
2672
2674 return make_range(block_begin(), block_end());
2675 }
2676
2677 op_range incoming_values() { return operands(); }
2678
2679 const_op_range incoming_values() const { return operands(); }
2680
  /// Return the number of incoming edges (and thus incoming values).
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }
2684
2685 /// Return incoming value number x
2686 ///
2687 Value *getIncomingValue(unsigned i) const {
2688 return getOperand(i);
2689 }
2690 void setIncomingValue(unsigned i, Value *V) {
2691 assert(V && "PHI node got a null value!");
2692 assert(getType() == V->getType() &&
2693 "All operands to PHI node must be the same type as the PHI node!");
2694 setOperand(i, V);
2695 }
2696
2697 static unsigned getOperandNumForIncomingValue(unsigned i) {
2698 return i;
2699 }
2700
2701 static unsigned getIncomingValueNumForOperand(unsigned i) {
2702 return i;
2703 }
2704
2705 /// Return incoming basic block number @p i.
2706 ///
2707 BasicBlock *getIncomingBlock(unsigned i) const {
2708 return block_begin()[i];
2709 }
2710
2711 /// Return incoming basic block corresponding
2712 /// to an operand of the PHI.
2713 ///
2715 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2716 return getIncomingBlock(unsigned(&U - op_begin()));
2717 }
2718
2719 /// Return incoming basic block corresponding
2720 /// to value use iterator.
2721 ///
2723 return getIncomingBlock(I.getUse());
2724 }
2725
2726 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2727 const_cast<block_iterator>(block_begin())[i] = BB;
2728 }
2729
2730 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2731 /// of this PHINode, starting at \p ToIdx.
2733 uint32_t ToIdx = 0) {
2734 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2735 }
2736
2737 /// Replace every incoming basic block \p Old to basic block \p New.
2739 assert(New && Old && "PHI node got a null basic block!");
2740 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2741 if (getIncomingBlock(Op) == Old)
2742 setIncomingBlock(Op, New);
2743 }
2744
2745 /// Add an incoming value to the end of the PHI list
2746 ///
2748 if (getNumOperands() == ReservedSpace)
2749 growOperands(); // Get more space!
2750 // Initialize some new operands.
2751 setNumHungOffUseOperands(getNumOperands() + 1);
2752 setIncomingValue(getNumOperands() - 1, V);
2753 setIncomingBlock(getNumOperands() - 1, BB);
2754 }
2755
2756 /// Remove an incoming value. This is useful if a
2757 /// predecessor basic block is deleted. The value removed is returned.
2758 ///
2759 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2760 /// is true), the PHI node is destroyed and any uses of it are replaced with
2761 /// dummy values. The only time there should be zero incoming values to a PHI
2762 /// node is when the block is dead, so this strategy is sound.
2763 ///
2764 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2765
2766 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2767 int Idx = getBasicBlockIndex(BB);
2768 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2769 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2770 }
2771
2772 /// Remove all incoming values for which the predicate returns true.
2773 /// The predicate accepts the incoming value index.
2774 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2775 bool DeletePHIIfEmpty = true);
2776
2777 /// Return the first index of the specified basic
2778 /// block in the value list for this PHI. Returns -1 if no instance.
2779 ///
2780 int getBasicBlockIndex(const BasicBlock *BB) const {
2781 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2782 if (block_begin()[i] == BB)
2783 return i;
2784 return -1;
2785 }
2786
2788 int Idx = getBasicBlockIndex(BB);
2789 assert(Idx >= 0 && "Invalid basic block argument!");
2790 return getIncomingValue(Idx);
2791 }
2792
2793 /// Set every incoming value(s) for block \p BB to \p V.
2795 assert(BB && "PHI node got a null basic block!");
2796 bool Found = false;
2797 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2798 if (getIncomingBlock(Op) == BB) {
2799 Found = true;
2800 setIncomingValue(Op, V);
2801 }
2802 (void)Found;
2803 assert(Found && "Invalid basic block argument to set!");
2804 }
2805
2806 /// If the specified PHI node always merges together the
2807 /// same value, return the value, otherwise return null.
2808 Value *hasConstantValue() const;
2809
2810 /// Whether the specified PHI node always merges
2811 /// together the same value, assuming undefs are equal to a unique
2812 /// non-undef value.
2813 bool hasConstantOrUndefValue() const;
2814
2815 /// If the PHI node is complete which means all of its parent's predecessors
2816 /// have incoming value in this PHI, return true, otherwise return false.
2817 bool isComplete() const {
2819 [this](const BasicBlock *Pred) {
2820 return getBasicBlockIndex(Pred) >= 0;
2821 });
2822 }
2823
2824 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2825 static bool classof(const Instruction *I) {
2826 return I->getOpcode() == Instruction::PHI;
2827 }
2828 static bool classof(const Value *V) {
2829 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2830 }
2831
2832private:
2833 void growOperands();
2834};
2835
2836template <> struct OperandTraits<PHINode> : public HungoffOperandTraits {};
2837
2839
2840//===----------------------------------------------------------------------===//
2841// LandingPadInst Class
2842//===----------------------------------------------------------------------===//
2843
2844//===---------------------------------------------------------------------------
2845/// The landingpad instruction holds all of the information
2846/// necessary to generate correct exception handling. The landingpad instruction
2847/// cannot be moved from the top of a landing pad block, which itself is
2848/// accessible only from the 'unwind' edge of an invoke. This uses the
2849/// SubclassData field in Value to store whether or not the landingpad is a
2850/// cleanup.
2851///
2853 using CleanupField = BoolBitfieldElementT<0>;
2854
2855 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2856
2857 /// The number of operands actually allocated. NumOperands is
2858 /// the number actually in use.
2859 unsigned ReservedSpace;
2860
2861 LandingPadInst(const LandingPadInst &LP);
2862
2863public:
2865
2866private:
2867 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2868 const Twine &NameStr, InsertPosition InsertBefore);
2869
2870 // Allocate space for exactly zero operands.
2871 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2872
2873 void growOperands(unsigned Size);
2874 void init(unsigned NumReservedValues, const Twine &NameStr);
2875
2876protected:
2877 // Note: Instruction needs to be a friend here to call cloneImpl.
2878 friend class Instruction;
2879
2880 LandingPadInst *cloneImpl() const;
2881
2882public:
2883 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2884
2885 /// Constructors - NumReservedClauses is a hint for the number of incoming
2886 /// clauses that this landingpad will have (use 0 if you really have no idea).
2887 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2888 const Twine &NameStr = "",
2889 InsertPosition InsertBefore = nullptr);
2890
2891 /// Provide fast operand accessors
2893
2894 /// Return 'true' if this landingpad instruction is a
2895 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2896 /// doesn't catch the exception.
2897 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2898
2899 /// Indicate that this landingpad instruction is a cleanup.
2900 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2901
2902 /// Add a catch or filter clause to the landing pad.
2903 void addClause(Constant *ClauseVal);
2904
2905 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2906 /// determine what type of clause this is.
2907 Constant *getClause(unsigned Idx) const {
2908 return cast<Constant>(getOperandList()[Idx]);
2909 }
2910
2911 /// Return 'true' if the clause and index Idx is a catch clause.
2912 bool isCatch(unsigned Idx) const {
2913 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2914 }
2915
2916 /// Return 'true' if the clause and index Idx is a filter clause.
2917 bool isFilter(unsigned Idx) const {
2918 return isa<ArrayType>(getOperandList()[Idx]->getType());
2919 }
2920
2921 /// Get the number of clauses for this landing pad.
2922 unsigned getNumClauses() const { return getNumOperands(); }
2923
2924 /// Grow the size of the operand list to accommodate the new
2925 /// number of clauses.
2926 void reserveClauses(unsigned Size) { growOperands(Size); }
2927
2928 // Methods for support type inquiry through isa, cast, and dyn_cast:
2929 static bool classof(const Instruction *I) {
2930 return I->getOpcode() == Instruction::LandingPad;
2931 }
2932 static bool classof(const Value *V) {
2933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2934 }
2935};
2936
2937template <>
2939
2941
2942//===----------------------------------------------------------------------===//
2943// ReturnInst Class
2944//===----------------------------------------------------------------------===//
2945
2946//===---------------------------------------------------------------------------
2947/// Return a value (possibly void), from a function. Execution
2948/// does not continue in this function any longer.
2949///
2950class ReturnInst : public Instruction {
2952
2953private:
2954 // ReturnInst constructors:
2955 // ReturnInst() - 'ret void' instruction
2956 // ReturnInst( null) - 'ret void' instruction
2957 // ReturnInst(Value* X) - 'ret X' instruction
2958 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
2959 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
2960 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2961 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2962 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2963 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2964 //
2965 // NOTE: If the Value* passed is of type void then the constructor behaves as
2966 // if it was passed NULL.
2967 explicit ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
2968 InsertPosition InsertBefore);
2969
2970protected:
2971 // Note: Instruction needs to be a friend here to call cloneImpl.
2972 friend class Instruction;
2973
2974 ReturnInst *cloneImpl() const;
2975
2976public:
2977 static ReturnInst *Create(LLVMContext &C, Value *retVal = nullptr,
2978 InsertPosition InsertBefore = nullptr) {
2979 IntrusiveOperandsAllocMarker AllocMarker{retVal ? 1U : 0U};
2980 return new (AllocMarker) ReturnInst(C, retVal, AllocMarker, InsertBefore);
2981 }
2982
2983 static ReturnInst *Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2984 IntrusiveOperandsAllocMarker AllocMarker{0};
2985 return new (AllocMarker) ReturnInst(C, nullptr, AllocMarker, InsertAtEnd);
2986 }
2987
2988 /// Provide fast operand accessors
2990
2991 /// Convenience accessor. Returns null if there is no return value.
2993 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2994 }
2995
2996 unsigned getNumSuccessors() const { return 0; }
2997
2998 // Methods for support type inquiry through isa, cast, and dyn_cast:
2999 static bool classof(const Instruction *I) {
3000 return (I->getOpcode() == Instruction::Ret);
3001 }
3002 static bool classof(const Value *V) {
3003 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3004 }
3005
3006private:
  // Terminator-interface stubs. A 'ret' has zero successors (see
  // getNumSuccessors above), so these are private and trap if ever reached.
3007 BasicBlock *getSuccessor(unsigned idx) const {
3008 llvm_unreachable("ReturnInst has no successors!");
3009 }
3010
3011 void setSuccessor(unsigned idx, BasicBlock *B) {
3012 llvm_unreachable("ReturnInst has no successors!");
3013 }
3014};
3015
3016template <>
3017struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {};
3018
3020
3021//===----------------------------------------------------------------------===//
3022// BranchInst Class
3023//===----------------------------------------------------------------------===//
3024
3025//===---------------------------------------------------------------------------
3026/// Conditional or Unconditional Branch instruction.
3027///
3028class BranchInst : public Instruction {
3029 /// Ops list - Branches are strange. The operands are ordered:
3030 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3031 /// they don't have to check for cond/uncond branchness. These are mostly
3032 /// accessed relative from op_end().
3034 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3035 // BranchInst(BB *B) - 'br B'
3036 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3037 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3038 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3039 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3040 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3041 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3042 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3043 explicit BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
3044 InsertPosition InsertBefore);
3045 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3046 AllocInfo AllocInfo, InsertPosition InsertBefore);
3047
3048 void AssertOK();
3049
3050protected:
3051 // Note: Instruction needs to be a friend here to call cloneImpl.
3052 friend class Instruction;
3053
3054 BranchInst *cloneImpl() const;
3055
3056public:
3057 /// Iterator type that casts an operand to a basic block.
3058 ///
3059 /// This only makes sense because the successors are stored as adjacent
3060 /// operands for branch instructions.
3062 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3063 std::random_access_iterator_tag, BasicBlock *,
3064 ptrdiff_t, BasicBlock *, BasicBlock *> {
3066
3067 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3068 BasicBlock *operator->() const { return operator*(); }
3069 };
3070
3071 /// The const version of `succ_op_iterator`.
3073 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3074 std::random_access_iterator_tag,
3075 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3076 const BasicBlock *> {
3079
3080 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3081 const BasicBlock *operator->() const { return operator*(); }
3082 };
3083
3085 InsertPosition InsertBefore = nullptr) {
3086 IntrusiveOperandsAllocMarker AllocMarker{1};
3087 return new (AllocMarker) BranchInst(IfTrue, AllocMarker, InsertBefore);
3088 }
3089
3090 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3091 Value *Cond,
3092 InsertPosition InsertBefore = nullptr) {
3093 IntrusiveOperandsAllocMarker AllocMarker{3};
3094 return new (AllocMarker)
3095 BranchInst(IfTrue, IfFalse, Cond, AllocMarker, InsertBefore);
3096 }
3097
3098 /// Transparently provide more efficient getOperand methods.
3100
3101 bool isUnconditional() const { return getNumOperands() == 1; }
3102 bool isConditional() const { return getNumOperands() == 3; }
3103
3105 assert(isConditional() && "Cannot get condition of an uncond branch!");
3106 return Op<-3>();
3107 }
3108
3110 assert(isConditional() && "Cannot set condition of unconditional branch!");
3111 Op<-3>() = V;
3112 }
3113
3114 unsigned getNumSuccessors() const { return 1+isConditional(); }
3115
  // Successors are stored at the *end* of the operand list ([Cond,]
  // FalseDest, TrueDest — see the Ops-list note on the class), so successor
  // #i lives i slots before the last operand; Op<-1>() is the last Use.
3116 BasicBlock *getSuccessor(unsigned i) const {
3117 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3118 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3119 }
3120
3121 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3122 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3123 *(&Op<-1>() - idx) = NewSucc;
3124 }
3125
3126 /// Swap the successors of this branch instruction.
3127 ///
3128 /// Swaps the successors of the branch instruction. This also swaps any
3129 /// branch weight metadata associated with the instruction so that it
3130 /// continues to map correctly to each operand.
3131 void swapSuccessors();
3132
3134 return make_range(
3135 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3136 succ_op_iterator(value_op_end()));
3137 }
3138
3141 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3142 const_succ_op_iterator(value_op_end()));
3143 }
3144
3145 // Methods for support type inquiry through isa, cast, and dyn_cast:
3146 static bool classof(const Instruction *I) {
3147 return (I->getOpcode() == Instruction::Br);
3148 }
3149 static bool classof(const Value *V) {
3150 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3151 }
3152};
3153
3154template <>
3155struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst> {};
3156
3158
3159//===----------------------------------------------------------------------===//
3160// SwitchInst Class
3161//===----------------------------------------------------------------------===//
3162
3163//===---------------------------------------------------------------------------
3164/// Multiway switch
3165///
3166class SwitchInst : public Instruction {
3167 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3168
3169 unsigned ReservedSpace;
3170
3171 // Operand[0] = Value to switch on
3172 // Operand[1] = Default basic block destination
3173 // Operand[2n ] = Value to match
3174 // Operand[2n+1] = BasicBlock to go to on match
3175 SwitchInst(const SwitchInst &SI);
3176
3177 /// Create a new switch instruction, specifying a value to switch on and a
3178 /// default destination. The number of additional cases can be specified here
3179 /// to make memory allocation more efficient. This constructor can also
3180 /// auto-insert before another instruction.
3181 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3182 InsertPosition InsertBefore);
3183
3184 // allocate space for exactly zero operands
3185 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3186
3187 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3188 void growOperands();
3189
3190protected:
3191 // Note: Instruction needs to be a friend here to call cloneImpl.
3192 friend class Instruction;
3193
3194 SwitchInst *cloneImpl() const;
3195
3196public:
3197 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3198
3199 // -2
3200 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3201
3202 template <typename CaseHandleT> class CaseIteratorImpl;
3203
3204 /// A handle to a particular switch case. It exposes a convenient interface
3205 /// to both the case value and the successor block.
3206 ///
3207 /// We define this as a template and instantiate it to form both a const and
3208 /// non-const handle.
3209 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3211 // Directly befriend both const and non-const iterators.
3212 friend class SwitchInst::CaseIteratorImpl<
3213 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3214
3215 protected:
3216 // Expose the switch type we're parameterized with to the iterator.
3217 using SwitchInstType = SwitchInstT;
3218
3219 SwitchInstT *SI;
3221
3222 CaseHandleImpl() = default;
3224
3225 public:
3226 /// Resolves case value for current case.
3227 ConstantIntT *getCaseValue() const {
3228 assert((unsigned)Index < SI->getNumCases() &&
3229 "Index out the number of cases.");
3230 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3231 }
3232
3233 /// Resolves successor for current case.
3234 BasicBlockT *getCaseSuccessor() const {
3235 assert(((unsigned)Index < SI->getNumCases() ||
3236 (unsigned)Index == DefaultPseudoIndex) &&
3237 "Index out the number of cases.");
3238 return SI->getSuccessor(getSuccessorIndex());
3239 }
3240
3241 /// Returns number of current case.
3242 unsigned getCaseIndex() const { return Index; }
3243
3244 /// Returns successor index for current case successor.
3245 unsigned getSuccessorIndex() const {
3246 assert(((unsigned)Index == DefaultPseudoIndex ||
3247 (unsigned)Index < SI->getNumCases()) &&
3248 "Index out the number of cases.");
3249 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3250 }
3251
3252 bool operator==(const CaseHandleImpl &RHS) const {
3253 assert(SI == RHS.SI && "Incompatible operators.");
3254 return Index == RHS.Index;
3255 }
3256 };
3257
3260
3262 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3264
3265 public:
3267
3268 /// Sets the new value for current case.
3269 void setValue(ConstantInt *V) const {
3270 assert((unsigned)Index < SI->getNumCases() &&
3271 "Index out the number of cases.");
3272 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3273 }
3274
3275 /// Sets the new successor for current case.
3276 void setSuccessor(BasicBlock *S) const {
3277 SI->setSuccessor(getSuccessorIndex(), S);
3278 }
3279 };
3280
3281 template <typename CaseHandleT>
3283 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3284 std::random_access_iterator_tag,
3285 const CaseHandleT> {
3286 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3287
3288 CaseHandleT Case;
3289
3290 public:
3291 /// Default constructed iterator is in an invalid state until assigned to
3292 /// a case for a particular switch.
3293 CaseIteratorImpl() = default;
3294
3295 /// Initializes case iterator for given SwitchInst and for given
3296 /// case number.
3297 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3298
3299 /// Initializes case iterator for given SwitchInst and for given
3300 /// successor index.
3302 unsigned SuccessorIndex) {
3303 assert(SuccessorIndex < SI->getNumSuccessors() &&
3304 "Successor index # out of range!");
3305 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3306 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3307 }
3308
3309 /// Support converting to the const variant. This will be a no-op for const
3310 /// variant.
3312 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3313 }
3314
3316 // Check index correctness after addition.
3317 // Note: Index == getNumCases() means end().
3318 assert(Case.Index + N >= 0 &&
3319 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3320 "Case.Index out the number of cases.");
3321 Case.Index += N;
3322 return *this;
3323 }
3325 // Check index correctness after subtraction.
3326 // Note: Case.Index == getNumCases() means end().
3327 assert(Case.Index - N >= 0 &&
3328 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3329 "Case.Index out the number of cases.");
3330 Case.Index -= N;
3331 return *this;
3332 }
3334 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3335 return Case.Index - RHS.Case.Index;
3336 }
3337 bool operator==(const CaseIteratorImpl &RHS) const {
3338 return Case == RHS.Case;
3339 }
3340 bool operator<(const CaseIteratorImpl &RHS) const {
3341 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3342 return Case.Index < RHS.Case.Index;
3343 }
3344 const CaseHandleT &operator*() const { return Case; }
3345 };
3346
3349
3351 unsigned NumCases,
3352 InsertPosition InsertBefore = nullptr) {
3353 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3354 }
3355
3356 /// Provide fast operand accessors
3358
3359 // Accessor Methods for Switch stmt
3360 Value *getCondition() const { return getOperand(0); }
3361 void setCondition(Value *V) { setOperand(0, V); }
3362
3364 return cast<BasicBlock>(getOperand(1));
3365 }
3366
3367 /// Returns true if the default branch must result in immediate undefined
3368 /// behavior, false otherwise.
3370 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3371 }
3372
  /// Replace the default destination, stored in operand slot 1.
3373 void setDefaultDest(BasicBlock *DefaultCase) {
  // NOTE(review): stores the block via reinterpret_cast rather than an
  // implicit BasicBlock -> Value upcast — presumably to avoid requiring
  // BasicBlock's full definition here; confirm before changing.
3374 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3375 }
3376
3377 /// Return the number of 'cases' in this switch instruction, excluding the
3378 /// default case.
3379 unsigned getNumCases() const {
3380 return getNumOperands()/2 - 1;
3381 }
3382
3383 /// Returns a read/write iterator that points to the first case in the
3384 /// SwitchInst.
3386 return CaseIt(this, 0);
3387 }
3388
3389 /// Returns a read-only iterator that points to the first case in the
3390 /// SwitchInst.
3392 return ConstCaseIt(this, 0);
3393 }
3394
3395 /// Returns a read/write iterator that points one past the last in the
3396 /// SwitchInst.
3398 return CaseIt(this, getNumCases());
3399 }
3400
3401 /// Returns a read-only iterator that points one past the last in the
3402 /// SwitchInst.
3404 return ConstCaseIt(this, getNumCases());
3405 }
3406
3407 /// Iteration adapter for range-for loops.
3409 return make_range(case_begin(), case_end());
3410 }
3411
3412 /// Constant iteration adapter for range-for loops.
3414 return make_range(case_begin(), case_end());
3415 }
3416
3417 /// Returns an iterator that points to the default case.
3418 /// Note: this iterator allows to resolve successor only. Attempt
3419 /// to resolve case value causes an assertion.
3420 /// Also note, that increment and decrement also causes an assertion and
3421 /// makes iterator invalid.
3423 return CaseIt(this, DefaultPseudoIndex);
3424 }
3426 return ConstCaseIt(this, DefaultPseudoIndex);
3427 }
3428
3429 /// Search all of the case values for the specified constant. If it is
3430 /// explicitly handled, return the case iterator of it, otherwise return
3431 /// default case iterator to indicate that it is handled by the default
3432 /// handler.
3434 return CaseIt(
3435 this,
3436 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3437 }
3439 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3440 return Case.getCaseValue() == C;
3441 });
3442 if (I != case_end())
3443 return I;
3444
3445 return case_default();
3446 }
3447
3448 /// Finds the unique case value for a given successor. Returns null if the
3449 /// successor is not found, not unique, or is the default case.
3451 if (BB == getDefaultDest())
3452 return nullptr;
3453
3454 ConstantInt *CI = nullptr;
3455 for (auto Case : cases()) {
3456 if (Case.getCaseSuccessor() != BB)
3457 continue;
3458
3459 if (CI)
3460 return nullptr; // Multiple cases lead to BB.
3461
3462 CI = Case.getCaseValue();
3463 }
3464
3465 return CI;
3466 }
3467
3468 /// Add an entry to the switch instruction.
3469 /// Note:
3470 /// This action invalidates case_end(). Old case_end() iterator will
3471 /// point to the added case.
3472 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3473
3474 /// This method removes the specified case and its successor from the switch
3475 /// instruction. Note that this operation may reorder the remaining cases at
3476 /// index idx and above.
3477 /// Note:
3478 /// This action invalidates iterators for all cases following the one removed,
3479 /// including the case_end() iterator. It returns an iterator for the next
3480 /// case.
3481 CaseIt removeCase(CaseIt I);
3482
3483 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3484 BasicBlock *getSuccessor(unsigned idx) const {
3485 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3486 return cast<BasicBlock>(getOperand(idx*2+1));
3487 }
3488 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3489 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3490 setOperand(idx * 2 + 1, NewSucc);
3491 }
3492
3493 // Methods for support type inquiry through isa, cast, and dyn_cast:
3494 static bool classof(const Instruction *I) {
3495 return I->getOpcode() == Instruction::Switch;
3496 }
3497 static bool classof(const Value *V) {
3498 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3499 }
3500};
3501
3502/// A wrapper class to simplify modification of SwitchInst cases along with
3503/// their prof branch_weights metadata.
3505 SwitchInst &SI;
3506 std::optional<SmallVector<uint32_t, 8>> Weights;
3507 bool Changed = false;
3508
3509protected:
3511
3512 void init();
3513
3514public:
3515 using CaseWeightOpt = std::optional<uint32_t>;
3516 SwitchInst *operator->() { return &SI; }
3517 SwitchInst &operator*() { return SI; }
3518 operator SwitchInst *() { return &SI; }
3519
3521
3523 if (Changed)
3524 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3525 }
3526
3527 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3528 /// correspondent branch weight.
3530
3531 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3532 /// specified branch weight for the added case.
3533 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3534
3535 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3536 /// this object to not touch the underlying SwitchInst in destructor.
3538
3539 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3540 CaseWeightOpt getSuccessorWeight(unsigned idx);
3541
3542 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3543};
3544
3545template <> struct OperandTraits<SwitchInst> : public HungoffOperandTraits {};
3546
3548
3549//===----------------------------------------------------------------------===//
3550// IndirectBrInst Class
3551//===----------------------------------------------------------------------===//
3552
3553//===---------------------------------------------------------------------------
3554/// Indirect Branch Instruction.
3555///
3557 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3558
3559 unsigned ReservedSpace;
3560
3561 // Operand[0] = Address to jump to
3562 // Operand[n+1] = n-th destination
3563 IndirectBrInst(const IndirectBrInst &IBI);
3564
3565 /// Create a new indirectbr instruction, specifying an
3566 /// Address to jump to. The number of expected destinations can be specified
3567 /// here to make memory allocation more efficient. This constructor can also
3568 /// autoinsert before another instruction.
3569 IndirectBrInst(Value *Address, unsigned NumDests,
3570 InsertPosition InsertBefore);
3571
3572 // allocate space for exactly zero operands
3573 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3574
3575 void init(Value *Address, unsigned NumDests);
3576 void growOperands();
3577
3578protected:
3579 // Note: Instruction needs to be a friend here to call cloneImpl.
3580 friend class Instruction;
3581
3582 IndirectBrInst *cloneImpl() const;
3583
3584public:
3585 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3586
3587 /// Iterator type that casts an operand to a basic block.
3588 ///
3589 /// This only makes sense because the successors are stored as adjacent
3590 /// operands for indirectbr instructions.
3592 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3593 std::random_access_iterator_tag, BasicBlock *,
3594 ptrdiff_t, BasicBlock *, BasicBlock *> {
3596
3597 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3598 BasicBlock *operator->() const { return operator*(); }
3599 };
3600
3601 /// The const version of `succ_op_iterator`.
3603 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3604 std::random_access_iterator_tag,
3605 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3606 const BasicBlock *> {
3609
3610 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3611 const BasicBlock *operator->() const { return operator*(); }
3612 };
3613
3614 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3615 InsertPosition InsertBefore = nullptr) {
3616 return new IndirectBrInst(Address, NumDests, InsertBefore);
3617 }
3618
3619 /// Provide fast operand accessors.
3621
3622 // Accessor Methods for IndirectBrInst instruction.
3623 Value *getAddress() { return getOperand(0); }
3624 const Value *getAddress() const { return getOperand(0); }
3625 void setAddress(Value *V) { setOperand(0, V); }
3626
3627 /// return the number of possible destinations in this
3628 /// indirectbr instruction.
3629 unsigned getNumDestinations() const { return getNumOperands()-1; }
3630
3631 /// Return the specified destination.
3632 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3633 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3634
3635 /// Add a destination.
3636 ///
3637 void addDestination(BasicBlock *Dest);
3638
3639 /// This method removes the specified successor from the
3640 /// indirectbr instruction.
3641 void removeDestination(unsigned i);
3642
3643 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3644 BasicBlock *getSuccessor(unsigned i) const {
3645 return cast<BasicBlock>(getOperand(i+1));
3646 }
3647 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3648 setOperand(i + 1, NewSucc);
3649 }
3650
3652 return make_range(succ_op_iterator(std::next(value_op_begin())),
3653 succ_op_iterator(value_op_end()));
3654 }
3655
3657 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3658 const_succ_op_iterator(value_op_end()));
3659 }
3660
3661 // Methods for support type inquiry through isa, cast, and dyn_cast:
3662 static bool classof(const Instruction *I) {
3663 return I->getOpcode() == Instruction::IndirectBr;
3664 }
3665 static bool classof(const Value *V) {
3666 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3667 }
3668};
3669
3670template <>
3672
3674
3675//===----------------------------------------------------------------------===//
3676// InvokeInst Class
3677//===----------------------------------------------------------------------===//
3678
3679/// Invoke instruction. The SubclassData field is used to hold the
3680/// calling convention of the call.
3681///
3682class InvokeInst : public CallBase {
3683 /// The number of operands for this call beyond the called function,
3684 /// arguments, and operand bundles.
3685 static constexpr int NumExtraOperands = 2;
3686
3687 /// The index from the end of the operand array to the normal destination.
3688 static constexpr int NormalDestOpEndIdx = -3;
3689
// NOTE(review): this chunk is a doxygen "source view" extraction — each line
// begins with the rendered source line number, and several declaration lines
// (method signatures that were hyperlinks in the HTML) were dropped. The code
// below is kept byte-identical; only review comments are added. The enclosing
// `class InvokeInst : public CallBase` header lies above this window.
3690 /// The index from the end of the operand array to the unwind destination.
3691 static constexpr int UnwindDestOpEndIdx = -2;
3692
// NOTE(review): a declaration line (doxygen line 3693) appears to be missing here.
3694
3695 /// Construct an InvokeInst given a range of arguments.
3696 ///
3697 /// Construct an InvokeInst from a range of arguments
3698 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3699 BasicBlock *IfException, ArrayRef<Value *> Args,
// NOTE(review): doxygen line 3700 (presumably the Bundles/AllocInfo parameter
// line of this constructor) was dropped by extraction — confirm against upstream.
3701 const Twine &NameStr, InsertPosition InsertBefore);
3702
3703 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3704 BasicBlock *IfException, ArrayRef<Value *> Args,
3705 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3706
3707 /// Compute the number of operands to allocate.
3708 static unsigned ComputeNumOperands(unsigned NumArgs,
3709 size_t NumBundleInputs = 0) {
3710 // We need one operand for the called function, plus our extra operands and
3711 // the input operand counts provided.
3712 return 1 + NumExtraOperands + NumArgs + unsigned(NumBundleInputs);
3713 }
3714
3715protected:
3716 // Note: Instruction needs to be a friend here to call cloneImpl.
3717 friend class Instruction;
3718
3719 InvokeInst *cloneImpl() const;
3720
3721public:
// Factory: invoke without operand bundles; allocates exactly
// ComputeNumOperands(Args.size()) intrusive operands.
3722 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3723 BasicBlock *IfException, ArrayRef<Value *> Args,
3724 const Twine &NameStr,
3725 InsertPosition InsertBefore = nullptr) {
3726 IntrusiveOperandsAllocMarker AllocMarker{
3727 ComputeNumOperands(unsigned(Args.size()))};
3728 return new (AllocMarker) InvokeInst(Ty, Func, IfNormal, IfException, Args,
3729 {}, AllocMarker, NameStr, InsertBefore);
3730 }
3731
// Factory: invoke with operand bundles; also reserves descriptor space for
// Bundles.size() BundleOpInfo records.
3732 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3733 BasicBlock *IfException, ArrayRef<Value *> Args,
3734 ArrayRef<OperandBundleDef> Bundles = {},
3735 const Twine &NameStr = "",
3736 InsertPosition InsertBefore = nullptr) {
3737 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3738 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)),
3739 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3740
3741 return new (AllocMarker)
3742 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, AllocMarker,
3743 NameStr, InsertBefore);
3744 }
3745
// NOTE(review): the signature line of this convenience overload (doxygen 3746,
// presumably `static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,`)
// was dropped by extraction.
3747 BasicBlock *IfException, ArrayRef<Value *> Args,
3748 const Twine &NameStr,
3749 InsertPosition InsertBefore = nullptr) {
3750 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3751 IfException, Args, {}, NameStr, InsertBefore);
3752 }
3753
// NOTE(review): signature line (doxygen 3754) of the bundle-taking
// FunctionCallee overload was likewise dropped.
3755 BasicBlock *IfException, ArrayRef<Value *> Args,
3756 ArrayRef<OperandBundleDef> Bundles = {},
3757 const Twine &NameStr = "",
3758 InsertPosition InsertBefore = nullptr) {
3759 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3760 IfException, Args, Bundles, NameStr, InsertBefore);
3761 }
3762
3763 /// Create a clone of \p II with a different set of operand bundles and
3764 /// insert it before \p InsertBefore.
3765 ///
3766 /// The returned invoke instruction is identical to \p II in every way except
3767 /// that the operand bundles for the new instruction are set to the operand
3768 /// bundles in \p Bundles.
3769 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3770 InsertPosition InsertPt = nullptr);
3771
3772 // get*Dest - Return the destination basic blocks...
// NOTE(review): the accessor heads (getNormalDest/getUnwindDest/setNormalDest/
// setUnwindDest) below lost their signature lines to extraction; only the
// bodies remain. They read/write the operands at the *OpEndIdx offsets.
3774 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3775 }
3777 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3778 }
3780 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3781 }
3783 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3784 }
3785
3786 /// Get the landingpad instruction from the landing pad
3787 /// block (the unwind destination).
3788 LandingPadInst *getLandingPadInst() const;
3789
// Successor 0 is the normal destination, successor 1 the unwind destination.
3790 BasicBlock *getSuccessor(unsigned i) const {
3791 assert(i < 2 && "Successor # out of range for invoke!");
3792 return i == 0 ? getNormalDest() : getUnwindDest();
3793 }
3794
3795 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3796 assert(i < 2 && "Successor # out of range for invoke!");
3797 if (i == 0)
3798 setNormalDest(NewSucc);
3799 else
3800 setUnwindDest(NewSucc);
3801 }
3802
3803 unsigned getNumSuccessors() const { return 2; }
3804
3805 /// Updates profile metadata by scaling it by \p S / \p T.
3806 void updateProfWeight(uint64_t S, uint64_t T);
3807
3808 // Methods for support type inquiry through isa, cast, and dyn_cast:
3809 static bool classof(const Instruction *I) {
3810 return (I->getOpcode() == Instruction::Invoke);
3811 }
3812 static bool classof(const Value *V) {
3813 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3814 }
3815
3816private:
3817 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3818 // method so that subclasses cannot accidentally use it.
3819 template <typename Bitfield>
3820 void setSubclassData(typename Bitfield::Type Value) {
3821 Instruction::setSubclassData<Bitfield>(Value);
3822 }
3823};
3824
3825InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3826 BasicBlock *IfException, ArrayRef<Value *> Args,
3827 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3828 const Twine &NameStr, InsertPosition InsertBefore)
3829 : CallBase(Ty->getReturnType(), Instruction::Invoke, AllocInfo,
3830 InsertBefore) {
3831 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3832}
3833
3834//===----------------------------------------------------------------------===//
3835// CallBrInst Class
3836//===----------------------------------------------------------------------===//
3837
3838/// CallBr instruction, tracking function calls that may not return control but
3839/// instead transfer it to a third location. The SubclassData field is used to
3840/// hold the calling convention of the call.
3841///
// NOTE(review): doxygen source-view extraction — line-number prefixes are
// fused into the code and several declaration lines were dropped (marked
// below). Code is kept byte-identical; only comments are added.
3842class CallBrInst : public CallBase {
3843
3844 unsigned NumIndirectDests;
3845
// NOTE(review): a declaration line (doxygen 3846/3847) appears missing here.
3847
3848 /// Construct a CallBrInst given a range of arguments.
3849 ///
3850 /// Construct a CallBrInst from a range of arguments
3851 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3852 ArrayRef<BasicBlock *> IndirectDests,
// NOTE(review): the Args/Bundles parameter line (doxygen 3853) of this
// constructor was dropped by extraction — confirm against upstream.
3854 AllocInfo AllocInfo, const Twine &NameStr,
3855 InsertPosition InsertBefore);
3856
3857 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3858 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3859 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3860
3861 /// Compute the number of operands to allocate.
3862 static unsigned ComputeNumOperands(int NumArgs, int NumIndirectDests,
3863 int NumBundleInputs = 0) {
3864 // We need one operand for the called function, plus our extra operands and
3865 // the input operand counts provided.
3866 return unsigned(2 + NumIndirectDests + NumArgs + NumBundleInputs);
3867 }
3868
3869protected:
3870 // Note: Instruction needs to be a friend here to call cloneImpl.
3871 friend class Instruction;
3872
3873 CallBrInst *cloneImpl() const;
3874
3875public:
// NOTE(review): the signature line of this factory (doxygen 3876, presumably
// `static CallBrInst *Create(FunctionType *Ty, Value *Func,`) was dropped.
3877 BasicBlock *DefaultDest,
3878 ArrayRef<BasicBlock *> IndirectDests,
3879 ArrayRef<Value *> Args, const Twine &NameStr,
3880 InsertPosition InsertBefore = nullptr) {
3881 IntrusiveOperandsAllocMarker AllocMarker{
3882 ComputeNumOperands(Args.size(), IndirectDests.size())};
3883 return new (AllocMarker)
3884 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, {}, AllocMarker,
3885 NameStr, InsertBefore);
3886 }
3887
// Factory taking operand bundles; reserves descriptor space for the
// BundleOpInfo records alongside the intrusive operands.
3888 static CallBrInst *
3889 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3890 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3891 ArrayRef<OperandBundleDef> Bundles = {}, const Twine &NameStr = "",
3892 InsertPosition InsertBefore = nullptr) {
3893 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3894 ComputeNumOperands(Args.size(), IndirectDests.size(),
3895 CountBundleInputs(Bundles)),
3896 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3897
3898 return new (AllocMarker)
3899 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3900 AllocMarker, NameStr, InsertBefore);
3901 }
3902
3903 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3904 ArrayRef<BasicBlock *> IndirectDests,
3905 ArrayRef<Value *> Args, const Twine &NameStr,
3906 InsertPosition InsertBefore = nullptr) {
3907 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3908 IndirectDests, Args, NameStr, InsertBefore);
3909 }
3910
3911 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3912 ArrayRef<BasicBlock *> IndirectDests,
3913 ArrayRef<Value *> Args,
3914 ArrayRef<OperandBundleDef> Bundles = {},
3915 const Twine &NameStr = "",
3916 InsertPosition InsertBefore = nullptr) {
3917 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3918 IndirectDests, Args, Bundles, NameStr, InsertBefore);
3919 }
3920
3921 /// Create a clone of \p CBI with a different set of operand bundles and
3922 /// insert it before \p InsertBefore.
3923 ///
3924 /// The returned callbr instruction is identical to \p CBI in every way
3925 /// except that the operand bundles for the new instruction are set to the
3926 /// operand bundles in \p Bundles.
3927 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles,
3928 InsertPosition InsertBefore = nullptr);
3929
3930 /// Return the number of callbr indirect dest labels.
3931 ///
3932 unsigned getNumIndirectDests() const { return NumIndirectDests; }
3933
3934 /// getIndirectDestLabel - Return the i-th indirect dest label.
3935 ///
3936 Value *getIndirectDestLabel(unsigned i) const {
3937 assert(i < getNumIndirectDests() && "Out of bounds!");
3938 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
3939 }
3940
3941 Value *getIndirectDestLabelUse(unsigned i) const {
3942 assert(i < getNumIndirectDests() && "Out of bounds!");
3943 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
3944 }
3945
3946 // Return the destination basic blocks...
// NOTE(review): several accessor heads below (getDefaultDest,
// getIndirectDests, setDefaultDest) lost their signature lines to extraction;
// the bodies index operands relative to Op<-1>() (the callee slot at the end).
3948 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
3949 }
3950 BasicBlock *getIndirectDest(unsigned i) const {
3951 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
3952 }
3954 SmallVector<BasicBlock *, 16> IndirectDests;
3955 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
3956 IndirectDests.push_back(getIndirectDest(i));
3957 return IndirectDests;
3958 }
3960 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
3961 }
3962 void setIndirectDest(unsigned i, BasicBlock *B) {
3963 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
3964 }
3965
// Successor 0 is the default destination; successors 1..N map to the
// indirect destinations.
3966 BasicBlock *getSuccessor(unsigned i) const {
// NOTE(review): this assert permits i == getNumSuccessors(); confirm whether
// the intended bound is `i < getNumSuccessors()`.
3967 assert(i < getNumSuccessors() + 1 &&
3968 "Successor # out of range for callbr!");
3969 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
3970 }
3971
3972 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3973 assert(i < getNumIndirectDests() + 1 &&
3974 "Successor # out of range for callbr!");
3975 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
3976 }
3977
3978 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
3979
3980 // Methods for support type inquiry through isa, cast, and dyn_cast:
3981 static bool classof(const Instruction *I) {
3982 return (I->getOpcode() == Instruction::CallBr);
3983 }
3984 static bool classof(const Value *V) {
3985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3986 }
3987
3988private:
3989 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3990 // method so that subclasses cannot accidentally use it.
3991 template <typename Bitfield>
3992 void setSubclassData(typename Bitfield::Type Value) {
3993 Instruction::setSubclassData<Bitfield>(Value);
3994 }
3995};
3996
3997CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3998 ArrayRef<BasicBlock *> IndirectDests,
3999 ArrayRef<Value *> Args,
4000 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
4001 const Twine &NameStr, InsertPosition InsertBefore)
4002 : CallBase(Ty->getReturnType(), Instruction::CallBr, AllocInfo,
4003 InsertBefore) {
4004 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4005}
4006
4007//===----------------------------------------------------------------------===//
4008// ResumeInst Class
4009//===----------------------------------------------------------------------===//
4010
4011//===---------------------------------------------------------------------------
4012/// Resume the propagation of an exception.
4013///
// NOTE(review): doxygen source-view extraction — line-number prefixes are
// fused into the code; one declaration line is missing (marked below).
4014class ResumeInst : public Instruction {
4015 constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};
4016
4017 ResumeInst(const ResumeInst &RI);
4018
4019 explicit ResumeInst(Value *Exn, InsertPosition InsertBefore = nullptr);
4020
4021protected:
4022 // Note: Instruction needs to be a friend here to call cloneImpl.
4023 friend class Instruction;
4024
4025 ResumeInst *cloneImpl() const;
4026
4027public:
4028 static ResumeInst *Create(Value *Exn, InsertPosition InsertBefore = nullptr) {
4029 return new (AllocMarker) ResumeInst(Exn, InsertBefore);
4030 }
4031
4032 /// Provide fast operand accessors
// NOTE(review): the accessor-macro line (doxygen 4033, presumably a
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS invocation) was dropped by extraction.
4034
4035 /// Convenience accessor.
4036 Value *getValue() const { return Op<0>(); }
4037
4038 unsigned getNumSuccessors() const { return 0; }
4039
4040 // Methods for support type inquiry through isa, cast, and dyn_cast:
4041 static bool classof(const Instruction *I) {
4042 return I->getOpcode() == Instruction::Resume;
4043 }
4044 static bool classof(const Value *V) {
4045 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4046 }
4047
4048private:
// Resume is a terminator with no successors; these private overrides exist
// only to satisfy Instruction's successor interface and must never be called.
4049 BasicBlock *getSuccessor(unsigned idx) const {
4050 llvm_unreachable("ResumeInst has no successors!");
4051 }
4052
4053 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4054 llvm_unreachable("ResumeInst has no successors!");
4055 }
4056};
4057
4058template <>
4060 public FixedNumOperandTraits<ResumeInst, 1> {
4061};
4062
4064
4065//===----------------------------------------------------------------------===//
4066// CatchSwitchInst Class
4067//===----------------------------------------------------------------------===//
// NOTE(review): interior of `CatchSwitchInst` — the class-opening line
// (presumably `class CatchSwitchInst : public Instruction {`) was dropped by
// the doxygen extraction, as were several accessor/typedef lines marked
// below. Code kept byte-identical; comments only.
4069 using UnwindDestField = BoolBitfieldElementT<0>;
4070
4071 constexpr static HungOffOperandsAllocMarker AllocMarker{};
4072
4073 /// The number of operands actually allocated. NumOperands is
4074 /// the number actually in use.
4075 unsigned ReservedSpace;
4076
4077 // Operand[0] = Outer scope
4078 // Operand[1] = Unwind block destination
4079 // Operand[n] = BasicBlock to go to on match
4080 CatchSwitchInst(const CatchSwitchInst &CSI);
4081
4082 /// Create a new switch instruction, specifying a
4083 /// default destination. The number of additional handlers can be specified
4084 /// here to make memory allocation more efficient.
4085 /// This constructor can also autoinsert before another instruction.
4086 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4087 unsigned NumHandlers, const Twine &NameStr,
4088 InsertPosition InsertBefore);
4089
4090 // allocate space for exactly zero operands
4091 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4092
4093 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4094 void growOperands(unsigned Size);
4095
4096protected:
4097 // Note: Instruction needs to be a friend here to call cloneImpl.
4098 friend class Instruction;
4099
4100 CatchSwitchInst *cloneImpl() const;
4101
4102public:
4103 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4104
4105 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4106 unsigned NumHandlers,
4107 const Twine &NameStr = "",
4108 InsertPosition InsertBefore = nullptr) {
4109 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4110 InsertBefore);
4111 }
4112
4113 /// Provide fast operand accessors
// NOTE(review): an accessor-macro line (doxygen 4114) was dropped here.
4115
4116 // Accessor Methods for CatchSwitch stmt
4117 Value *getParentPad() const { return getOperand(0); }
4118 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4119
4120 // Accessor Methods for CatchSwitch stmt
4121 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4122 bool unwindsToCaller() const { return !hasUnwindDest(); }
// NOTE(review): the getUnwindDest() signature line (doxygen 4123) was dropped.
4124 if (hasUnwindDest())
4125 return cast<BasicBlock>(getOperand(1));
4126 return nullptr;
4127 }
4128 void setUnwindDest(BasicBlock *UnwindDest) {
4129 assert(UnwindDest);
4130 assert(hasUnwindDest());
4131 setOperand(1, UnwindDest);
4132 }
4133
4134 /// return the number of 'handlers' in this catchswitch
4135 /// instruction, except the default handler
4136 unsigned getNumHandlers() const {
// Operand 0 is the parent pad and, when present, operand 1 the unwind
// destination — neither counts as a handler.
4137 if (hasUnwindDest())
4138 return getNumOperands() - 2;
4139 return getNumOperands() - 1;
4140 }
4141
4142private:
4143 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4144 static const BasicBlock *handler_helper(const Value *V) {
4145 return cast<BasicBlock>(V);
4146 }
4147
4148public:
4149 using DerefFnTy = BasicBlock *(*)(Value *);
// NOTE(review): the handler_iterator/handler_range typedef lines (doxygen
// 4150-4155, mapped_iterator based) were dropped by extraction.
4152 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4156
4157 /// Returns an iterator that points to the first handler in CatchSwitchInst.
// NOTE(review): the handler_begin() signature line (doxygen 4158) was dropped.
4159 op_iterator It = op_begin() + 1;
4160 if (hasUnwindDest())
4161 ++It;
4162 return handler_iterator(It, DerefFnTy(handler_helper));
4163 }
4164
4165 /// Returns an iterator that points to the first handler in the
4166 /// CatchSwitchInst.
4168 const_op_iterator It = op_begin() + 1;
4169 if (hasUnwindDest())
4170 ++It;
4171 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4172 }
4173
4174 /// Returns a read-only iterator that points one past the last
4175 /// handler in the CatchSwitchInst.
4177 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4178 }
4179
4180 /// Returns an iterator that points one past the last handler in the
4181 /// CatchSwitchInst.
4183 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4184 }
4185
4186 /// iteration adapter for range-for loops.
4188 return make_range(handler_begin(), handler_end());
4189 }
4190
4191 /// iteration adapter for range-for loops.
4193 return make_range(handler_begin(), handler_end());
4194 }
4195
4196 /// Add an entry to the switch instruction...
4197 /// Note:
4198 /// This action invalidates handler_end(). Old handler_end() iterator will
4199 /// point to the added handler.
4200 void addHandler(BasicBlock *Dest);
4201
4202 void removeHandler(handler_iterator HI);
4203
4204 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4205 BasicBlock *getSuccessor(unsigned Idx) const {
4206 assert(Idx < getNumSuccessors() &&
4207 "Successor # out of range for catchswitch!");
4208 return cast<BasicBlock>(getOperand(Idx + 1));
4209 }
4210 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4211 assert(Idx < getNumSuccessors() &&
4212 "Successor # out of range for catchswitch!");
4213 setOperand(Idx + 1, NewSucc);
4214 }
4215
4216 // Methods for support type inquiry through isa, cast, and dyn_cast:
4217 static bool classof(const Instruction *I) {
4218 return I->getOpcode() == Instruction::CatchSwitch;
4219 }
4220 static bool classof(const Value *V) {
4221 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4222 }
4223};
4224
4225template <>
4227
4229
4230//===----------------------------------------------------------------------===//
4231// CleanupPadInst Class
4232//===----------------------------------------------------------------------===//
// NOTE(review): interior of `CleanupPadInst` — the class-opening line
// (presumably `class CleanupPadInst : public FuncletPadInst {`) was dropped
// by the doxygen extraction. Code kept byte-identical; comments only.
4234private:
4235 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4236 AllocInfo AllocInfo, const Twine &NameStr,
4237 InsertPosition InsertBefore)
4238 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, AllocInfo,
4239 NameStr, InsertBefore) {}
4240
4241public:
// Factory: allocates one operand for the parent pad plus one per argument.
4242 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = {},
4243 const Twine &NameStr = "",
4244 InsertPosition InsertBefore = nullptr) {
4245 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4246 return new (AllocMarker)
4247 CleanupPadInst(ParentPad, Args, AllocMarker, NameStr, InsertBefore);
4248 }
4249
4250 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4251 static bool classof(const Instruction *I) {
4252 return I->getOpcode() == Instruction::CleanupPad;
4253 }
4254 static bool classof(const Value *V) {
4255 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4256 }
4257};
4258
4259//===----------------------------------------------------------------------===//
4260// CatchPadInst Class
4261//===----------------------------------------------------------------------===//
// NOTE(review): interior of `CatchPadInst` — the class-opening line
// (presumably `class CatchPadInst : public FuncletPadInst {`) was dropped by
// the doxygen extraction. Code kept byte-identical; comments only.
4263private:
4264 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4265 AllocInfo AllocInfo, const Twine &NameStr,
4266 InsertPosition InsertBefore)
4267 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, AllocInfo,
4268 NameStr, InsertBefore) {}
4269
4270public:
// Factory: allocates one operand for the owning catchswitch plus one per arg.
4271 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4272 const Twine &NameStr = "",
4273 InsertPosition InsertBefore = nullptr) {
4274 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4275 return new (AllocMarker)
4276 CatchPadInst(CatchSwitch, Args, AllocMarker, NameStr, InsertBefore);
4277 }
4278
4279 /// Convenience accessors
// NOTE(review): the getCatchSwitch() signature line (doxygen 4280) was
// dropped; the accessor reads the last operand (Op<-1>()).
4281 return cast<CatchSwitchInst>(Op<-1>());
4282 }
4283 void setCatchSwitch(Value *CatchSwitch) {
4284 assert(CatchSwitch);
4285 Op<-1>() = CatchSwitch;
4286 }
4287
4288 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4289 static bool classof(const Instruction *I) {
4290 return I->getOpcode() == Instruction::CatchPad;
4291 }
4292 static bool classof(const Value *V) {
4293 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4294 }
4295};
4296
4297//===----------------------------------------------------------------------===//
4298// CatchReturnInst Class
4299//===----------------------------------------------------------------------===//
4300
// NOTE(review): interior of `CatchReturnInst` — the class-opening line
// (presumably `class CatchReturnInst : public Instruction {`, doxygen 4301)
// was dropped by the extraction, as were a few lines marked below. Code kept
// byte-identical; comments only.
4302 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
4303
// NOTE(review): a declaration line (doxygen 4304, presumably the copy
// constructor `CatchReturnInst(const CatchReturnInst &RI);`) was dropped.
4305 CatchReturnInst(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore);
4306
4307 void init(Value *CatchPad, BasicBlock *BB);
4308
4309protected:
4310 // Note: Instruction needs to be a friend here to call cloneImpl.
4311 friend class Instruction;
4312
4313 CatchReturnInst *cloneImpl() const;
4314
4315public:
4316 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4317 InsertPosition InsertBefore = nullptr) {
4318 assert(CatchPad);
4319 assert(BB);
4320 return new (AllocMarker) CatchReturnInst(CatchPad, BB, InsertBefore);
4321 }
4322
4323 /// Provide fast operand accessors
// NOTE(review): an accessor-macro line (doxygen 4324) was dropped here.
4325
4326 /// Convenience accessors.
4327 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4328 void setCatchPad(CatchPadInst *CatchPad) {
4329 assert(CatchPad);
4330 Op<0>() = CatchPad;
4331 }
4332
4333 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4334 void setSuccessor(BasicBlock *NewSucc) {
4335 assert(NewSucc);
4336 Op<1>() = NewSucc;
4337 }
4338 unsigned getNumSuccessors() const { return 1; }
4339
4340 /// Get the parentPad of this catchret's catchpad's catchswitch.
4341 /// The successor block is implicitly a member of this funclet.
// NOTE(review): the signature and body of getCatchSwitchParentPad() (doxygen
// 4342-4343) were dropped by extraction; only the closing brace remains.
4344 }
4345
4346 // Methods for support type inquiry through isa, cast, and dyn_cast:
4347 static bool classof(const Instruction *I) {
4348 return (I->getOpcode() == Instruction::CatchRet);
4349 }
4350 static bool classof(const Value *V) {
4351 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4352 }
4353
4354private:
// Private indexed forms forward to the single-successor accessors above.
4355 BasicBlock *getSuccessor(unsigned Idx) const {
4356 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4357 return getSuccessor();
4358 }
4359
4360 void setSuccessor(unsigned Idx, BasicBlock *B) {
4361 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4362 setSuccessor(B);
4363 }
4364};
4365
4366template <>
4368 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4369
4371
4372//===----------------------------------------------------------------------===//
4373// CleanupReturnInst Class
4374//===----------------------------------------------------------------------===//
4375
// NOTE(review): interior of `CleanupReturnInst` — the class-opening line
// (presumably `class CleanupReturnInst : public Instruction {`) was dropped
// by the doxygen extraction, as were a few lines marked below. Code kept
// byte-identical; comments only.
4376 using UnwindDestField = BoolBitfieldElementT<0>;
4377
4378private:
// NOTE(review): a declaration line (doxygen 4379, presumably the copy
// constructor) was dropped here.
4380 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
4381 AllocInfo AllocInfo, InsertPosition InsertBefore = nullptr);

4382 AllocInfo AllocInfo, InsertPosition InsertBefore = nullptr);
4383
4384 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4385
4386protected:
4387 // Note: Instruction needs to be a friend here to call cloneImpl.
4388 friend class Instruction;
4389
4390 CleanupReturnInst *cloneImpl() const;
4391
4392public:
// Factory: one operand for the cleanuppad, plus one more only when an
// unwind destination is supplied.
4393 static CleanupReturnInst *Create(Value *CleanupPad,
4394 BasicBlock *UnwindBB = nullptr,
4395 InsertPosition InsertBefore = nullptr) {
4396 assert(CleanupPad);
4397 unsigned Values = 1;
4398 if (UnwindBB)
4399 ++Values;
4400 IntrusiveOperandsAllocMarker AllocMarker{Values};
4401 return new (AllocMarker)
4402 CleanupReturnInst(CleanupPad, UnwindBB, AllocMarker, InsertBefore);
4403 }
4404
4405 /// Provide fast operand accessors
// NOTE(review): an accessor-macro line (doxygen 4406) was dropped here.
4407
4408 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4409 bool unwindsToCaller() const { return !hasUnwindDest(); }
4410
4411 /// Convenience accessor.
// NOTE(review): the getCleanupPad() signature line (doxygen 4412) was dropped.
4413 return cast<CleanupPadInst>(Op<0>());
4414 }
4415 void setCleanupPad(CleanupPadInst *CleanupPad) {
4416 assert(CleanupPad);
4417 Op<0>() = CleanupPad;
4418 }
4419
4420 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4421
// NOTE(review): the getUnwindDest() signature line (doxygen 4422) was dropped.
4423 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4424 }
4425 void setUnwindDest(BasicBlock *NewDest) {
4426 assert(NewDest);
4427 assert(hasUnwindDest());
4428 Op<1>() = NewDest;
4429 }
4430
4431 // Methods for support type inquiry through isa, cast, and dyn_cast:
4432 static bool classof(const Instruction *I) {
4433 return (I->getOpcode() == Instruction::CleanupRet);
4434 }
4435 static bool classof(const Value *V) {
4436 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4437 }
4438
4439private:
4440 BasicBlock *getSuccessor(unsigned Idx) const {
4441 assert(Idx == 0);
4442 return getUnwindDest();
4443 }
4444
4445 void setSuccessor(unsigned Idx, BasicBlock *B) {
4446 assert(Idx == 0);
4447 setUnwindDest(B);
4448 }
4449
4450 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4451 // method so that subclasses cannot accidentally use it.
4452 template <typename Bitfield>
4453 void setSubclassData(typename Bitfield::Type Value) {
4454 Instruction::setSubclassData<Bitfield>(Value);
4455 }
4456};
4457
4458template <>
4460 : public VariadicOperandTraits<CleanupReturnInst> {};
4461
4463
4464//===----------------------------------------------------------------------===//
4465// UnreachableInst Class
4466//===----------------------------------------------------------------------===//
4467
4468//===---------------------------------------------------------------------------
4469/// This function has undefined behavior. In particular, the
4470/// presence of this instruction indicates some higher level knowledge that the
4471/// end of the block cannot be reached.
4472///
// NOTE(review): interior of `UnreachableInst` — the class-opening line
// (presumably `class UnreachableInst : public Instruction {`, doxygen 4473)
// was dropped by the doxygen extraction. Code kept byte-identical; comments
// only.
4474 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
4475
4476protected:
4477 // Note: Instruction needs to be a friend here to call cloneImpl.
4478 friend class Instruction;
4479
4480 UnreachableInst *cloneImpl() const;
4481
4482public:
4483 explicit UnreachableInst(LLVMContext &C,
4484 InsertPosition InsertBefore = nullptr);
4485
4486 // allocate space for exactly zero operands
4487 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4488 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4489
4490 unsigned getNumSuccessors() const { return 0; }
4491
4492 // Methods for support type inquiry through isa, cast, and dyn_cast:
4493 static bool classof(const Instruction *I) {
4494 return I->getOpcode() == Instruction::Unreachable;
4495 }
4496 static bool classof(const Value *V) {
4497 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4498 }
4499
4500private:
// Terminator with no successors; these overrides exist only to satisfy the
// successor interface and must never be reached.
4501 BasicBlock *getSuccessor(unsigned idx) const {
4502 llvm_unreachable("UnreachableInst has no successors!");
4503 }
4504
4505 void setSuccessor(unsigned idx, BasicBlock *B) {
4506 llvm_unreachable("UnreachableInst has no successors!");
4507 }
4508};
4509
4510//===----------------------------------------------------------------------===//
4511// TruncInst Class
4512//===----------------------------------------------------------------------===//
4513
// NOTE(review): doxygen source-view extraction — line-number prefixes are
// fused into the code and the nuw/nsw setter heads plus one return line were
// dropped (marked below). Code kept byte-identical; comments only.
4514/// This class represents a truncation of integer types.
4515class TruncInst : public CastInst {
4516protected:
4517 // Note: Instruction needs to be a friend here to call cloneImpl.
4518 friend class Instruction;
4519
4520 /// Clone an identical TruncInst
4521 TruncInst *cloneImpl() const;
4522
4523public:
// Bit flags stored in SubclassOptionalData describing the no-wrap
// properties of this trunc.
4524 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
4525
4526 /// Constructor with insert-before-instruction semantics
4527 TruncInst(Value *S, ///< The value to be truncated
4528 Type *Ty, ///< The (smaller) type to truncate to
4529 const Twine &NameStr = "", ///< A name for the new instruction
4530 InsertPosition InsertBefore =
4531 nullptr ///< Where to insert the new instruction
4532 );
4533
4534 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4535 static bool classof(const Instruction *I) {
4536 return I->getOpcode() == Trunc;
4537 }
4538 static bool classof(const Value *V) {
4539 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4540 }
4541
// NOTE(review): the setHasNoUnsignedWrap(bool B) / setHasNoSignedWrap(bool B)
// signature lines (doxygen 4542-4543 and 4546-4547) were dropped by
// extraction; the bodies below clear then conditionally set the flag bit.
4544 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
4545 }
4548 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
4549 }
4550
4551 /// Test whether this operation is known to never
4552 /// undergo unsigned overflow, aka the nuw property.
4553 bool hasNoUnsignedWrap() const {
// NOTE(review): the return statement (doxygen 4554, presumably
// `return (SubclassOptionalData & NoUnsignedWrap) != 0;`) was dropped.
4555 }
4556
4557 /// Test whether this operation is known to never
4558 /// undergo signed overflow, aka the nsw property.
4559 bool hasNoSignedWrap() const {
4560 return (SubclassOptionalData & NoSignedWrap) != 0;
4561 }
4562
4563 /// Returns the no-wrap kind of the operation.
4564 unsigned getNoWrapKind() const {
4565 unsigned NoWrapKind = 0;
4566 if (hasNoUnsignedWrap())
4567 NoWrapKind |= NoUnsignedWrap;
4568
4569 if (hasNoSignedWrap())
4570 NoWrapKind |= NoSignedWrap;
4571
4572 return NoWrapKind;
4573 }
4574};
4575
4576//===----------------------------------------------------------------------===//
4577// ZExtInst Class
4578//===----------------------------------------------------------------------===//
4579
4580/// This class represents zero extension of integer types.
4581class ZExtInst : public CastInst {
4582protected:
4583 // Note: Instruction needs to be a friend here to call cloneImpl.
4584 friend class Instruction;
4585
4586 /// Clone an identical ZExtInst
4587 ZExtInst *cloneImpl() const;
4588
4589public:
4590 /// Constructor with insert-before-instruction semantics
4591 ZExtInst(Value *S, ///< The value to be zero extended
4592 Type *Ty, ///< The type to zero extend to
4593 const Twine &NameStr = "", ///< A name for the new instruction
4594 InsertPosition InsertBefore =
4595 nullptr ///< Where to insert the new instruction
4596 );
4597
4598 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4599 static bool classof(const Instruction *I) {
4600 return I->getOpcode() == ZExt;
4601 }
4602 static bool classof(const Value *V) {
4603 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4604 }
4605};
4606
4607//===----------------------------------------------------------------------===//
4608// SExtInst Class
4609//===----------------------------------------------------------------------===//
4610
4611/// This class represents a sign extension of integer types.
4612class SExtInst : public CastInst {
4613protected:
4614 // Note: Instruction needs to be a friend here to call cloneImpl.
4615 friend class Instruction;
4616
4617 /// Clone an identical SExtInst
4618 SExtInst *cloneImpl() const;
4619
4620public:
4621 /// Constructor with insert-before-instruction semantics
4622 SExtInst(Value *S, ///< The value to be sign extended
4623 Type *Ty, ///< The type to sign extend to
4624 const Twine &NameStr = "", ///< A name for the new instruction
4625 InsertPosition InsertBefore =
4626 nullptr ///< Where to insert the new instruction
4627 );
4628
4629 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4630 static bool classof(const Instruction *I) {
4631 return I->getOpcode() == SExt;
4632 }
4633 static bool classof(const Value *V) {
4634 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4635 }
4636};
4637
4638//===----------------------------------------------------------------------===//
4639// FPTruncInst Class
4640//===----------------------------------------------------------------------===//
4641
4642/// This class represents a truncation of floating point types.
4643class FPTruncInst : public CastInst {
4644protected:
4645 // Note: Instruction needs to be a friend here to call cloneImpl.
4646 friend class Instruction;
4647
4648 /// Clone an identical FPTruncInst
4649 FPTruncInst *cloneImpl() const;
4650
4651public: /// Constructor with insert-before-instruction semantics
4652 FPTruncInst(Value *S, ///< The value to be truncated
4653 Type *Ty, ///< The type to truncate to
4654 const Twine &NameStr = "", ///< A name for the new instruction
4655 InsertPosition InsertBefore =
4656 nullptr ///< Where to insert the new instruction
4657 );
4658
4659 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4660 static bool classof(const Instruction *I) {
4661 return I->getOpcode() == FPTrunc;
4662 }
4663 static bool classof(const Value *V) {
4664 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4665 }
4666};
4667
4668//===----------------------------------------------------------------------===//
4669// FPExtInst Class
4670//===----------------------------------------------------------------------===//
4671
/// This class represents an extension of floating point types.
///
/// Corresponds to the IR 'fpext' instruction (opcode FPExt), which converts a
/// floating-point value to a larger floating-point type.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(Value *S, ///< The value to be extended
            Type *Ty, ///< The type to extend to
            const Twine &NameStr = "", ///< A name for the new instruction
            InsertPosition InsertBefore =
                nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4698
4699//===----------------------------------------------------------------------===//
4700// UIToFPInst Class
4701//===----------------------------------------------------------------------===//
4702
/// This class represents a cast unsigned integer to floating point.
///
/// Corresponds to the IR 'uitofp' instruction (opcode UIToFP), which
/// interprets the source integer as unsigned and converts it to a
/// floating-point type.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(Value *S, ///< The value to be converted
             Type *Ty, ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4729
4730//===----------------------------------------------------------------------===//
4731// SIToFPInst Class
4732//===----------------------------------------------------------------------===//
4733
/// This class represents a cast from signed integer to floating point.
///
/// Corresponds to the IR 'sitofp' instruction (opcode SIToFP), which
/// interprets the source integer as signed and converts it to a
/// floating-point type.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(Value *S, ///< The value to be converted
             Type *Ty, ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4760
4761//===----------------------------------------------------------------------===//
4762// FPToUIInst Class
4763//===----------------------------------------------------------------------===//
4764
/// This class represents a cast from floating point to unsigned integer.
///
/// Corresponds to the IR 'fptoui' instruction (opcode FPToUI), which converts
/// a floating-point value to an unsigned integer type.
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(Value *S, ///< The value to be converted
             Type *Ty, ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4791
4792//===----------------------------------------------------------------------===//
4793// FPToSIInst Class
4794//===----------------------------------------------------------------------===//
4795
/// This class represents a cast from floating point to signed integer.
///
/// Corresponds to the IR 'fptosi' instruction (opcode FPToSI), which converts
/// a floating-point value to a signed integer type.
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(Value *S, ///< The value to be converted
             Type *Ty, ///< The type to convert to
             const Twine &NameStr = "", ///< A name for the new instruction
             InsertPosition InsertBefore =
                 nullptr ///< Where to insert the new instruction
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4822
4823//===----------------------------------------------------------------------===//
4824// IntToPtrInst Class
4825//===----------------------------------------------------------------------===//
4826
/// This class represents a cast from an integer to a pointer.
///
/// Corresponds to the IR 'inttoptr' instruction (opcode IntToPtr).
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(Value *S, ///< The value to be converted
               Type *Ty, ///< The type to convert to
               const Twine &NameStr = "", ///< A name for the new instruction
               InsertPosition InsertBefore =
                   nullptr ///< Where to insert the new instruction
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type (i.e. of
  /// the result, which is the cast's destination type).
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4857
4858//===----------------------------------------------------------------------===//
4859// PtrToIntInst Class
4860//===----------------------------------------------------------------------===//
4861
4862/// This class represents a cast from a pointer to an integer.
4863class PtrToIntInst : public CastInst {
4864protected:
4865 // Note: Instruction needs to be a friend here to call cloneImpl.
4866 friend class Instruction;
4867
4868 /// Clone an identical PtrToIntInst.
4869 PtrToIntInst *cloneImpl() const;
4870
4871public:
4872 /// Constructor with insert-before-instruction semantics
4873 PtrToIntInst(Value *S, ///< The value to be converted
4874 Type *Ty, ///< The type to convert to
4875 const Twine &NameStr = "", ///< A name for the new instruction
4876 InsertPosition InsertBefore =
4877 nullptr ///< Where to insert the new instruction
4878 );
4879
4880 /// Gets the pointer operand.
4882 /// Gets the pointer operand.
4883 const Value *getPointerOperand() const { return getOperand(0); }
4884 /// Gets the operand index of the pointer operand.
4885 static unsigned getPointerOperandIndex() { return 0U; }
4886
4887 /// Returns the address space of the pointer operand.
4888 unsigned getPointerAddressSpace() const {
4890 }
4891
4892 // Methods for support type inquiry through isa, cast, and dyn_cast:
4893 static bool classof(const Instruction *I) {
4894 return I->getOpcode() == PtrToInt;
4895 }
4896 static bool classof(const Value *V) {
4897 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4898 }
4899};
4900
4901//===----------------------------------------------------------------------===//
4902// BitCastInst Class
4903//===----------------------------------------------------------------------===//
4904
/// This class represents a no-op cast from one type to another.
///
/// Corresponds to the IR 'bitcast' instruction (opcode BitCast), which
/// reinterprets a value as another type without changing any bits.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(Value *S, ///< The value to be cast
              Type *Ty, ///< The type to cast to
              const Twine &NameStr = "", ///< A name for the new instruction
              InsertPosition InsertBefore =
                  nullptr ///< Where to insert the new instruction
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4931
4932//===----------------------------------------------------------------------===//
4933// AddrSpaceCastInst Class
4934//===----------------------------------------------------------------------===//
4935
4936/// This class represents a conversion between pointers from one address space
4937/// to another.
4939protected:
4940 // Note: Instruction needs to be a friend here to call cloneImpl.
4941 friend class Instruction;
4942
4943 /// Clone an identical AddrSpaceCastInst.
4945
4946public:
4947 /// Constructor with insert-before-instruction semantics
4949 Value *S, ///< The value to be casted
4950 Type *Ty, ///< The type to casted to
4951 const Twine &NameStr = "", ///< A name for the new instruction
4952 InsertPosition InsertBefore =
4953 nullptr ///< Where to insert the new instruction
4954 );
4955
4956 // Methods for support type inquiry through isa, cast, and dyn_cast:
4957 static bool classof(const Instruction *I) {
4958 return I->getOpcode() == AddrSpaceCast;
4959 }
4960 static bool classof(const Value *V) {
4961 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4962 }
4963
4964 /// Gets the pointer operand.
4966 return getOperand(0);
4967 }
4968
4969 /// Gets the pointer operand.
4970 const Value *getPointerOperand() const {
4971 return getOperand(0);
4972 }
4973
4974 /// Gets the operand index of the pointer operand.
4975 static unsigned getPointerOperandIndex() {
4976 return 0U;
4977 }
4978
4979 /// Returns the address space of the pointer operand.
4980 unsigned getSrcAddressSpace() const {
4982 }
4983
4984 /// Returns the address space of the result.
4985 unsigned getDestAddressSpace() const {
4986 return getType()->getPointerAddressSpace();
4987 }
4988};
4989
4990//===----------------------------------------------------------------------===//
4991// Helper functions
4992//===----------------------------------------------------------------------===//
4993
4994/// A helper function that returns the pointer operand of a load or store
4995/// instruction. Returns nullptr if not load or store.
4996inline const Value *getLoadStorePointerOperand(const Value *V) {
4997 if (auto *Load = dyn_cast<LoadInst>(V))
4998 return Load->getPointerOperand();
4999 if (auto *Store = dyn_cast<StoreInst>(V))
5000 return Store->getPointerOperand();
5001 return nullptr;
5002}
5004 return const_cast<Value *>(
5005 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5006}
5007
5008/// A helper function that returns the pointer operand of a load, store
5009/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5010inline const Value *getPointerOperand(const Value *V) {
5011 if (auto *Ptr = getLoadStorePointerOperand(V))
5012 return Ptr;
5013 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5014 return Gep->getPointerOperand();
5015 return nullptr;
5016}
5018 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5019}
5020
5021/// A helper function that returns the alignment of load or store instruction.
5023 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5024 "Expected Load or Store instruction");
5025 if (auto *LI = dyn_cast<LoadInst>(I))
5026 return LI->getAlign();
5027 return cast<StoreInst>(I)->getAlign();
5028}
5029
5030/// A helper function that set the alignment of load or store instruction.
5031inline void setLoadStoreAlignment(Value *I, Align NewAlign) {
5032 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5033 "Expected Load or Store instruction");
5034 if (auto *LI = dyn_cast<LoadInst>(I))
5035 LI->setAlignment(NewAlign);
5036 else
5037 cast<StoreInst>(I)->setAlignment(NewAlign);
5038}
5039
5040/// A helper function that returns the address space of the pointer operand of
5041/// load or store instruction.
5042inline unsigned getLoadStoreAddressSpace(const Value *I) {
5043 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5044 "Expected Load or Store instruction");
5045 if (auto *LI = dyn_cast<LoadInst>(I))
5046 return LI->getPointerAddressSpace();
5047 return cast<StoreInst>(I)->getPointerAddressSpace();
5048}
5049
5050/// A helper function that returns the type of a load or store instruction.
5051inline Type *getLoadStoreType(const Value *I) {
5052 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5053 "Expected Load or Store instruction");
5054 if (auto *LI = dyn_cast<LoadInst>(I))
5055 return LI->getType();
5056 return cast<StoreInst>(I)->getValueOperand()->getType();
5057}
5058
5059/// A helper function that returns an atomic operation's sync scope; returns
5060/// std::nullopt if it is not an atomic operation.
5061inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5062 if (!I->isAtomic())
5063 return std::nullopt;
5064 if (auto *AI = dyn_cast<LoadInst>(I))
5065 return AI->getSyncScopeID();
5066 if (auto *AI = dyn_cast<StoreInst>(I))
5067 return AI->getSyncScopeID();
5068 if (auto *AI = dyn_cast<FenceInst>(I))
5069 return AI->getSyncScopeID();
5070 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5071 return AI->getSyncScopeID();
5072 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5073 return AI->getSyncScopeID();
5074 llvm_unreachable("unhandled atomic operation");
5075}
5076
5077/// A helper function that sets an atomic operation's sync scope.
5079 assert(I->isAtomic());
5080 if (auto *AI = dyn_cast<LoadInst>(I))
5081 AI->setSyncScopeID(SSID);
5082 else if (auto *AI = dyn_cast<StoreInst>(I))
5083 AI->setSyncScopeID(SSID);
5084 else if (auto *AI = dyn_cast<FenceInst>(I))
5085 AI->setSyncScopeID(SSID);
5086 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5087 AI->setSyncScopeID(SSID);
5088 else if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5089 AI->setSyncScopeID(SSID);
5090 else
5091 llvm_unreachable("unhandled atomic operation");
5092}
5093
5094//===----------------------------------------------------------------------===//
5095// FreezeInst Class
5096//===----------------------------------------------------------------------===//
5097
5098/// This class represents a freeze function that returns random concrete
5099/// value if an operand is either a poison value or an undef value
5101protected:
5102 // Note: Instruction needs to be a friend here to call cloneImpl.
5103 friend class Instruction;
5104
5105 /// Clone an identical FreezeInst
5106 FreezeInst *cloneImpl() const;
5107
5108public:
5109 explicit FreezeInst(Value *S, const Twine &NameStr = "",
5110 InsertPosition InsertBefore = nullptr);
5111
5112 // Methods for support type inquiry through isa, cast, and dyn_cast:
5113 static inline bool classof(const Instruction *I) {
5114 return I->getOpcode() == Freeze;
5115 }
5116 static inline bool classof(const Value *V) {
5117 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5118 }
5119};
5120
5121} // end namespace llvm
5122
5123#endif // LLVM_IR_INSTRUCTIONS_H
static const LLT S1
static bool isReverseMask(ArrayRef< int > M, EVT VT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Atomic ordering constants.
static const Function * getParent(const Value *V)
This file implements methods to test, set and extract typed bits from packed unsigned integers.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
RelocType Type
Definition: COFFYAML.cpp:410
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Align
std::string Name
uint32_t Index
uint64_t Size
Hexagon Common GEP
hexagon gen pred
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
#define P(N)
PowerPC Reduce CR logical Operation
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents a conversion between pointers from one address space to another.
const Value * getPointerOperand() const
Gets the pointer operand.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
Value * getPointerOperand()
Gets the pointer operand.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
unsigned getSrcAddressSpace() const
Returns the address space of the pointer operand.
unsigned getDestAddressSpace() const
Returns the address space of the result.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
static bool classof(const Value *V)
Definition: Instructions.h:157
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:151
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
void setAllocatedType(Type *Ty)
for use only in special circumstances that need to generically transform a whole instruction (eg: IR ...
Definition: Instructions.h:120
static bool classof(const Instruction *I)
Definition: Instructions.h:154
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
void setUsedWithInAlloca(bool V)
Specify whether this alloca is used to represent the arguments to a call.
Definition: Instructions.h:144
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:139
Value * getArraySize()
Definition: Instructions.h:96
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:104
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:128
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:529
const Value * getCompareOperand() const
Definition: Instructions.h:634
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
AtomicOrdering getMergedOrdering() const
Returns a single ordering which is at least as strong as both the success and failure orderings for t...
Definition: Instructions.h:607
void setWeak(bool IsWeak)
Definition: Instructions.h:564
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:555
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:640
BoolBitfieldElementT< VolatileField::NextBit > WeakField
Definition: Instructions.h:530
AtomicOrderingBitfieldElementT< SuccessOrderingField::NextBit > FailureOrderingField
Definition: Instructions.h:534
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
static bool isValidFailureOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:574
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:594
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
AlignmentBitfieldElementT< FailureOrderingField::NextBit > AlignmentField
Definition: Instructions.h:536
static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering)
Returns the strongest permitted ordering on failure, given the desired ordering on success.
Definition: Instructions.h:652
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:544
const Value * getPointerOperand() const
Definition: Instructions.h:630
static bool classof(const Value *V)
Definition: Instructions.h:671
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:562
void setAlignment(Align Align)
Definition: Instructions.h:548
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
Definition: Instructions.h:559
static bool isValidSuccessOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:569
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
AtomicOrderingBitfieldElementT< WeakField::NextBit > SuccessOrderingField
Definition: Instructions.h:532
static unsigned getPointerOperandIndex()
Definition: Instructions.h:631
const Value * getNewValOperand() const
Definition: Instructions.h:637
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:620
static bool classof(const Instruction *I)
Definition: Instructions.h:668
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:827
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:809
static unsigned getPointerOperandIndex()
Definition: Instructions.h:872
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:837
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:841
BinOpBitfieldElement< AtomicOrderingField::NextBit > OperationField
Definition: Instructions.h:799
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
@ Add
*p = old + v
Definition: Instructions.h:720
@ FAdd
*p = old + v
Definition: Instructions.h:741
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:764
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:734
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:768
@ FSub
*p = old - v
Definition: Instructions.h:744
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:756
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:732
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:738
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:752
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:736
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:748
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:760
@ Nand
*p = ~(old & v)
Definition: Instructions.h:726
AtomicOrderingBitfieldElementT< VolatileField::NextBit > AtomicOrderingField
Definition: Instructions.h:798
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:866
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
Value * getPointerOperand()
Definition: Instructions.h:870
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:852
bool isFloatingPointOperation() const
Definition: Instructions.h:882
static bool classof(const Instruction *I)
Definition: Instructions.h:887
const Value * getPointerOperand() const
Definition: Instructions.h:871
void setOperation(BinOp Operation)
Definition: Instructions.h:821
static bool classof(const Value *V)
Definition: Instructions.h:890
BinOp getOperation() const
Definition: Instructions.h:805
const Value * getValOperand() const
Definition: Instructions.h:875
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:861
void setAlignment(Align Align)
Definition: Instructions.h:831
Value * getValOperand()
Definition: Instructions.h:874
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:847
AlignmentBitfieldElementT< OperationField::NextBit > AlignmentField
Definition: Instructions.h:800
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:796
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:878
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
This class represents a no-op cast from one type to another.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
Conditional or Unconditional Branch instruction.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
iterator_range< succ_op_iterator > successors()
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
static bool classof(const Instruction *I)
bool isConditional() const
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
Value * getCondition() const
iterator_range< const_succ_op_iterator > successors() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1474
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
Definition: InstrTypes.h:1451
FunctionType * FTy
Definition: InstrTypes.h:1127
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2299
unsigned arg_size() const
Definition: InstrTypes.h:1284
unsigned getNumTotalBundleOperands() const
Return the total number operands (not operand bundles) used by every operand bundle in this OperandBu...
Definition: InstrTypes.h:2027
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static bool classof(const Value *V)
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i) const
Value * getIndirectDestLabelUse(unsigned i) const
BasicBlock * getIndirectDest(unsigned i) const
void setDefaultDest(BasicBlock *B)
unsigned getNumSuccessors() const
void setIndirectDest(unsigned i, BasicBlock *B)
Value * getIndirectDestLabel(unsigned i) const
getIndirectDestLabel - Return the i-th indirect dest label.
BasicBlock * getDefaultDest() const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static bool classof(const Value *V)
bool isTailCall() const
void setCanReturnTwice()
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
bool canReturnTwice() const
Return true if the call can return twice.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
bool isMustTailCall() const
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
bool isNonContinuableTrap() const
Return true if the call is for a noreturn trap intrinsic.
static CallInst * Create(FunctionCallee Func, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
CatchSwitchInst * getCatchSwitch() const
Convenience accessors.
void setCatchSwitch(Value *CatchSwitch)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CatchPadInst * Create(Value *CatchSwitch, ArrayRef< Value * > Args, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Value *V)
static bool classof(const Instruction *I)
BasicBlock * getSuccessor() const
CatchPadInst * getCatchPad() const
Convenience accessors.
void setSuccessor(BasicBlock *NewSucc)
static bool classof(const Value *V)
static CatchReturnInst * Create(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
void setCatchPad(CatchPadInst *CatchPad)
CatchReturnInst * cloneImpl() const
Value * getCatchSwitchParentPad() const
Get the parentPad of this catchret's catchpad's catchswitch.
void setUnwindDest(BasicBlock *UnwindDest)
static bool classof(const Instruction *I)
BasicBlock *(*)(Value *) DerefFnTy
const BasicBlock *(*)(const Value *) ConstDerefFnTy
unsigned getNumSuccessors() const
const_handler_iterator handler_begin() const
Returns an iterator that points to the first handler in the CatchSwitchInst.
unsigned getNumHandlers() const
Return the number of 'handlers' in this catchswitch instruction, except the default handler
void setSuccessor(unsigned Idx, BasicBlock *NewSucc)
Value * getParentPad() const
void setParentPad(Value *ParentPad)
bool unwindsToCaller() const
static bool classof(const Value *V)
handler_iterator handler_end()
Returns a read-only iterator that points one past the last handler in the CatchSwitchInst.
BasicBlock * getUnwindDest() const
BasicBlock * getSuccessor(unsigned Idx) const
const_handler_iterator handler_end() const
Returns an iterator that points one past the last handler in the CatchSwitchInst.
bool hasUnwindDest() const
handler_iterator handler_begin()
Returns an iterator that points to the first handler in the CatchSwitchInst.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
handler_range handlers()
iteration adapter for range-for loops.
const_handler_range handlers() const
iteration adapter for range-for loops.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
CleanupPadInst * getCleanupPad() const
Convenience accessor.
unsigned getNumSuccessors() const
BasicBlock * getUnwindDest() const
bool unwindsToCaller() const
void setCleanupPad(CleanupPadInst *CleanupPad)
static bool classof(const Value *V)
void setUnwindDest(BasicBlock *NewDest)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
bool hasUnwindDest() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:980
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:766
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:676
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:681
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:684
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:682
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:675
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:683
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
static auto FCmpPredicates()
Returns the sequence of all FCmp predicates.
Definition: InstrTypes.h:712
bool isFPPredicate() const
Definition: InstrTypes.h:780
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:42
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This instruction extracts a single (scalar) element from a VectorType value.
const Value * getVectorOperand() const
ExtractElementInst * cloneImpl() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool classof(const Value *V)
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getIndexOperand() const
static bool classof(const Instruction *I)
VectorType * getVectorOperandType() const
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getAggregateOperand() const
static unsigned getAggregateOperandIndex()
idx_iterator idx_begin() const
This instruction compares its operands according to the predicate given to the constructor.
bool isRelational() const
FCmpInst(Predicate Pred, Value *LHS, Value *RHS, const Twine &NameStr="", Instruction *FlagsSource=nullptr)
Constructor with no-insertion semantics.
bool isEquality() const
static bool classof(const Value *V)
bool isCommutative() const
static bool isCommutative(Predicate Pred)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isEquality(Predicate Pred)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static auto predicates()
Returns the sequence of all FCmp predicates.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
static bool classof(const Value *V)
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to signed integer.
static bool classof(const Value *V)
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to unsigned integer.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:424
static bool classof(const Value *V)
Definition: Instructions.h:473
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:460
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
static bool classof(const Instruction *I)
Definition: Instructions.h:470
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:455
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:449
This class represents a freeze function that returns random concrete value if an operand is either a ...
static bool classof(const Value *V)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
static bool classof(const Instruction *I)
friend class CatchPadInst
Definition: InstrTypes.h:2357
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
Class to represent function types.
Definition: DerivedTypes.h:105
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
static Type * getGEPReturnType(Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setResultElementType(Type *Ty)
Definition: Instructions.h:993
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
iterator_range< const_op_iterator > indices() const
Type * getResultElementType() const
Definition: Instructions.h:995
static bool classof(const Instruction *I)
static bool classof(const Value *V)
iterator_range< op_iterator > indices()
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
void setSourceElementType(Type *Ty)
Definition: Instructions.h:992
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Type * getSourceElementType() const
Definition: Instructions.h:990
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Type * getPointerOperandType() const
Method to return the pointer operand as a PointerType.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, GEPNoWrapFlags NW, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:967
static unsigned getPointerOperandIndex()
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
const_op_iterator idx_begin() const
GetElementPtrInst * cloneImpl() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
unsigned getNumIndices() const
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
const_op_iterator idx_end() const
const Value * getPointerOperand() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This instruction compares its operands according to the predicate given to the constructor.
bool hasSameSign() const
An icmp instruction, which can be marked as "samesign", indicating that the two operands have the sam...
static bool classof(const Value *V)
void setSameSign(bool B=true)
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static bool isCommutative(Predicate P)
static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred)
CmpPredicate getCmpPredicate() const
bool isCommutative() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
static bool classof(const Instruction *I)
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getNonStrictCmpPredicate(CmpPredicate Pred)
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
static auto predicates()
Returns the sequence of all ICmp predicates.
ICmpInst(Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with no-insertion semantics.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Indirect Branch Instruction.
static IndirectBrInst * Create(Value *Address, unsigned NumDests, InsertPosition InsertBefore=nullptr)
BasicBlock * getDestination(unsigned i)
Return the specified destination.
static bool classof(const Value *V)
const Value * getAddress() const
static bool classof(const Instruction *I)
BasicBlock * getSuccessor(unsigned i) const
iterator_range< const_succ_op_iterator > successors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
const BasicBlock * getDestination(unsigned i) const
void setSuccessor(unsigned i, BasicBlock *NewSucc)
void setAddress(Value *V)
unsigned getNumSuccessors() const
iterator_range< succ_op_iterator > successors()
This instruction inserts a single (scalar) element into a VectorType value.
static bool classof(const Value *V)
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
VectorType * getType() const
Overload to return most specific vector type.
static bool classof(const Instruction *I)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
This instruction inserts a struct field or array element value into an aggregate value.
Value * getInsertedValueOperand()
static bool classof(const Instruction *I)
static unsigned getAggregateOperandIndex()
Value * getAggregateOperand()
static bool classof(const Value *V)
unsigned getNumIndices() const
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
static unsigned getInsertedValueOperandIndex()
InsertValueInst * cloneImpl() const
idx_iterator idx_end() const
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
const Value * getAggregateOperand() const
bool hasIndices() const
const Value * getInsertedValueOperand() const
idx_iterator idx_begin() const
typename Bitfield::Element< AtomicOrdering, Offset, 3, AtomicOrdering::LAST > AtomicOrderingBitfieldElementT
Definition: Instruction.h:153
typename Bitfield::Element< bool, Offset, 1 > BoolBitfieldElementT
Definition: Instruction.h:148
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
typename Bitfield::Element< unsigned, Offset, 6, Value::MaxAlignmentExponent > AlignmentBitfieldElementT
Definition: Instruction.h:145
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:310
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
friend class BasicBlock
Various leaf nodes.
Definition: Instruction.h:1046
This class represents a cast from an integer to a pointer.
static bool classof(const Instruction *I)
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static bool classof(const Value *V)
Invoke instruction.
static bool classof(const Instruction *I)
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
static bool classof(const Value *V)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
void setUnwindDest(BasicBlock *B)
BasicBlock * getNormalDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
static bool classof(const Value *V)
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void reserveClauses(unsigned Size)
Grow the size of the operand list to accommodate the new number of clauses.
static bool classof(const Instruction *I)
An instruction for reading from memory.
Definition: Instructions.h:176
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:261
const Value * getPointerOperand() const
Definition: Instructions.h:256
void setAlignment(Align Align)
Definition: Instructions.h:215
Value * getPointerOperand()
Definition: Instructions.h:255
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
static bool classof(const Instruction *I)
Definition: Instructions.h:266
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:225
static bool classof(const Value *V)
Definition: Instructions.h:269
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this load instruction.
Definition: Instructions.h:235
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:241
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:220
Type * getPointerOperandType() const
Definition: Instructions.h:258
static unsigned getPointerOperandIndex()
Definition: Instructions.h:257
bool isUnordered() const
Definition: Instructions.h:249
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:208
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:230
bool isSimple() const
Definition: Instructions.h:247
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
Metadata node.
Definition: Metadata.h:1073
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:310
BasicBlock * getIncomingBlock(Value::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static bool classof(const Value *V)
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock *const * const_block_iterator
void setIncomingValue(unsigned i, Value *V)
static unsigned getOperandNumForIncomingValue(unsigned i)
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
static unsigned getIncomingValueNumForOperand(unsigned i)
const_op_range incoming_values() const
Value * removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true)
void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New)
Replace every incoming basic block Old to basic block New.
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Class to represent pointers.
Definition: DerivedTypes.h:679
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:712
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static bool classof(const Value *V)
const Value * getPointerOperand() const
Gets the pointer operand.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
static bool classof(const Instruction *I)
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
static ResumeInst * Create(Value *Exn, InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getValue() const
Convenience accessor.
static bool classof(const Value *V)
unsigned getNumSuccessors() const
ResumeInst * cloneImpl() const
static bool classof(const Instruction *I)
Return a value (possibly void), from a function.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
static ReturnInst * Create(LLVMContext &C, BasicBlock *InsertAtEnd)
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a sign extension of integer types.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
void setFalseValue(Value *V)
const Value * getFalseValue() const
void setTrueValue(Value *V)
OtherOps getOpcode() const
Value * getCondition()
Value * getTrueValue()
void swapValues()
Swap the true and false values of the select instruction.
Value * getFalseValue()
const Value * getCondition() const
SelectInst * cloneImpl() const
friend class Instruction
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static bool classof(const Value *V)
void setCondition(Value *V)
const Value * getTrueValue() const
static bool classof(const Instruction *I)
This instruction constructs a fixed permutation of two input vectors.
static bool classof(const Value *V)
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts)
Constant * getShuffleMaskForBitcode() const
Return the mask for this instruction, for use in bitcode.
bool isSingleSource() const
Return true if this shuffle chooses elements from exactly one source vector without changing the leng...
bool changesLength() const
Return true if this shuffle returns a vector with a different number of elements than its source vect...
bool isExtractSubvectorMask(int &Index) const
Return true if this shuffle mask is an extract subvector mask.
ArrayRef< int > getShuffleMask() const
static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, int &NumSubElts, int &Index)
static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts)
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
void getShuffleMask(SmallVectorImpl< int > &Result) const
Return the mask for this instruction as a vector of integers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor)
VectorType * getType() const
Overload to return most specific vector type.
bool isInsertSubvectorMask(int &NumSubElts, int &Index) const
Return true if this shuffle mask is an insert subvector mask.
bool increasesLength() const
Return true if this shuffle returns a vector with a greater number of elements than its source vector...
bool isZeroEltSplat() const
Return true if all elements of this shuffle are the same value as the first element of exactly one so...
static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isSelect() const
Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...
static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isTranspose() const
Return true if this shuffle transposes the elements of its inputs without changing the length of the ...
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
bool isSplice(int &Index) const
Return true if this shuffle splices two inputs without changing the length of the vectors.
static bool isReverseMask(const Constant *Mask, int NumSrcElts)
static bool isSelectMask(const Constant *Mask, int NumSrcElts)
static bool classof(const Instruction *I)
static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts)
bool isIdentity() const
Return true if this shuffle chooses elements from exactly one source vector without lane crossings an...
static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, int &VF)
static bool isIdentityMask(const Constant *Mask, int NumSrcElts)
static bool isTransposeMask(const Constant *Mask, int NumSrcElts)
bool isReverse() const
Return true if this shuffle swaps the order of elements from exactly one source vector.
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
static bool classof(const Instruction *I)
Definition: Instructions.h:392
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:342
const Value * getPointerOperand() const
Definition: Instructions.h:382
Align getAlign() const
Definition: Instructions.h:333
Type * getPointerOperandType() const
Definition: Instructions.h:384
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:328
void setAlignment(Align Align)
Definition: Instructions.h:337
bool isSimple() const
Definition: Instructions.h:370
const Value * getValueOperand() const
Definition: Instructions.h:379
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:348
Value * getValueOperand()
Definition: Instructions.h:378
static bool classof(const Value *V)
Definition: Instructions.h:395
bool isUnordered() const
Definition: Instructions.h:372
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this store instruction.
Definition: Instructions.h:358
StoreInst * cloneImpl() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:387
static unsigned getPointerOperandIndex()
Definition: Instructions.h:383
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:353
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:325
Value * getPointerOperand()
Definition: Instructions.h:381
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:364
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
SwitchInstProfUpdateWrapper(SwitchInst &SI)
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
A handle to a particular switch case.
unsigned getCaseIndex() const
Returns number of current case.
unsigned getSuccessorIndex() const
Returns successor index for current case successor.
BasicBlockT * getCaseSuccessor() const
Resolves successor for current case.
CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index)
bool operator==(const CaseHandleImpl &RHS) const
ConstantIntT * getCaseValue() const
Resolves case value for current case.
CaseHandle(SwitchInst *SI, ptrdiff_t Index)
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
const CaseHandleT & operator*() const
CaseIteratorImpl()=default
Default constructed iterator is in an invalid state until assigned to a case for a particular switch.
CaseIteratorImpl & operator-=(ptrdiff_t N)
bool operator==(const CaseIteratorImpl &RHS) const
CaseIteratorImpl & operator+=(ptrdiff_t N)
ptrdiff_t operator-(const CaseIteratorImpl &RHS) const
bool operator<(const CaseIteratorImpl &RHS) const
CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum)
Initializes case iterator for given SwitchInst and for given case number.
static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, unsigned SuccessorIndex)
Initializes case iterator for given SwitchInst and for given successor index.
Multiway switch.
BasicBlock * getDefaultDest() const
CaseIt case_end()
Returns a read/write iterator that points one past the last in the SwitchInst.
BasicBlock * getSuccessor(unsigned idx) const
ConstCaseIt findCaseValue(const ConstantInt *C) const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static SwitchInst * Create(Value *Value, BasicBlock *Default, unsigned NumCases, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
ConstCaseIt case_begin() const
Returns a read-only iterator that points to the first case in the SwitchInst.
bool defaultDestUndefined() const
Returns true if the default branch must result in immediate undefined behavior, false otherwise.
iterator_range< ConstCaseIt > cases() const
Constant iteration adapter for range-for loops.
ConstantInt * findCaseDest(BasicBlock *BB)
Finds the unique case value for a given successor.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
static bool classof(const Value *V)
unsigned getNumSuccessors() const
CaseIt case_default()
Returns an iterator that points to the default case.
void setDefaultDest(BasicBlock *DefaultCase)
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt findCaseValue(const ConstantInt *C)
Search all of the case values for the specified constant.
Value * getCondition() const
ConstCaseIt case_default() const
CaseIt case_begin()
Returns a read/write iterator that points to the first case in the SwitchInst.
static bool classof(const Instruction *I)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
ConstCaseIt case_end() const
Returns a read-only iterator that points one past the last in the SwitchInst.
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
TruncInst * cloneImpl() const
Clone an identical TruncInst.
void setHasNoUnsignedWrap(bool B)
unsigned getNoWrapKind() const
Returns the no-wrap kind of the operation.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
static bool classof(const Value *V)
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
This class represents a cast unsigned integer to floating point.
static bool classof(const Value *V)
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This function has undefined behavior.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:280
const Use & getOperandUse(unsigned i) const
Definition: User.h:241
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
static bool classof(const Instruction *I)
Value * getPointerOperand()
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getPointerOperand() const
static bool classof(const Value *V)
static unsigned getPointerOperandIndex()
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:391
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
CRTP base class for adapting an iterator to a different type.
Definition: iterator.h:237
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:80
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
Type * checkGEPType(Type *Ty)
Definition: Instructions.h:925
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
APInt operator*(APInt a, uint64_t RHS)
Definition: APInt.h:2204
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID)
A helper function that sets an atomic operation's sync scope.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an...
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1841
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1766
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
void setLoadStoreAlignment(Value *I, Align NewAlign)
A helper function that set the alignment of load or store instruction.
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
static constexpr bool areContiguous()
Definition: Bitfields.h:280
The const version of succ_op_iterator.
const BasicBlock * operator->() const
const_succ_op_iterator(const_value_op_iterator I)
const BasicBlock * operator*() const
Iterator type that casts an operand to a basic block.
succ_op_iterator(value_op_iterator I)
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:30
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to th...
Definition: OperandTraits.h:93
The const version of succ_op_iterator.
const_succ_op_iterator(const_value_op_iterator I)
Iterator type that casts an operand to a basic block.
Compile-time customization of User operands.
Definition: User.h:42
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Information about how a User object was allocated, to be passed into the User constructor.
Definition: User.h:79
Indicates this User has operands "hung off" in another allocation.
Definition: User.h:57
Indicates this User has operands co-allocated.
Definition: User.h:60
Iterator for directly iterating over the operand Values.
Definition: User.h:299
VariadicOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:67