LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 if (isArrayAllocation()) {
69 if (!C)
70 return std::nullopt;
71 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
72 auto CheckedProd =
73 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
74 if (!CheckedProd)
75 return std::nullopt;
76 return TypeSize::getFixed(*CheckedProd);
77 }
78 return Size;
79}
80
81std::optional<TypeSize>
83 std::optional<TypeSize> Size = getAllocationSize(DL);
84 if (!Size)
85 return std::nullopt;
86 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
87 static_cast<TypeSize::ScalarTy>(8));
88 if (!CheckedProd)
89 return std::nullopt;
90 return TypeSize::get(*CheckedProd, Size->isScalable());
91}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
126PHINode::PHINode(const PHINode &PN)
127 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
128 ReservedSpace(PN.getNumOperands()) {
131 std::copy(PN.op_begin(), PN.op_end(), op_begin());
132 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
134}
135
136// removeIncomingValue - Remove an incoming value. This is useful if a
137// predecessor basic block is deleted.
138Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
139 Value *Removed = getIncomingValue(Idx);
140
141 // Move everything after this operand down.
142 //
143 // FIXME: we could just swap with the end of the list, then erase. However,
144 // clients might not expect this to happen. The code as it is thrashes the
145 // use/def lists, which is kinda lame.
146 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
147 copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);
148
149 // Nuke the last value.
150 Op<-1>().set(nullptr);
152
153 // If the PHI node is dead, because it has zero entries, nuke it now.
154 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
155 // If anyone is using this PHI, make them use a dummy value instead...
158 }
159 return Removed;
160}
161
162void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
163 bool DeletePHIIfEmpty) {
164 SmallDenseSet<unsigned> RemoveIndices;
165 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
166 if (Predicate(Idx))
167 RemoveIndices.insert(Idx);
168
169 if (RemoveIndices.empty())
170 return;
171
172 // Remove operands.
173 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
174 return RemoveIndices.contains(U.getOperandNo());
175 });
176 for (Use &U : make_range(NewOpEnd, op_end()))
177 U.set(nullptr);
178
179 // Remove incoming blocks.
180 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
181 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
182 return RemoveIndices.contains(&BB - block_begin());
183 });
184
185 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
186
187 // If the PHI node is dead, because it has zero entries, nuke it now.
188 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
189 // If anyone is using this PHI, make them use a dummy value instead...
192 }
193}
194
195/// growOperands - grow operands - This grows the operand list in response
196/// to a push_back style of operation. This grows the number of ops by 1.5
197/// times.
198///
199void PHINode::growOperands() {
200 unsigned e = getNumOperands();
201 unsigned NumOps = e + e / 2;
202 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
203
204 ReservedSpace = NumOps;
205 growHungoffUses(ReservedSpace, /* IsPhi */ true);
206}
207
208/// hasConstantValue - If the specified PHI node always merges together the same
209/// value, return the value, otherwise return null.
211 // Exploit the fact that phi nodes always have at least one entry.
212 Value *ConstantValue = getIncomingValue(0);
213 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
214 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
215 if (ConstantValue != this)
216 return nullptr; // Incoming values not all the same.
217 // The case where the first value is this PHI.
218 ConstantValue = getIncomingValue(i);
219 }
220 if (ConstantValue == this)
221 return PoisonValue::get(getType());
222 return ConstantValue;
223}
224
225/// hasConstantOrUndefValue - Whether the specified PHI node always merges
226/// together the same value, assuming that undefs result in the same value as
227/// non-undefs.
228/// Unlike \ref hasConstantValue, this does not return a value because the
229/// unique non-undef incoming value need not dominate the PHI node.
231 Value *ConstantValue = nullptr;
232 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
234 if (Incoming != this && !isa<UndefValue>(Incoming)) {
235 if (ConstantValue && ConstantValue != Incoming)
236 return false;
237 ConstantValue = Incoming;
238 }
239 }
240 return true;
241}
242
243//===----------------------------------------------------------------------===//
244// LandingPadInst Implementation
245//===----------------------------------------------------------------------===//
246
// Construct a landingpad producing a value of type RetTy, with hung-off
// operand storage pre-reserved for NumReservedValues clauses (see init()).
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
253
254LandingPadInst::LandingPadInst(const LandingPadInst &LP)
255 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
256 ReservedSpace(LP.getNumOperands()) {
259 Use *OL = getOperandList();
260 const Use *InOL = LP.getOperandList();
261 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
262 OL[I] = InOL[I];
263
264 setCleanup(LP.isCleanup());
265}
266
// Factory: heap-allocate a landingpad with room reserved for
// NumReservedClauses clauses, optionally inserting it at InsertBefore.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
272
273void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
274 ReservedSpace = NumReservedValues;
276 allocHungoffUses(ReservedSpace);
277 setName(NameStr);
278 setCleanup(false);
279}
280
281/// growOperands - grow operands - This grows the operand list in response to a
282/// push_back style of operation. This grows the number of ops by 2 times.
283void LandingPadInst::growOperands(unsigned Size) {
284 unsigned e = getNumOperands();
285 if (ReservedSpace >= e + Size) return;
286 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
287 growHungoffUses(ReservedSpace);
288}
289
291 unsigned OpNo = getNumOperands();
292 growOperands(1);
293 assert(OpNo < ReservedSpace && "Growing didn't work!");
295 getOperandList()[OpNo] = Val;
296}
297
298//===----------------------------------------------------------------------===//
299// CallBase Implementation
300//===----------------------------------------------------------------------===//
301
303 InsertPosition InsertPt) {
304 switch (CB->getOpcode()) {
305 case Instruction::Call:
306 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
307 case Instruction::Invoke:
308 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
309 case Instruction::CallBr:
310 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
311 default:
312 llvm_unreachable("Unknown CallBase sub-class!");
313 }
314}
315
317 InsertPosition InsertPt) {
319 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
320 auto ChildOB = CI->getOperandBundleAt(i);
321 if (ChildOB.getTagName() != OpB.getTag())
322 OpDefs.emplace_back(ChildOB);
323 }
324 OpDefs.emplace_back(OpB);
325 return CallBase::Create(CI, OpDefs, InsertPt);
326}
327
329
331 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
332 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
333}
334
336 const Value *V = getCalledOperand();
337 if (isa<Function>(V) || isa<Constant>(V))
338 return false;
339 return !isInlineAsm();
340}
341
342/// Tests if this call site must be tail call optimized. Only a CallInst can
343/// be tail call optimized.
345 if (auto *CI = dyn_cast<CallInst>(this))
346 return CI->isMustTailCall();
347 return false;
348}
349
350/// Tests if this call site is marked as a tail call.
352 if (auto *CI = dyn_cast<CallInst>(this))
353 return CI->isTailCall();
354 return false;
355}
356
359 return F->getIntrinsicID();
361}
362
364 FPClassTest Mask = Attrs.getRetNoFPClass();
365
366 if (const Function *F = getCalledFunction())
367 Mask |= F->getAttributes().getRetNoFPClass();
368 return Mask;
369}
370
372 FPClassTest Mask = Attrs.getParamNoFPClass(i);
373
374 if (const Function *F = getCalledFunction())
375 Mask |= F->getAttributes().getParamNoFPClass(i);
376 return Mask;
377}
378
379std::optional<ConstantRange> CallBase::getRange() const {
380 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
382 if (const Function *F = getCalledFunction())
383 FnAttr = F->getRetAttribute(Attribute::Range);
384
385 if (CallAttr.isValid() && FnAttr.isValid())
386 return CallAttr.getRange().intersectWith(FnAttr.getRange());
387 if (CallAttr.isValid())
388 return CallAttr.getRange();
389 if (FnAttr.isValid())
390 return FnAttr.getRange();
391 return std::nullopt;
392}
393
395 if (hasRetAttr(Attribute::NonNull))
396 return true;
397
398 if (getRetDereferenceableBytes() > 0 &&
400 return true;
401
402 return false;
403}
404
406 unsigned Index;
407
408 if (Attrs.hasAttrSomewhere(Kind, &Index))
409 return getArgOperand(Index - AttributeList::FirstArgIndex);
410 if (const Function *F = getCalledFunction())
411 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
412 return getArgOperand(Index - AttributeList::FirstArgIndex);
413
414 return nullptr;
415}
416
417/// Determine whether the argument or parameter has the given attribute.
418bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
419 assert(ArgNo < arg_size() && "Param index out of bounds!");
420
421 if (Attrs.hasParamAttr(ArgNo, Kind))
422 return true;
423
424 const Function *F = getCalledFunction();
425 if (!F)
426 return false;
427
428 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
429 return false;
430
431 // Take into account mod/ref by operand bundles.
432 switch (Kind) {
433 case Attribute::ReadNone:
435 case Attribute::ReadOnly:
437 case Attribute::WriteOnly:
438 return !hasReadingOperandBundles();
439 default:
440 return true;
441 }
442}
443
445 bool AllowUndefOrPoison) const {
447 "Argument must be a pointer");
448 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
449 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
450 return true;
451
452 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
454 getCaller(),
456 return true;
457
458 return false;
459}
460
461bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
470 return F->getAttributes().hasFnAttr(Kind);
471
472 return false;
473}
474
475template <typename AK>
476Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
477 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
478 // getMemoryEffects() correctly combines memory effects from the call-site,
479 // operand bundles and function.
480 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
481 }
482
484 return F->getAttributes().getFnAttr(Kind);
485
486 return Attribute();
487}
488
489template LLVM_ABI Attribute
490CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
491template LLVM_ABI Attribute
492CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
493
494template <typename AK>
495Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
496 AK Kind) const {
498
499 if (auto *F = dyn_cast<Function>(V))
500 return F->getAttributes().getParamAttr(ArgNo, Kind);
501
502 return Attribute();
503}
504template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
505 unsigned ArgNo, Attribute::AttrKind Kind) const;
506template LLVM_ABI Attribute
507CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
508
511 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
513}
514
517 const unsigned BeginIndex) {
518 auto It = op_begin() + BeginIndex;
519 for (auto &B : Bundles)
520 It = std::copy(B.input_begin(), B.input_end(), It);
521
522 auto *ContextImpl = getContext().pImpl;
523 auto BI = Bundles.begin();
524 unsigned CurrentIndex = BeginIndex;
525
526 for (auto &BOI : bundle_op_infos()) {
527 assert(BI != Bundles.end() && "Incorrect allocation?");
528
529 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
530 BOI.Begin = CurrentIndex;
531 BOI.End = CurrentIndex + BI->input_size();
532 CurrentIndex = BOI.End;
533 BI++;
534 }
535
536 assert(BI == Bundles.end() && "Incorrect allocation?");
537
538 return It;
539}
540
542 /// When there isn't many bundles, we do a simple linear search.
543 /// Else fallback to a binary-search that use the fact that bundles usually
544 /// have similar number of argument to get faster convergence.
546 for (auto &BOI : bundle_op_infos())
547 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
548 return BOI;
549
550 llvm_unreachable("Did not find operand bundle for operand!");
551 }
552
553 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
555 OpIdx < std::prev(bundle_op_info_end())->End &&
556 "The Idx isn't in the operand bundle");
557
558 /// We need a decimal number below and to prevent using floating point numbers
559 /// we use an intergal value multiplied by this constant.
560 constexpr unsigned NumberScaling = 1024;
561
564 bundle_op_iterator Current = Begin;
565
566 while (Begin != End) {
567 unsigned ScaledOperandPerBundle =
568 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
569 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
570 ScaledOperandPerBundle);
571 if (Current >= End)
572 Current = std::prev(End);
573 assert(Current < End && Current >= Begin &&
574 "the operand bundle doesn't cover every value in the range");
575 if (OpIdx >= Current->Begin && OpIdx < Current->End)
576 break;
577 if (OpIdx >= Current->End)
578 Begin = Current + 1;
579 else
580 End = Current;
581 }
582
583 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
584 "the operand bundle doesn't cover every value in the range");
585 return *Current;
586}
587
590 InsertPosition InsertPt) {
591 if (CB->getOperandBundle(ID))
592 return CB;
593
595 CB->getOperandBundlesAsDefs(Bundles);
596 Bundles.push_back(OB);
597 return Create(CB, Bundles, InsertPt);
598}
599
601 InsertPosition InsertPt) {
603 bool CreateNew = false;
604
605 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
606 auto Bundle = CB->getOperandBundleAt(I);
607 if (Bundle.getTagID() == ID) {
608 CreateNew = true;
609 continue;
610 }
611 Bundles.emplace_back(Bundle);
612 }
613
614 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
615}
616
618 // Implementation note: this is a conservative implementation of operand
619 // bundle semantics, where *any* non-assume operand bundle (other than
620 // ptrauth) forces a callsite to be at least readonly.
624 getIntrinsicID() != Intrinsic::assume;
625}
626
634
636 MemoryEffects ME = getAttributes().getMemoryEffects();
637 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
638 MemoryEffects FnME = Fn->getMemoryEffects();
639 if (hasOperandBundles()) {
640 // TODO: Add a method to get memory effects for operand bundles instead.
642 FnME |= MemoryEffects::readOnly();
644 FnME |= MemoryEffects::writeOnly();
645 }
646 if (isVolatile()) {
647 // Volatile operations also access inaccessible memory.
649 }
650 ME &= FnME;
651 }
652 return ME;
653}
657
658/// Determine if the function does not access memory.
665
666/// Determine if the function does not access or only reads memory.
673
674/// Determine if the function does not access or only writes memory.
681
682/// Determine if the call can access memmory only using pointers based
683/// on its arguments.
690
691/// Determine if the function may only access memory that is
692/// inaccessible from the IR.
699
700/// Determine if the function may only access memory that is
701/// either inaccessible from the IR or pointed to by its arguments.
709
711 if (OpNo < arg_size()) {
712 // If the argument is passed byval, the callee does not have access to the
713 // original pointer and thus cannot capture it.
714 if (isByValArgument(OpNo))
715 return CaptureInfo::none();
716
718 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
719 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
720 return CI;
721 }
722
723 // Bundles on assumes are captures(none).
724 if (getIntrinsicID() == Intrinsic::assume)
725 return CaptureInfo::none();
726
727 // deopt operand bundles are captures(none)
728 auto &BOI = getBundleOpInfoForOperand(OpNo);
729 auto OBU = operandBundleFromBundleOpInfo(BOI);
730 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
731}
732
734 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
736 continue;
737
739 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
740 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
742 return true;
743 }
744 return false;
745}
746
747//===----------------------------------------------------------------------===//
748// CallInst Implementation
749//===----------------------------------------------------------------------===//
750
751void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
752 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
753 this->FTy = FTy;
754 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
755 "NumOperands not set up?");
756
757#ifndef NDEBUG
758 assert((Args.size() == FTy->getNumParams() ||
759 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
760 "Calling a function with bad signature!");
761
762 for (unsigned i = 0; i != Args.size(); ++i)
763 assert((i >= FTy->getNumParams() ||
764 FTy->getParamType(i) == Args[i]->getType()) &&
765 "Calling a function with a bad signature!");
766#endif
767
768 // Set operands in order of their index to match use-list-order
769 // prediction.
770 llvm::copy(Args, op_begin());
771 setCalledOperand(Func);
772
773 auto It = populateBundleOperandInfos(Bundles, Args.size());
774 (void)It;
775 assert(It + 1 == op_end() && "Should add up!");
776
777 setName(NameStr);
778}
779
780void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
781 this->FTy = FTy;
782 assert(getNumOperands() == 1 && "NumOperands not set up?");
783 setCalledOperand(Func);
784
785 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
786
787 setName(NameStr);
788}
789
// Construct a no-argument call to Func, whose function type is Ty; the
// instruction's result type is Ty's return type.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}
796
797CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
798 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
800 "Wrong number of operands allocated");
801 setTailCallKind(CI.getTailCallKind());
803
804 std::copy(CI.op_begin(), CI.op_end(), op_begin());
805 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
808}
809
811 InsertPosition InsertPt) {
812 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
813
814 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
815 Args, OpB, CI->getName(), InsertPt);
816 NewCI->setTailCallKind(CI->getTailCallKind());
817 NewCI->setCallingConv(CI->getCallingConv());
818 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
819 NewCI->setAttributes(CI->getAttributes());
820 NewCI->setDebugLoc(CI->getDebugLoc());
821 return NewCI;
822}
823
824// Update profile weight for call instruction by scaling it using the ratio
825// of S/T. The meaning of "branch_weights" meta data for call instruction is
826// transfered to represent call count.
828 if (T == 0) {
829 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
830 "div by 0. Ignoring. Likely the function "
831 << getParent()->getParent()->getName()
832 << " has 0 entry count, and contains call instructions "
833 "with non-zero prof info.");
834 return;
835 }
836 scaleProfData(*this, S, T);
837}
838
839//===----------------------------------------------------------------------===//
840// InvokeInst Implementation
841//===----------------------------------------------------------------------===//
842
843void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
844 BasicBlock *IfException, ArrayRef<Value *> Args,
846 const Twine &NameStr) {
847 this->FTy = FTy;
848
850 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
851 "NumOperands not set up?");
852
853#ifndef NDEBUG
854 assert(((Args.size() == FTy->getNumParams()) ||
855 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
856 "Invoking a function with bad signature");
857
858 for (unsigned i = 0, e = Args.size(); i != e; i++)
859 assert((i >= FTy->getNumParams() ||
860 FTy->getParamType(i) == Args[i]->getType()) &&
861 "Invoking a function with a bad signature!");
862#endif
863
864 // Set operands in order of their index to match use-list-order
865 // prediction.
866 llvm::copy(Args, op_begin());
867 setNormalDest(IfNormal);
868 setUnwindDest(IfException);
870
871 auto It = populateBundleOperandInfos(Bundles, Args.size());
872 (void)It;
873 assert(It + 3 == op_end() && "Should add up!");
874
875 setName(NameStr);
876}
877
878InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
879 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
880 assert(getNumOperands() == II.getNumOperands() &&
881 "Wrong number of operands allocated");
882 setCallingConv(II.getCallingConv());
883 std::copy(II.op_begin(), II.op_end(), op_begin());
884 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
886 SubclassOptionalData = II.SubclassOptionalData;
887}
888
890 InsertPosition InsertPt) {
891 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
892
893 auto *NewII = InvokeInst::Create(
894 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
895 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
896 NewII->setCallingConv(II->getCallingConv());
897 NewII->SubclassOptionalData = II->SubclassOptionalData;
898 NewII->setAttributes(II->getAttributes());
899 NewII->setDebugLoc(II->getDebugLoc());
900 return NewII;
901}
902
904 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
905}
906
908 if (T == 0) {
909 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
910 "div by 0. Ignoring. Likely the function "
911 << getParent()->getParent()->getName()
912 << " has 0 entry count, and contains call instructions "
913 "with non-zero prof info.");
914 return;
915 }
916 scaleProfData(*this, S, T);
917}
918
919//===----------------------------------------------------------------------===//
920// CallBrInst Implementation
921//===----------------------------------------------------------------------===//
922
923void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
924 ArrayRef<BasicBlock *> IndirectDests,
927 const Twine &NameStr) {
928 this->FTy = FTy;
929
930 assert(getNumOperands() == ComputeNumOperands(Args.size(),
931 IndirectDests.size(),
932 CountBundleInputs(Bundles)) &&
933 "NumOperands not set up?");
934
935#ifndef NDEBUG
936 assert(((Args.size() == FTy->getNumParams()) ||
937 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
938 "Calling a function with bad signature");
939
940 for (unsigned i = 0, e = Args.size(); i != e; i++)
941 assert((i >= FTy->getNumParams() ||
942 FTy->getParamType(i) == Args[i]->getType()) &&
943 "Calling a function with a bad signature!");
944#endif
945
946 // Set operands in order of their index to match use-list-order
947 // prediction.
948 llvm::copy(Args, op_begin());
949 NumIndirectDests = IndirectDests.size();
950 setDefaultDest(Fallthrough);
951 for (unsigned i = 0; i != NumIndirectDests; ++i)
952 setIndirectDest(i, IndirectDests[i]);
954
955 auto It = populateBundleOperandInfos(Bundles, Args.size());
956 (void)It;
957 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
958
959 setName(NameStr);
960}
961
962CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
963 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
964 AllocInfo) {
966 "Wrong number of operands allocated");
968 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
969 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
972 NumIndirectDests = CBI.NumIndirectDests;
973}
974
975CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
976 InsertPosition InsertPt) {
977 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
978
979 auto *NewCBI = CallBrInst::Create(
980 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
981 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
982 NewCBI->setCallingConv(CBI->getCallingConv());
983 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
984 NewCBI->setAttributes(CBI->getAttributes());
985 NewCBI->setDebugLoc(CBI->getDebugLoc());
986 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
987 return NewCBI;
988}
989
990//===----------------------------------------------------------------------===//
991// ReturnInst Implementation
992//===----------------------------------------------------------------------===//
993
994ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
995 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
996 AllocInfo) {
998 "Wrong number of operands allocated");
999 if (RI.getNumOperands())
1000 Op<0>() = RI.Op<0>();
1002}
1003
// Construct a return instruction; retVal may be null for a void return,
// in which case the instruction carries no operands.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1011
1012//===----------------------------------------------------------------------===//
1013// ResumeInst Implementation
1014//===----------------------------------------------------------------------===//
1015
// Copy-construct a resume instruction, duplicating the exception operand.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}
1021
// Construct a resume instruction that propagates the exception value Exn.
ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}
1027
1028//===----------------------------------------------------------------------===//
1029// CleanupReturnInst Implementation
1030//===----------------------------------------------------------------------===//
1031
1032CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1034 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1036 "Wrong number of operands allocated");
1037 setSubclassData<Instruction::OpaqueField>(
1039 Op<0>() = CRI.Op<0>();
1040 if (CRI.hasUnwindDest())
1041 Op<1>() = CRI.Op<1>();
1042}
1043
1044void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1045 if (UnwindBB)
1046 setSubclassData<UnwindDestField>(true);
1047
1048 Op<0>() = CleanupPad;
1049 if (UnwindBB)
1050 Op<1>() = UnwindBB;
1051}
1052
1053CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1055 InsertPosition InsertBefore)
1056 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1057 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1058 init(CleanupPad, UnwindBB);
1059}
1060
1061//===----------------------------------------------------------------------===//
1062// CatchReturnInst Implementation
1063//===----------------------------------------------------------------------===//
// Wire up the catchret operands: the catchpad token and the successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1068
// Copy-construct a catchret, duplicating both operands (catchpad token and
// successor block).
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1075
// Construct a catchret from CatchPad that transfers control to BB.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}
1082
1083//===----------------------------------------------------------------------===//
1084// CatchSwitchInst Implementation
1085//===----------------------------------------------------------------------===//
1086
// Construct a catchswitch on ParentPad with room reserved for
// NumReservedValues handlers. An extra slot is added when an unwind
// destination is supplied, plus one more (the +1 below) for the parent-pad
// operand itself.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1098
// Copy constructor: rebuild the hung-off operand list via init() and then
// copy every operand after the parent pad (unwind dest + handlers) from CSI.
// NOTE(review): a line appears to be missing before init() in this
// rendering -- verify against upstream.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Operand 0 (the parent pad) was installed by init(); copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

// Allocate the hung-off use array and install the parent pad and optional
// unwind destination. Handler operands are appended later via addHandler().
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Start with 1 operand (parent pad) or 2 (parent pad + unwind dest).
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // NOTE(review): a line appears to be missing here in this rendering
    // (likely the subclass-data flag recording the unwind dest) -- verify.
    setUnwindDest(UnwindDest);
  }
}
1124
1125/// growOperands - grow operands - This grows the operand list in response to a
1126/// push_back style of operation. This grows the number of ops by 2 times.
1127void CatchSwitchInst::growOperands(unsigned Size) {
1128 unsigned NumOperands = getNumOperands();
1129 assert(NumOperands >= 1);
1130 if (ReservedSpace >= NumOperands + Size)
1131 return;
1132 ReservedSpace = (NumOperands + Size / 2) * 2;
1133 growHungoffUses(ReservedSpace);
1134}
1135
1137 unsigned OpNo = getNumOperands();
1138 growOperands(1);
1139 assert(OpNo < ReservedSpace && "Growing didn't work!");
1141 getOperandList()[OpNo] = Handler;
1142}
1143
1145 // Move all subsequent handlers up one.
1146 Use *EndDst = op_end() - 1;
1147 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1148 *CurDst = *(CurDst + 1);
1149 // Null out the last handler use.
1150 *EndDst = nullptr;
1151
1153}
1154
1155//===----------------------------------------------------------------------===//
1156// FuncletPadInst Implementation
1157//===----------------------------------------------------------------------===//
1158void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1159 const Twine &NameStr) {
1160 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1161 llvm::copy(Args, op_begin());
1162 setParentPad(ParentPad);
1163 setName(NameStr);
1164}
1165
// Copy constructor: clone every operand from FPI.
// NOTE(review): lines appear to be missing from this rendering (the head of
// the operand-count assert, and a trailing statement after the copy --
// likely setParentPad(FPI.getParentPad())) -- verify against upstream.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}

// Construct a catchpad/cleanuppad; the result type matches the parent pad.
// NOTE(review): the Args/AllocInfo parameter line appears to be missing
// from this rendering.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1181
1182//===----------------------------------------------------------------------===//
1183// UnreachableInst Implementation
1184//===----------------------------------------------------------------------===//
1185
// unreachable is a void-typed terminator with no operands.
// NOTE(review): the constructor's first signature line (the LLVMContext
// parameter) appears to be missing from this rendering.
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}
1190
1191//===----------------------------------------------------------------------===//
1192// BranchInst Implementation
1193//===----------------------------------------------------------------------===//
1194
1195void BranchInst::AssertOK() {
1196 if (isConditional())
1197 assert(getCondition()->getType()->isIntegerTy(1) &&
1198 "May only branch on boolean predicates!");
1199}
1200
// Unconditional branch: the single successor lives at the end of the
// operand array, hence the Op<-1> addressing.
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch: operands are laid out [Cond, IfFalse, IfTrue] counted
// from the back of the operand array.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1221
// Copy constructor: clone 1 operand (unconditional) or 3 (conditional).
// NOTE(review): lines appear to be missing from this rendering (the head of
// the operand-count assert and a trailing statement, likely the copy of
// SubclassOptionalData) -- verify against upstream.
BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}

// Exchange the taken and fall-through successors of a conditional branch.
// NOTE(review): this rendering drops the function signature, the assert
// head, and the trailing call that fixes up profile metadata -- verify
// against upstream.
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
}
1246
1247//===----------------------------------------------------------------------===//
1248// AllocaInst Implementation
1249//===----------------------------------------------------------------------===//
1250
1251static Value *getAISize(LLVMContext &Context, Value *Amt) {
1252 if (!Amt)
1253 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1254 else {
1255 assert(!isa<BasicBlock>(Amt) &&
1256 "Passed basic block into allocation size parameter! Use other ctor");
1257 assert(Amt->getType()->isIntegerTy() &&
1258 "Allocation array size is not an integer!");
1259 }
1260 return Amt;
1261}
1262
// Pick the preferred alignment for Ty from the enclosing function's
// DataLayout when the caller did not specify one.
// NOTE(review): the function signature appears to be missing from this
// rendering (a static helper taking Type *Ty and an InsertPosition Pos).
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}
1272
// Scalar alloca (array size 1) with default alignment.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

// Array alloca with the DataLayout's preferred alignment for Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

// Fully-specified alloca; the result is a pointer in AddrSpace.
// NOTE(review): a statement appears to be missing from the body in this
// rendering (likely setAlignment(Align)) -- verify against upstream.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1293
1296 return !CI->isOne();
1297 return true;
1298}
1299
1300/// isStaticAlloca - Return true if this alloca is in the entry block of the
1301/// function and is a constant size. If so, the code generator will fold it
1302/// into the prolog/epilog code, so it is basically free.
1304 // Must be constant size.
1305 if (!isa<ConstantInt>(getArraySize())) return false;
1306
1307 // Must be in the entry block.
1308 const BasicBlock *Parent = getParent();
1309 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1310}
1311
1312//===----------------------------------------------------------------------===//
1313// LoadInst Implementation
1314//===----------------------------------------------------------------------===//
1315
1316void LoadInst::AssertOK() {
1318 "Ptr must have pointer type.");
1319}
1320
// Pick the ABI alignment for Ty from the enclosing function's DataLayout
// when the caller did not specify one.
// NOTE(review): the function signature appears to be missing from this
// rendering (a static helper taking Type *Ty and an InsertPosition Pos).
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getABITypeAlign(Ty);
}
1330
// LoadInst constructor overloads: each forwards to a more specific
// overload, supplying non-volatile / default-alignment / non-atomic
// defaults; the final overload installs the atomic state directly.
// NOTE(review): each overload's leading signature fragment -- and, in the
// final overload, the volatility/alignment setters -- appears to be
// missing from this rendering; verify against upstream.
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

                   Align Align, InsertPosition InsertBef)
               SyncScope::System, InsertBef) {}

                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1355
1356//===----------------------------------------------------------------------===//
1357// StoreInst Implementation
1358//===----------------------------------------------------------------------===//
1359
1360void StoreInst::AssertOK() {
1361 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1363 "Ptr must have pointer type!");
1364}
1365
// StoreInst constructor overloads: each forwards to a more specific
// overload, supplying non-volatile / default-alignment / non-atomic
// defaults; the final overload installs operands and atomic state directly.
// NOTE(review): each overload's leading signature fragment -- and, in the
// final overload, statements between the operand stores and setAtomic
// (likely setVolatile/setAlignment) -- appear to be missing from this
// rendering; verify against upstream.
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

                     InsertPosition InsertBefore)
                SyncScope::System, InsertBefore) {}

                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}
1392
1393//===----------------------------------------------------------------------===//
1394// AtomicCmpXchgInst Implementation
1395//===----------------------------------------------------------------------===//
1396
// Install the three operands (ptr, expected, new) and all atomic
// parameters of a cmpxchg.
// NOTE(review): the first line of the pointer-type assert appears to be
// missing from this rendering.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

// Result type is { <cmp type>, i1 }: the loaded value plus a success flag.
// NOTE(review): the constructor's first signature line is missing from this
// rendering.
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, AllocMarker, InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1428
1429//===----------------------------------------------------------------------===//
1430// AtomicRMWInst Implementation
1431//===----------------------------------------------------------------------===//
1432
// Install the operands and atomic parameters of an atomicrmw.
// NOTE(review): a statement appears to be missing after the operand stores
// in this rendering (likely setOperation(Operation)), and the first line of
// the pointer-type assert is also missing -- verify against upstream.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

// Result type matches the value operand.
// NOTE(review): the constructor's first signature line is missing from this
// rendering.
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1460
1462 switch (Op) {
1464 return "xchg";
1465 case AtomicRMWInst::Add:
1466 return "add";
1467 case AtomicRMWInst::Sub:
1468 return "sub";
1469 case AtomicRMWInst::And:
1470 return "and";
1472 return "nand";
1473 case AtomicRMWInst::Or:
1474 return "or";
1475 case AtomicRMWInst::Xor:
1476 return "xor";
1477 case AtomicRMWInst::Max:
1478 return "max";
1479 case AtomicRMWInst::Min:
1480 return "min";
1482 return "umax";
1484 return "umin";
1486 return "fadd";
1488 return "fsub";
1490 return "fmax";
1492 return "fmin";
1494 return "fmaximum";
1496 return "fminimum";
1498 return "uinc_wrap";
1500 return "udec_wrap";
1502 return "usub_cond";
1504 return "usub_sat";
1506 return "<invalid operation>";
1507 }
1508
1509 llvm_unreachable("invalid atomicrmw operation");
1510}
1511
1512//===----------------------------------------------------------------------===//
1513// FenceInst Implementation
1514//===----------------------------------------------------------------------===//
1515
// fence has void type and no operands; only ordering and scope are stored.
// NOTE(review): the constructor's first signature line is missing from this
// rendering.
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1522
1523//===----------------------------------------------------------------------===//
1524// GetElementPtrInst Implementation
1525//===----------------------------------------------------------------------===//
1526
1527void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1528 const Twine &Name) {
1529 assert(getNumOperands() == 1 + IdxList.size() &&
1530 "NumOperands not initialized?");
1531 Op<0>() = Ptr;
1532 llvm::copy(IdxList, op_begin() + 1);
1533 setName(Name);
1534}
1535
// Copy constructor: clones operands plus the cached source/result element
// types.
// NOTE(review): the AllocInfo parameter line and a trailing statement
// (likely the copy of SubclassOptionalData) appear to be missing from this
// rendering -- verify against upstream.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
    : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  assert(getNumOperands() == GEPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}
1546
// Step one GEP index into Ty: struct types require a valid constant index;
// arrays and vectors accept any integer index. Returns null when the index
// is invalid for the type.
// NOTE(review): the signatures of both getTypeAtIndex overloads (Value*
// index below, uint64_t index further down) are missing from this
// rendering.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

// uint64_t-index variant: bounds-check struct indices; arrays and vectors
// ignore the index value entirely.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1574
// Walk Ty through IdxList (skipping the first index, which steps through
// the pointer operand) and return the indexed element type, or null if any
// step is invalid.
// NOTE(review): this rendering drops the templated function's signature
// line, the loop's update of Ty (via getTypeAtIndex), and apparently an
// entire ArrayRef<Value *> getIndexedType overload -- verify upstream.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}


// Constant-index overload: delegate to the shared template helper.
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1595
1599
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
// NOTE(review): the function signature and the dyn_cast<ConstantInt> line
// inside the loop are missing from this rendering -- verify upstream.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
// NOTE(review): the function signature and the isa<ConstantInt> test are
// missing from this rendering -- verify upstream.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      return false;
  }
  return true;
}
1624
1628
// Thin wrappers: these GEP flag and offset queries all delegate to the
// GEPOperator view of this instruction.
// NOTE(review): each wrapper's signature line is missing from this
// rendering, as is the "set inbounds" arm of setIsInBounds (likely
// NW |= GEPNoWrapFlags::inBounds()) and an entire setNoWrapFlags
// definition just above this span -- verify against upstream.
  GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
  if (B)
  else
    NW = NW.withoutInBounds();
  setNoWrapFlags(NW);
}

  return cast<GEPOperator>(this)->getNoWrapFlags();
}

  return cast<GEPOperator>(this)->isInBounds();
}

  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}

  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}

                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1668
1669//===----------------------------------------------------------------------===//
1670// ExtractElementInst Implementation
1671//===----------------------------------------------------------------------===//
1672
// Result type is the element type of the vector operand Val.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  // Operand order (vector, index) fixes the use-list order.
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1684
1685bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1686 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1687 return false;
1688 return true;
1689}
1690
1691//===----------------------------------------------------------------------===//
1692// InsertElementInst Implementation
1693//===----------------------------------------------------------------------===//
1694
// Result type matches the vector operand Vec.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  // Operand order (vector, element, index) fixes the use-list order.
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1706
1708 const Value *Index) {
1709 if (!Vec->getType()->isVectorTy())
1710 return false; // First operand of insertelement must be vector type.
1711
1712 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1713 return false;// Second operand of insertelement must be vector element type.
1714
1715 if (!Index->getType()->isIntegerTy())
1716 return false; // Third operand of insertelement must be i32.
1717 return true;
1718}
1719
1720//===----------------------------------------------------------------------===//
1721// ShuffleVectorInst Implementation
1722//===----------------------------------------------------------------------===//
1723
// Build a poison value standing in for an absent shuffle operand.
// NOTE(review): the helper's signature line is missing from this rendering
// (a static function taking Value *V).
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

// Single-operand convenience constructors: the second vector operand is a
// poison placeholder.
// NOTE(review): each overload's leading signature/initializer fragment is
// missing from this rendering.
                                     InsertPosition InsertBefore)
                    InsertBefore) {}

                                     const Twine &Name,
                                     InsertPosition InsertBefore)
                    InsertBefore) {}
1739
// Construct from a constant mask value: the result element type comes from
// V1 and the element count from the mask.
// NOTE(review): each constructor's first signature line is missing from
// this rendering.
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

// Construct from an integer mask; result scalability follows V1.
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
1772
1774 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1775 int NumMaskElts = ShuffleMask.size();
1776 SmallVector<int, 16> NewMask(NumMaskElts);
1777 for (int i = 0; i != NumMaskElts; ++i) {
1778 int MaskElt = getMaskValue(i);
1779 if (MaskElt == PoisonMaskElem) {
1780 NewMask[i] = PoisonMaskElem;
1781 continue;
1782 }
1783 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1784 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1785 NewMask[i] = MaskElt;
1786 }
1787 setShuffleMask(NewMask);
1788 Op<0>().swap(Op<1>());
1789}
1790
// Validate (V1, V2, integer mask): operands must be vectors of the same
// type and every defined mask element must be in range.
// NOTE(review): the first signature line and (apparently) an
// isa<ScalableVectorType> guard before the splat check are missing from
// this rendering -- verify against upstream.
                                       ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}
1810
// Validate (V1, V2, constant mask value): the mask must be a vector of i32
// whose defined elements index into the concatenation of V1 and V2.
// NOTE(review): this rendering drops the first signature line, the mask
// element-count comparison after the i32 check, and the
// zeroinitializer/undef fast-path condition -- verify against upstream.
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
    return false;

  // Check to see if Mask is valid.
    return true;

  // NOTE: Through vector ConstantInt we have the potential to support more
  // than just zero splat masks but that requires a LangRef change.
  if (isa<ScalableVectorType>(MaskTy))
    return false;

  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

  if (const auto *CI = dyn_cast<ConstantInt>(Mask))
    return !CI->uge(V1Size * 2);

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  return false;
}
1860
1862 SmallVectorImpl<int> &Result) {
1863 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1864
1865 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1866 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1867 Result.append(EC.getKnownMinValue(), MaskVal);
1868 return;
1869 }
1870
1871 assert(!EC.isScalable() &&
1872 "Scalable vector shuffle mask must be undef or zeroinitializer");
1873
1874 unsigned NumElts = EC.getFixedValue();
1875
1876 Result.reserve(NumElts);
1877
1878 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1879 for (unsigned i = 0; i != NumElts; ++i)
1880 Result.push_back(CDS->getElementAsInteger(i));
1881 return;
1882 }
1883 for (unsigned i = 0; i != NumElts; ++i) {
1884 Constant *C = Mask->getAggregateElement(i);
1885 Result.push_back(isa<UndefValue>(C) ? -1 :
1886 cast<ConstantInt>(C)->getZExtValue());
1887 }
1888}
1889
// Cache the shuffle mask both as raw integers and as a bitcode-ready
// Constant.
// NOTE(review): this rendering drops the signature lines of setShuffleMask
// and convertShuffleMaskForBitcode, the declaration of the MaskConst
// vector, and the poison-element push_back arm -- verify upstream.
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

// Rebuild a Constant (vector of i32, with poison for undefined elements)
// from an integer mask; scalable results must be splats of 0 or poison.
                                       Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return PoisonValue::get(VecTy);
  }
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
1914
1915static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1916 assert(!Mask.empty() && "Shuffle mask must contain elements");
1917 bool UsesLHS = false;
1918 bool UsesRHS = false;
1919 for (int I : Mask) {
1920 if (I == -1)
1921 continue;
1922 assert(I >= 0 && I < (NumOpElts * 2) &&
1923 "Out-of-bounds shuffle mask element");
1924 UsesLHS |= (I < NumOpElts);
1925 UsesRHS |= (I >= NumOpElts);
1926 if (UsesLHS && UsesRHS)
1927 return false;
1928 }
1929 // Allow for degenerate case: completely undef mask means neither source is used.
1930 return UsesLHS || UsesRHS;
1931}
1932
// Public wrapper over isSingleSourceMaskImpl.
// NOTE(review): the function signature line is missing from this rendering.
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
1938
1939static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1940 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1941 return false;
1942 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1943 if (Mask[i] == -1)
1944 continue;
1945 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1946 return false;
1947 }
1948 return true;
1949}
1950
// Identity mask over a same-sized source: delegates to isIdentityMaskImpl.
// NOTE(review): the function signature line is missing from this rendering.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}
1958
// Reverse mask: defined element I selects lane NumSrcElts-1-I of a single
// source (from either operand).
// NOTE(review): the signature lines of isReverseMask and isZeroEltSplatMask
// are missing from this rendering.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}

// Splat-of-lane-0 mask: every defined element selects lane 0 of one source.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}
1992
// Select mask: each defined element keeps its own lane but may come from
// either source; both sources must be used (else it's an identity).
// NOTE(review): the function signature line is missing from this rendering.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}
2007
// Transpose mask: alternating lanes drawn from both sources (e.g. AArch64
// trn1/trn2 patterns); undef elements are not tolerated after index 1.
// NOTE(review): the function signature line is missing from this rendering.
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
2043
// Splice mask: a contiguous run starting at some StartIndex of the
// concatenated sources; on success Index receives that start offset.
// NOTE(review): the signature's first line is missing from this rendering.
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2077
// Extract-subvector mask: a single-source mask strictly narrower than its
// source, selecting a contiguous slice; Index receives the slice start.
// NOTE(review): the signature's first line is missing from this rendering.
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
2106
// Insert-subvector mask: one source is in place (identity) and the other
// contributes a contiguous, in-place span; NumSubElts/Index describe the
// inserted span on success.
// NOTE(review): the signature's first line is missing from this rendering.
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2179
2181 // FIXME: Not currently possible to express a shuffle mask for a scalable
2182 // vector for this case.
2184 return false;
2185
2186 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2187 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2188 if (NumMaskElts <= NumOpElts)
2189 return false;
2190
2191 // The first part of the mask must choose elements from exactly 1 source op.
2193 if (!isIdentityMaskImpl(Mask, NumOpElts))
2194 return false;
2195
2196 // All extending must be with undef elements.
2197 for (int i = NumOpElts; i < NumMaskElts; ++i)
2198 if (Mask[i] != -1)
2199 return false;
2200
2201 return true;
2202}
2203
2205 // FIXME: Not currently possible to express a shuffle mask for a scalable
2206 // vector for this case.
2208 return false;
2209
2210 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2211 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2212 if (NumMaskElts >= NumOpElts)
2213 return false;
2214
2215 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2216}
2217
2219 // Vector concatenation is differentiated from identity with padding.
2221 return false;
2222
2223 // FIXME: Not currently possible to express a shuffle mask for a scalable
2224 // vector for this case.
2226 return false;
2227
2228 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2229 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2230 if (NumMaskElts != NumOpElts * 2)
2231 return false;
2232
2233 // Use the mask length rather than the operands' vector lengths here. We
2234 // already know that the shuffle returns a vector twice as long as the inputs,
2235 // and neither of the inputs are undef vectors. If the mask picks consecutive
2236 // elements from both inputs, then this is a concatenation of the inputs.
2237 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2238}
2239
2241 int ReplicationFactor, int VF) {
2242 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2243 "Unexpected mask size.");
2244
2245 for (int CurrElt : seq(VF)) {
2246 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2247 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2248 "Run out of mask?");
2249 Mask = Mask.drop_front(ReplicationFactor);
2250 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2251 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2252 }))
2253 return false;
2254 }
2255 assert(Mask.empty() && "Did not consume the whole mask?");
2256
2257 return true;
2258}
2259
2261 int &ReplicationFactor, int &VF) {
2262 // undef-less case is trivial.
2263 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2264 ReplicationFactor =
2265 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2266 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2267 return false;
2268 VF = Mask.size() / ReplicationFactor;
2269 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2270 }
2271
2272 // However, if the mask contains undef's, we have to enumerate possible tuples
2273 // and pick one. There are bounds on replication factor: [1, mask size]
2274 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2275 // Additionally, mask size is a replication factor multiplied by vector size,
2276 // which further significantly reduces the search space.
2277
2278 // Before doing that, let's perform basic correctness checking first.
2279 int Largest = -1;
2280 for (int MaskElt : Mask) {
2281 if (MaskElt == PoisonMaskElem)
2282 continue;
2283 // Elements must be in non-decreasing order.
2284 if (MaskElt < Largest)
2285 return false;
2286 Largest = std::max(Largest, MaskElt);
2287 }
2288
2289 // Prefer larger replication factor if all else equal.
2290 for (int PossibleReplicationFactor :
2291 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2292 if (Mask.size() % PossibleReplicationFactor != 0)
2293 continue;
2294 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2295 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2296 PossibleVF))
2297 continue;
2298 ReplicationFactor = PossibleReplicationFactor;
2299 VF = PossibleVF;
2300 return true;
2301 }
2302
2303 return false;
2304}
2305
2306bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2307 int &VF) const {
2308 // Not possible to express a shuffle mask for a scalable vector for this
2309 // case.
2311 return false;
2312
2313 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2314 if (ShuffleMask.size() % VF != 0)
2315 return false;
2316 ReplicationFactor = ShuffleMask.size() / VF;
2317
2318 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2319}
2320
2322 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2323 Mask.size() % VF != 0)
2324 return false;
2325 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2326 ArrayRef<int> SubMask = Mask.slice(K, VF);
2327 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2328 continue;
2329 SmallBitVector Used(VF, false);
2330 for (int Idx : SubMask) {
2331 if (Idx != PoisonMaskElem && Idx < VF)
2332 Used.set(Idx);
2333 }
2334 if (!Used.all())
2335 return false;
2336 }
2337 return true;
2338}
2339
2340/// Return true if this shuffle mask is a replication mask.
2342 // Not possible to express a shuffle mask for a scalable vector for this
2343 // case.
2345 return false;
2346 if (!isSingleSourceMask(ShuffleMask, VF))
2347 return false;
2348
2349 return isOneUseSingleSourceMask(ShuffleMask, VF);
2350}
2351
2352bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2354 // shuffle_vector can only interleave fixed length vectors - for scalable
2355 // vectors, see the @llvm.vector.interleave2 intrinsic
2356 if (!OpTy)
2357 return false;
2358 unsigned OpNumElts = OpTy->getNumElements();
2359
2360 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2361}
2362
2364 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2365 SmallVectorImpl<unsigned> &StartIndexes) {
2366 unsigned NumElts = Mask.size();
2367 if (NumElts % Factor)
2368 return false;
2369
2370 unsigned LaneLen = NumElts / Factor;
2371 if (!isPowerOf2_32(LaneLen))
2372 return false;
2373
2374 StartIndexes.resize(Factor);
2375
2376 // Check whether each element matches the general interleaved rule.
2377 // Ignore undef elements, as long as the defined elements match the rule.
2378 // Outer loop processes all factors (x, y, z in the above example)
2379 unsigned I = 0, J;
2380 for (; I < Factor; I++) {
2381 unsigned SavedLaneValue;
2382 unsigned SavedNoUndefs = 0;
2383
2384 // Inner loop processes consecutive accesses (x, x+1... in the example)
2385 for (J = 0; J < LaneLen - 1; J++) {
2386 // Lane computes x's position in the Mask
2387 unsigned Lane = J * Factor + I;
2388 unsigned NextLane = Lane + Factor;
2389 int LaneValue = Mask[Lane];
2390 int NextLaneValue = Mask[NextLane];
2391
2392 // If both are defined, values must be sequential
2393 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2394 LaneValue + 1 != NextLaneValue)
2395 break;
2396
2397 // If the next value is undef, save the current one as reference
2398 if (LaneValue >= 0 && NextLaneValue < 0) {
2399 SavedLaneValue = LaneValue;
2400 SavedNoUndefs = 1;
2401 }
2402
2403 // Undefs are allowed, but defined elements must still be consecutive:
2404 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2405 // Verify this by storing the last non-undef followed by an undef
2406 // Check that following non-undef masks are incremented with the
2407 // corresponding distance.
2408 if (SavedNoUndefs > 0 && LaneValue < 0) {
2409 SavedNoUndefs++;
2410 if (NextLaneValue >= 0 &&
2411 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2412 break;
2413 }
2414 }
2415
2416 if (J < LaneLen - 1)
2417 return false;
2418
2419 int StartMask = 0;
2420 if (Mask[I] >= 0) {
2421 // Check that the start of the I range (J=0) is greater than 0
2422 StartMask = Mask[I];
2423 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2424 // StartMask defined by the last value in lane
2425 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2426 } else if (SavedNoUndefs > 0) {
2427 // StartMask defined by some non-zero value in the j loop
2428 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2429 }
2430 // else StartMask remains set to 0, i.e. all elements are undefs
2431
2432 if (StartMask < 0)
2433 return false;
2434 // We must stay within the vectors; This case can happen with undefs.
2435 if (StartMask + LaneLen > NumInputElts)
2436 return false;
2437
2438 StartIndexes[I] = StartMask;
2439 }
2440
2441 return true;
2442}
2443
2444/// Check if the mask is a DE-interleave mask of the given factor
2445/// \p Factor like:
2446/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2448 unsigned Factor,
2449 unsigned &Index) {
2450 // Check all potential start indices from 0 to (Factor - 1).
2451 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2452 unsigned I = 0;
2453
2454 // Check that elements are in ascending order by Factor. Ignore undef
2455 // elements.
2456 for (; I < Mask.size(); I++)
2457 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2458 break;
2459
2460 if (I == Mask.size()) {
2461 Index = Idx;
2462 return true;
2463 }
2464 }
2465
2466 return false;
2467}
2468
2469/// Try to lower a vector shuffle as a bit rotation.
2470///
2471/// Look for a repeated rotation pattern in each sub group.
2472/// Returns an element-wise left bit rotation amount or -1 if failed.
2473static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2474 int NumElts = Mask.size();
2475 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2476
2477 int RotateAmt = -1;
2478 for (int i = 0; i != NumElts; i += NumSubElts) {
2479 for (int j = 0; j != NumSubElts; ++j) {
2480 int M = Mask[i + j];
2481 if (M < 0)
2482 continue;
2483 if (M < i || M >= i + NumSubElts)
2484 return -1;
2485 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2486 if (0 <= RotateAmt && Offset != RotateAmt)
2487 return -1;
2488 RotateAmt = Offset;
2489 }
2490 }
2491 return RotateAmt;
2492}
2493
2495 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2496 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2497 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2498 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2499 if (EltRotateAmt < 0)
2500 continue;
2501 RotateAmt = EltRotateAmt * EltSizeInBits;
2502 return true;
2503 }
2504
2505 return false;
2506}
2507
2508//===----------------------------------------------------------------------===//
2509// InsertValueInst Class
2510//===----------------------------------------------------------------------===//
2511
2512void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2513 const Twine &Name) {
2514 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2515
2516 // There's no fundamental reason why we require at least one index
2517 // (other than weirdness with &*IdxBegin being invalid; see
2518 // getelementptr's init routine for example). But there's no
2519 // present need to support it.
2520 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2521
2523 Val->getType() && "Inserted value must match indexed type!");
2524 Op<0>() = Agg;
2525 Op<1>() = Val;
2526
2527 Indices.append(Idxs.begin(), Idxs.end());
2528 setName(Name);
2529}
2530
2531InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2532 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2533 Indices(IVI.Indices) {
2534 Op<0>() = IVI.getOperand(0);
2535 Op<1>() = IVI.getOperand(1);
2537}
2538
2539//===----------------------------------------------------------------------===//
2540// ExtractValueInst Class
2541//===----------------------------------------------------------------------===//
2542
2543void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2544 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2545
2546 // There's no fundamental reason why we require at least one index.
2547 // But there's no present need to support it.
2548 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2549
2550 Indices.append(Idxs.begin(), Idxs.end());
2551 setName(Name);
2552}
2553
2554ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2555 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2556 (BasicBlock *)nullptr),
2557 Indices(EVI.Indices) {
2559}
2560
2561// getIndexedType - Returns the type of the element that would be extracted
2562// with an extractvalue instruction with the specified parameters.
2563//
2564// A null type is returned if the indices are invalid for the specified
2565// pointer type.
2566//
2568 ArrayRef<unsigned> Idxs) {
2569 for (unsigned Index : Idxs) {
2570 // We can't use CompositeType::indexValid(Index) here.
2571 // indexValid() always returns true for arrays because getelementptr allows
2572 // out-of-bounds indices. Since we don't allow those for extractvalue and
2573 // insertvalue we need to check array indexing manually.
2574 // Since the only other types we can index into are struct types it's just
2575 // as easy to check those manually as well.
2576 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2577 if (Index >= AT->getNumElements())
2578 return nullptr;
2579 Agg = AT->getElementType();
2580 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2581 if (Index >= ST->getNumElements())
2582 return nullptr;
2583 Agg = ST->getElementType(Index);
2584 } else {
2585 // Not a valid type to index into.
2586 return nullptr;
2587 }
2588 }
2589 return Agg;
2590}
2591
2592//===----------------------------------------------------------------------===//
2593// UnaryOperator Class
2594//===----------------------------------------------------------------------===//
2595
2597 const Twine &Name, InsertPosition InsertBefore)
2598 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2599 Op<0>() = S;
2600 setName(Name);
2601 AssertOK();
2602}
2603
2605 InsertPosition InsertBefore) {
2606 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2607}
2608
2609void UnaryOperator::AssertOK() {
2610 Value *LHS = getOperand(0);
2611 (void)LHS; // Silence warnings.
2612#ifndef NDEBUG
2613 switch (getOpcode()) {
2614 case FNeg:
2615 assert(getType() == LHS->getType() &&
2616 "Unary operation should return same type as operand!");
2617 assert(getType()->isFPOrFPVectorTy() &&
2618 "Tried to create a floating-point operation on a "
2619 "non-floating-point type!");
2620 break;
2621 default: llvm_unreachable("Invalid opcode provided");
2622 }
2623#endif
2624}
2625
2626//===----------------------------------------------------------------------===//
2627// BinaryOperator Class
2628//===----------------------------------------------------------------------===//
2629
2631 const Twine &Name, InsertPosition InsertBefore)
2632 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2633 Op<0>() = S1;
2634 Op<1>() = S2;
2635 setName(Name);
2636 AssertOK();
2637}
2638
2639void BinaryOperator::AssertOK() {
2640 Value *LHS = getOperand(0), *RHS = getOperand(1);
2641 (void)LHS; (void)RHS; // Silence warnings.
2642 assert(LHS->getType() == RHS->getType() &&
2643 "Binary operator operand types must match!");
2644#ifndef NDEBUG
2645 switch (getOpcode()) {
2646 case Add: case Sub:
2647 case Mul:
2648 assert(getType() == LHS->getType() &&
2649 "Arithmetic operation should return same type as operands!");
2650 assert(getType()->isIntOrIntVectorTy() &&
2651 "Tried to create an integer operation on a non-integer type!");
2652 break;
2653 case FAdd: case FSub:
2654 case FMul:
2655 assert(getType() == LHS->getType() &&
2656 "Arithmetic operation should return same type as operands!");
2657 assert(getType()->isFPOrFPVectorTy() &&
2658 "Tried to create a floating-point operation on a "
2659 "non-floating-point type!");
2660 break;
2661 case UDiv:
2662 case SDiv:
2663 assert(getType() == LHS->getType() &&
2664 "Arithmetic operation should return same type as operands!");
2665 assert(getType()->isIntOrIntVectorTy() &&
2666 "Incorrect operand type (not integer) for S/UDIV");
2667 break;
2668 case FDiv:
2669 assert(getType() == LHS->getType() &&
2670 "Arithmetic operation should return same type as operands!");
2671 assert(getType()->isFPOrFPVectorTy() &&
2672 "Incorrect operand type (not floating point) for FDIV");
2673 break;
2674 case URem:
2675 case SRem:
2676 assert(getType() == LHS->getType() &&
2677 "Arithmetic operation should return same type as operands!");
2678 assert(getType()->isIntOrIntVectorTy() &&
2679 "Incorrect operand type (not integer) for S/UREM");
2680 break;
2681 case FRem:
2682 assert(getType() == LHS->getType() &&
2683 "Arithmetic operation should return same type as operands!");
2684 assert(getType()->isFPOrFPVectorTy() &&
2685 "Incorrect operand type (not floating point) for FREM");
2686 break;
2687 case Shl:
2688 case LShr:
2689 case AShr:
2690 assert(getType() == LHS->getType() &&
2691 "Shift operation should return same type as operands!");
2692 assert(getType()->isIntOrIntVectorTy() &&
2693 "Tried to create a shift operation on a non-integral type!");
2694 break;
2695 case And: case Or:
2696 case Xor:
2697 assert(getType() == LHS->getType() &&
2698 "Logical operation should return same type as operands!");
2699 assert(getType()->isIntOrIntVectorTy() &&
2700 "Tried to create a logical operation on a non-integral type!");
2701 break;
2702 default: llvm_unreachable("Invalid opcode provided");
2703 }
2704#endif
2705}
2706
2708 const Twine &Name,
2709 InsertPosition InsertBefore) {
2710 assert(S1->getType() == S2->getType() &&
2711 "Cannot create binary operator with two operands of differing type!");
2712 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2713}
2714
2716 InsertPosition InsertBefore) {
2717 Value *Zero = ConstantInt::get(Op->getType(), 0);
2718 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2719 InsertBefore);
2720}
2721
2723 InsertPosition InsertBefore) {
2724 Value *Zero = ConstantInt::get(Op->getType(), 0);
2725 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2726}
2727
2729 InsertPosition InsertBefore) {
2730 Constant *C = Constant::getAllOnesValue(Op->getType());
2731 return new BinaryOperator(Instruction::Xor, Op, C,
2732 Op->getType(), Name, InsertBefore);
2733}
2734
2735// Exchange the two operands to this instruction. This instruction is safe to
2736// use on any binary instruction and does not modify the semantics of the
2737// instruction.
2739 if (!isCommutative())
2740 return true; // Can't commute operands
2741 Op<0>().swap(Op<1>());
2742 return false;
2743}
2744
2745//===----------------------------------------------------------------------===//
2746// FPMathOperator Class
2747//===----------------------------------------------------------------------===//
2748
2750 const MDNode *MD =
2751 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2752 if (!MD)
2753 return 0.0;
2755 return Accuracy->getValueAPF().convertToFloat();
2756}
2757
2758//===----------------------------------------------------------------------===//
2759// CastInst Class
2760//===----------------------------------------------------------------------===//
2761
2762// Just determine if this cast only deals with integral->integral conversion.
2764 switch (getOpcode()) {
2765 default: return false;
2766 case Instruction::ZExt:
2767 case Instruction::SExt:
2768 case Instruction::Trunc:
2769 return true;
2770 case Instruction::BitCast:
2771 return getOperand(0)->getType()->isIntegerTy() &&
2772 getType()->isIntegerTy();
2773 }
2774}
2775
2776/// This function determines if the CastInst does not require any bits to be
2777/// changed in order to effect the cast. Essentially, it identifies cases where
2778/// no code gen is necessary for the cast, hence the name no-op cast. For
2779/// example, the following are all no-op casts:
2780/// # bitcast i32* %x to i8*
2781/// # bitcast <2 x i32> %x to <4 x i16>
2782/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2783/// Determine if the described cast is a no-op.
2785 Type *SrcTy,
2786 Type *DestTy,
2787 const DataLayout &DL) {
2788 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2789 switch (Opcode) {
2790 default: llvm_unreachable("Invalid CastOp");
2791 case Instruction::Trunc:
2792 case Instruction::ZExt:
2793 case Instruction::SExt:
2794 case Instruction::FPTrunc:
2795 case Instruction::FPExt:
2796 case Instruction::UIToFP:
2797 case Instruction::SIToFP:
2798 case Instruction::FPToUI:
2799 case Instruction::FPToSI:
2800 case Instruction::AddrSpaceCast:
2801 // TODO: Target informations may give a more accurate answer here.
2802 return false;
2803 case Instruction::BitCast:
2804 return true; // BitCast never modifies bits.
2805 case Instruction::PtrToAddr:
2806 case Instruction::PtrToInt:
2807 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2808 DestTy->getScalarSizeInBits();
2809 case Instruction::IntToPtr:
2810 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2811 SrcTy->getScalarSizeInBits();
2812 }
2813}
2814
2816 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2817}
2818
2819/// This function determines if a pair of casts can be eliminated and what
2820/// opcode should be used in the elimination. This assumes that there are two
2821/// instructions like this:
2822/// * %F = firstOpcode SrcTy %x to MidTy
2823/// * %S = secondOpcode MidTy %F to DstTy
2824/// The function returns a resultOpcode so these two casts can be replaced with:
2825/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2826/// If no such cast is permitted, the function returns 0.
2829 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2830 Type *DstIntPtrTy) {
2831 // Define the 144 possibilities for these two cast instructions. The values
2832 // in this matrix determine what to do in a given situation and select the
2833 // case in the switch below. The rows correspond to firstOp, the columns
2834 // correspond to secondOp. In looking at the table below, keep in mind
2835 // the following cast properties:
2836 //
2837 // Size Compare Source Destination
2838 // Operator Src ? Size Type Sign Type Sign
2839 // -------- ------------ ------------------- ---------------------
2840 // TRUNC > Integer Any Integral Any
2841 // ZEXT < Integral Unsigned Integer Any
2842 // SEXT < Integral Signed Integer Any
2843 // FPTOUI n/a FloatPt n/a Integral Unsigned
2844 // FPTOSI n/a FloatPt n/a Integral Signed
2845 // UITOFP n/a Integral Unsigned FloatPt n/a
2846 // SITOFP n/a Integral Signed FloatPt n/a
2847 // FPTRUNC > FloatPt n/a FloatPt n/a
2848 // FPEXT < FloatPt n/a FloatPt n/a
2849 // PTRTOINT n/a Pointer n/a Integral Unsigned
2850 // INTTOPTR n/a Integral Unsigned Pointer n/a
2851 // BITCAST = FirstClass n/a FirstClass n/a
2852 // ADDRSPCST n/a Pointer n/a Pointer n/a
2853 //
2854 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2855 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2856 // into "fptoui double to i64", but this loses information about the range
2857 // of the produced value (we no longer know the top-part is all zeros).
2858 // Further this conversion is often much more expensive for typical hardware,
2859 // and causes issues when building libgcc. We disallow fptosi+sext for the
2860 // same reason.
2861 const unsigned numCastOps =
2862 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2863 // clang-format off
2864 static const uint8_t CastResults[numCastOps][numCastOps] = {
2865 // T F F U S F F P P I B A -+
2866 // R Z S P P I I T P 2 2 N T S |
2867 // U E E 2 2 2 2 R E I A T C C +- secondOp
2868 // N X X U S F F N X N D 2 V V |
2869 // C T T I I P P C T T R P T T -+
2870 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2871 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2872 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2873 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2874 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2875 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2876 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2877 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2878 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2879 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2880 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2881 { 99,99,99,99,99,99,99,99,99,11,99,99,15, 0}, // IntToPtr |
2882 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2883 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2884 };
2885 // clang-format on
2886
2887 // TODO: This logic could be encoded into the table above and handled in the
2888 // switch below.
2889 // If either of the casts are a bitcast from scalar to vector, disallow the
2890 // merging. However, any pair of bitcasts are allowed.
2891 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2892 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2893 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2894
2895 // Check if any of the casts convert scalars <-> vectors.
2896 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2897 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2898 if (!AreBothBitcasts)
2899 return 0;
2900
2901 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2902 [secondOp-Instruction::CastOpsBegin];
2903 switch (ElimCase) {
2904 case 0:
2905 // Categorically disallowed.
2906 return 0;
2907 case 1:
2908 // Allowed, use first cast's opcode.
2909 return firstOp;
2910 case 2:
2911 // Allowed, use second cast's opcode.
2912 return secondOp;
2913 case 3:
2914 // No-op cast in second op implies firstOp as long as the DestTy
2915 // is integer and we are not converting between a vector and a
2916 // non-vector type.
2917 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2918 return firstOp;
2919 return 0;
2920 case 4:
2921 // No-op cast in second op implies firstOp as long as the DestTy
2922 // matches MidTy.
2923 if (DstTy == MidTy)
2924 return firstOp;
2925 return 0;
2926 case 5:
2927 // No-op cast in first op implies secondOp as long as the SrcTy
2928 // is an integer.
2929 if (SrcTy->isIntegerTy())
2930 return secondOp;
2931 return 0;
2932 case 7: {
2933 // Disable inttoptr/ptrtoint optimization if enabled.
2934 if (DisableI2pP2iOpt)
2935 return 0;
2936
2937 // Cannot simplify if address spaces are different!
2938 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2939 return 0;
2940
2941 unsigned MidSize = MidTy->getScalarSizeInBits();
2942 // We can still fold this without knowing the actual sizes as long we
2943 // know that the intermediate pointer is the largest possible
2944 // pointer size.
2945 // FIXME: Is this always true?
2946 if (MidSize == 64)
2947 return Instruction::BitCast;
2948
2949 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2950 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2951 return 0;
2952 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2953 if (MidSize >= PtrSize)
2954 return Instruction::BitCast;
2955 return 0;
2956 }
2957 case 8: {
2958 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2959 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2960 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2961 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2962 unsigned DstSize = DstTy->getScalarSizeInBits();
2963 if (SrcTy == DstTy)
2964 return Instruction::BitCast;
2965 if (SrcSize < DstSize)
2966 return firstOp;
2967 if (SrcSize > DstSize)
2968 return secondOp;
2969 return 0;
2970 }
2971 case 9:
2972 // zext, sext -> zext, because sext can't sign extend after zext
2973 return Instruction::ZExt;
2974 case 11: {
2975 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2976 if (!MidIntPtrTy)
2977 return 0;
2978 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2979 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2980 unsigned DstSize = DstTy->getScalarSizeInBits();
2981 if (SrcSize <= PtrSize && SrcSize == DstSize)
2982 return Instruction::BitCast;
2983 return 0;
2984 }
2985 case 12:
2986 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2987 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2988 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2989 return Instruction::AddrSpaceCast;
2990 return Instruction::BitCast;
2991 case 13:
2992 // FIXME: this state can be merged with (1), but the following assert
2993 // is useful to check the correcteness of the sequence due to semantic
2994 // change of bitcast.
2995 assert(
2996 SrcTy->isPtrOrPtrVectorTy() &&
2997 MidTy->isPtrOrPtrVectorTy() &&
2998 DstTy->isPtrOrPtrVectorTy() &&
2999 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3000 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3001 "Illegal addrspacecast, bitcast sequence!");
3002 // Allowed, use first cast's opcode
3003 return firstOp;
3004 case 14:
3005 // bitcast, addrspacecast -> addrspacecast
3006 return Instruction::AddrSpaceCast;
3007 case 15:
3008 // FIXME: this state can be merged with (1), but the following assert
3009 // is useful to check the correcteness of the sequence due to semantic
3010 // change of bitcast.
3011 assert(
3012 SrcTy->isIntOrIntVectorTy() &&
3013 MidTy->isPtrOrPtrVectorTy() &&
3014 DstTy->isPtrOrPtrVectorTy() &&
3015 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3016 "Illegal inttoptr, bitcast sequence!");
3017 // Allowed, use first cast's opcode
3018 return firstOp;
3019 case 16:
3020 // FIXME: this state can be merged with (2), but the following assert
3021 // is useful to check the correcteness of the sequence due to semantic
3022 // change of bitcast.
3023 assert(
3024 SrcTy->isPtrOrPtrVectorTy() &&
3025 MidTy->isPtrOrPtrVectorTy() &&
3026 DstTy->isIntOrIntVectorTy() &&
3027 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3028 "Illegal bitcast, ptrtoint sequence!");
3029 // Allowed, use second cast's opcode
3030 return secondOp;
3031 case 17:
3032 // (sitofp (zext x)) -> (uitofp x)
3033 return Instruction::UIToFP;
3034 case 99:
3035 // Cast combination can't happen (error in input). This is for all cases
3036 // where the MidTy is not the same for the two cast instructions.
3037 llvm_unreachable("Invalid Cast Combination");
3038 default:
3039 llvm_unreachable("Error in CastResults table!!!");
3040 }
3041}
3042
3044 const Twine &Name, InsertPosition InsertBefore) {
3045 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3046 // Construct and return the appropriate CastInst subclass
3047 switch (op) {
3048 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3049 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3050 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3051 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3052 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3053 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3054 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3055 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3056 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3057 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3058 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3059 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3060 case BitCast:
3061 return new BitCastInst(S, Ty, Name, InsertBefore);
3062 case AddrSpaceCast:
3063 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3064 default:
3065 llvm_unreachable("Invalid opcode provided");
3066 }
3067}
3068
3070 InsertPosition InsertBefore) {
3071 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3072 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3073 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3074}
3075
3077 InsertPosition InsertBefore) {
3078 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3079 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3080 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3081}
3082
3084 InsertPosition InsertBefore) {
3085 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3086 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3087 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3088}
3089
3090/// Create a BitCast or a PtrToInt cast instruction
3092 InsertPosition InsertBefore) {
3093 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3094 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3095 "Invalid cast");
3096 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3097 assert((!Ty->isVectorTy() ||
3098 cast<VectorType>(Ty)->getElementCount() ==
3099 cast<VectorType>(S->getType())->getElementCount()) &&
3100 "Invalid cast");
3101
3102 if (Ty->isIntOrIntVectorTy())
3103 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3104
3105 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3106}
3107
3109 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3110 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3111 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3112
3113 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3114 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3115
3116 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3117}
3118
3120 const Twine &Name,
3121 InsertPosition InsertBefore) {
3122 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3123 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3124 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3125 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3126
3127 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3128}
3129
3131 const Twine &Name,
3132 InsertPosition InsertBefore) {
3133 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3134 "Invalid integer cast");
3135 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3136 unsigned DstBits = Ty->getScalarSizeInBits();
3137 Instruction::CastOps opcode =
3138 (SrcBits == DstBits ? Instruction::BitCast :
3139 (SrcBits > DstBits ? Instruction::Trunc :
3140 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3141 return Create(opcode, C, Ty, Name, InsertBefore);
3142}
3143
3145 InsertPosition InsertBefore) {
3146 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3147 "Invalid cast");
3148 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3149 unsigned DstBits = Ty->getScalarSizeInBits();
3150 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3151 Instruction::CastOps opcode =
3152 (SrcBits == DstBits ? Instruction::BitCast :
3153 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3154 return Create(opcode, C, Ty, Name, InsertBefore);
3155}
3156
3157bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3158 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3159 return false;
3160
3161 if (SrcTy == DestTy)
3162 return true;
3163
3164 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3165 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3166 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3167 // An element by element cast. Valid if casting the elements is valid.
3168 SrcTy = SrcVecTy->getElementType();
3169 DestTy = DestVecTy->getElementType();
3170 }
3171 }
3172 }
3173
3174 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3175 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3176 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3177 }
3178 }
3179
3180 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3181 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3182
3183 // Could still have vectors of pointers if the number of elements doesn't
3184 // match
3185 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3186 return false;
3187
3188 if (SrcBits != DestBits)
3189 return false;
3190
3191 return true;
3192}
3193
3195 const DataLayout &DL) {
3196 // ptrtoint and inttoptr are not allowed on non-integral pointers
3197 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3198 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3199 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3200 !DL.isNonIntegralPointerType(PtrTy));
3201 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3202 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3203 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3204 !DL.isNonIntegralPointerType(PtrTy));
3205
3206 return isBitCastable(SrcTy, DestTy);
3207}
3208
3209// Provide a way to get a "cast" where the cast opcode is inferred from the
3210// types and size of the operand. This, basically, is a parallel of the
3211// logic in the castIsValid function below. This axiom should hold:
3212// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3213// should not assert in castIsValid. In other words, this produces a "correct"
3214// casting opcode for the arguments passed to it.
3217 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3218 Type *SrcTy = Src->getType();
3219
3220 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3221 "Only first class types are castable!");
3222
3223 if (SrcTy == DestTy)
3224 return BitCast;
3225
3226 // FIXME: Check address space sizes here
3227 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3228 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3229 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3230 // An element by element cast. Find the appropriate opcode based on the
3231 // element types.
3232 SrcTy = SrcVecTy->getElementType();
3233 DestTy = DestVecTy->getElementType();
3234 }
3235
3236 // Get the bit sizes, we'll need these
3237 // FIXME: This doesn't work for scalable vector types with different element
3238 // counts that don't call getElementType above.
3239 unsigned SrcBits =
3240 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3241 unsigned DestBits =
3242 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3243
3244 // Run through the possibilities ...
3245 if (DestTy->isIntegerTy()) { // Casting to integral
3246 if (SrcTy->isIntegerTy()) { // Casting from integral
3247 if (DestBits < SrcBits)
3248 return Trunc; // int -> smaller int
3249 else if (DestBits > SrcBits) { // its an extension
3250 if (SrcIsSigned)
3251 return SExt; // signed -> SEXT
3252 else
3253 return ZExt; // unsigned -> ZEXT
3254 } else {
3255 return BitCast; // Same size, No-op cast
3256 }
3257 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3258 if (DestIsSigned)
3259 return FPToSI; // FP -> sint
3260 else
3261 return FPToUI; // FP -> uint
3262 } else if (SrcTy->isVectorTy()) {
3263 assert(DestBits == SrcBits &&
3264 "Casting vector to integer of different width");
3265 return BitCast; // Same size, no-op cast
3266 } else {
3267 assert(SrcTy->isPointerTy() &&
3268 "Casting from a value that is not first-class type");
3269 return PtrToInt; // ptr -> int
3270 }
3271 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3272 if (SrcTy->isIntegerTy()) { // Casting from integral
3273 if (SrcIsSigned)
3274 return SIToFP; // sint -> FP
3275 else
3276 return UIToFP; // uint -> FP
3277 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3278 if (DestBits < SrcBits) {
3279 return FPTrunc; // FP -> smaller FP
3280 } else if (DestBits > SrcBits) {
3281 return FPExt; // FP -> larger FP
3282 } else {
3283 return BitCast; // same size, no-op cast
3284 }
3285 } else if (SrcTy->isVectorTy()) {
3286 assert(DestBits == SrcBits &&
3287 "Casting vector to floating point of different width");
3288 return BitCast; // same size, no-op cast
3289 }
3290 llvm_unreachable("Casting pointer or non-first class to float");
3291 } else if (DestTy->isVectorTy()) {
3292 assert(DestBits == SrcBits &&
3293 "Illegal cast to vector (wrong type or size)");
3294 return BitCast;
3295 } else if (DestTy->isPointerTy()) {
3296 if (SrcTy->isPointerTy()) {
3297 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3298 return AddrSpaceCast;
3299 return BitCast; // ptr -> ptr
3300 } else if (SrcTy->isIntegerTy()) {
3301 return IntToPtr; // int -> ptr
3302 }
3303 llvm_unreachable("Casting pointer to other than pointer or int");
3304 }
3305 llvm_unreachable("Casting to type that is not first-class");
3306}
3307
3308//===----------------------------------------------------------------------===//
3309// CastInst SubClass Constructors
3310//===----------------------------------------------------------------------===//
3311
3312/// Check that the construction parameters for a CastInst are correct. This
3313/// could be broken out into the separate constructors but it is useful to have
3314/// it in one place and to eliminate the redundant code for getting the sizes
3315/// of the types involved.
3316bool
3318 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3319 SrcTy->isAggregateType() || DstTy->isAggregateType())
3320 return false;
3321
3322 // Get the size of the types in bits, and whether we are dealing
3323 // with vector types, we'll need this later.
3324 bool SrcIsVec = isa<VectorType>(SrcTy);
3325 bool DstIsVec = isa<VectorType>(DstTy);
3326 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3327 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3328
3329 // If these are vector types, get the lengths of the vectors (using zero for
3330 // scalar types means that checking that vector lengths match also checks that
3331 // scalars are not being converted to vectors or vectors to scalars).
3332 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3334 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3336
3337 // Switch on the opcode provided
3338 switch (op) {
3339 default: return false; // This is an input error
3340 case Instruction::Trunc:
3341 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3342 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3343 case Instruction::ZExt:
3344 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3345 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3346 case Instruction::SExt:
3347 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3348 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3349 case Instruction::FPTrunc:
3350 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3351 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3352 case Instruction::FPExt:
3353 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3354 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3355 case Instruction::UIToFP:
3356 case Instruction::SIToFP:
3357 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3358 SrcEC == DstEC;
3359 case Instruction::FPToUI:
3360 case Instruction::FPToSI:
3361 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3362 SrcEC == DstEC;
3363 case Instruction::PtrToAddr:
3364 case Instruction::PtrToInt:
3365 if (SrcEC != DstEC)
3366 return false;
3367 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3368 case Instruction::IntToPtr:
3369 if (SrcEC != DstEC)
3370 return false;
3371 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3372 case Instruction::BitCast: {
3373 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3374 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3375
3376 // BitCast implies a no-op cast of type only. No bits change.
3377 // However, you can't cast pointers to anything but pointers.
3378 if (!SrcPtrTy != !DstPtrTy)
3379 return false;
3380
3381 // For non-pointer cases, the cast is okay if the source and destination bit
3382 // widths are identical.
3383 if (!SrcPtrTy)
3384 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3385
3386 // If both are pointers then the address spaces must match.
3387 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3388 return false;
3389
3390 // A vector of pointers must have the same number of elements.
3391 if (SrcIsVec && DstIsVec)
3392 return SrcEC == DstEC;
3393 if (SrcIsVec)
3394 return SrcEC == ElementCount::getFixed(1);
3395 if (DstIsVec)
3396 return DstEC == ElementCount::getFixed(1);
3397
3398 return true;
3399 }
3400 case Instruction::AddrSpaceCast: {
3401 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3402 if (!SrcPtrTy)
3403 return false;
3404
3405 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3406 if (!DstPtrTy)
3407 return false;
3408
3409 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3410 return false;
3411
3412 return SrcEC == DstEC;
3413 }
3414 }
3415}
3416
3418 InsertPosition InsertBefore)
3419 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3420 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3421}
3422
3423ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3424 InsertPosition InsertBefore)
3425 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3426 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3427}
3428
3429SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3430 InsertPosition InsertBefore)
3431 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3432 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3433}
3434
3436 InsertPosition InsertBefore)
3437 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3438 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3439}
3440
3442 InsertPosition InsertBefore)
3443 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3444 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3445}
3446
3448 InsertPosition InsertBefore)
3449 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3450 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3451}
3452
3454 InsertPosition InsertBefore)
3455 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3456 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3457}
3458
3460 InsertPosition InsertBefore)
3461 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3462 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3463}
3464
3466 InsertPosition InsertBefore)
3467 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3468 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3469}
3470
3472 InsertPosition InsertBefore)
3473 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3474 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3475}
3476
3478 InsertPosition InsertBefore)
3479 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3480 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3481}
3482
3484 InsertPosition InsertBefore)
3485 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3486 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3487}
3488
3490 InsertPosition InsertBefore)
3491 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3492 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3493}
3494
3496 InsertPosition InsertBefore)
3497 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3498 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3499}
3500
3501//===----------------------------------------------------------------------===//
3502// CmpInst Classes
3503//===----------------------------------------------------------------------===//
3504
3506 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3507 Instruction *FlagsSource)
3508 : Instruction(ty, op, AllocMarker, InsertBefore) {
3509 Op<0>() = LHS;
3510 Op<1>() = RHS;
3511 setPredicate(predicate);
3512 setName(Name);
3513 if (FlagsSource)
3514 copyIRFlags(FlagsSource);
3515}
3516
3518 const Twine &Name, InsertPosition InsertBefore) {
3519 if (Op == Instruction::ICmp) {
3520 if (InsertBefore.isValid())
3521 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3522 S1, S2, Name);
3523 else
3524 return new ICmpInst(CmpInst::Predicate(predicate),
3525 S1, S2, Name);
3526 }
3527
3528 if (InsertBefore.isValid())
3529 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3530 S1, S2, Name);
3531 else
3532 return new FCmpInst(CmpInst::Predicate(predicate),
3533 S1, S2, Name);
3534}
3535
3537 Value *S2,
3538 const Instruction *FlagsSource,
3539 const Twine &Name,
3540 InsertPosition InsertBefore) {
3541 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3542 Inst->copyIRFlags(FlagsSource);
3543 return Inst;
3544}
3545
3547 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3548 IC->swapOperands();
3549 else
3550 cast<FCmpInst>(this)->swapOperands();
3551}
3552
3554 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3555 return IC->isCommutative();
3556 return cast<FCmpInst>(this)->isCommutative();
3557}
3558
3561 return ICmpInst::isEquality(P);
3563 return FCmpInst::isEquality(P);
3564 llvm_unreachable("Unsupported predicate kind");
3565}
3566
3567// Returns true if either operand of CmpInst is a provably non-zero
3568// floating-point constant.
3569static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3570 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3571 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3572 if (auto *Const = LHS ? LHS : RHS) {
3573 using namespace llvm::PatternMatch;
3574 return match(Const, m_NonZeroNotDenormalFP());
3575 }
3576 return false;
3577}
3578
3579// Floating-point equality is not an equivalence when comparing +0.0 with
3580// -0.0, when comparing NaN with another value, or when flushing
3581// denormals-to-zero.
3582bool CmpInst::isEquivalence(bool Invert) const {
3583 switch (Invert ? getInversePredicate() : getPredicate()) {
3585 return true;
3587 if (!hasNoNaNs())
3588 return false;
3589 [[fallthrough]];
3591 return hasNonZeroFPOperands(this);
3592 default:
3593 return false;
3594 }
3595}
3596
3598 switch (pred) {
3599 default: llvm_unreachable("Unknown cmp predicate!");
3600 case ICMP_EQ: return ICMP_NE;
3601 case ICMP_NE: return ICMP_EQ;
3602 case ICMP_UGT: return ICMP_ULE;
3603 case ICMP_ULT: return ICMP_UGE;
3604 case ICMP_UGE: return ICMP_ULT;
3605 case ICMP_ULE: return ICMP_UGT;
3606 case ICMP_SGT: return ICMP_SLE;
3607 case ICMP_SLT: return ICMP_SGE;
3608 case ICMP_SGE: return ICMP_SLT;
3609 case ICMP_SLE: return ICMP_SGT;
3610
3611 case FCMP_OEQ: return FCMP_UNE;
3612 case FCMP_ONE: return FCMP_UEQ;
3613 case FCMP_OGT: return FCMP_ULE;
3614 case FCMP_OLT: return FCMP_UGE;
3615 case FCMP_OGE: return FCMP_ULT;
3616 case FCMP_OLE: return FCMP_UGT;
3617 case FCMP_UEQ: return FCMP_ONE;
3618 case FCMP_UNE: return FCMP_OEQ;
3619 case FCMP_UGT: return FCMP_OLE;
3620 case FCMP_ULT: return FCMP_OGE;
3621 case FCMP_UGE: return FCMP_OLT;
3622 case FCMP_ULE: return FCMP_OGT;
3623 case FCMP_ORD: return FCMP_UNO;
3624 case FCMP_UNO: return FCMP_ORD;
3625 case FCMP_TRUE: return FCMP_FALSE;
3626 case FCMP_FALSE: return FCMP_TRUE;
3627 }
3628}
3629
3631 switch (Pred) {
3632 default: return "unknown";
3633 case FCmpInst::FCMP_FALSE: return "false";
3634 case FCmpInst::FCMP_OEQ: return "oeq";
3635 case FCmpInst::FCMP_OGT: return "ogt";
3636 case FCmpInst::FCMP_OGE: return "oge";
3637 case FCmpInst::FCMP_OLT: return "olt";
3638 case FCmpInst::FCMP_OLE: return "ole";
3639 case FCmpInst::FCMP_ONE: return "one";
3640 case FCmpInst::FCMP_ORD: return "ord";
3641 case FCmpInst::FCMP_UNO: return "uno";
3642 case FCmpInst::FCMP_UEQ: return "ueq";
3643 case FCmpInst::FCMP_UGT: return "ugt";
3644 case FCmpInst::FCMP_UGE: return "uge";
3645 case FCmpInst::FCMP_ULT: return "ult";
3646 case FCmpInst::FCMP_ULE: return "ule";
3647 case FCmpInst::FCMP_UNE: return "une";
3648 case FCmpInst::FCMP_TRUE: return "true";
3649 case ICmpInst::ICMP_EQ: return "eq";
3650 case ICmpInst::ICMP_NE: return "ne";
3651 case ICmpInst::ICMP_SGT: return "sgt";
3652 case ICmpInst::ICMP_SGE: return "sge";
3653 case ICmpInst::ICMP_SLT: return "slt";
3654 case ICmpInst::ICMP_SLE: return "sle";
3655 case ICmpInst::ICMP_UGT: return "ugt";
3656 case ICmpInst::ICMP_UGE: return "uge";
3657 case ICmpInst::ICMP_ULT: return "ult";
3658 case ICmpInst::ICMP_ULE: return "ule";
3659 }
3660}
3661
3663 OS << CmpInst::getPredicateName(Pred);
3664 return OS;
3665}
3666
3668 switch (pred) {
3669 default: llvm_unreachable("Unknown icmp predicate!");
3670 case ICMP_EQ: case ICMP_NE:
3671 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3672 return pred;
3673 case ICMP_UGT: return ICMP_SGT;
3674 case ICMP_ULT: return ICMP_SLT;
3675 case ICMP_UGE: return ICMP_SGE;
3676 case ICMP_ULE: return ICMP_SLE;
3677 }
3678}
3679
3681 switch (pred) {
3682 default: llvm_unreachable("Unknown icmp predicate!");
3683 case ICMP_EQ: case ICMP_NE:
3684 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3685 return pred;
3686 case ICMP_SGT: return ICMP_UGT;
3687 case ICMP_SLT: return ICMP_ULT;
3688 case ICMP_SGE: return ICMP_UGE;
3689 case ICMP_SLE: return ICMP_ULE;
3690 }
3691}
3692
3694 switch (pred) {
3695 default: llvm_unreachable("Unknown cmp predicate!");
3696 case ICMP_EQ: case ICMP_NE:
3697 return pred;
3698 case ICMP_SGT: return ICMP_SLT;
3699 case ICMP_SLT: return ICMP_SGT;
3700 case ICMP_SGE: return ICMP_SLE;
3701 case ICMP_SLE: return ICMP_SGE;
3702 case ICMP_UGT: return ICMP_ULT;
3703 case ICMP_ULT: return ICMP_UGT;
3704 case ICMP_UGE: return ICMP_ULE;
3705 case ICMP_ULE: return ICMP_UGE;
3706
3707 case FCMP_FALSE: case FCMP_TRUE:
3708 case FCMP_OEQ: case FCMP_ONE:
3709 case FCMP_UEQ: case FCMP_UNE:
3710 case FCMP_ORD: case FCMP_UNO:
3711 return pred;
3712 case FCMP_OGT: return FCMP_OLT;
3713 case FCMP_OLT: return FCMP_OGT;
3714 case FCMP_OGE: return FCMP_OLE;
3715 case FCMP_OLE: return FCMP_OGE;
3716 case FCMP_UGT: return FCMP_ULT;
3717 case FCMP_ULT: return FCMP_UGT;
3718 case FCMP_UGE: return FCMP_ULE;
3719 case FCMP_ULE: return FCMP_UGE;
3720 }
3721}
3722
3724 switch (pred) {
3725 case ICMP_SGE:
3726 case ICMP_SLE:
3727 case ICMP_UGE:
3728 case ICMP_ULE:
3729 case FCMP_OGE:
3730 case FCMP_OLE:
3731 case FCMP_UGE:
3732 case FCMP_ULE:
3733 return true;
3734 default:
3735 return false;
3736 }
3737}
3738
3740 switch (pred) {
3741 case ICMP_SGT:
3742 case ICMP_SLT:
3743 case ICMP_UGT:
3744 case ICMP_ULT:
3745 case FCMP_OGT:
3746 case FCMP_OLT:
3747 case FCMP_UGT:
3748 case FCMP_ULT:
3749 return true;
3750 default:
3751 return false;
3752 }
3753}
3754
3756 switch (pred) {
3757 case ICMP_SGE:
3758 return ICMP_SGT;
3759 case ICMP_SLE:
3760 return ICMP_SLT;
3761 case ICMP_UGE:
3762 return ICMP_UGT;
3763 case ICMP_ULE:
3764 return ICMP_ULT;
3765 case FCMP_OGE:
3766 return FCMP_OGT;
3767 case FCMP_OLE:
3768 return FCMP_OLT;
3769 case FCMP_UGE:
3770 return FCMP_UGT;
3771 case FCMP_ULE:
3772 return FCMP_ULT;
3773 default:
3774 return pred;
3775 }
3776}
3777
3779 switch (pred) {
3780 case ICMP_SGT:
3781 return ICMP_SGE;
3782 case ICMP_SLT:
3783 return ICMP_SLE;
3784 case ICMP_UGT:
3785 return ICMP_UGE;
3786 case ICMP_ULT:
3787 return ICMP_ULE;
3788 case FCMP_OGT:
3789 return FCMP_OGE;
3790 case FCMP_OLT:
3791 return FCMP_OLE;
3792 case FCMP_UGT:
3793 return FCMP_UGE;
3794 case FCMP_ULT:
3795 return FCMP_ULE;
3796 default:
3797 return pred;
3798 }
3799}
3800
3802 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3803
3804 if (isStrictPredicate(pred))
3805 return getNonStrictPredicate(pred);
3806 if (isNonStrictPredicate(pred))
3807 return getStrictPredicate(pred);
3808
3809 llvm_unreachable("Unknown predicate!");
3810}
3811
3813 switch (predicate) {
3814 default: return false;
3816 case ICmpInst::ICMP_UGE: return true;
3817 }
3818}
3819
3821 switch (predicate) {
3822 default: return false;
3824 case ICmpInst::ICMP_SGE: return true;
3825 }
3826}
3827
3828bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3829 ICmpInst::Predicate Pred) {
3830 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3831 switch (Pred) {
3833 return LHS.eq(RHS);
3835 return LHS.ne(RHS);
3837 return LHS.ugt(RHS);
3839 return LHS.uge(RHS);
3841 return LHS.ult(RHS);
3843 return LHS.ule(RHS);
3845 return LHS.sgt(RHS);
3847 return LHS.sge(RHS);
3849 return LHS.slt(RHS);
3851 return LHS.sle(RHS);
3852 default:
3853 llvm_unreachable("Unexpected non-integer predicate.");
3854 };
3855}
3856
3857bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3858 FCmpInst::Predicate Pred) {
3859 APFloat::cmpResult R = LHS.compare(RHS);
3860 switch (Pred) {
3861 default:
3862 llvm_unreachable("Invalid FCmp Predicate");
3864 return false;
3866 return true;
3867 case FCmpInst::FCMP_UNO:
3868 return R == APFloat::cmpUnordered;
3869 case FCmpInst::FCMP_ORD:
3870 return R != APFloat::cmpUnordered;
3871 case FCmpInst::FCMP_UEQ:
3872 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3873 case FCmpInst::FCMP_OEQ:
3874 return R == APFloat::cmpEqual;
3875 case FCmpInst::FCMP_UNE:
3876 return R != APFloat::cmpEqual;
3877 case FCmpInst::FCMP_ONE:
3879 case FCmpInst::FCMP_ULT:
3880 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3881 case FCmpInst::FCMP_OLT:
3882 return R == APFloat::cmpLessThan;
3883 case FCmpInst::FCMP_UGT:
3885 case FCmpInst::FCMP_OGT:
3886 return R == APFloat::cmpGreaterThan;
3887 case FCmpInst::FCMP_ULE:
3888 return R != APFloat::cmpGreaterThan;
3889 case FCmpInst::FCMP_OLE:
3890 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3891 case FCmpInst::FCMP_UGE:
3892 return R != APFloat::cmpLessThan;
3893 case FCmpInst::FCMP_OGE:
3894 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3895 }
3896}
3897
3898std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3899 const KnownBits &RHS,
3900 ICmpInst::Predicate Pred) {
3901 switch (Pred) {
3902 case ICmpInst::ICMP_EQ:
3903 return KnownBits::eq(LHS, RHS);
3904 case ICmpInst::ICMP_NE:
3905 return KnownBits::ne(LHS, RHS);
3906 case ICmpInst::ICMP_UGE:
3907 return KnownBits::uge(LHS, RHS);
3908 case ICmpInst::ICMP_UGT:
3909 return KnownBits::ugt(LHS, RHS);
3910 case ICmpInst::ICMP_ULE:
3911 return KnownBits::ule(LHS, RHS);
3912 case ICmpInst::ICMP_ULT:
3913 return KnownBits::ult(LHS, RHS);
3914 case ICmpInst::ICMP_SGE:
3915 return KnownBits::sge(LHS, RHS);
3916 case ICmpInst::ICMP_SGT:
3917 return KnownBits::sgt(LHS, RHS);
3918 case ICmpInst::ICMP_SLE:
3919 return KnownBits::sle(LHS, RHS);
3920 case ICmpInst::ICMP_SLT:
3921 return KnownBits::slt(LHS, RHS);
3922 default:
3923 llvm_unreachable("Unexpected non-integer predicate.");
3924 }
3925}
3926
3928 if (CmpInst::isEquality(pred))
3929 return pred;
3930 if (isSigned(pred))
3931 return getUnsignedPredicate(pred);
3932 if (isUnsigned(pred))
3933 return getSignedPredicate(pred);
3934
3935 llvm_unreachable("Unknown predicate!");
3936}
3937
3939 switch (predicate) {
3940 default: return false;
3943 case FCmpInst::FCMP_ORD: return true;
3944 }
3945}
3946
3948 switch (predicate) {
3949 default: return false;
3952 case FCmpInst::FCMP_UNO: return true;
3953 }
3954}
3955
3957 switch(predicate) {
3958 default: return false;
3959 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3960 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3961 }
3962}
3963
3965 switch(predicate) {
3966 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3967 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3968 default: return false;
3969 }
3970}
3971
3973 // If the predicates match, then we know the first condition implies the
3974 // second is true.
3975 if (CmpPredicate::getMatching(Pred1, Pred2))
3976 return true;
3977
3978 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3980 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3982
3983 switch (Pred1) {
3984 default:
3985 break;
3986 case CmpInst::ICMP_EQ:
3987 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3988 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3989 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3990 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3991 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3992 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3993 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3994 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3995 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3996 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3997 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3998 }
3999 return false;
4000}
4001
4003 CmpPredicate Pred2) {
4004 return isImpliedTrueByMatchingCmp(Pred1,
4006}
4007
4009 CmpPredicate Pred2) {
4010 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4011 return true;
4012 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4013 return false;
4014 return std::nullopt;
4015}
4016
4017//===----------------------------------------------------------------------===//
4018// CmpPredicate Implementation
4019//===----------------------------------------------------------------------===//
4020
4021std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4022 CmpPredicate B) {
4023 if (A.Pred == B.Pred)
4024 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4026 return {};
4027 if (A.HasSameSign &&
4029 return B.Pred;
4030 if (B.HasSameSign &&
4032 return A.Pred;
4033 return {};
4034}
4035
4039
4041 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4042 return ICI->getCmpPredicate();
4043 return Cmp->getPredicate();
4044}
4045
4049
4051 return getSwapped(get(Cmp));
4052}
4053
4054//===----------------------------------------------------------------------===//
4055// SwitchInst Implementation
4056//===----------------------------------------------------------------------===//
4057
4058void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4059 assert(Value && Default && NumReserved);
4060 ReservedSpace = NumReserved;
4062 allocHungoffUses(ReservedSpace);
4063
4064 Op<0>() = Value;
4065 Op<1>() = Default;
4066}
4067
4068/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4069/// switch on and a default destination. The number of additional cases can
4070/// be specified here to make memory allocation more efficient. This
4071/// constructor can also autoinsert before another instruction.
4072SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4073 InsertPosition InsertBefore)
4074 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4075 AllocMarker, InsertBefore) {
4076 init(Value, Default, 2+NumCases*2);
4077}
4078
4079SwitchInst::SwitchInst(const SwitchInst &SI)
4080 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4081 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4082 setNumHungOffUseOperands(SI.getNumOperands());
4083 Use *OL = getOperandList();
4084 const Use *InOL = SI.getOperandList();
4085 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4086 OL[i] = InOL[i];
4087 OL[i+1] = InOL[i+1];
4088 }
4089 SubclassOptionalData = SI.SubclassOptionalData;
4090}
4091
4092/// addCase - Add an entry to the switch instruction...
4093///
// Appends a (case value, successor) operand pair, growing the hung-off use
// list if the reserved capacity is exhausted, then writes the pair through a
// CaseHandle.
// NOTE(review): the function signature line (original 4094) and original
// line 4101 are missing from this extraction; the body uses parameters
// OnVal and Dest — confirm the exact signature against upstream.
4095 unsigned NewCaseIdx = getNumCases();
4096 unsigned OpNo = getNumOperands();
4097 if (OpNo+2 > ReservedSpace)
4098 growOperands(); // Get more space!
4099 // Initialize some new operands.
4100 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
4102 CaseHandle Case(this, NewCaseIdx);
4103 Case.setValue(OnVal);
4104 Case.setSuccessor(Dest);
4105}
4106
4107/// removeCase - This method removes the specified case and its successor
4108/// from the switch instruction.
// Swap-and-pop removal: the removed case's two operand slots are overwritten
// with the last case's pair, then the trailing pair is nulled out.  Returns
// an iterator to the case that now occupies the removed index.
// NOTE(review): the function signature line (original 4109) and original
// line 4126 are missing from this extraction — confirm against upstream.
4110 unsigned idx = I->getCaseIndex();
4111
4112 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4113
4114 unsigned NumOps = getNumOperands();
4115 Use *OL = getOperandList();
4116
4117 // Overwrite this case with the end of the list.
4118 if (2 + (idx + 1) * 2 != NumOps) {
4119 OL[2 + idx * 2] = OL[NumOps - 2];
4120 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4121 }
4122
4123 // Nuke the last value.
4124 OL[NumOps-2].set(nullptr);
4125 OL[NumOps-2+1].set(nullptr);
4127
4128 return CaseIt(this, idx);
4129}
4130
4131/// growOperands - grow operands - This grows the operand list in response
4132/// to a push_back style of operation. This grows the number of ops by 3 times.
4133///
4134void SwitchInst::growOperands() {
4135 unsigned e = getNumOperands();
4136 unsigned NumOps = e*3;
4137
4138 ReservedSpace = NumOps;
4139 growHungoffUses(ReservedSpace);
4140}
4141
4143 assert(Changed && "called only if metadata has changed");
4144
4145 if (!Weights)
4146 return nullptr;
4147
4148 assert(SI.getNumSuccessors() == Weights->size() &&
4149 "num of prof branch_weights must accord with num of successors");
4150
4151 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4152
4153 if (AllZeroes || Weights->size() < 2)
4154 return nullptr;
4155
4156 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4157}
4158
4160 MDNode *ProfileData = getBranchWeightMDNode(SI);
4161 if (!ProfileData)
4162 return;
4163
4164 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4165 llvm_unreachable("number of prof branch_weights metadata operands does "
4166 "not correspond to number of succesors");
4167 }
4168
4170 if (!extractBranchWeights(ProfileData, Weights))
4171 return;
4172 this->Weights = std::move(Weights);
4173}
4174
4177 if (Weights) {
4178 assert(SI.getNumSuccessors() == Weights->size() &&
4179 "num of prof branch_weights must accord with num of successors");
4180 Changed = true;
4181 // Copy the last case to the place of the removed one and shrink.
4182 // This is tightly coupled with the way SwitchInst::removeCase() removes
4183 // the cases in SwitchInst::removeCase(CaseIt).
4184 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4185 Weights->pop_back();
4186 }
4187 return SI.removeCase(I);
4188}
4189
4191 ConstantInt *OnVal, BasicBlock *Dest,
4193 SI.addCase(OnVal, Dest);
4194
4195 if (!Weights && W && *W) {
4196 Changed = true;
4197 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4198 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4199 } else if (Weights) {
4200 Changed = true;
4201 Weights->push_back(W.value_or(0));
4202 }
4203 if (Weights)
4204 assert(SI.getNumSuccessors() == Weights->size() &&
4205 "num of prof branch_weights must accord with num of successors");
4206}
4207
4210 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4211 Changed = false;
4212 if (Weights)
4213 Weights->resize(0);
4214 return SI.eraseFromParent();
4215}
4216
4219 if (!Weights)
4220 return std::nullopt;
4221 return (*Weights)[idx];
4222}
4223
4226 if (!W)
4227 return;
4228
4229 if (!Weights && *W)
4230 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4231
4232 if (Weights) {
4233 auto &OldW = (*Weights)[idx];
4234 if (*W != OldW) {
4235 Changed = true;
4236 OldW = *W;
4237 }
4238 }
4239}
4240
4243 unsigned idx) {
4244 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4245 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4246 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4247 ->getValue()
4248 .getZExtValue();
4249
4250 return std::nullopt;
4251}
4252
4253//===----------------------------------------------------------------------===//
4254// IndirectBrInst Implementation
4255//===----------------------------------------------------------------------===//
4256
// Shared initializer for IndirectBrInst: reserves one operand slot for the
// address plus NumDests destination slots, allocates the hung-off use list,
// and installs the address as operand 0.
// NOTE(review): one line (original 4261) is missing from this extraction —
// presumably the operand-count bookkeeping call; confirm against upstream.
4257void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4258 assert(Address && Address->getType()->isPointerTy() &&
4259 "Address of indirectbr must be a pointer");
4260 ReservedSpace = 1+NumDests;
4262 allocHungoffUses(ReservedSpace);
4263
4264 Op<0>() = Address;
4265}
4266
4267
4268/// growOperands - grow operands - This grows the operand list in response
4269/// to a push_back style of operation. This grows the number of ops by 2 times.
4270///
4271void IndirectBrInst::growOperands() {
4272 unsigned e = getNumOperands();
4273 unsigned NumOps = e*2;
4274
4275 ReservedSpace = NumOps;
4276 growHungoffUses(ReservedSpace);
4277}
4278
4279IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4280 InsertPosition InsertBefore)
4281 : Instruction(Type::getVoidTy(Address->getContext()),
4282 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4283 init(Address, NumCases);
4284}
4285
4286IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4287 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4288 AllocMarker) {
4289 NumUserOperands = IBI.NumUserOperands;
4290 allocHungoffUses(IBI.getNumOperands());
4291 Use *OL = getOperandList();
4292 const Use *InOL = IBI.getOperandList();
4293 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4294 OL[i] = InOL[i];
4295 SubclassOptionalData = IBI.SubclassOptionalData;
4296}
4297
4298/// addDestination - Add a destination.
4299///
// Appends DestBB as a new destination operand, growing the hung-off use list
// if the reserved capacity is exhausted.
// NOTE(review): the function signature line (original 4300) and original
// line 4306 are missing from this extraction; the body uses parameter
// DestBB — confirm the exact signature against upstream.
4301 unsigned OpNo = getNumOperands();
4302 if (OpNo+1 > ReservedSpace)
4303 growOperands(); // Get more space!
4304 // Initialize some new operands.
4305 assert(OpNo < ReservedSpace && "Growing didn't work!");
4307 getOperandList()[OpNo] = DestBB;
4308}
4309
4310/// removeDestination - This method removes the specified successor from the
4311/// indirectbr instruction.
// Swap-and-pop removal: operand idx+1 (destinations start after the address
// operand at slot 0) is overwritten with the last destination, and the
// trailing slot is nulled out.
// NOTE(review): the function signature line (original 4312) and original
// line 4323 are missing from this extraction — confirm against upstream.
4313 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4314
4315 unsigned NumOps = getNumOperands();
4316 Use *OL = getOperandList();
4317
4318 // Replace this value with the last one.
4319 OL[idx+1] = OL[NumOps-1];
4320
4321 // Nuke the last value.
4322 OL[NumOps-1].set(nullptr);
4324}
4325
4326//===----------------------------------------------------------------------===//
4327// FreezeInst Implementation
4328//===----------------------------------------------------------------------===//
4329
4330FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4331 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4332 setName(Name);
4333}
4334
4335//===----------------------------------------------------------------------===//
4336// cloneImpl() implementations
4337//===----------------------------------------------------------------------===//
4338
4339// Define these methods here so vtables don't get emitted into every translation
4340// unit that uses these classes.
4341
// Clone a GEP by placement-allocating with AllocMarker and copy-constructing.
// NOTE(review): the AllocMarker declaration line (original 4343) is missing
// from this extraction — confirm against upstream.
4342GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4344 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4345}
4346
4350
4354
4356 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4357}
4358
4360 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4361}
4362
4363ExtractValueInst *ExtractValueInst::cloneImpl() const {
4364 return new ExtractValueInst(*this);
4365}
4366
4367InsertValueInst *InsertValueInst::cloneImpl() const {
4368 return new InsertValueInst(*this);
4369}
4370
4373 getOperand(0), getAlign());
4374 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4375 Result->setSwiftError(isSwiftError());
4376 return Result;
4377}
4378
4380 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4382}
4383
4388
4393 Result->setVolatile(isVolatile());
4394 Result->setWeak(isWeak());
4395 return Result;
4396}
4397
4399 AtomicRMWInst *Result =
4402 Result->setVolatile(isVolatile());
4403 return Result;
4404}
4405
4409
4411 return new TruncInst(getOperand(0), getType());
4412}
4413
4415 return new ZExtInst(getOperand(0), getType());
4416}
4417
4419 return new SExtInst(getOperand(0), getType());
4420}
4421
4423 return new FPTruncInst(getOperand(0), getType());
4424}
4425
4427 return new FPExtInst(getOperand(0), getType());
4428}
4429
4431 return new UIToFPInst(getOperand(0), getType());
4432}
4433
4435 return new SIToFPInst(getOperand(0), getType());
4436}
4437
4439 return new FPToUIInst(getOperand(0), getType());
4440}
4441
4443 return new FPToSIInst(getOperand(0), getType());
4444}
4445
4447 return new PtrToIntInst(getOperand(0), getType());
4448}
4449
4453
4455 return new IntToPtrInst(getOperand(0), getType());
4456}
4457
4459 return new BitCastInst(getOperand(0), getType());
4460}
4461
4465
// Clone a call, taking the bundle-aware sizing path when operand bundles are
// present.
// NOTE(review): the AllocMarker declaration lines (original 4468-4470 and
// 4473) are missing from this extraction — confirm against upstream.
4466CallInst *CallInst::cloneImpl() const {
4467 if (hasOperandBundles()) {
4471 return new (AllocMarker) CallInst(*this, AllocMarker);
4472 }
4474 return new (AllocMarker) CallInst(*this, AllocMarker);
4475}
4476
4477SelectInst *SelectInst::cloneImpl() const {
4479}
4480
4482 return new VAArgInst(getOperand(0), getType());
4483}
4484
4485ExtractElementInst *ExtractElementInst::cloneImpl() const {
4487}
4488
4489InsertElementInst *InsertElementInst::cloneImpl() const {
4491}
4492
4496
4497PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4498
4499LandingPadInst *LandingPadInst::cloneImpl() const {
4500 return new LandingPadInst(*this);
4501}
4502
// Clone a return by placement-allocating with AllocMarker and
// copy-constructing.
// NOTE(review): the AllocMarker declaration line (original 4504) is missing
// from this extraction — confirm against upstream.
4503ReturnInst *ReturnInst::cloneImpl() const {
4505 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4506}
4507
// Clone a branch by placement-allocating with AllocMarker and
// copy-constructing.
// NOTE(review): the AllocMarker declaration line (original 4509) is missing
// from this extraction — confirm against upstream.
4508BranchInst *BranchInst::cloneImpl() const {
4510 return new (AllocMarker) BranchInst(*this, AllocMarker);
4511}
4512
4513SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4514
4515IndirectBrInst *IndirectBrInst::cloneImpl() const {
4516 return new IndirectBrInst(*this);
4517}
4518
// Clone an invoke, taking the bundle-aware sizing path when operand bundles
// are present.
// NOTE(review): the AllocMarker declaration lines (original 4521-4523 and
// 4526) are missing from this extraction — confirm against upstream.
4519InvokeInst *InvokeInst::cloneImpl() const {
4520 if (hasOperandBundles()) {
4524 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4525 }
4527 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4528}
4529
// Clone a callbr, taking the bundle-aware sizing path when operand bundles
// are present.
// NOTE(review): the AllocMarker declaration lines (original 4532-4534 and
// 4537) are missing from this extraction — confirm against upstream.
4530CallBrInst *CallBrInst::cloneImpl() const {
4531 if (hasOperandBundles()) {
4535 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4536 }
4538 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4539}
4540
4541ResumeInst *ResumeInst::cloneImpl() const {
4542 return new (AllocMarker) ResumeInst(*this);
4543}
4544
// Clone a cleanupret by placement-allocating with AllocMarker and
// copy-constructing.
// NOTE(review): the AllocMarker declaration line (original 4546) is missing
// from this extraction — confirm against upstream.
4545CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4547 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4548}
4549
4550CatchReturnInst *CatchReturnInst::cloneImpl() const {
4551 return new (AllocMarker) CatchReturnInst(*this);
4552}
4553
4554CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4555 return new CatchSwitchInst(*this);
4556}
4557
// Clone a funclet pad by placement-allocating with AllocMarker and
// copy-constructing.
// NOTE(review): the AllocMarker declaration line (original 4559) is missing
// from this extraction — confirm against upstream.
4558FuncletPadInst *FuncletPadInst::cloneImpl() const {
4560 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4561}
4562
4564 LLVMContext &Context = getContext();
4565 return new UnreachableInst(Context);
4566}
4567
// Decide whether this 'unreachable' should be lowered to a trap.  Returns
// false when trapping is disabled, when the enclosing function is naked,
// when 'Call' is noreturn and NoTrapAfterNoreturn is set, or when 'Call' is
// already a non-continuable trap (no extra trap needed).
// NOTE(review): original line 4574 — the start of the 'if' that declares
// 'Call' (presumably from the preceding instruction) — is missing from this
// extraction; confirm against upstream.
4568bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4569 bool NoTrapAfterNoreturn) const {
4570 if (!TrapUnreachable)
4571 return false;
4572
4573 // We may be able to ignore unreachable behind a noreturn call.
4575 Call && Call->doesNotReturn()) {
4576 if (NoTrapAfterNoreturn)
4577 return false;
4578 // Do not emit an additional trap instruction.
4579 if (Call->isNonContinuableTrap())
4580 return false;
4581 }
4582
4583 if (getFunction()->hasFnAttribute(Attribute::Naked))
4584 return false;
4585
4586 return true;
4587}
4588
4590 return new FreezeInst(getOperand(0));
4591}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
static bool isSigned(unsigned int Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1330
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1639
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1598
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:200
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
iterator end() const
Definition ArrayRef.h:136
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
iterator begin() const
Definition ArrayRef.h:135
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:191
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:354
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:386
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:367
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:370
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:382
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:612
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:666
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:860
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:917
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:770
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:950
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:681
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:695
@ ICMP_SLT
signed less than
Definition InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:708
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:684
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:693
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:682
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:683
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:705
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:692
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:689
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:690
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:685
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:687
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:706
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:694
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:691
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:680
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:688
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:932
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:829
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:944
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:772
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:873
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:854
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:925
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:767
bool isStrictPredicate() const
Definition InstrTypes.h:845
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:895
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:778
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:938
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handling.
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition MDBuilder.cpp:38
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1441
static MemoryEffectsBase readOnly()
Definition ModRef.h:125
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:221
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:215
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:141
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:234
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:224
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:218
static MemoryEffectsBase writeOnly()
Definition ModRef.h:130
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:158
static MemoryEffectsBase none()
Definition ModRef.h:120
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:245
StringRef getTag() const
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
BasicBlock ** block_iterator
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:281
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:250
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:279
const Use * getOperandList() const
Definition User.h:225
op_range operands()
Definition User.h:292
LLVM_ABI void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:50
op_iterator op_begin()
Definition User.h:284
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:265
Use & Op()
Definition User.h:196
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
LLVM_ABI void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition User.cpp:67
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:194
size_type size() const
Definition DenseSet.h:87
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:169
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:666
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Offset
Definition DWP.cpp:477
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1657
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:294
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:296
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:288
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1750
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1815
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:319
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2088
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:294
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:249
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated.
Definition User.h:66