LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
/// Compute the total byte size of this alloca, scaling the element size by
/// the array length for array allocations.
///
/// \param DL the data layout used to size the allocated type.
/// \returns the allocation size in bytes, or std::nullopt when it cannot be
/// determined (non-constant array length, or overflow in the multiply).
/// NOTE(review): the function-name line was elided in this capture and has
/// been reconstructed from upstream LLVM.
std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    // Guard against overflow when multiplying element size by element count.
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}
80
/// Same as getAllocationSize, but expressed in bits.
///
/// \returns the allocation size in bits, or std::nullopt when the byte size
/// is unknown or the byte-to-bit conversion would overflow.
/// NOTE(review): the function-name line was elided in this capture and has
/// been reconstructed from upstream LLVM.
std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  // Scale bytes to bits, checking for overflow of the scalar quantity.
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
110 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
// Copy constructor: clones the PHI's type, operand values and incoming-block
// list from PN.
// NOTE(review): this capture elided some source lines between the init list
// and the operand copy (upstream allocates the hung-off uses there) — confirm
// against the original file before relying on this text.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
      ReservedSpace(PN.getNumOperands()) {
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
}
135
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.  Returns the removed value.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  // NOTE(review): the matching shift of the incoming-block array and the
  // operand-count decrement were elided in this capture — confirm upstream.

  // Nuke the last value.
  Op<-1>().set(nullptr);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): the replaceAllUsesWith/eraseFromParent lines were elided
    // in this capture — confirm upstream.
  }
  return Removed;
}
161
// Remove every incoming value whose index satisfies Predicate, keeping the
// operand list and the incoming-block list in sync.
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  // Collect the indices to drop first so the two parallel arrays (operands
  // and blocks) can be compacted consistently below.
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  // Clear the now-unused tail uses so they don't keep values alive.
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
    return RemoveIndices.contains(&BB - block_begin());
  });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): the replaceAllUsesWith/eraseFromParent lines were elided
    // in this capture — confirm upstream.
  }
}
194
/// Grow the hung-off operand list in response to a push_back-style operation.
/// Capacity grows by roughly 1.5x, with a floor of two operands since
/// two-input PHI nodes are by far the most common.
void PHINode::growOperands() {
  unsigned CurOps = getNumOperands();
  unsigned NewCapacity = std::max(2u, CurOps + CurOps / 2);

  ReservedSpace = NewCapacity;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
207
208/// hasConstantValue - If the specified PHI node always merges together the same
209/// value, return the value, otherwise return null.
211 // Exploit the fact that phi nodes always have at least one entry.
212 Value *ConstantValue = getIncomingValue(0);
213 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
214 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
215 if (ConstantValue != this)
216 return nullptr; // Incoming values not all the same.
217 // The case where the first value is this PHI.
218 ConstantValue = getIncomingValue(i);
219 }
220 if (ConstantValue == this)
221 return PoisonValue::get(getType());
222 return ConstantValue;
223}
224
225/// hasConstantOrUndefValue - Whether the specified PHI node always merges
226/// together the same value, assuming that undefs result in the same value as
227/// non-undefs.
228/// Unlike \ref hasConstantValue, this does not return a value because the
229/// unique non-undef incoming value need not dominate the PHI node.
231 Value *ConstantValue = nullptr;
232 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
234 if (Incoming != this && !isa<UndefValue>(Incoming)) {
235 if (ConstantValue && ConstantValue != Incoming)
236 return false;
237 ConstantValue = Incoming;
238 }
239 }
240 return true;
241}
242
243//===----------------------------------------------------------------------===//
244// LandingPadInst Implementation
245//===----------------------------------------------------------------------===//
246
// Construct a landing pad of type RetTy with space reserved for
// NumReservedValues clauses.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
253
// Copy constructor: duplicates LP's clause operands and cleanup flag.
// NOTE(review): this capture elided the hung-off-use allocation lines that
// precede the operand copy — confirm against upstream before relying on it.
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
      ReservedSpace(LP.getNumOperands()) {
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}
266
267LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
268 const Twine &NameStr,
269 InsertPosition InsertBefore) {
270 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
271}
272
// Shared constructor tail: reserve clause space, name the instruction, and
// start out as a non-cleanup landing pad.
// NOTE(review): one line (upstream sets the initial operand count here) was
// elided from this capture — confirm against the original file.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
280
/// Grow the clause operand list in response to a push_back-style operation.
/// Capacity is roughly doubled once the current reserve is exhausted.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned CurOps = getNumOperands();
  if (ReservedSpace < CurOps + Size) {
    ReservedSpace = (std::max(CurOps, 1U) + Size / 2) * 2;
    growHungoffUses(ReservedSpace);
  }
}
289
// Append a catch/filter clause operand, growing the list if needed.
// NOTE(review): the function-signature line and the operand-count increment
// were elided from this capture — fragment kept as-is; confirm upstream.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  getOperandList()[OpNo] = Val;
}
297
298//===----------------------------------------------------------------------===//
299// CallBase Implementation
300//===----------------------------------------------------------------------===//
301
303 InsertPosition InsertPt) {
304 switch (CB->getOpcode()) {
305 case Instruction::Call:
306 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
307 case Instruction::Invoke:
308 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
309 case Instruction::CallBr:
310 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
311 default:
312 llvm_unreachable("Unknown CallBase sub-class!");
313 }
314}
315
// Clone CI, replacing any existing bundle that shares OpB's tag with OpB
// (OpB is always appended last).
// NOTE(review): the first signature line and the OpDefs vector declaration
// were elided from this capture — fragment kept as-is; confirm upstream.
                           InsertPosition InsertPt) {
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    // Keep every bundle except ones with the tag being replaced.
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}
327
328Function *CallBase::getCaller() { return getParent()->getParent(); }
329
331 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
332 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
333}
334
336 const Value *V = getCalledOperand();
337 if (isa<Function>(V) || isa<Constant>(V))
338 return false;
339 return !isInlineAsm();
340}
341
342/// Tests if this call site must be tail call optimized. Only a CallInst can
343/// be tail call optimized.
345 if (auto *CI = dyn_cast<CallInst>(this))
346 return CI->isMustTailCall();
347 return false;
348}
349
350/// Tests if this call site is marked as a tail call.
352 if (auto *CI = dyn_cast<CallInst>(this))
353 return CI->isTailCall();
354 return false;
355}
356
358 if (auto *F = dyn_cast_or_null<Function>(getCalledOperand()))
359 return F->getIntrinsicID();
361}
362
365
366 if (const Function *F = getCalledFunction())
367 Mask |= F->getAttributes().getRetNoFPClass();
368 return Mask;
369}
370
373
374 if (const Function *F = getCalledFunction())
375 Mask |= F->getAttributes().getParamNoFPClass(i);
376 return Mask;
377}
378
// Combine the return-value range constraints from the call-site attribute
// and (when the callee is known) the callee's return attribute; when both
// are present the intersection is returned.
// NOTE(review): the declaration of FnAttr was elided from this capture —
// fragment kept as-is; confirm upstream.
std::optional<ConstantRange> CallBase::getRange() const {
  Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
  if (const Function *F = getCalledFunction())
    FnAttr = F->getRetAttribute(Attribute::Range);

  if (CallAttr.isValid() && FnAttr.isValid())
    return CallAttr.getRange().intersectWith(FnAttr.getRange());
  if (CallAttr.isValid())
    return CallAttr.getRange();
  if (FnAttr.isValid())
    return FnAttr.getRange();
  return std::nullopt;
}
393
395 if (hasRetAttr(Attribute::NonNull))
396 return true;
397
398 if (getRetDereferenceableBytes() > 0 &&
400 return true;
401
402 return false;
403}
404
406 unsigned Index;
407
408 if (Attrs.hasAttrSomewhere(Kind, &Index))
410 if (const Function *F = getCalledFunction())
411 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
413
414 return nullptr;
415}
416
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  // A call-site attribute answers the question immediately.
  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  // Without a known callee there is nothing else to consult.
  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  // NOTE(review): the return statements for the ReadNone and ReadOnly cases
  // were elided from this capture (upstream also checks clobbering bundles
  // there) — fragment kept as-is; confirm against the original file.
  switch (Kind) {
  case Attribute::ReadNone:
  case Attribute::ReadOnly:
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
443
445 bool AllowUndefOrPoison) const {
447 "Argument must be a pointer");
448 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
449 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
450 return true;
451
452 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
454 getCaller(),
456 return true;
457
458 return false;
459}
460
461bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
462 if (auto *F = dyn_cast<Function>(getCalledOperand()))
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
469 if (auto *F = dyn_cast<Function>(getCalledOperand()))
470 return F->getAttributes().hasFnAttr(Kind);
471
472 return false;
473}
474
/// Fetch the given function attribute from the callee when the callee is a
/// known Function; otherwise returns an empty (invalid) Attribute.
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  const auto *Callee = dyn_cast<Function>(getCalledOperand());
  if (!Callee)
    return Attribute();
  return Callee->getAttributes().getFnAttr(Kind);
}
488
489template LLVM_ABI Attribute
490CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
491template LLVM_ABI Attribute
492CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
493
494template <typename AK>
495Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
496 AK Kind) const {
498
499 if (auto *F = dyn_cast<Function>(V))
500 return F->getAttributes().getParamAttr(ArgNo, Kind);
501
502 return Attribute();
503}
504template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
505 unsigned ArgNo, Attribute::AttrKind Kind) const;
506template LLVM_ABI Attribute
507CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
508
511 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
513}
514
// Copy the bundle operands into the operand array starting at BeginIndex and
// fill in each BundleOpInfo's interned tag plus its [Begin, End) operand
// range.  Returns an iterator one past the last bundle operand written.
// NOTE(review): the return-type/signature lines were elided from this
// capture — fragment kept as-is; confirm upstream.
                                   const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    // Tags are uniqued in the LLVMContext, so they can later be compared by
    // pointer identity.
    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
540
// Map an operand index to the BundleOpInfo whose [Begin, End) range covers
// it.
// NOTE(review): the function signature, the linear-vs-binary-search guard,
// part of the second assert, and the Begin/End iterator declarations were
// elided from this capture — fragment kept as-is; confirm upstream.
  /// When there isn't many bundles, we do a simple linear search.
  /// Else fallback to a binary-search that use the fact that bundles usually
  /// have similar number of argument to get faster convergence.
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below and to prevent using floating point numbers
  /// we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Current = Begin;

  // Interpolation search: guess the bundle from the average number of
  // operands per bundle, then narrow the [Begin, End) window.
  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
587
590 InsertPosition InsertPt) {
591 if (CB->getOperandBundle(ID))
592 return CB;
593
595 CB->getOperandBundlesAsDefs(Bundles);
596 Bundles.push_back(OB);
597 return Create(CB, Bundles, InsertPt);
598}
599
601 InsertPosition InsertPt) {
603 bool CreateNew = false;
604
605 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
606 auto Bundle = CB->getOperandBundleAt(I);
607 if (Bundle.getTagID() == ID) {
608 CreateNew = true;
609 continue;
610 }
611 Bundles.emplace_back(Bundle);
612 }
613
614 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
615}
616
618 // Implementation note: this is a conservative implementation of operand
619 // bundle semantics, where *any* non-assume operand bundle (other than
620 // ptrauth) forces a callsite to be at least readonly.
624 getIntrinsicID() != Intrinsic::assume;
625}
626
632 getIntrinsicID() != Intrinsic::assume;
633}
634
637 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
638 MemoryEffects FnME = Fn->getMemoryEffects();
639 if (hasOperandBundles()) {
640 // TODO: Add a method to get memory effects for operand bundles instead.
642 FnME |= MemoryEffects::readOnly();
644 FnME |= MemoryEffects::writeOnly();
645 }
646 if (isVolatile()) {
647 // Volatile operations also access inaccessible memory.
649 }
650 ME &= FnME;
651 }
652 return ME;
653}
656}
657
658/// Determine if the function does not access memory.
661}
664}
665
666/// Determine if the function does not access or only reads memory.
669}
672}
673
674/// Determine if the function does not access or only writes memory.
677}
680}
681
682/// Determine if the call can access memmory only using pointers based
683/// on its arguments.
686}
689}
690
691/// Determine if the function may only access memory that is
692/// inaccessible from the IR.
695}
698}
699
700/// Determine if the function may only access memory that is
701/// either inaccessible from the IR or pointed to by its arguments.
704}
708}
709
711 if (OpNo < arg_size()) {
712 // If the argument is passed byval, the callee does not have access to the
713 // original pointer and thus cannot capture it.
714 if (isByValArgument(OpNo))
715 return CaptureInfo::none();
716
718 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
719 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
720 return CI;
721 }
722
723 // deopt operand bundles are captures(none)
724 auto &BOI = getBundleOpInfoForOperand(OpNo);
725 auto OBU = operandBundleFromBundleOpInfo(BOI);
726 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
727}
728
730 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
732 continue;
733
735 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
736 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
738 return true;
739 }
740 return false;
741}
742
743//===----------------------------------------------------------------------===//
744// CallInst Implementation
745//===----------------------------------------------------------------------===//
746
747void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
748 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
749 this->FTy = FTy;
750 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
751 "NumOperands not set up?");
752
753#ifndef NDEBUG
754 assert((Args.size() == FTy->getNumParams() ||
755 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
756 "Calling a function with bad signature!");
757
758 for (unsigned i = 0; i != Args.size(); ++i)
759 assert((i >= FTy->getNumParams() ||
760 FTy->getParamType(i) == Args[i]->getType()) &&
761 "Calling a function with a bad signature!");
762#endif
763
764 // Set operands in order of their index to match use-list-order
765 // prediction.
766 llvm::copy(Args, op_begin());
767 setCalledOperand(Func);
768
769 auto It = populateBundleOperandInfos(Bundles, Args.size());
770 (void)It;
771 assert(It + 1 == op_end() && "Should add up!");
772
773 setName(NameStr);
774}
775
776void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
777 this->FTy = FTy;
778 assert(getNumOperands() == 1 && "NumOperands not set up?");
779 setCalledOperand(Func);
780
781 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
782
783 setName(NameStr);
784}
785
786CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
787 AllocInfo AllocInfo, InsertPosition InsertBefore)
788 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
789 InsertBefore) {
790 init(Ty, Func, Name);
791}
792
793CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
794 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
796 "Wrong number of operands allocated");
797 setTailCallKind(CI.getTailCallKind());
799
800 std::copy(CI.op_begin(), CI.op_end(), op_begin());
801 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
804}
805
807 InsertPosition InsertPt) {
808 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
809
810 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
811 Args, OpB, CI->getName(), InsertPt);
812 NewCI->setTailCallKind(CI->getTailCallKind());
813 NewCI->setCallingConv(CI->getCallingConv());
814 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
815 NewCI->setAttributes(CI->getAttributes());
816 NewCI->setDebugLoc(CI->getDebugLoc());
817 return NewCI;
818}
819
820// Update profile weight for call instruction by scaling it using the ratio
821// of S/T. The meaning of "branch_weights" meta data for call instruction is
822// transfered to represent call count.
824 if (T == 0) {
825 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
826 "div by 0. Ignoring. Likely the function "
827 << getParent()->getParent()->getName()
828 << " has 0 entry count, and contains call instructions "
829 "with non-zero prof info.");
830 return;
831 }
832 scaleProfData(*this, S, T);
833}
834
835//===----------------------------------------------------------------------===//
836// InvokeInst Implementation
837//===----------------------------------------------------------------------===//
838
839void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
840 BasicBlock *IfException, ArrayRef<Value *> Args,
842 const Twine &NameStr) {
843 this->FTy = FTy;
844
846 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
847 "NumOperands not set up?");
848
849#ifndef NDEBUG
850 assert(((Args.size() == FTy->getNumParams()) ||
851 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
852 "Invoking a function with bad signature");
853
854 for (unsigned i = 0, e = Args.size(); i != e; i++)
855 assert((i >= FTy->getNumParams() ||
856 FTy->getParamType(i) == Args[i]->getType()) &&
857 "Invoking a function with a bad signature!");
858#endif
859
860 // Set operands in order of their index to match use-list-order
861 // prediction.
862 llvm::copy(Args, op_begin());
863 setNormalDest(IfNormal);
864 setUnwindDest(IfException);
866
867 auto It = populateBundleOperandInfos(Bundles, Args.size());
868 (void)It;
869 assert(It + 3 == op_end() && "Should add up!");
870
871 setName(NameStr);
872}
873
874InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
875 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
876 assert(getNumOperands() == II.getNumOperands() &&
877 "Wrong number of operands allocated");
878 setCallingConv(II.getCallingConv());
879 std::copy(II.op_begin(), II.op_end(), op_begin());
880 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
882 SubclassOptionalData = II.SubclassOptionalData;
883}
884
886 InsertPosition InsertPt) {
887 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
888
889 auto *NewII = InvokeInst::Create(
890 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
891 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
892 NewII->setCallingConv(II->getCallingConv());
893 NewII->SubclassOptionalData = II->SubclassOptionalData;
894 NewII->setAttributes(II->getAttributes());
895 NewII->setDebugLoc(II->getDebugLoc());
896 return NewII;
897}
898
900 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
901}
902
904 if (T == 0) {
905 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
906 "div by 0. Ignoring. Likely the function "
907 << getParent()->getParent()->getName()
908 << " has 0 entry count, and contains call instructions "
909 "with non-zero prof info.");
910 return;
911 }
912 scaleProfData(*this, S, T);
913}
914
915//===----------------------------------------------------------------------===//
916// CallBrInst Implementation
917//===----------------------------------------------------------------------===//
918
919void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
920 ArrayRef<BasicBlock *> IndirectDests,
923 const Twine &NameStr) {
924 this->FTy = FTy;
925
926 assert(getNumOperands() == ComputeNumOperands(Args.size(),
927 IndirectDests.size(),
928 CountBundleInputs(Bundles)) &&
929 "NumOperands not set up?");
930
931#ifndef NDEBUG
932 assert(((Args.size() == FTy->getNumParams()) ||
933 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
934 "Calling a function with bad signature");
935
936 for (unsigned i = 0, e = Args.size(); i != e; i++)
937 assert((i >= FTy->getNumParams() ||
938 FTy->getParamType(i) == Args[i]->getType()) &&
939 "Calling a function with a bad signature!");
940#endif
941
942 // Set operands in order of their index to match use-list-order
943 // prediction.
944 llvm::copy(Args, op_begin());
945 NumIndirectDests = IndirectDests.size();
946 setDefaultDest(Fallthrough);
947 for (unsigned i = 0; i != NumIndirectDests; ++i)
948 setIndirectDest(i, IndirectDests[i]);
950
951 auto It = populateBundleOperandInfos(Bundles, Args.size());
952 (void)It;
953 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
954
955 setName(NameStr);
956}
957
958CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
959 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
960 AllocInfo) {
962 "Wrong number of operands allocated");
964 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
965 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
968 NumIndirectDests = CBI.NumIndirectDests;
969}
970
972 InsertPosition InsertPt) {
973 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
974
975 auto *NewCBI = CallBrInst::Create(
976 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
977 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
978 NewCBI->setCallingConv(CBI->getCallingConv());
979 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
980 NewCBI->setAttributes(CBI->getAttributes());
981 NewCBI->setDebugLoc(CBI->getDebugLoc());
982 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
983 return NewCBI;
984}
985
986//===----------------------------------------------------------------------===//
987// ReturnInst Implementation
988//===----------------------------------------------------------------------===//
989
990ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
991 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
992 AllocInfo) {
994 "Wrong number of operands allocated");
995 if (RI.getNumOperands())
996 Op<0>() = RI.Op<0>();
998}
999
1000ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1001 InsertPosition InsertBefore)
1002 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1003 InsertBefore) {
1004 if (retVal)
1005 Op<0>() = retVal;
1006}
1007
1008//===----------------------------------------------------------------------===//
1009// ResumeInst Implementation
1010//===----------------------------------------------------------------------===//
1011
1012ResumeInst::ResumeInst(const ResumeInst &RI)
1013 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1014 AllocMarker) {
1015 Op<0>() = RI.Op<0>();
1016}
1017
1018ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1019 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1020 AllocMarker, InsertBefore) {
1021 Op<0>() = Exn;
1022}
1023
1024//===----------------------------------------------------------------------===//
1025// CleanupReturnInst Implementation
1026//===----------------------------------------------------------------------===//
1027
1028CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1030 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1032 "Wrong number of operands allocated");
1033 setSubclassData<Instruction::OpaqueField>(
1035 Op<0>() = CRI.Op<0>();
1036 if (CRI.hasUnwindDest())
1037 Op<1>() = CRI.Op<1>();
1038}
1039
1040void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1041 if (UnwindBB)
1042 setSubclassData<UnwindDestField>(true);
1043
1044 Op<0>() = CleanupPad;
1045 if (UnwindBB)
1046 Op<1>() = UnwindBB;
1047}
1048
1049CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1051 InsertPosition InsertBefore)
1052 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1053 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1054 init(CleanupPad, UnwindBB);
1055}
1056
1057//===----------------------------------------------------------------------===//
1058// CatchReturnInst Implementation
1059//===----------------------------------------------------------------------===//
1060void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1061 Op<0>() = CatchPad;
1062 Op<1>() = BB;
1063}
1064
1065CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1066 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1067 AllocMarker) {
1068 Op<0>() = CRI.Op<0>();
1069 Op<1>() = CRI.Op<1>();
1070}
1071
1072CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1073 InsertPosition InsertBefore)
1074 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1075 AllocMarker, InsertBefore) {
1076 init(CatchPad, BB);
1077}
1078
1079//===----------------------------------------------------------------------===//
1080// CatchSwitchInst Implementation
1081//===----------------------------------------------------------------------===//
1082
1083CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1084 unsigned NumReservedValues,
1085 const Twine &NameStr,
1086 InsertPosition InsertBefore)
1087 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1088 InsertBefore) {
1089 if (UnwindDest)
1090 ++NumReservedValues;
1091 init(ParentPad, UnwindDest, NumReservedValues + 1);
1092 setName(NameStr);
1093}
1094
// Copy constructor: re-runs init() with the source's configuration, then
// copies all operands (handlers start at index 1; index 0 is the parent pad).
// NOTE(review): doc-dump extraction — an original line (likely an assert) is
// missing between the init-list and init() call; verify against upstream.
1095CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
 1096 : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
 1098 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
 1099 setNumHungOffUseOperands(ReservedSpace);
 1100 Use *OL = getOperandList();
 1101 const Use *InOL = CSI.getOperandList();
 1102 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
 1103 OL[I] = InOL[I];
 1104}
1105
1106void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1107 unsigned NumReservedValues) {
1108 assert(ParentPad && NumReservedValues);
1109
1110 ReservedSpace = NumReservedValues;
1111 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1112 allocHungoffUses(ReservedSpace);
1113
1114 Op<0>() = ParentPad;
1115 if (UnwindDest) {
1116 setSubclassData<UnwindDestField>(true);
1117 setUnwindDest(UnwindDest);
1118 }
1119}
1120
1121/// growOperands - grow operands - This grows the operand list in response to a
1122/// push_back style of operation. This grows the number of ops by 2 times.
1123void CatchSwitchInst::growOperands(unsigned Size) {
1124 unsigned NumOperands = getNumOperands();
1125 assert(NumOperands >= 1);
1126 if (ReservedSpace >= NumOperands + Size)
1127 return;
1128 ReservedSpace = (NumOperands + Size / 2) * 2;
1129 growHungoffUses(ReservedSpace);
1130}
1131
// addHandler: appends Handler to the hung-off operand list, growing it first.
// removeHandler: shifts all later handlers down one slot and drops the last.
// NOTE(review): doc-dump extraction — both function signature lines and the
// setNumHungOffUseOperands update lines are missing from this listing;
// verify against upstream Instructions.cpp.
1133 unsigned OpNo = getNumOperands();
 1134 growOperands(1);
 1135 assert(OpNo < ReservedSpace && "Growing didn't work!");
 1137 getOperandList()[OpNo] = Handler;
 1138}
 1139
 1141 // Move all subsequent handlers up one.
 1142 Use *EndDst = op_end() - 1;
 1143 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
 1144 *CurDst = *(CurDst + 1);
 1145 // Null out the last handler use.
 1146 *EndDst = nullptr;
 1147
 1149}
1150
1151//===----------------------------------------------------------------------===//
1152// FuncletPadInst Implementation
1153//===----------------------------------------------------------------------===//
1154void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1155 const Twine &NameStr) {
1156 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1157 llvm::copy(Args, op_begin());
1158 setParentPad(ParentPad);
1159 setName(NameStr);
1160}
1161
// Copy constructor and primary constructor for funclet pads (catchpad /
// cleanuppad share this implementation; the opcode is passed in).
// NOTE(review): doc-dump extraction — the assert head, the Args/AllocInfo
// parameter line, and a trailing statement are missing from this listing;
// verify against upstream Instructions.cpp.
1162FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
 1163 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
 1165 "Wrong number of operands allocated");
 1166 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
 1168}
 1169
 1170FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
 1172 const Twine &NameStr,
 1173 InsertPosition InsertBefore)
 1174 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
 1175 init(ParentPad, Args, NameStr);
 1176}
1177
1178//===----------------------------------------------------------------------===//
1179// UnreachableInst Implementation
1180//===----------------------------------------------------------------------===//
1181
// UnreachableInst constructor: a void-typed terminator with no operands.
// NOTE(review): doc-dump extraction — the signature's first line (taking the
// LLVMContext) is missing from this listing; verify against upstream.
1183 InsertPosition InsertBefore)
 1184 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
 1185 AllocMarker, InsertBefore) {}
1186
1187//===----------------------------------------------------------------------===//
1188// BranchInst Implementation
1189//===----------------------------------------------------------------------===//
1190
1191void BranchInst::AssertOK() {
1192 if (isConditional())
1193 assert(getCondition()->getType()->isIntegerTy(1) &&
1194 "May only branch on boolean predicates!");
1195}
1196
1197BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
1198 InsertPosition InsertBefore)
1199 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1200 AllocInfo, InsertBefore) {
1201 assert(IfTrue && "Branch destination may not be null!");
1202 Op<-1>() = IfTrue;
1203}
1204
1205BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1206 AllocInfo AllocInfo, InsertPosition InsertBefore)
1207 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1208 AllocInfo, InsertBefore) {
1209 // Assign in order of operand index to make use-list order predictable.
1210 Op<-3>() = Cond;
1211 Op<-2>() = IfFalse;
1212 Op<-1>() = IfTrue;
1213#ifndef NDEBUG
1214 AssertOK();
1215#endif
1216}
1217
// Copy constructor for BranchInst, followed by swapSuccessors().
// NOTE(review): doc-dump extraction — the copy ctor's assert head, the
// SubclassOptionalData copy, swapSuccessors' signature/assert lines, and the
// profile-metadata update call are missing; verify against upstream.
1218BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
 1219 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
 1220 AllocInfo) {
 1222 "Wrong number of operands allocated");
 1223 // Assign in order of operand index to make use-list order predictable.
 1224 if (BI.getNumOperands() != 1) {
 1225 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
 1226 Op<-3>() = BI.Op<-3>();
 1227 Op<-2>() = BI.Op<-2>();
 1228 }
 1229 Op<-1>() = BI.Op<-1>();
 1231}
 1232
 1235 "Cannot swap successors of an unconditional branch");
 1236 Op<-1>().swap(Op<-2>());
 1237
 1238 // Update profile metadata if present and it matches our structural
 1239 // expectations.
 1241}
1242
1243//===----------------------------------------------------------------------===//
1244// AllocaInst Implementation
1245//===----------------------------------------------------------------------===//
1246
1247static Value *getAISize(LLVMContext &Context, Value *Amt) {
1248 if (!Amt)
1249 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1250 else {
1251 assert(!isa<BasicBlock>(Amt) &&
1252 "Passed basic block into allocation size parameter! Use other ctor");
1253 assert(Amt->getType()->isIntegerTy() &&
1254 "Allocation array size is not an integer!");
1255 }
1256 return Amt;
1257}
1258
// Compute the default alignment for an alloca from the module's DataLayout
// (preferred alignment of the allocated type).
// NOTE(review): doc-dump extraction — the function's signature line is
// missing from this listing; verify against upstream Instructions.cpp.
1260 assert(Pos.isValid() &&
 1261 "Insertion position cannot be null when alignment not provided!");
 1262 BasicBlock *BB = Pos.getBasicBlock();
 1263 assert(BB->getParent() &&
 1264 "BB must be in a Function when alignment not provided!");
 1265 const DataLayout &DL = BB->getDataLayout();
 1266 return DL.getPrefTypeAlign(Ty);
 1267}
1268
1269AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1270 InsertPosition InsertBefore)
1271 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1272
1273AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1274 const Twine &Name, InsertPosition InsertBefore)
1275 : AllocaInst(Ty, AddrSpace, ArraySize,
1276 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1277 InsertBefore) {}
1278
// Primary AllocaInst constructor plus the isArrayAllocation / isStaticAlloca
// predicates.
// NOTE(review): doc-dump extraction — a statement in the ctor body (likely
// the alignment setter) and both predicate signature lines are missing;
// verify against upstream Instructions.cpp.
1279AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
 1280 Align Align, const Twine &Name,
 1281 InsertPosition InsertBefore)
 1282 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
 1283 getAISize(Ty->getContext(), ArraySize), InsertBefore),
 1284 AllocatedType(Ty) {
 1286 assert(!Ty->isVoidTy() && "Cannot allocate void!");
 1287 setName(Name);
 1288}
 1289
// isArrayAllocation: a non-constant or non-one array size means this alloca
// allocates an array.
 1291 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
 1292 return !CI->isOne();
 1293 return true;
 1294}
 1295
 1296/// isStaticAlloca - Return true if this alloca is in the entry block of the
 1297/// function and is a constant size. If so, the code generator will fold it
 1298/// into the prolog/epilog code, so it is basically free.
 1300 // Must be constant size.
 1301 if (!isa<ConstantInt>(getArraySize())) return false;
 1302
 1303 // Must be in the entry block.
 1304 const BasicBlock *Parent = getParent();
 1305 return Parent->isEntryBlock() && !isUsedWithInAlloca();
 1306}
1307
1308//===----------------------------------------------------------------------===//
1309// LoadInst Implementation
1310//===----------------------------------------------------------------------===//
1311
// LoadInst sanity check, the shared default-alignment helper for loads and
// stores (ABI alignment of the accessed type), and the simplest LoadInst
// delegating constructor.
// NOTE(review): doc-dump extraction — AssertOK's assert head, the helper's
// signature, and the ctor's first signature line are missing; verify against
// upstream Instructions.cpp.
1312void LoadInst::AssertOK() {
 1314 "Ptr must have pointer type.");
 1315}
 1316
 1318 assert(Pos.isValid() &&
 1319 "Insertion position cannot be null when alignment not provided!");
 1320 BasicBlock *BB = Pos.getBasicBlock();
 1321 assert(BB->getParent() &&
 1322 "BB must be in a Function when alignment not provided!");
 1323 const DataLayout &DL = BB->getDataLayout();
 1324 return DL.getABITypeAlign(Ty);
 1325}
 1326
 1328 InsertPosition InsertBef)
 1329 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1330
1331LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1332 InsertPosition InsertBef)
1333 : LoadInst(Ty, Ptr, Name, isVolatile,
1334 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1335
1336LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1337 Align Align, InsertPosition InsertBef)
1338 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1339 SyncScope::System, InsertBef) {}
1340
// Full LoadInst constructor (volatility, alignment, ordering, sync scope).
// NOTE(review): doc-dump extraction — the Align/Order/SSID parameter line and
// the volatile/alignment setter statements are missing from this listing;
// verify against upstream Instructions.cpp.
1341LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
 1343 InsertPosition InsertBef)
 1344 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
 1347 setAtomic(Order, SSID);
 1348 AssertOK();
 1349 setName(Name);
 1350}
1351
1352//===----------------------------------------------------------------------===//
1353// StoreInst Implementation
1354//===----------------------------------------------------------------------===//
1355
// StoreInst sanity check plus the simplest delegating constructor.
// NOTE(review): doc-dump extraction — the pointer-type assert head and the
// ctor's signature line are missing from this listing; verify against
// upstream Instructions.cpp.
1356void StoreInst::AssertOK() {
 1357 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
 1359 "Ptr must have pointer type!");
 1360}
 1361
 1363 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1365StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1366 InsertPosition InsertBefore)
1367 : StoreInst(val, addr, isVolatile,
1368 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1369 InsertBefore) {}
1370
1371StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1372 InsertPosition InsertBefore)
1373 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1374 SyncScope::System, InsertBefore) {}
1375
1376StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1377 AtomicOrdering Order, SyncScope::ID SSID,
1378 InsertPosition InsertBefore)
1379 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1380 InsertBefore) {
1381 Op<0>() = val;
1382 Op<1>() = addr;
1385 setAtomic(Order, SSID);
1386 AssertOK();
1387}
1388
1389//===----------------------------------------------------------------------===//
1390// AtomicCmpXchgInst Implementation
1391//===----------------------------------------------------------------------===//
1392
// AtomicCmpXchgInst::Init and its constructor. The result type is a struct
// of { original value, i1 success flag }, built in the ctor's init-list.
// NOTE(review): doc-dump extraction — a pointer-type assert head in Init and
// the ctor's first signature line are missing; verify against upstream.
1393void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
 1394 Align Alignment, AtomicOrdering SuccessOrdering,
 1395 AtomicOrdering FailureOrdering,
 1396 SyncScope::ID SSID) {
 1397 Op<0>() = Ptr;
 1398 Op<1>() = Cmp;
 1399 Op<2>() = NewVal;
 1400 setSuccessOrdering(SuccessOrdering);
 1401 setFailureOrdering(FailureOrdering);
 1402 setSyncScopeID(SSID);
 1403 setAlignment(Alignment);
 1404
 1405 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
 1406 "All operands must be non-null!");
 1408 "Ptr must have pointer type!");
 1409 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
 1410 "Cmp type and NewVal type must be same!");
 1411}
 1412
 1414 Align Alignment,
 1415 AtomicOrdering SuccessOrdering,
 1416 AtomicOrdering FailureOrdering,
 1417 SyncScope::ID SSID,
 1418 InsertPosition InsertBefore)
 1419 : Instruction(
 1420 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
 1421 AtomicCmpXchg, AllocMarker, InsertBefore) {
 1422 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
 1423}
1424
1425//===----------------------------------------------------------------------===//
1426// AtomicRMWInst Implementation
1427//===----------------------------------------------------------------------===//
1428
// AtomicRMWInst::Init and its constructor. The instruction's type equals the
// operand value's type (it returns the old memory contents).
// NOTE(review): doc-dump extraction — the statement recording the BinOp
// operation and a pointer-type assert head are missing, as is the ctor's
// first signature line; verify against upstream Instructions.cpp.
1429void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
 1430 Align Alignment, AtomicOrdering Ordering,
 1431 SyncScope::ID SSID) {
 1432 assert(Ordering != AtomicOrdering::NotAtomic &&
 1433 "atomicrmw instructions can only be atomic.");
 1434 assert(Ordering != AtomicOrdering::Unordered &&
 1435 "atomicrmw instructions cannot be unordered.");
 1436 Op<0>() = Ptr;
 1437 Op<1>() = Val;
 1439 setOrdering(Ordering);
 1440 setSyncScopeID(SSID);
 1441 setAlignment(Alignment);
 1442
 1443 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
 1445 "Ptr must have pointer type!");
 1446 assert(Ordering != AtomicOrdering::NotAtomic &&
 1447 "AtomicRMW instructions must be atomic!");
 1448}
 1449
 1451 Align Alignment, AtomicOrdering Ordering,
 1452 SyncScope::ID SSID, InsertPosition InsertBefore)
 1453 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
 1454 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
 1455}
1456
// Map an atomicrmw BinOp to its textual mnemonic (as printed in IR).
// NOTE(review): doc-dump extraction — the function signature and many `case`
// label lines (those carrying hyperlinks) are missing from this listing;
// only some labels and the return statements survive. Verify against
// upstream Instructions.cpp before relying on this text.
1458 switch (Op) {
 1460 return "xchg";
 1461 case AtomicRMWInst::Add:
 1462 return "add";
 1463 case AtomicRMWInst::Sub:
 1464 return "sub";
 1465 case AtomicRMWInst::And:
 1466 return "and";
 1468 return "nand";
 1469 case AtomicRMWInst::Or:
 1470 return "or";
 1471 case AtomicRMWInst::Xor:
 1472 return "xor";
 1473 case AtomicRMWInst::Max:
 1474 return "max";
 1475 case AtomicRMWInst::Min:
 1476 return "min";
 1478 return "umax";
 1480 return "umin";
 1482 return "fadd";
 1484 return "fsub";
 1486 return "fmax";
 1488 return "fmin";
 1490 return "fmaximum";
 1492 return "fminimum";
 1494 return "uinc_wrap";
 1496 return "udec_wrap";
 1498 return "usub_cond";
 1500 return "usub_sat";
 1502 return "<invalid operation>";
 1503 }
 1504
 1505 llvm_unreachable("invalid atomicrmw operation");
 1506}
1507
1508//===----------------------------------------------------------------------===//
1509// FenceInst Implementation
1510//===----------------------------------------------------------------------===//
1511
// FenceInst constructor: a void-typed, zero-operand instruction carrying only
// an ordering and a synchronization scope.
// NOTE(review): doc-dump extraction — the signature's first line (context and
// ordering parameters) is missing; verify against upstream.
1513 SyncScope::ID SSID, InsertPosition InsertBefore)
 1514 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
 1515 setOrdering(Ordering);
 1516 setSyncScopeID(SSID);
 1517}
1518
1519//===----------------------------------------------------------------------===//
1520// GetElementPtrInst Implementation
1521//===----------------------------------------------------------------------===//
1522
1523void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1524 const Twine &Name) {
1525 assert(getNumOperands() == 1 + IdxList.size() &&
1526 "NumOperands not initialized?");
1527 Op<0>() = Ptr;
1528 llvm::copy(IdxList, op_begin() + 1);
1529 setName(Name);
1530}
1531
// Copy constructor: duplicates operands and the cached source/result element
// types of the source GEP.
// NOTE(review): doc-dump extraction — the AllocInfo parameter line and the
// SubclassOptionalData copy are missing from this listing; verify upstream.
1532GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
 1534 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
 1535 SourceElementType(GEPI.SourceElementType),
 1536 ResultElementType(GEPI.ResultElementType) {
 1537 assert(getNumOperands() == GEPI.getNumOperands() &&
 1538 "Wrong number of operands allocated");
 1539 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
 1541}
1542
// getTypeAtIndex overloads: resolve the element type selected by a Value* or
// integer index; structs require a valid constant index, arrays/vectors
// return their uniform element type, anything else yields nullptr.
// NOTE(review): doc-dump extraction — both overload signature lines are
// missing from this listing; verify against upstream Instructions.cpp.
1544 if (auto *Struct = dyn_cast<StructType>(Ty)) {
 1545 if (!Struct->indexValid(Idx))
 1546 return nullptr;
 1547 return Struct->getTypeAtIndex(Idx);
 1548 }
 1549 if (!Idx->getType()->isIntOrIntVectorTy())
 1550 return nullptr;
 1551 if (auto *Array = dyn_cast<ArrayType>(Ty))
 1552 return Array->getElementType();
 1553 if (auto *Vector = dyn_cast<VectorType>(Ty))
 1554 return Vector->getElementType();
 1555 return nullptr;
 1556}
 1557
// Integer-index overload: same resolution with a bounds check for structs.
 1559 if (auto *Struct = dyn_cast<StructType>(Ty)) {
 1560 if (Idx >= Struct->getNumElements())
 1561 return nullptr;
 1562 return Struct->getElementType(Idx);
 1563 }
 1564 if (auto *Array = dyn_cast<ArrayType>(Ty))
 1565 return Array->getElementType();
 1566 if (auto *Vector = dyn_cast<VectorType>(Ty))
 1567 return Vector->getElementType();
 1568 return nullptr;
 1569}
1570
// getIndexedTypeInternal walks an index list (skipping the first, pointer
// index) through nested aggregate types; the three getIndexedType overloads
// below instantiate it for Value*, Constant*, and uint64_t indices.
// NOTE(review): doc-dump extraction — the template function's signature, its
// loop-body type-step statement, and all three overload signatures are
// missing from this listing; verify against upstream Instructions.cpp.
1571template <typename IndexTy>
 1573 if (IdxList.empty())
 1574 return Ty;
 1575 for (IndexTy V : IdxList.slice(1)) {
 1577 if (!Ty)
 1578 return Ty;
 1579 }
 1580 return Ty;
 1581}
 1582
 1584 return getIndexedTypeInternal(Ty, IdxList);
 1585}
 1586
 1588 ArrayRef<Constant *> IdxList) {
 1589 return getIndexedTypeInternal(Ty, IdxList);
 1590}
 1591
 1593 return getIndexedTypeInternal(Ty, IdxList);
 1594}
1595
// NOTE(review): doc-dump extraction — both predicate signature lines are
// missing from this listing; verify against upstream Instructions.cpp.
1596/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1597/// zeros. If so, the result pointer and the first operand have the same
1598/// value, just potentially different types.
 1600 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
 1601 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
 1602 if (!CI->isZero()) return false;
 1603 } else {
 1604 return false;
 1605 }
 1606 }
 1607 return true;
 1608}
 1609
 1610/// hasAllConstantIndices - Return true if all of the indices of this GEP are
 1611/// constant integers. If so, the result pointer and the first operand have
 1612/// a constant offset between them.
 1614 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
 1615 if (!isa<ConstantInt>(getOperand(i)))
 1616 return false;
 1617 }
 1618 return true;
 1619}
1620
// GEP no-wrap-flag mutators/accessors and offset helpers. All of them
// delegate to the generic GEPOperator implementation via cast<GEPOperator>.
// NOTE(review): doc-dump extraction — the signature line of nearly every
// function in this run (setNoWrapFlags, setIsInBounds, getNoWrapFlags,
// isInBounds, hasNoUnsignedSignedWrap, hasNoUnsignedWrap,
// accumulateConstantOffset, collectOffset) is missing, as are some
// flag-combining statements; verify against upstream Instructions.cpp.
1623}
 1624
 1626 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
 1627 if (B)
 1629 else
 1630 NW = NW.withoutInBounds();
 1631 setNoWrapFlags(NW);
 1632}
 1633
 1635 return cast<GEPOperator>(this)->getNoWrapFlags();
 1636}
 1637
 1639 return cast<GEPOperator>(this)->isInBounds();
 1640}
 1641
 1643 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
 1644}
 1645
 1647 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
 1648}
 1649
 1651 APInt &Offset) const {
 1652 // Delegate to the generic GEPOperator implementation.
 1653 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
 1654}
 1655
 1657 const DataLayout &DL, unsigned BitWidth,
 1658 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
 1659 APInt &ConstantOffset) const {
 1660 // Delegate to the generic GEPOperator implementation.
 1661 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
 1662 ConstantOffset);
 1663}
1664
1665//===----------------------------------------------------------------------===//
1666// ExtractElementInst Implementation
1667//===----------------------------------------------------------------------===//
1668
1669ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1670 const Twine &Name,
1671 InsertPosition InsertBef)
1672 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1673 ExtractElement, AllocMarker, InsertBef) {
1674 assert(isValidOperands(Val, Index) &&
1675 "Invalid extractelement instruction operands!");
1676 Op<0>() = Val;
1677 Op<1>() = Index;
1678 setName(Name);
1679}
1680
1681bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1682 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1683 return false;
1684 return true;
1685}
1686
1687//===----------------------------------------------------------------------===//
1688// InsertElementInst Implementation
1689//===----------------------------------------------------------------------===//
1690
1691InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1692 const Twine &Name,
1693 InsertPosition InsertBef)
1694 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1695 assert(isValidOperands(Vec, Elt, Index) &&
1696 "Invalid insertelement instruction operands!");
1697 Op<0>() = Vec;
1698 Op<1>() = Elt;
1699 Op<2>() = Index;
1700 setName(Name);
1701}
1702
// Validate insertelement operands: vector, matching element type, integer
// index.
// NOTE(review): doc-dump extraction — the signature's first line (taking Vec
// and Elt) is missing from this listing; verify against upstream.
1704 const Value *Index) {
 1705 if (!Vec->getType()->isVectorTy())
 1706 return false; // First operand of insertelement must be vector type.
 1707
 1708 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
 1709 return false;// Second operand of insertelement must be vector element type.
 1710
 1711 if (!Index->getType()->isIntegerTy())
 1712 return false; // Third operand of insertelement must be i32.
 1713 return true;
 1714}
1715
1716//===----------------------------------------------------------------------===//
1717// ShuffleVectorInst Implementation
1718//===----------------------------------------------------------------------===//
1719
// A poison placeholder used as the implicit second operand of single-operand
// shuffles, followed by two delegating ShuffleVectorInst constructors.
// NOTE(review): doc-dump extraction — the helper's signature and both
// delegating-ctor signature/delegation lines are missing from this listing;
// verify against upstream Instructions.cpp.
1721 assert(V && "Cannot create placeholder of nullptr V");
 1722 return PoisonValue::get(V->getType());
 1723}
 1724
 1726 InsertPosition InsertBefore)
 1728 InsertBefore) {}
 1729
 1731 const Twine &Name,
 1732 InsertPosition InsertBefore)
 1734 InsertBefore) {}
1735
// Constant-mask constructor: the result's element count comes from the mask
// vector; the mask is decoded into the internal small-int representation.
// NOTE(review): doc-dump extraction — the signature's first line (V1/V2/Mask
// parameters) is missing from this listing; verify against upstream.
1737 const Twine &Name,
 1738 InsertPosition InsertBefore)
 1739 : Instruction(
 1740 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
 1741 cast<VectorType>(Mask->getType())->getElementCount()),
 1742 ShuffleVector, AllocMarker, InsertBefore) {
 1743 assert(isValidOperands(V1, V2, Mask) &&
 1744 "Invalid shuffle vector instruction operands!");
 1745
 1746 Op<0>() = V1;
 1747 Op<1>() = V2;
 1748 SmallVector<int, 16> MaskArr;
 1749 getShuffleMask(cast<Constant>(Mask), MaskArr);
 1750 setShuffleMask(MaskArr);
 1751 setName(Name);
 1752}
1753
// Integer-mask constructor: the result's element count is the mask's length;
// scalability is inherited from the first input vector.
// NOTE(review): doc-dump extraction — the signature's first line is missing
// from this listing; verify against upstream Instructions.cpp.
1755 const Twine &Name,
 1756 InsertPosition InsertBefore)
 1757 : Instruction(
 1758 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
 1759 Mask.size(), isa<ScalableVectorType>(V1->getType())),
 1760 ShuffleVector, AllocMarker, InsertBefore) {
 1761 assert(isValidOperands(V1, V2, Mask) &&
 1762 "Invalid shuffle vector instruction operands!");
 1763 Op<0>() = V1;
 1764 Op<1>() = V2;
 1765 setShuffleMask(Mask);
 1766 setName(Name);
 1767}
1768
// Swap the two shuffle inputs and remap each mask element across the operand
// boundary so the shuffle's result is unchanged.
// NOTE(review): doc-dump extraction — the function's signature line is
// missing from this listing; verify against upstream Instructions.cpp.
1770 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
 1771 int NumMaskElts = ShuffleMask.size();
 1772 SmallVector<int, 16> NewMask(NumMaskElts);
 1773 for (int i = 0; i != NumMaskElts; ++i) {
 1774 int MaskElt = getMaskValue(i);
 1775 if (MaskElt == PoisonMaskElem) {
 1776 NewMask[i] = PoisonMaskElem;
 1777 continue;
 1778 }
 1779 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
 1780 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
 1781 NewMask[i] = MaskElt;
 1782 }
 1783 setShuffleMask(NewMask);
 1784 Op<0>().swap(Op<1>());
 1785}
1786
// Validate an integer shuffle mask against two input vectors: same vector
// type, in-range elements, and (for scalable vectors) only splat-of-zero or
// all-poison masks.
// NOTE(review): doc-dump extraction — the signature's first line is missing
// from this listing; verify against upstream Instructions.cpp.
1788 ArrayRef<int> Mask) {
 1789 // V1 and V2 must be vectors of the same type.
 1790 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
 1791 return false;
 1792
 1793 // Make sure the mask elements make sense.
 1794 int V1Size =
 1795 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
 1796 for (int Elem : Mask)
 1797 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
 1798 return false;
 1799
 1800 if (isa<ScalableVectorType>(V1->getType()))
 1801 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
 1802 return false;
 1803
 1804 return true;
 1805}
1806
// Validate a Constant* shuffle mask: it must be a vector of i32 of the same
// scalability as the inputs, and each defined element must index into the
// concatenation of the two sources.
// NOTE(review): doc-dump extraction — the signature's first line is missing
// from this listing; verify against upstream Instructions.cpp.
1808 const Value *Mask) {
 1809 // V1 and V2 must be vectors of the same type.
 1810 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
 1811 return false;
 1812
 1813 // Mask must be vector of i32, and must be the same kind of vector as the
 1814 // input vectors
 1815 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
 1816 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
 1817 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
 1818 return false;
 1819
 1820 // Check to see if Mask is valid.
 1821 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
 1822 return true;
 1823
 1824 // NOTE: Through vector ConstantInt we have the potential to support more
 1825 // than just zero splat masks but that requires a LangRef change.
 1826 if (isa<ScalableVectorType>(MaskTy))
 1827 return false;
 1828
 1829 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
 1830
 1831 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
 1832 return !CI->uge(V1Size * 2);
 1833
 1834 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
 1835 for (Value *Op : MV->operands()) {
 1836 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
 1837 if (CI->uge(V1Size*2))
 1838 return false;
 1839 } else if (!isa<UndefValue>(Op)) {
 1840 return false;
 1841 }
 1842 }
 1843 return true;
 1844 }
 1845
 1846 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
 1847 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
 1848 i != e; ++i)
 1849 if (CDS->getElementAsInteger(i) >= V1Size*2)
 1850 return false;
 1851 return true;
 1852 }
 1853
 1854 return false;
 1855}
1856
// Decode a constant shuffle mask into a vector of ints (-1 for undef), then
// setShuffleMask stores the decoded mask and its bitcode-serializable form.
// NOTE(review): doc-dump extraction — the signature's first line of
// getShuffleMask and the signature of setShuffleMask are missing from this
// listing; verify against upstream Instructions.cpp.
1858 SmallVectorImpl<int> &Result) {
 1859 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
 1860
 1861 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
 1862 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
 1863 Result.append(EC.getKnownMinValue(), MaskVal);
 1864 return;
 1865 }
 1866
 1867 assert(!EC.isScalable() &&
 1868 "Scalable vector shuffle mask must be undef or zeroinitializer");
 1869
 1870 unsigned NumElts = EC.getFixedValue();
 1871
 1872 Result.reserve(NumElts);
 1873
 1874 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
 1875 for (unsigned i = 0; i != NumElts; ++i)
 1876 Result.push_back(CDS->getElementAsInteger(i));
 1877 return;
 1878 }
 1879 for (unsigned i = 0; i != NumElts; ++i) {
 1880 Constant *C = Mask->getAggregateElement(i);
 1881 Result.push_back(isa<UndefValue>(C) ? -1 :
 1882 cast<ConstantInt>(C)->getZExtValue());
 1883 }
 1884}
 1885
 1887 ShuffleMask.assign(Mask.begin(), Mask.end());
 1888 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
 1889}
1890
// Re-encode an integer shuffle mask as an i32 constant vector for bitcode.
// Scalable results only admit zeroinitializer or poison splats.
// NOTE(review): doc-dump extraction — the signature's first line and the
// declaration of the MaskConst accumulator are missing from this listing;
// verify against upstream Instructions.cpp.
1892 Type *ResultTy) {
 1893 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
 1894 if (isa<ScalableVectorType>(ResultTy)) {
 1895 assert(all_equal(Mask) && "Unexpected shuffle");
 1896 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
 1897 if (Mask[0] == 0)
 1898 return Constant::getNullValue(VecTy);
 1899 return PoisonValue::get(VecTy);
 1900 }
 1902 for (int Elem : Mask) {
 1903 if (Elem == PoisonMaskElem)
 1904 MaskConst.push_back(PoisonValue::get(Int32Ty));
 1905 else
 1906 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
 1907 }
 1908 return ConstantVector::get(MaskConst);
 1909}
1910
1911static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1912 assert(!Mask.empty() && "Shuffle mask must contain elements");
1913 bool UsesLHS = false;
1914 bool UsesRHS = false;
1915 for (int I : Mask) {
1916 if (I == -1)
1917 continue;
1918 assert(I >= 0 && I < (NumOpElts * 2) &&
1919 "Out-of-bounds shuffle mask element");
1920 UsesLHS |= (I < NumOpElts);
1921 UsesRHS |= (I >= NumOpElts);
1922 if (UsesLHS && UsesRHS)
1923 return false;
1924 }
1925 // Allow for degenerate case: completely undef mask means neither source is used.
1926 return UsesLHS || UsesRHS;
1927}
1928
// Public wrapper over isSingleSourceMaskImpl.
// NOTE(review): doc-dump extraction — the function's signature line is
// missing from this listing; verify against upstream Instructions.cpp.
1930 // We don't have vector operand size information, so assume operands are the
 1931 // same size as the mask.
 1932 return isSingleSourceMaskImpl(Mask, NumSrcElts);
 1933}
1934
1935static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1936 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1937 return false;
1938 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1939 if (Mask[i] == -1)
1940 continue;
1941 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1942 return false;
1943 }
1944 return true;
1945}
1946
// isIdentityMask and isReverseMask predicates over integer shuffle masks.
// NOTE(review): doc-dump extraction — both function signature lines are
// missing from this listing; verify against upstream Instructions.cpp.
1948 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 1949 return false;
 1950 // We don't have vector operand size information, so assume operands are the
 1951 // same size as the mask.
 1952 return isIdentityMaskImpl(Mask, NumSrcElts);
 1953}
 1954
// isReverseMask: single-source mask selecting elements in reverse order.
 1956 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 1957 return false;
 1958 if (!isSingleSourceMask(Mask, NumSrcElts))
 1959 return false;
 1960
 1961 // The number of elements in the mask must be at least 2.
 1962 if (NumSrcElts < 2)
 1963 return false;
 1964
 1965 for (int I = 0, E = Mask.size(); I < E; ++I) {
 1966 if (Mask[I] == -1)
 1967 continue;
 1968 if (Mask[I] != (NumSrcElts - 1 - I) &&
 1969 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
 1970 return false;
 1971 }
 1972 return true;
 1973}
1974
// isZeroEltSplatMask (every defined element selects element 0 of one source)
// and isSelectMask (element-wise select between the two sources) predicates.
// NOTE(review): doc-dump extraction — both function signature lines are
// missing from this listing; verify against upstream Instructions.cpp.
1976 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 1977 return false;
 1978 if (!isSingleSourceMask(Mask, NumSrcElts))
 1979 return false;
 1980 for (int I = 0, E = Mask.size(); I < E; ++I) {
 1981 if (Mask[I] == -1)
 1982 continue;
 1983 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
 1984 return false;
 1985 }
 1986 return true;
 1987}
 1988
 1990 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 1991 return false;
 1992 // Select is differentiated from identity. It requires using both sources.
 1993 if (isSingleSourceMask(Mask, NumSrcElts))
 1994 return false;
 1995 for (int I = 0, E = Mask.size(); I < E; ++I) {
 1996 if (Mask[I] == -1)
 1997 continue;
 1998 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
 1999 return false;
 2000 }
 2001 return true;
 2002}
2003
// isTransposeMask (interleaving trn1/trn2-style masks) and isSpliceMask
// (sequential window starting at StartIndex) predicates.
// NOTE(review): doc-dump extraction — both function signature lines are
// missing from this listing; verify against upstream Instructions.cpp.
2004 // Example masks that will return true:
 2005 // v1 = <a, b, c, d>
 2006 // v2 = <e, f, g, h>
 2007 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
 2008 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
 2009
 2010
 2011 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 2012 return false;
 2013 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
 2014 int Sz = Mask.size();
 2015 if (Sz < 2 || !isPowerOf2_32(Sz))
 2016 return false;
 2017
 2018 // 2. The first element of the mask must be either a 0 or a 1.
 2019 if (Mask[0] != 0 && Mask[0] != 1)
 2020 return false;
 2021
 2022 // 3. The difference between the first 2 elements must be equal to the
 2023 // number of elements in the mask.
 2024 if ((Mask[1] - Mask[0]) != NumSrcElts)
 2025 return false;
 2026
 2027 // 4. The difference between consecutive even-numbered and odd-numbered
 2028 // elements must be equal to 2.
 2029 for (int I = 2; I < Sz; ++I) {
 2030 int MaskEltVal = Mask[I];
 2031 if (MaskEltVal == -1)
 2032 return false;
 2033 int MaskEltPrevVal = Mask[I - 2];
 2034 if (MaskEltVal - MaskEltPrevVal != 2)
 2035 return false;
 2036 }
 2037 return true;
 2038}
 2039
 2041 int &Index) {
 2042 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
 2043 return false;
 2044 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
 2045 int StartIndex = -1;
 2046 for (int I = 0, E = Mask.size(); I != E; ++I) {
 2047 int MaskEltVal = Mask[I];
 2048 if (MaskEltVal == -1)
 2049 continue;
 2050
 2051 if (StartIndex == -1) {
 2052 // Don't support a StartIndex that begins in the second input, or if the
 2053 // first non-undef index would access below the StartIndex.
 2054 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
 2055 return false;
 2056
 2057 StartIndex = MaskEltVal - I;
 2058 continue;
 2059 }
 2060
 2061 // Splice is sequential starting from StartIndex.
 2062 if (MaskEltVal != (StartIndex + I))
 2063 return false;
 2064 }
 2065
 2066 if (StartIndex == -1)
 2067 return false;
 2068
 2069 // NOTE: This accepts StartIndex == 0 (COPY).
 2070 Index = StartIndex;
 2071 return true;
 2072}
2073
// isExtractSubvectorMask: a single-source mask, shorter than the source,
// whose defined elements form a contiguous run at a constant offset
// (returned through Index).
// NOTE(review): doc-dump extraction — the signature's first line is missing
// from this listing; verify against upstream Instructions.cpp.
2074 int NumSrcElts, int &Index) {
 2075 // Must extract from a single source.
 2076 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
 2077 return false;
 2078
 2079 // Must be smaller (else this is an Identity shuffle).
 2080 if (NumSrcElts <= (int)Mask.size())
 2081 return false;
 2082
 2083 // Find start of extraction, accounting that we may start with an UNDEF.
 2084 int SubIndex = -1;
 2085 for (int i = 0, e = Mask.size(); i != e; ++i) {
 2086 int M = Mask[i];
 2087 if (M < 0)
 2088 continue;
 2089 int Offset = (M % NumSrcElts) - i;
 2090 if (0 <= SubIndex && SubIndex != Offset)
 2091 return false;
 2092 SubIndex = Offset;
 2093 }
 2094
 2095 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
 2096 Index = SubIndex;
 2097 return true;
 2098 }
 2099 return false;
 2100}
2102
// isInsertSubvectorMask: detect a 2-source shuffle that inserts a contiguous
// identity span of one source into the other; reports the span's length and
// start through NumSubElts/Index.
// NOTE(review): doc-dump extraction — the signature's first line is missing
// from this listing; verify against upstream Instructions.cpp.
2103 int NumSrcElts, int &NumSubElts,
 2104 int &Index) {
 2105 int NumMaskElts = Mask.size();
 2106
 2107
 2108 // Don't try to match if we're shuffling to a smaller size.
 2109 if (NumMaskElts < NumSrcElts)
 2110 return false;
 2111
 2112 // TODO: We don't recognize self-insertion/widening.
 2113 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
 2114 return false;
 2115
 2116 // Determine which mask elements are attributed to which source.
 2117 APInt UndefElts = APInt::getZero(NumMaskElts);
 2118 APInt Src0Elts = APInt::getZero(NumMaskElts);
 2119 APInt Src1Elts = APInt::getZero(NumMaskElts);
 2120 bool Src0Identity = true;
 2121 bool Src1Identity = true;
 2122
 2123 for (int i = 0; i != NumMaskElts; ++i) {
 2124 int M = Mask[i];
 2125 if (M < 0) {
 2126 UndefElts.setBit(i);
 2127 continue;
 2128 }
 2129 if (M < NumSrcElts) {
 2130 Src0Elts.setBit(i);
 2131 Src0Identity &= (M == i);
 2132 continue;
 2133 }
 2134 Src1Elts.setBit(i);
 2135 Src1Identity &= (M == (i + NumSrcElts));
 2136 }
 2137 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
 2138 "unknown shuffle elements");
 2139 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
 2140 "2-source shuffle not found");
 2141
 2142 // Determine lo/hi span ranges.
 2143 // TODO: How should we handle undefs at the start of subvector insertions?
 2144 int Src0Lo = Src0Elts.countr_zero();
 2145 int Src1Lo = Src1Elts.countr_zero();
 2146 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
 2147 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
 2148
 2149 // If src0 is in place, see if the src1 elements is inplace within its own
 2150 // span.
 2151 if (Src0Identity) {
 2152 int NumSub1Elts = Src1Hi - Src1Lo;
 2153 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
 2154 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
 2155 NumSubElts = NumSub1Elts;
 2156 Index = Src1Lo;
 2157 return true;
 2158 }
 2159 }
 2160
 2161 // If src1 is in place, see if the src0 elements is inplace within its own
 2162 // span.
 2163 if (Src1Identity) {
 2164 int NumSub0Elts = Src0Hi - Src0Lo;
 2165 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
 2166 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
 2167 NumSubElts = NumSub0Elts;
 2168 Index = Src0Lo;
 2169 return true;
 2170 }
 2171 }
 2172
 2173 return false;
 2174}
2175
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // The result must be wider than the (fixed-length) source operand.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}
2199
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // The result must be narrower than the (fixed-length) source operand.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}
2213
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // The result must be exactly twice as long as each source operand.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
2235
                                  int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  // Consume the mask in ReplicationFactor-sized chunks; the chunk for source
  // element CurrElt may contain only CurrElt or poison.
  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}
2255
                                  int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    // With no poison lanes, the replication factor is fixed by the length of
    // the leading run of zeros.
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
2301
2302bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2303 int &VF) const {
2304 // Not possible to express a shuffle mask for a scalable vector for this
2305 // case.
2306 if (isa<ScalableVectorType>(getType()))
2307 return false;
2308
2309 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2310 if (ShuffleMask.size() % VF != 0)
2311 return false;
2312 ReplicationFactor = ShuffleMask.size() / VF;
2313
2314 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2315}
2316
  // Reject degenerate group sizes and masks that do not split evenly into
  // VF-wide groups.
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    // All-poison groups are unconstrained.
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
      continue;
    // Otherwise the group must reference every in-range source element
    // 0..VF-1 (out-of-range indices are ignored here).
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    if (!Used.all())
      return false;
  }
  return true;
}
2335
/// Return true if this shuffle mask is a one-use single source mask.
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  // Must pick all elements from one source...
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  // ...and use each source element exactly once per group.
  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
2347
2348bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2349 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2350 // shuffle_vector can only interleave fixed length vectors - for scalable
2351 // vectors, see the @llvm.vector.interleave2 intrinsic
2352 if (!OpTy)
2353 return false;
2354 unsigned OpNumElts = OpTy->getNumElements();
2355
2356 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2357}
2358
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  // The mask must split evenly into Factor lanes.
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // An early break above means some adjacent pair in this lane violated the
    // sequential rule.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2439
/// Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
                                               unsigned Factor,
                                               unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (unsigned Idx = 0; Idx < Factor; Idx++) {
    unsigned I = 0;

    // Check that elements are in ascending order by Factor. Ignore undef
    // elements.
    for (; I < Mask.size(); I++)
      if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
        break;

    // The whole mask matched this start index.
    if (I == Mask.size()) {
      Index = Idx;
      return true;
    }
  }

  return false;
}
2464
/// Try to lower a vector shuffle as a bit rotation.
///
/// Looks for one common rotation pattern repeated across every
/// NumSubElts-wide subgroup of the mask.
/// Returns the element-wise left bit rotation amount, or -1 on failure.
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int Base = 0; Base != NumElts; Base += NumSubElts) {
    for (int Lane = 0; Lane != NumSubElts; ++Lane) {
      int M = Mask[Base + Lane];
      // Undef lanes impose no constraint.
      if (M < 0)
        continue;
      // Every defined element must come from within its own subgroup.
      if (M < Base || M >= Base + NumSubElts)
        return -1;
      // Derive the left-rotate amount implied by this lane, and require it to
      // agree with the amount implied by all earlier defined lanes.
      int ThisAmt = (NumSubElts - (M - (Base + Lane))) % NumSubElts;
      if (RotateAmt >= 0 && ThisAmt != RotateAmt)
        return -1;
      RotateAmt = ThisAmt;
    }
  }
  return RotateAmt;
}
2489
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  // Try power-of-2 subgroup sizes from MinSubElts up to MaxSubElts; the first
  // size with a consistent per-element rotate amount wins.
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    // Convert the element rotation into a bit rotation.
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
2503
2504//===----------------------------------------------------------------------===//
2505// InsertValueInst Class
2506//===----------------------------------------------------------------------===//
2507
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // Record the constant index path and name the result.
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2526
// Copy constructor: clones the index list, then the aggregate and
// inserted-value operands.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
2534
2535//===----------------------------------------------------------------------===//
2536// ExtractValueInst Class
2537//===----------------------------------------------------------------------===//
2538
/// Record the extraction index path and result name for this instruction.
/// The single aggregate operand must already be in place.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There is no fundamental reason to require at least one index, but there
  // is no present need to support an empty index list either.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  setName(Name);
  for (unsigned Idx : Idxs)
    Indices.push_back(Idx);
}
2549
// Copy constructor: clones the type, aggregate operand, and index list.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
}
2556
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  // Agg now refers to the innermost type reached by walking the full path.
  return Agg;
}
2587
2588//===----------------------------------------------------------------------===//
2589// UnaryOperator Class
2590//===----------------------------------------------------------------------===//
2591
                             const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  // Install the single operand, name the result, and (debug) validate types.
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2599
                                     InsertPosition InsertBefore) {
  // The result type of a unary operator matches its operand type.
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
2604
/// Debug-build validation of operand/result types for this unary operator.
void UnaryOperator::AssertOK() {
  Value *Operand = getOperand(0);
  (void)Operand; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  default:
    llvm_unreachable("Invalid opcode provided");
  case FNeg:
    // FNeg requires a floating-point (or FP vector) operand and an identical
    // result type.
    assert(getType() == Operand->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  }
#endif
}
2621
2622//===----------------------------------------------------------------------===//
2623// BinaryOperator Class
2624//===----------------------------------------------------------------------===//
2625
                               const Twine &Name, InsertPosition InsertBefore)
    : Instruction(Ty, iType, AllocMarker, InsertBefore) {
  // Install both operands, name the result, and (debug) validate types.
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
2634
/// Debug-build validation of operand/result types for this binary operator.
void BinaryOperator::AssertOK() {
  Value *Op0 = getOperand(0), *Op1 = getOperand(1);
  (void)Op0; (void)Op1; // Silence warnings.
  assert(Op0->getType() == Op1->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  default:
    llvm_unreachable("Invalid opcode provided");
  // Integer arithmetic, shifts and bitwise logic all require integer (or
  // integer vector) operands and a matching result type.
  case Add: case Sub: case Mul:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case UDiv: case SDiv:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case URem: case SRem:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case Shl: case LShr: case AShr:
    assert(getType() == Op0->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or: case Xor:
    assert(getType() == Op0->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  // Floating-point arithmetic requires FP (or FP vector) operands and a
  // matching result type.
  case FAdd: case FSub: case FMul:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case FDiv:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case FRem:
    assert(getType() == Op0->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  }
#endif
}
2702
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  // The result type of a binary operator matches its operands' type.
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
2710
                                          InsertPosition InsertBefore) {
  // Integer negation is materialized as (0 - Op).
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}
2717
                                             InsertPosition InsertBefore) {
  // No-signed-wrap negation: (0 - Op) built via CreateNSWSub.
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}
2723
                                          InsertPosition InsertBefore) {
  // Bitwise NOT is materialized as (Op ^ all-ones).
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
2730
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction.
// NOTE: the return value is inverted from the usual convention — it returns
// true on FAILURE (non-commutative opcode, operands left untouched) and false
// on success.
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}
2740
2741//===----------------------------------------------------------------------===//
2742// FPMathOperator Class
2743//===----------------------------------------------------------------------===//
2744
  // The accuracy requirement is carried as !fpmath metadata on the
  // instruction; with no such metadata we report 0.0.
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  // The metadata's first operand is the accuracy as a ConstantFP.
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
2753
2754//===----------------------------------------------------------------------===//
2755// CastInst Class
2756//===----------------------------------------------------------------------===//
2757
// Just determine if this cast only deals with integral->integral conversion.
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast counts only when both source and result are integers.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
2771
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
                           Type *SrcTy,
                           Type *DestTy,
                           const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:
    // Pointer-to-integer is free only when the result is exactly the width of
    // a pointer in SrcTy's address space.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    // Symmetrically, integer-to-pointer is free when the source is exactly
    // pointer-sized.
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}
2810
  // Convenience form: query the static predicate with this cast's own opcode,
  // source type, and result type.
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
2814
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
    Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // Each cell selects a case of the switch below: 0 = disallowed,
  // 99 = impossible combination, anything else = a specific merge rule.
  // clang-format off
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  A  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  D  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  R  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt       |
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr      |
    { 99,99,99,99,99,99,99,99,99,11,99,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };
  // clang-format on

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // ptrtoint, inttoptr: foldable to a bitcast when round-tripping through
      // the integer cannot lose pointer bits.
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3038
3040 const Twine &Name, InsertPosition InsertBefore) {
3041 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3042 // Construct and return the appropriate CastInst subclass
3043 switch (op) {
3044 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3045 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3046 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3047 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3048 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3049 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3050 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3051 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3052 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3053 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3054 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3055 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3056 case BitCast:
3057 return new BitCastInst(S, Ty, Name, InsertBefore);
3058 case AddrSpaceCast:
3059 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3060 default:
3061 llvm_unreachable("Invalid opcode provided");
3062 }
3063}
3064
3066 InsertPosition InsertBefore) {
3067 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3068 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3069 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3070}
3071
3073 InsertPosition InsertBefore) {
3074 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3075 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3076 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3077}
3078
3080 InsertPosition InsertBefore) {
3081 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3082 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3083 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3084}
3085
3086/// Create a BitCast or a PtrToInt cast instruction
3088 InsertPosition InsertBefore) {
3089 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3090 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3091 "Invalid cast");
3092 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3093 assert((!Ty->isVectorTy() ||
3094 cast<VectorType>(Ty)->getElementCount() ==
3095 cast<VectorType>(S->getType())->getElementCount()) &&
3096 "Invalid cast");
3097
3098 if (Ty->isIntOrIntVectorTy())
3099 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3100
3101 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3102}
3103
3105 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3106 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3107 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3108
3110 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3111
3112 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3113}
3114
3116 const Twine &Name,
3117 InsertPosition InsertBefore) {
3118 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3119 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3120 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3121 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3122
3123 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3124}
3125
3127 const Twine &Name,
3128 InsertPosition InsertBefore) {
3129 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3130 "Invalid integer cast");
3131 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3132 unsigned DstBits = Ty->getScalarSizeInBits();
3133 Instruction::CastOps opcode =
3134 (SrcBits == DstBits ? Instruction::BitCast :
3135 (SrcBits > DstBits ? Instruction::Trunc :
3136 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3137 return Create(opcode, C, Ty, Name, InsertBefore);
3138}
3139
3141 InsertPosition InsertBefore) {
3142 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3143 "Invalid cast");
3144 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3145 unsigned DstBits = Ty->getScalarSizeInBits();
3146 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3147 Instruction::CastOps opcode =
3148 (SrcBits == DstBits ? Instruction::BitCast :
3149 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3150 return Create(opcode, C, Ty, Name, InsertBefore);
3151}
3152
3153bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3154 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3155 return false;
3156
3157 if (SrcTy == DestTy)
3158 return true;
3159
3160 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3161 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3162 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3163 // An element by element cast. Valid if casting the elements is valid.
3164 SrcTy = SrcVecTy->getElementType();
3165 DestTy = DestVecTy->getElementType();
3166 }
3167 }
3168 }
3169
3170 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3171 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3172 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3173 }
3174 }
3175
3176 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3177 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3178
3179 // Could still have vectors of pointers if the number of elements doesn't
3180 // match
3181 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3182 return false;
3183
3184 if (SrcBits != DestBits)
3185 return false;
3186
3187 return true;
3188}
3189
3191 const DataLayout &DL) {
3192 // ptrtoint and inttoptr are not allowed on non-integral pointers
3193 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3194 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3195 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3196 !DL.isNonIntegralPointerType(PtrTy));
3197 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3198 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3199 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3200 !DL.isNonIntegralPointerType(PtrTy));
3201
3202 return isBitCastable(SrcTy, DestTy);
3203}
3204
3205// Provide a way to get a "cast" where the cast opcode is inferred from the
3206// types and size of the operand. This, basically, is a parallel of the
3207// logic in the castIsValid function below. This axiom should hold:
3208// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3209// should not assert in castIsValid. In other words, this produces a "correct"
3210// casting opcode for the arguments passed to it.
3213 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3214 Type *SrcTy = Src->getType();
3215
3216 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3217 "Only first class types are castable!");
3218
3219 if (SrcTy == DestTy)
3220 return BitCast;
3221
3222 // FIXME: Check address space sizes here
3223 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3224 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3225 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3226 // An element by element cast. Find the appropriate opcode based on the
3227 // element types.
3228 SrcTy = SrcVecTy->getElementType();
3229 DestTy = DestVecTy->getElementType();
3230 }
3231
3232 // Get the bit sizes, we'll need these
3233 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3234 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3235
3236 // Run through the possibilities ...
3237 if (DestTy->isIntegerTy()) { // Casting to integral
3238 if (SrcTy->isIntegerTy()) { // Casting from integral
3239 if (DestBits < SrcBits)
3240 return Trunc; // int -> smaller int
3241 else if (DestBits > SrcBits) { // its an extension
3242 if (SrcIsSigned)
3243 return SExt; // signed -> SEXT
3244 else
3245 return ZExt; // unsigned -> ZEXT
3246 } else {
3247 return BitCast; // Same size, No-op cast
3248 }
3249 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3250 if (DestIsSigned)
3251 return FPToSI; // FP -> sint
3252 else
3253 return FPToUI; // FP -> uint
3254 } else if (SrcTy->isVectorTy()) {
3255 assert(DestBits == SrcBits &&
3256 "Casting vector to integer of different width");
3257 return BitCast; // Same size, no-op cast
3258 } else {
3259 assert(SrcTy->isPointerTy() &&
3260 "Casting from a value that is not first-class type");
3261 return PtrToInt; // ptr -> int
3262 }
3263 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3264 if (SrcTy->isIntegerTy()) { // Casting from integral
3265 if (SrcIsSigned)
3266 return SIToFP; // sint -> FP
3267 else
3268 return UIToFP; // uint -> FP
3269 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3270 if (DestBits < SrcBits) {
3271 return FPTrunc; // FP -> smaller FP
3272 } else if (DestBits > SrcBits) {
3273 return FPExt; // FP -> larger FP
3274 } else {
3275 return BitCast; // same size, no-op cast
3276 }
3277 } else if (SrcTy->isVectorTy()) {
3278 assert(DestBits == SrcBits &&
3279 "Casting vector to floating point of different width");
3280 return BitCast; // same size, no-op cast
3281 }
3282 llvm_unreachable("Casting pointer or non-first class to float");
3283 } else if (DestTy->isVectorTy()) {
3284 assert(DestBits == SrcBits &&
3285 "Illegal cast to vector (wrong type or size)");
3286 return BitCast;
3287 } else if (DestTy->isPointerTy()) {
3288 if (SrcTy->isPointerTy()) {
3289 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3290 return AddrSpaceCast;
3291 return BitCast; // ptr -> ptr
3292 } else if (SrcTy->isIntegerTy()) {
3293 return IntToPtr; // int -> ptr
3294 }
3295 llvm_unreachable("Casting pointer to other than pointer or int");
3296 }
3297 llvm_unreachable("Casting to type that is not first-class");
3298}
3299
3300//===----------------------------------------------------------------------===//
3301// CastInst SubClass Constructors
3302//===----------------------------------------------------------------------===//
3303
3304/// Check that the construction parameters for a CastInst are correct. This
3305/// could be broken out into the separate constructors but it is useful to have
3306/// it in one place and to eliminate the redundant code for getting the sizes
3307/// of the types involved.
3308bool
3310 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3311 SrcTy->isAggregateType() || DstTy->isAggregateType())
3312 return false;
3313
3314 // Get the size of the types in bits, and whether we are dealing
3315 // with vector types, we'll need this later.
3316 bool SrcIsVec = isa<VectorType>(SrcTy);
3317 bool DstIsVec = isa<VectorType>(DstTy);
3318 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3319 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3320
3321 // If these are vector types, get the lengths of the vectors (using zero for
3322 // scalar types means that checking that vector lengths match also checks that
3323 // scalars are not being converted to vectors or vectors to scalars).
3324 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3326 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3328
3329 // Switch on the opcode provided
3330 switch (op) {
3331 default: return false; // This is an input error
3332 case Instruction::Trunc:
3333 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3334 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3335 case Instruction::ZExt:
3336 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3337 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3338 case Instruction::SExt:
3339 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3340 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3341 case Instruction::FPTrunc:
3342 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3343 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3344 case Instruction::FPExt:
3345 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3346 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3347 case Instruction::UIToFP:
3348 case Instruction::SIToFP:
3349 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3350 SrcEC == DstEC;
3351 case Instruction::FPToUI:
3352 case Instruction::FPToSI:
3353 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3354 SrcEC == DstEC;
3355 case Instruction::PtrToAddr:
3356 case Instruction::PtrToInt:
3357 if (SrcEC != DstEC)
3358 return false;
3359 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3360 case Instruction::IntToPtr:
3361 if (SrcEC != DstEC)
3362 return false;
3363 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3364 case Instruction::BitCast: {
3365 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3366 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3367
3368 // BitCast implies a no-op cast of type only. No bits change.
3369 // However, you can't cast pointers to anything but pointers.
3370 if (!SrcPtrTy != !DstPtrTy)
3371 return false;
3372
3373 // For non-pointer cases, the cast is okay if the source and destination bit
3374 // widths are identical.
3375 if (!SrcPtrTy)
3376 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3377
3378 // If both are pointers then the address spaces must match.
3379 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3380 return false;
3381
3382 // A vector of pointers must have the same number of elements.
3383 if (SrcIsVec && DstIsVec)
3384 return SrcEC == DstEC;
3385 if (SrcIsVec)
3386 return SrcEC == ElementCount::getFixed(1);
3387 if (DstIsVec)
3388 return DstEC == ElementCount::getFixed(1);
3389
3390 return true;
3391 }
3392 case Instruction::AddrSpaceCast: {
3393 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3394 if (!SrcPtrTy)
3395 return false;
3396
3397 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3398 if (!DstPtrTy)
3399 return false;
3400
3401 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3402 return false;
3403
3404 return SrcEC == DstEC;
3405 }
3406 }
3407}
3408
3410 InsertPosition InsertBefore)
3411 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3412 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3413}
3414
3416 InsertPosition InsertBefore)
3417 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3418 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3419}
3420
3422 InsertPosition InsertBefore)
3423 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3424 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3425}
3426
3428 InsertPosition InsertBefore)
3429 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3430 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3431}
3432
3434 InsertPosition InsertBefore)
3435 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3436 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3437}
3438
3440 InsertPosition InsertBefore)
3441 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3443}
3444
3446 InsertPosition InsertBefore)
3447 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3449}
3450
3452 InsertPosition InsertBefore)
3453 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3455}
3456
3458 InsertPosition InsertBefore)
3459 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3460 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3461}
3462
3464 InsertPosition InsertBefore)
3465 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3466 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3467}
3468
3470 InsertPosition InsertBefore)
3471 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3472 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3473}
3474
3476 InsertPosition InsertBefore)
3477 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3478 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3479}
3480
3482 InsertPosition InsertBefore)
3483 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3484 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3485}
3486
3488 InsertPosition InsertBefore)
3489 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3490 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3491}
3492
3493//===----------------------------------------------------------------------===//
3494// CmpInst Classes
3495//===----------------------------------------------------------------------===//
3496
3498 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3499 Instruction *FlagsSource)
3500 : Instruction(ty, op, AllocMarker, InsertBefore) {
3501 Op<0>() = LHS;
3502 Op<1>() = RHS;
3503 setPredicate(predicate);
3504 setName(Name);
3505 if (FlagsSource)
3506 copyIRFlags(FlagsSource);
3507}
3508
3510 const Twine &Name, InsertPosition InsertBefore) {
3511 if (Op == Instruction::ICmp) {
3512 if (InsertBefore.isValid())
3513 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3514 S1, S2, Name);
3515 else
3516 return new ICmpInst(CmpInst::Predicate(predicate),
3517 S1, S2, Name);
3518 }
3519
3520 if (InsertBefore.isValid())
3521 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3522 S1, S2, Name);
3523 else
3524 return new FCmpInst(CmpInst::Predicate(predicate),
3525 S1, S2, Name);
3526}
3527
3529 Value *S2,
3530 const Instruction *FlagsSource,
3531 const Twine &Name,
3532 InsertPosition InsertBefore) {
3533 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3534 Inst->copyIRFlags(FlagsSource);
3535 return Inst;
3536}
3537
3539 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3540 IC->swapOperands();
3541 else
3542 cast<FCmpInst>(this)->swapOperands();
3543}
3544
3546 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3547 return IC->isCommutative();
3548 return cast<FCmpInst>(this)->isCommutative();
3549}
3550
3553 return ICmpInst::isEquality(P);
3555 return FCmpInst::isEquality(P);
3556 llvm_unreachable("Unsupported predicate kind");
3557}
3558
3559// Returns true if either operand of CmpInst is a provably non-zero
3560// floating-point constant.
3561static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3562 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3563 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3564 if (auto *Const = LHS ? LHS : RHS) {
3565 using namespace llvm::PatternMatch;
3566 return match(Const, m_NonZeroNotDenormalFP());
3567 }
3568 return false;
3569}
3570
3571// Floating-point equality is not an equivalence when comparing +0.0 with
3572// -0.0, when comparing NaN with another value, or when flushing
3573// denormals-to-zero.
3574bool CmpInst::isEquivalence(bool Invert) const {
3575 switch (Invert ? getInversePredicate() : getPredicate()) {
3577 return true;
3579 if (!hasNoNaNs())
3580 return false;
3581 [[fallthrough]];
3583 return hasNonZeroFPOperands(this);
3584 default:
3585 return false;
3586 }
3587}
3588
3590 switch (pred) {
3591 default: llvm_unreachable("Unknown cmp predicate!");
3592 case ICMP_EQ: return ICMP_NE;
3593 case ICMP_NE: return ICMP_EQ;
3594 case ICMP_UGT: return ICMP_ULE;
3595 case ICMP_ULT: return ICMP_UGE;
3596 case ICMP_UGE: return ICMP_ULT;
3597 case ICMP_ULE: return ICMP_UGT;
3598 case ICMP_SGT: return ICMP_SLE;
3599 case ICMP_SLT: return ICMP_SGE;
3600 case ICMP_SGE: return ICMP_SLT;
3601 case ICMP_SLE: return ICMP_SGT;
3602
3603 case FCMP_OEQ: return FCMP_UNE;
3604 case FCMP_ONE: return FCMP_UEQ;
3605 case FCMP_OGT: return FCMP_ULE;
3606 case FCMP_OLT: return FCMP_UGE;
3607 case FCMP_OGE: return FCMP_ULT;
3608 case FCMP_OLE: return FCMP_UGT;
3609 case FCMP_UEQ: return FCMP_ONE;
3610 case FCMP_UNE: return FCMP_OEQ;
3611 case FCMP_UGT: return FCMP_OLE;
3612 case FCMP_ULT: return FCMP_OGE;
3613 case FCMP_UGE: return FCMP_OLT;
3614 case FCMP_ULE: return FCMP_OGT;
3615 case FCMP_ORD: return FCMP_UNO;
3616 case FCMP_UNO: return FCMP_ORD;
3617 case FCMP_TRUE: return FCMP_FALSE;
3618 case FCMP_FALSE: return FCMP_TRUE;
3619 }
3620}
3621
3623 switch (Pred) {
3624 default: return "unknown";
3625 case FCmpInst::FCMP_FALSE: return "false";
3626 case FCmpInst::FCMP_OEQ: return "oeq";
3627 case FCmpInst::FCMP_OGT: return "ogt";
3628 case FCmpInst::FCMP_OGE: return "oge";
3629 case FCmpInst::FCMP_OLT: return "olt";
3630 case FCmpInst::FCMP_OLE: return "ole";
3631 case FCmpInst::FCMP_ONE: return "one";
3632 case FCmpInst::FCMP_ORD: return "ord";
3633 case FCmpInst::FCMP_UNO: return "uno";
3634 case FCmpInst::FCMP_UEQ: return "ueq";
3635 case FCmpInst::FCMP_UGT: return "ugt";
3636 case FCmpInst::FCMP_UGE: return "uge";
3637 case FCmpInst::FCMP_ULT: return "ult";
3638 case FCmpInst::FCMP_ULE: return "ule";
3639 case FCmpInst::FCMP_UNE: return "une";
3640 case FCmpInst::FCMP_TRUE: return "true";
3641 case ICmpInst::ICMP_EQ: return "eq";
3642 case ICmpInst::ICMP_NE: return "ne";
3643 case ICmpInst::ICMP_SGT: return "sgt";
3644 case ICmpInst::ICMP_SGE: return "sge";
3645 case ICmpInst::ICMP_SLT: return "slt";
3646 case ICmpInst::ICMP_SLE: return "sle";
3647 case ICmpInst::ICMP_UGT: return "ugt";
3648 case ICmpInst::ICMP_UGE: return "uge";
3649 case ICmpInst::ICMP_ULT: return "ult";
3650 case ICmpInst::ICMP_ULE: return "ule";
3651 }
3652}
3653
3656 return OS;
3657}
3658
3660 switch (pred) {
3661 default: llvm_unreachable("Unknown icmp predicate!");
3662 case ICMP_EQ: case ICMP_NE:
3663 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3664 return pred;
3665 case ICMP_UGT: return ICMP_SGT;
3666 case ICMP_ULT: return ICMP_SLT;
3667 case ICMP_UGE: return ICMP_SGE;
3668 case ICMP_ULE: return ICMP_SLE;
3669 }
3670}
3671
3673 switch (pred) {
3674 default: llvm_unreachable("Unknown icmp predicate!");
3675 case ICMP_EQ: case ICMP_NE:
3676 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3677 return pred;
3678 case ICMP_SGT: return ICMP_UGT;
3679 case ICMP_SLT: return ICMP_ULT;
3680 case ICMP_SGE: return ICMP_UGE;
3681 case ICMP_SLE: return ICMP_ULE;
3682 }
3683}
3684
3686 switch (pred) {
3687 default: llvm_unreachable("Unknown cmp predicate!");
3688 case ICMP_EQ: case ICMP_NE:
3689 return pred;
3690 case ICMP_SGT: return ICMP_SLT;
3691 case ICMP_SLT: return ICMP_SGT;
3692 case ICMP_SGE: return ICMP_SLE;
3693 case ICMP_SLE: return ICMP_SGE;
3694 case ICMP_UGT: return ICMP_ULT;
3695 case ICMP_ULT: return ICMP_UGT;
3696 case ICMP_UGE: return ICMP_ULE;
3697 case ICMP_ULE: return ICMP_UGE;
3698
3699 case FCMP_FALSE: case FCMP_TRUE:
3700 case FCMP_OEQ: case FCMP_ONE:
3701 case FCMP_UEQ: case FCMP_UNE:
3702 case FCMP_ORD: case FCMP_UNO:
3703 return pred;
3704 case FCMP_OGT: return FCMP_OLT;
3705 case FCMP_OLT: return FCMP_OGT;
3706 case FCMP_OGE: return FCMP_OLE;
3707 case FCMP_OLE: return FCMP_OGE;
3708 case FCMP_UGT: return FCMP_ULT;
3709 case FCMP_ULT: return FCMP_UGT;
3710 case FCMP_UGE: return FCMP_ULE;
3711 case FCMP_ULE: return FCMP_UGE;
3712 }
3713}
3714
3716 switch (pred) {
3717 case ICMP_SGE:
3718 case ICMP_SLE:
3719 case ICMP_UGE:
3720 case ICMP_ULE:
3721 case FCMP_OGE:
3722 case FCMP_OLE:
3723 case FCMP_UGE:
3724 case FCMP_ULE:
3725 return true;
3726 default:
3727 return false;
3728 }
3729}
3730
3732 switch (pred) {
3733 case ICMP_SGT:
3734 case ICMP_SLT:
3735 case ICMP_UGT:
3736 case ICMP_ULT:
3737 case FCMP_OGT:
3738 case FCMP_OLT:
3739 case FCMP_UGT:
3740 case FCMP_ULT:
3741 return true;
3742 default:
3743 return false;
3744 }
3745}
3746
3748 switch (pred) {
3749 case ICMP_SGE:
3750 return ICMP_SGT;
3751 case ICMP_SLE:
3752 return ICMP_SLT;
3753 case ICMP_UGE:
3754 return ICMP_UGT;
3755 case ICMP_ULE:
3756 return ICMP_ULT;
3757 case FCMP_OGE:
3758 return FCMP_OGT;
3759 case FCMP_OLE:
3760 return FCMP_OLT;
3761 case FCMP_UGE:
3762 return FCMP_UGT;
3763 case FCMP_ULE:
3764 return FCMP_ULT;
3765 default:
3766 return pred;
3767 }
3768}
3769
3771 switch (pred) {
3772 case ICMP_SGT:
3773 return ICMP_SGE;
3774 case ICMP_SLT:
3775 return ICMP_SLE;
3776 case ICMP_UGT:
3777 return ICMP_UGE;
3778 case ICMP_ULT:
3779 return ICMP_ULE;
3780 case FCMP_OGT:
3781 return FCMP_OGE;
3782 case FCMP_OLT:
3783 return FCMP_OLE;
3784 case FCMP_UGT:
3785 return FCMP_UGE;
3786 case FCMP_ULT:
3787 return FCMP_ULE;
3788 default:
3789 return pred;
3790 }
3791}
3792
3794 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3795
3796 if (isStrictPredicate(pred))
3797 return getNonStrictPredicate(pred);
3798 if (isNonStrictPredicate(pred))
3799 return getStrictPredicate(pred);
3800
3801 llvm_unreachable("Unknown predicate!");
3802}
3803
3805 switch (predicate) {
3806 default: return false;
3808 case ICmpInst::ICMP_UGE: return true;
3809 }
3810}
3811
3813 switch (predicate) {
3814 default: return false;
3816 case ICmpInst::ICMP_SGE: return true;
3817 }
3818}
3819
3820bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3821 ICmpInst::Predicate Pred) {
3822 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3823 switch (Pred) {
3825 return LHS.eq(RHS);
3827 return LHS.ne(RHS);
3829 return LHS.ugt(RHS);
3831 return LHS.uge(RHS);
3833 return LHS.ult(RHS);
3835 return LHS.ule(RHS);
3837 return LHS.sgt(RHS);
3839 return LHS.sge(RHS);
3841 return LHS.slt(RHS);
3843 return LHS.sle(RHS);
3844 default:
3845 llvm_unreachable("Unexpected non-integer predicate.");
3846 };
3847}
3848
3849bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3850 FCmpInst::Predicate Pred) {
3851 APFloat::cmpResult R = LHS.compare(RHS);
3852 switch (Pred) {
3853 default:
3854 llvm_unreachable("Invalid FCmp Predicate");
3856 return false;
3858 return true;
3859 case FCmpInst::FCMP_UNO:
3860 return R == APFloat::cmpUnordered;
3861 case FCmpInst::FCMP_ORD:
3862 return R != APFloat::cmpUnordered;
3863 case FCmpInst::FCMP_UEQ:
3864 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3865 case FCmpInst::FCMP_OEQ:
3866 return R == APFloat::cmpEqual;
3867 case FCmpInst::FCMP_UNE:
3868 return R != APFloat::cmpEqual;
3869 case FCmpInst::FCMP_ONE:
3871 case FCmpInst::FCMP_ULT:
3872 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3873 case FCmpInst::FCMP_OLT:
3874 return R == APFloat::cmpLessThan;
3875 case FCmpInst::FCMP_UGT:
3877 case FCmpInst::FCMP_OGT:
3878 return R == APFloat::cmpGreaterThan;
3879 case FCmpInst::FCMP_ULE:
3880 return R != APFloat::cmpGreaterThan;
3881 case FCmpInst::FCMP_OLE:
3882 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3883 case FCmpInst::FCMP_UGE:
3884 return R != APFloat::cmpLessThan;
3885 case FCmpInst::FCMP_OGE:
3886 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3887 }
3888}
3889
3890std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3891 const KnownBits &RHS,
3892 ICmpInst::Predicate Pred) {
3893 switch (Pred) {
3894 case ICmpInst::ICMP_EQ:
3895 return KnownBits::eq(LHS, RHS);
3896 case ICmpInst::ICMP_NE:
3897 return KnownBits::ne(LHS, RHS);
3898 case ICmpInst::ICMP_UGE:
3899 return KnownBits::uge(LHS, RHS);
3900 case ICmpInst::ICMP_UGT:
3901 return KnownBits::ugt(LHS, RHS);
3902 case ICmpInst::ICMP_ULE:
3903 return KnownBits::ule(LHS, RHS);
3904 case ICmpInst::ICMP_ULT:
3905 return KnownBits::ult(LHS, RHS);
3906 case ICmpInst::ICMP_SGE:
3907 return KnownBits::sge(LHS, RHS);
3908 case ICmpInst::ICMP_SGT:
3909 return KnownBits::sgt(LHS, RHS);
3910 case ICmpInst::ICMP_SLE:
3911 return KnownBits::sle(LHS, RHS);
3912 case ICmpInst::ICMP_SLT:
3913 return KnownBits::slt(LHS, RHS);
3914 default:
3915 llvm_unreachable("Unexpected non-integer predicate.");
3916 }
3917}
3918
3920 if (CmpInst::isEquality(pred))
3921 return pred;
3922 if (isSigned(pred))
3923 return getUnsignedPredicate(pred);
3924 if (isUnsigned(pred))
3925 return getSignedPredicate(pred);
3926
3927 llvm_unreachable("Unknown predicate!");
3928}
3929
3931 switch (predicate) {
3932 default: return false;
3935 case FCmpInst::FCMP_ORD: return true;
3936 }
3937}
3938
3940 switch (predicate) {
3941 default: return false;
3944 case FCmpInst::FCMP_UNO: return true;
3945 }
3946}
3947
3949 switch(predicate) {
3950 default: return false;
3951 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3952 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3953 }
3954}
3955
3957 switch(predicate) {
3958 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3959 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3960 default: return false;
3961 }
3962}
3963
3965 // If the predicates match, then we know the first condition implies the
3966 // second is true.
3967 if (CmpPredicate::getMatching(Pred1, Pred2))
3968 return true;
3969
3970 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3972 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3974
3975 switch (Pred1) {
3976 default:
3977 break;
3978 case CmpInst::ICMP_EQ:
3979 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3980 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3981 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3982 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3983 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3984 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3985 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3986 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3987 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3988 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3989 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3990 }
3991 return false;
3992}
3993
3995 CmpPredicate Pred2) {
3996 return isImpliedTrueByMatchingCmp(Pred1,
3998}
3999
4001 CmpPredicate Pred2) {
4002 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4003 return true;
4004 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4005 return false;
4006 return std::nullopt;
4007}
4008
4009//===----------------------------------------------------------------------===//
4010// CmpPredicate Implementation
4011//===----------------------------------------------------------------------===//
4012
4013std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4014 CmpPredicate B) {
4015 if (A.Pred == B.Pred)
4016 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4018 return {};
4019 if (A.HasSameSign &&
4021 return B.Pred;
4022 if (B.HasSameSign &&
4024 return A.Pred;
4025 return {};
4026}
4027
4029 return HasSameSign ? ICmpInst::getSignedPredicate(Pred) : Pred;
4030}
4031
4033 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4034 return ICI->getCmpPredicate();
4035 return Cmp->getPredicate();
4036}
4037
4039 return {CmpInst::getSwappedPredicate(P), P.hasSameSign()};
4040}
4041
4043 return getSwapped(get(Cmp));
4044}
4045
4046//===----------------------------------------------------------------------===//
4047// SwitchInst Implementation
4048//===----------------------------------------------------------------------===//
4049
/// Set up hung-off operand storage for a switch: reserve NumReserved operand
/// slots and install the condition value in slot 0 and the default
/// destination in slot 1. Case (value, successor) pairs occupy later slots.
4050 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4051 assert(Value && Default && NumReserved);
4052 ReservedSpace = NumReserved;
// NOTE(review): internal numbering jumps 4052 -> 4054 here; one line appears
// to have been dropped by this extraction — confirm against upstream.
4054 allocHungoffUses(ReservedSpace);
4055
4056 Op<0>() = Value;
4057 Op<1>() = Default;
4058 }
4059
4060/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4061/// switch on and a default destination. The number of additional cases can
4062/// be specified here to make memory allocation more efficient. This
4063/// constructor can also autoinsert before another instruction.
4064SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4065 InsertPosition InsertBefore)
4066 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4067 AllocMarker, InsertBefore) {
4068 init(Value, Default, 2+NumCases*2);
4069}
4070
4071SwitchInst::SwitchInst(const SwitchInst &SI)
4072 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4073 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4074 setNumHungOffUseOperands(SI.getNumOperands());
4075 Use *OL = getOperandList();
4076 const Use *InOL = SI.getOperandList();
4077 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4078 OL[i] = InOL[i];
4079 OL[i+1] = InOL[i+1];
4080 }
4081 SubclassOptionalData = SI.SubclassOptionalData;
4082}
4083
4084/// addCase - Add an entry to the switch instruction...
4085///
// Append a (OnVal, Dest) case pair, growing the hung-off operand list when
// the reserved space is exhausted.
// NOTE(review): the function header (internal line 4086) and a line between
// 4092 and 4094 appear to have been dropped by this extraction — confirm
// against upstream before relying on this text.
4087 unsigned NewCaseIdx = getNumCases();
4088 unsigned OpNo = getNumOperands();
4089 if (OpNo+2 > ReservedSpace)
4090 growOperands(); // Get more space!
4091 // Initialize some new operands.
4092 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
// Write the new pair through a CaseHandle so value and successor land in the
// correct operand slots.
4094 CaseHandle Case(this, NewCaseIdx);
4095 Case.setValue(OnVal);
4096 Case.setSuccessor(Dest);
4097 }
4098
4099/// removeCase - This method removes the specified case and its successor
4100/// from the switch instruction.
// Remove the case at iterator I by swapping the last (value, successor) pair
// into its slot and shrinking the operand list by two. Note this does NOT
// preserve case order. Returns an iterator at the removed case's index.
// NOTE(review): the signature line (internal 4101) is missing from this
// extraction.
4102 unsigned idx = I->getCaseIndex();
4103
4104 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4105
4106 unsigned NumOps = getNumOperands();
4107 Use *OL = getOperandList();
4108
4109 // Overwrite this case with the end of the list.
// (Skipped when the removed case is already the last one.)
4110 if (2 + (idx + 1) * 2 != NumOps) {
4111 OL[2 + idx * 2] = OL[NumOps - 2];
4112 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4113 }
4114
4115 // Nuke the last value.
// Clearing the Uses drops their references before the count is reduced.
4116 OL[NumOps-2].set(nullptr);
4117 OL[NumOps-2+1].set(nullptr);
4118 setNumHungOffUseOperands(NumOps-2);
4119
4120 return CaseIt(this, idx);
4121 }
4122
4123/// growOperands - grow operands - This grows the operand list in response
4124/// to a push_back style of operation. This grows the number of ops by 3 times.
4125///
4126void SwitchInst::growOperands() {
4127 unsigned e = getNumOperands();
4128 unsigned NumOps = e*3;
4129
4130 ReservedSpace = NumOps;
4131 growHungoffUses(ReservedSpace);
4132}
4133
// Build a !prof branch_weights node from the cached Weights. Returns nullptr
// when there are no cached weights, when all weights are zero, or when there
// are fewer than two successors — in those cases no metadata is emitted.
// NOTE(review): the signature line (internal 4134) is missing from this
// extraction.
4135 assert(Changed && "called only if metadata has changed");
4136
4137 if (!Weights)
4138 return nullptr;
4139
4140 assert(SI.getNumSuccessors() == Weights->size() &&
4141 "num of prof branch_weights must accord with num of successors");
4142
4143 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4144
4145 if (AllZeroes || Weights->size() < 2)
4146 return nullptr;
4147
4148 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4149 }
4150
// Populate the wrapper's Weights cache from the switch's existing !prof
// branch_weights metadata, if any. Bails out silently when there is no
// metadata or extraction fails; aborts if the operand count disagrees with
// the successor count.
// NOTE(review): the signature line (internal 4151) and the local declaration
// between 4160 and 4162 (the vector that extractBranchWeights fills) are
// missing from this extraction — confirm against upstream.
4152 MDNode *ProfileData = getBranchWeightMDNode(SI);
4153 if (!ProfileData)
4154 return;
4155
4156 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4157 llvm_unreachable("number of prof branch_weights metadata operands does "
4158 "not correspond to number of succesors");
4159 }
4160
4162 if (!extractBranchWeights(ProfileData, Weights))
4163 return;
4164 this->Weights = std::move(Weights);
4165 }
4166
// Remove a case while keeping the cached weights in sync: the last weight is
// moved into the removed case's slot, mirroring the swap-with-last scheme of
// SwitchInst::removeCase. The "+ 1" skips the default destination, which is
// successor 0.
// NOTE(review): the signature lines (internal 4167-4168) are missing from
// this extraction.
4169 if (Weights) {
4170 assert(SI.getNumSuccessors() == Weights->size() &&
4171 "num of prof branch_weights must accord with num of successors");
4172 Changed = true;
4173 // Copy the last case to the place of the removed one and shrink.
4174 // This is tightly coupled with the way SwitchInst::removeCase() removes
4175 // the cases in SwitchInst::removeCase(CaseIt).
4176 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4177 Weights->pop_back();
4178 }
4179 return SI.removeCase(I);
4180 }
4181
// Add a case and record its (optional) branch weight. The Weights vector is
// materialized lazily: only when a nonzero weight first appears, with all
// pre-existing successors given weight 0.
// NOTE(review): the signature lines (internal 4182 and 4184) are missing
// from this extraction.
4183 ConstantInt *OnVal, BasicBlock *Dest,
4185 SI.addCase(OnVal, Dest);
4186
4187 if (!Weights && W && *W) {
4188 Changed = true;
// Default every successor to weight 0, then set the new case's weight.
4189 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4190 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4191 } else if (Weights) {
4192 Changed = true;
// Missing weight defaults to 0 so the vector stays aligned with successors.
4193 Weights->push_back(W.value_or(0));
4194 }
4195 if (Weights)
4196 assert(SI.getNumSuccessors() == Weights->size() &&
4197 "num of prof branch_weights must accord with num of successors");
4198 }
4199
// Erase the wrapped switch; clear Changed/Weights first so the wrapper's
// destructor does not try to write metadata back to a dead instruction.
// NOTE(review): the signature lines (internal 4200-4201) are missing from
// this extraction.
4202 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4203 Changed = false;
4204 if (Weights)
4205 Weights->resize(0);
4206 return SI.eraseFromParent();
4207 }
4208
// Return the cached weight for successor idx, or std::nullopt when no
// weights have been loaded.
// NOTE(review): the signature lines (internal 4209-4210) are missing from
// this extraction.
4211 if (!Weights)
4212 return std::nullopt;
4213 return (*Weights)[idx];
4214 }
4215
// Set the weight of successor idx. A missing W is a no-op; a nonzero W
// lazily materializes the Weights vector (zero-filled). Changed is flipped
// only when the stored value actually differs.
// NOTE(review): the signature lines (internal 4216-4217) are missing from
// this extraction.
4218 if (!W)
4219 return;
4220
4221 if (!Weights && *W)
4222 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4223
4224 if (Weights) {
4225 auto &OldW = (*Weights)[idx];
4226 if (*W != OldW) {
4227 Changed = true;
4228 OldW = *W;
4229 }
4230 }
4231 }
4232
// Static variant: read successor idx's weight directly from the !prof
// metadata without a wrapper cache. "idx + 1" skips metadata operand 0 (the
// branch_weights tag); the operand-count check requires exactly one weight
// per successor.
// NOTE(review): the first signature line (internal 4233-4234) is missing
// from this extraction.
4235 unsigned idx) {
4236 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4237 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4238 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4239 ->getValue()
4240 .getZExtValue();
4241
4242 return std::nullopt;
4243 }
4244
4245//===----------------------------------------------------------------------===//
4246// IndirectBrInst Implementation
4247//===----------------------------------------------------------------------===//
4248
/// Set up hung-off operand storage for an indirectbr: slot 0 holds the
/// target address; NumDests destination slots follow.
4249 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4250 assert(Address && Address->getType()->isPointerTy() &&
4251 "Address of indirectbr must be a pointer");
4252 ReservedSpace = 1+NumDests;
// NOTE(review): internal numbering jumps 4252 -> 4254 here; one line appears
// to have been dropped by this extraction — confirm against upstream.
4254 allocHungoffUses(ReservedSpace);
4255
4256 Op<0>() = Address;
4257 }
4258
4259
4260/// growOperands - grow operands - This grows the operand list in response
4261/// to a push_back style of operation. This grows the number of ops by 2 times.
4262///
4263void IndirectBrInst::growOperands() {
4264 unsigned e = getNumOperands();
4265 unsigned NumOps = e*2;
4266
4267 ReservedSpace = NumOps;
4268 growHungoffUses(ReservedSpace);
4269}
4270
4271IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4272 InsertPosition InsertBefore)
4273 : Instruction(Type::getVoidTy(Address->getContext()),
4274 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4275 init(Address, NumCases);
4276}
4277
4278IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4279 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4280 AllocMarker) {
4281 NumUserOperands = IBI.NumUserOperands;
4282 allocHungoffUses(IBI.getNumOperands());
4283 Use *OL = getOperandList();
4284 const Use *InOL = IBI.getOperandList();
4285 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4286 OL[i] = InOL[i];
4287 SubclassOptionalData = IBI.SubclassOptionalData;
4288}
4289
4290/// addDestination - Add a destination.
4291///
// Append a destination block, growing the hung-off operand list when the
// reserved space is exhausted.
// NOTE(review): the function header (internal line 4292) and a line between
// 4297 and 4299 appear to have been dropped by this extraction — confirm
// against upstream before relying on this text.
4293 unsigned OpNo = getNumOperands();
4294 if (OpNo+1 > ReservedSpace)
4295 growOperands(); // Get more space!
4296 // Initialize some new operands.
4297 assert(OpNo < ReservedSpace && "Growing didn't work!");
4299 getOperandList()[OpNo] = DestBB;
4300 }
4301
4302/// removeDestination - This method removes the specified successor from the
4303/// indirectbr instruction.
// Remove successor idx by swapping the last destination into its slot and
// shrinking the operand list by one (successor order is not preserved).
// Operand 0 is the address, so successor idx lives at operand idx+1.
// NOTE(review): the signature line (internal 4304) is missing from this
// extraction.
4305 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4306
4307 unsigned NumOps = getNumOperands();
4308 Use *OL = getOperandList();
4309
4310 // Replace this value with the last one.
4311 OL[idx+1] = OL[NumOps-1];
4312
4313 // Nuke the last value.
// Clearing the Use drops its reference before the count is reduced.
4314 OL[NumOps-1].set(nullptr);
4315 setNumHungOffUseOperands(NumOps-1);
4316 }
4317
4318//===----------------------------------------------------------------------===//
4319// FreezeInst Implementation
4320//===----------------------------------------------------------------------===//
4321
// Construct a freeze of S; the result type matches the operand's type.
// NOTE(review): the signature line (internal 4322) is missing from this
// extraction.
4323 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4324 setName(Name);
4325 }
4326
4327//===----------------------------------------------------------------------===//
4328// cloneImpl() implementations
4329//===----------------------------------------------------------------------===//
4330
4331// Define these methods here so vtables don't get emitted into every translation
4332// unit that uses these classes.
4333
4336 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4337}
4338
4340 return Create(getOpcode(), Op<0>());
4341}
4342
4344 return Create(getOpcode(), Op<0>(), Op<1>());
4345}
4346
4348 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4349}
4350
4352 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4353}
4354
4356 return new ExtractValueInst(*this);
4357}
4358
4360 return new InsertValueInst(*this);
4361}
4362
4365 getOperand(0), getAlign());
4366 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4367 Result->setSwiftError(isSwiftError());
4368 return Result;
4369}
4370
4372 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4374}
4375
4377 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4379}
4380
4385 Result->setVolatile(isVolatile());
4386 Result->setWeak(isWeak());
4387 return Result;
4388}
4389
4391 AtomicRMWInst *Result =
4394 Result->setVolatile(isVolatile());
4395 return Result;
4396}
4397
4399 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4400}
4401
4403 return new TruncInst(getOperand(0), getType());
4404}
4405
4407 return new ZExtInst(getOperand(0), getType());
4408}
4409
4411 return new SExtInst(getOperand(0), getType());
4412}
4413
4415 return new FPTruncInst(getOperand(0), getType());
4416}
4417
4419 return new FPExtInst(getOperand(0), getType());
4420}
4421
4423 return new UIToFPInst(getOperand(0), getType());
4424}
4425
4427 return new SIToFPInst(getOperand(0), getType());
4428}
4429
4431 return new FPToUIInst(getOperand(0), getType());
4432}
4433
4435 return new FPToSIInst(getOperand(0), getType());
4436}
4437
4439 return new PtrToIntInst(getOperand(0), getType());
4440}
4441
4443 return new PtrToAddrInst(getOperand(0), getType());
4444}
4445
4447 return new IntToPtrInst(getOperand(0), getType());
4448}
4449
4451 return new BitCastInst(getOperand(0), getType());
4452}
4453
4455 return new AddrSpaceCastInst(getOperand(0), getType());
4456}
4457
4459 if (hasOperandBundles()) {
4463 return new (AllocMarker) CallInst(*this, AllocMarker);
4464 }
4466 return new (AllocMarker) CallInst(*this, AllocMarker);
4467}
4468
4471}
4472
4474 return new VAArgInst(getOperand(0), getType());
4475}
4476
4479}
4480
4483}
4484
4487}
4488
4489PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4490
4492 return new LandingPadInst(*this);
4493}
4494
4497 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4498}
4499
4502 return new (AllocMarker) BranchInst(*this, AllocMarker);
4503}
4504
4505SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4506
4508 return new IndirectBrInst(*this);
4509}
4510
4512 if (hasOperandBundles()) {
4516 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4517 }
4519 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4520}
4521
4523 if (hasOperandBundles()) {
4527 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4528 }
4530 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4531}
4532
4534 return new (AllocMarker) ResumeInst(*this);
4535}
4536
4539 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4540}
4541
4543 return new (AllocMarker) CatchReturnInst(*this);
4544}
4545
4547 return new CatchSwitchInst(*this);
4548}
4549
4552 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4553}
4554
4557 return new UnreachableInst(Context);
4558}
4559
4560bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4561 bool NoTrapAfterNoreturn) const {
4562 if (!TrapUnreachable)
4563 return false;
4564
4565 // We may be able to ignore unreachable behind a noreturn call.
4566 if (const CallInst *Call = dyn_cast_or_null<CallInst>(getPrevNode());
4567 Call && Call->doesNotReturn()) {
4568 if (NoTrapAfterNoreturn)
4569 return false;
4570 // Do not emit an additional trap instruction.
4571 if (Call->isNonContinuableTrap())
4572 return false;
4573 }
4574
4575 if (getFunction()->hasFnAttribute(Attribute::Naked))
4576 return false;
4577
4578 return true;
4579}
4580
4582 return new FreezeInst(getOperand(0));
4583}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
Definition: Attributes.cpp:761
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_ABI
Definition: Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:247
Value * RHS
Value * LHS
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:6143
Class for arbitrary precision integers.
Definition: APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1330
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1639
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1598
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:200
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:64
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:153
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:128
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:121
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:143
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:106
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:132
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:136
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
iterator begin() const
Definition: ArrayRef.h:135
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:191
Class to represent array types.
Definition: DerivedTypes.h:398
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:506
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:630
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:560
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:604
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:592
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:549
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:567
void setAlignment(Align Align)
Definition: Instructions.h:553
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:709
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:843
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:853
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:721
@ Add
*p = old + v
Definition: Instructions.h:725
@ FAdd
*p = old + v
Definition: Instructions.h:746
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:777
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
Definition: Instructions.h:765
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:739
@ Or
*p = old | v
Definition: Instructions.h:733
@ Sub
*p = old - v
Definition: Instructions.h:727
@ And
*p = old & v
Definition: Instructions.h:729
@ Xor
*p = old ^ v
Definition: Instructions.h:735
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:781
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
Definition: Instructions.h:761
@ FSub
*p = old - v
Definition: Instructions.h:749
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:769
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:737
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:743
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:757
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:741
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:753
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:773
@ Nand
*p = ~(old & v)
Definition: Instructions.h:731
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:882
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:868
void setOperation(BinOp Operation)
Definition: Instructions.h:837
BinOp getOperation() const
Definition: Instructions.h:819
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:877
void setAlignment(Align Align)
Definition: Instructions.h:847
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:863
LLVM_ABI bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
LLVM_ABI FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:845
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
Definition: Attributes.h:915
LLVM_ABI FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
LLVM_ABI MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
LLVM_ABI CaptureInfo getCaptureInfo() const
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:510
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:281
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:223
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:549
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:252
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1116
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1415
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1410
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2245
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1481
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2052
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
Definition: InstrTypes.h:2190
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2083
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1348
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
Definition: InstrTypes.h:1435
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1591
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:1996
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1406
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2262
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1267
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1709
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2278
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
Value * getCalledOperand() const
Definition: InstrTypes.h:1340
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1130
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2158
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1292
FunctionType * FTy
Definition: InstrTypes.h:1131
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:1840
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1273
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1205
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2313
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1384
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1290
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1424
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:2001
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition: ModRef.h:354
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition: ModRef.h:386
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition: ModRef.h:367
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition: ModRef.h:370
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition: ModRef.h:382
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:448
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:612
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:666
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:860
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:917
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:770
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:950
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:681
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:695
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:708
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:684
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:693
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:682
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:683
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:705
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:692
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:689
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:690
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:685
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:687
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
@ ICMP_NE
not equal
Definition: InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:706
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:694
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:704
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:691
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:680
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:688
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition: InstrTypes.h:932
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:829
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:944
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:873
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition: InstrTypes.h:854
bool isFPPredicate() const
Definition: InstrTypes.h:784
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:791
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:767
bool isStrictPredicate() const
Definition: InstrTypes.h:845
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:895
bool isIntPredicate() const
Definition: InstrTypes.h:785
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition: InstrTypes.h:938
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:928
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:23
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
Definition: CmpPredicate.h:29
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:43
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:277
const APFloat & getValueAPF() const
Definition: Constants.h:320
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1423
This is an important base class in LLVM.
Definition: Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:312
This instruction extracts a single (scalar) element from a VectorType value.
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:429
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:470
LLVM_ABI FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:460
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:454
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:592
unsigned getNumElements() const
Definition: DerivedTypes.h:635
This class represents a freeze function that returns random concrete value if an operand is either a ...
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2392
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2391
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:105
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:144
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:137
bool isVarArg() const
Definition: DerivedTypes.h:125
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
This instruction compares its operands according to the predicate given to the constructor.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition: Instruction.h:62
BasicBlock * getBasicBlock()
Definition: Instruction.h:63
This instruction inserts a struct field of array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:1053
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:513
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:82
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:312
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
An instruction for reading from memory.
Definition: Instructions.h:180
void setAlignment(Align Align)
Definition: Instructions.h:219
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:209
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:245
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:224
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:212
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:234
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:215
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:38
Metadata node.
Definition: Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1445
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:125
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:221
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:215
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:135
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:141
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:234
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:224
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:218
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:130
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:158
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:120
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:245
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1069
StringRef getTag() const
Definition: InstrTypes.h:1092
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:700
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:740
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1885
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:639
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossings.
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor times.
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:283
size_t size() const
Definition: SmallVector.h:79
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:574
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:705
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:684
void resize(size_type N)
Definition: SmallVector.h:639
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
An instruction for storing to memory.
Definition: Instructions.h:296
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:347
Align getAlign() const
Definition: Instructions.h:338
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:333
void setAlignment(Align Align)
Definition: Instructions.h:342
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:358
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:330
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:369
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:55
Class to represent struct types.
Definition: DerivedTypes.h:218
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:346
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition: TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:267
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:270
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:352
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition: InstrTypes.h:154
This function has undefined behavior.
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:35
LLVM_ABI void set(Value *Val)
Definition: Value.h:905
const Use * getOperandList() const
Definition: User.h:225
op_range operands()
Definition: User.h:292
LLVM_ABI void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:284
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:265
Use & Op()
Definition: User.h:196
Value * getOperand(unsigned i) const
Definition: User.h:232
unsigned getNumOperands() const
Definition: User.h:254
op_iterator op_end()
Definition: User.h:286
LLVM_ABI void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1098
unsigned NumUserOperands
Definition: Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
Base class of all SIMD vector types.
Definition: DerivedTypes.h:430
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
Definition: DerivedTypes.h:695
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:194
size_type size() const
Definition: DenseSet.h:87
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:34
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
Definition: PatternMatch.h:805
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
@ Offset
Definition: DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1702
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:294
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:288
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1172
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:288
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition: Casting.h:548
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1789
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:312
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1854
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:223
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
bool capturesAnything(CaptureComponents CC)
Definition: ModRef.h:319
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2127
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:294
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2169
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
Definition: InstrTypes.h:2180
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Definition: InstrTypes.h:2176
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
Definition: KnownBits.cpp:487
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
Definition: KnownBits.cpp:495
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
Definition: KnownBits.cpp:535
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
Definition: KnownBits.cpp:501
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
Definition: KnownBits.cpp:541
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
Definition: KnownBits.cpp:517
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
Definition: KnownBits.cpp:521
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
Definition: KnownBits.cpp:545
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
Definition: KnownBits.cpp:525
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Definition: KnownBits.cpp:511
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:249
Indicates this User has operands co-allocated.
Definition: User.h:60
Indicates this User has operands and a descriptor co-allocated.
Definition: User.h:66