// Rendered from the LLVM 21.0.0git doxygen page for Instructions.cpp.
// The per-line numbers embedded below are doxygen artifacts; gaps in that
// numbering indicate source lines dropped by the extraction.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
45#include "llvm/Support/ModRef.h"
47#include <algorithm>
48#include <cassert>
49#include <cstdint>
50#include <optional>
51#include <vector>
52
53using namespace llvm;
54
// NOTE(review): the declaration line of this command-line flag (doxygen line
// 55) was dropped by the extraction; only the flag's arguments survive here.
56 "disable-i2p-p2i-opt", cl::init(false),
57 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
58
59//===----------------------------------------------------------------------===//
60// AllocaInst Class
61//===----------------------------------------------------------------------===//
62
63std::optional<TypeSize>
// NOTE(review): doxygen line 64 (the getAllocationSize signature/open brace)
// was dropped by the extraction.
// Computes the allocation's byte size: the allocated type's alloc size,
// scaled by a constant array length when present. Returns nullopt for a
// non-constant array length or on unsigned-multiply overflow.
65 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
66 if (isArrayAllocation()) {
67 auto *C = dyn_cast<ConstantInt>(getArraySize());
68 if (!C)
69 return std::nullopt;
70 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
71 auto CheckedProd =
72 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
73 if (!CheckedProd)
74 return std::nullopt;
75 return TypeSize::getFixed(*CheckedProd);
76 }
77 return Size;
78}
79
80std::optional<TypeSize>
// NOTE(review): doxygen line 81 (the getAllocationSizeInBits signature) was
// dropped. Converts the byte size above to bits (x8), propagating nullopt
// and guarding the multiply against overflow.
82 std::optional<TypeSize> Size = getAllocationSize(DL);
83 if (!Size)
84 return std::nullopt;
85 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
86 static_cast<TypeSize::ScalarTy>(8));
87 if (!CheckedProd)
88 return std::nullopt;
89 return TypeSize::get(*CheckedProd, Size->isScalable());
90}
91
92//===----------------------------------------------------------------------===//
93// SelectInst Class
94//===----------------------------------------------------------------------===//
95
96/// areInvalidOperands - Return a string if the specified operands are invalid
97/// for a select operation, otherwise return null.
98const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
99 if (Op1->getType() != Op2->getType())
100 return "both values to select must have same type";
101
102 if (Op1->getType()->isTokenTy())
103 return "select values cannot have token type";
104
105 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
106 // Vector select.
107 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
108 return "vector select condition element type must be i1";
109 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
110 if (!ET)
111 return "selected values for vector select must be vectors";
112 if (ET->getElementCount() != VT->getElementCount())
113 return "vector select requires selected vectors to have "
114 "the same vector length as select condition";
115 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
116 return "select condition must be i1 or <n x i1>";
117 }
118 return nullptr;
119}
120
121//===----------------------------------------------------------------------===//
122// PHINode Class
123//===----------------------------------------------------------------------===//
124
125PHINode::PHINode(const PHINode &PN)
126 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
127 ReservedSpace(PN.getNumOperands()) {
// NOTE(review): doxygen lines 128-129 were dropped by the extraction.
130 std::copy(PN.op_begin(), PN.op_end(), op_begin());
131 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
// NOTE(review): doxygen line 132 was dropped by the extraction.
133}
134
135// removeIncomingValue - Remove an incoming value. This is useful if a
136// predecessor basic block is deleted.
137Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
138 Value *Removed = getIncomingValue(Idx);
139
140 // Move everything after this operand down.
141 //
142 // FIXME: we could just swap with the end of the list, then erase. However,
143 // clients might not expect this to happen. The code as it is thrashes the
144 // use/def lists, which is kinda lame.
145 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
// NOTE(review): doxygen line 146 was dropped by the extraction.
147
148 // Nuke the last value.
149 Op<-1>().set(nullptr);
// NOTE(review): doxygen line 150 was dropped by the extraction.
151
152 // If the PHI node is dead, because it has zero entries, nuke it now.
153 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
154 // If anyone is using this PHI, make them use a dummy value instead...
// NOTE(review): doxygen lines 155-156 (the body of this branch) were
// dropped by the extraction.
157 }
158 return Removed;
159}
160
// Bulk removal of incoming values selected by index predicate; compacts
// operands and incoming blocks in one pass.
161void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
162 bool DeletePHIIfEmpty) {
163 SmallDenseSet<unsigned> RemoveIndices;
164 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
165 if (Predicate(Idx))
166 RemoveIndices.insert(Idx);
167
168 if (RemoveIndices.empty())
169 return;
170
171 // Remove operands.
172 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
173 return RemoveIndices.contains(U.getOperandNo());
174 });
175 for (Use &U : make_range(NewOpEnd, op_end()))
176 U.set(nullptr);
177
178 // Remove incoming blocks.
179 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
180 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
181 return RemoveIndices.contains(&BB - block_begin());
182 });
183
184 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
185
186 // If the PHI node is dead, because it has zero entries, nuke it now.
187 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
188 // If anyone is using this PHI, make them use a dummy value instead...
// NOTE(review): doxygen lines 189-190 (the body of this branch) were
// dropped by the extraction.
191 }
192}
193
194/// growOperands - grow operands - This grows the operand list in response
195/// to a push_back style of operation. This grows the number of ops by 1.5
196/// times.
197///
198void PHINode::growOperands() {
199 unsigned e = getNumOperands();
200 unsigned NumOps = e + e / 2;
201 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
202
203 ReservedSpace = NumOps;
204 growHungoffUses(ReservedSpace, /* IsPhi */ true);
205}
206
207/// hasConstantValue - If the specified PHI node always merges together the same
208/// value, return the value, otherwise return null.
// NOTE(review): doxygen line 209 (the hasConstantValue signature) was
// dropped by the extraction.
210 // Exploit the fact that phi nodes always have at least one entry.
211 Value *ConstantValue = getIncomingValue(0);
212 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
213 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
214 if (ConstantValue != this)
215 return nullptr; // Incoming values not all the same.
216 // The case where the first value is this PHI.
217 ConstantValue = getIncomingValue(i);
218 }
219 if (ConstantValue == this)
220 return PoisonValue::get(getType());
221 return ConstantValue;
222}
223
224/// hasConstantOrUndefValue - Whether the specified PHI node always merges
225/// together the same value, assuming that undefs result in the same value as
226/// non-undefs.
227/// Unlike \ref hasConstantValue, this does not return a value because the
228/// unique non-undef incoming value need not dominate the PHI node.
// NOTE(review): doxygen lines 229 (signature) and 232 (the declaration of
// `Incoming`) were dropped by the extraction.
230 Value *ConstantValue = nullptr;
231 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
233 if (Incoming != this && !isa<UndefValue>(Incoming)) {
234 if (ConstantValue && ConstantValue != Incoming)
235 return false;
236 ConstantValue = Incoming;
237 }
238 }
239 return true;
240}
241
242//===----------------------------------------------------------------------===//
243// LandingPadInst Implementation
244//===----------------------------------------------------------------------===//
245
// Constructs a landingpad with room for NumReservedValues clauses.
246LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
247 const Twine &NameStr,
248 InsertPosition InsertBefore)
249 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
250 init(NumReservedValues, NameStr);
251}
252
253LandingPadInst::LandingPadInst(const LandingPadInst &LP)
254 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
255 ReservedSpace(LP.getNumOperands()) {
// NOTE(review): doxygen lines 256-257 were dropped by the extraction.
258 Use *OL = getOperandList();
259 const Use *InOL = LP.getOperandList();
260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
261 OL[I] = InOL[I];
262
263 setCleanup(LP.isCleanup());
264}
265
266LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
267 const Twine &NameStr,
268 InsertPosition InsertBefore) {
269 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
270}
271
272void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
273 ReservedSpace = NumReservedValues;
// NOTE(review): doxygen line 274 was dropped by the extraction.
275 allocHungoffUses(ReservedSpace);
276 setName(NameStr);
277 setCleanup(false);
278}
279
280/// growOperands - grow operands - This grows the operand list in response to a
281/// push_back style of operation. This grows the number of ops by 2 times.
282void LandingPadInst::growOperands(unsigned Size) {
283 unsigned e = getNumOperands();
284 if (ReservedSpace >= e + Size) return;
285 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
286 growHungoffUses(ReservedSpace);
287}
288
// NOTE(review): doxygen lines 289 (the addClause signature) and 293 were
// dropped by the extraction. Appends a clause operand, growing the hung-off
// use array as needed.
290 unsigned OpNo = getNumOperands();
291 growOperands(1);
292 assert(OpNo < ReservedSpace && "Growing didn't work!");
294 getOperandList()[OpNo] = Val;
295}
296
297//===----------------------------------------------------------------------===//
298// CallBase Implementation
299//===----------------------------------------------------------------------===//
300
// NOTE(review): doxygen line 301 (the CallBase::Create signature) was
// dropped. Dispatches clone-with-bundles to the concrete sub-class.
302 InsertPosition InsertPt) {
303 switch (CB->getOpcode()) {
304 case Instruction::Call:
305 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
306 case Instruction::Invoke:
307 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
308 case Instruction::CallBr:
309 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
310 default:
311 llvm_unreachable("Unknown CallBase sub-class!");
312 }
313}
314
// NOTE(review): doxygen lines 315 (signature) and 317 (a local declaration)
// were dropped. Rebuilds the call with OpB replacing any same-tag bundle.
316 InsertPosition InsertPt) {
318 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
319 auto ChildOB = CI->getOperandBundleAt(i);
320 if (ChildOB.getTagName() != OpB.getTag())
321 OpDefs.emplace_back(ChildOB);
322 }
323 OpDefs.emplace_back(OpB);
324 return CallBase::Create(CI, OpDefs, InsertPt);
325}
326
327Function *CallBase::getCaller() { return getParent()->getParent(); }
328
// NOTE(review): doxygen line 329 (signature) was dropped. Only CallBr has
// dynamic extra operands (its indirect destinations plus the default).
330 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
331 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
332}
333
// NOTE(review): doxygen line 334 (signature) was dropped. A call is indirect
// when the callee operand is neither a Function/Constant nor inline asm.
335 const Value *V = getCalledOperand();
336 if (isa<Function>(V) || isa<Constant>(V))
337 return false;
338 return !isInlineAsm();
339}
340
341/// Tests if this call site must be tail call optimized. Only a CallInst can
342/// be tail call optimized.
// NOTE(review): doxygen line 343 (signature) was dropped.
344 if (auto *CI = dyn_cast<CallInst>(this))
345 return CI->isMustTailCall();
346 return false;
347}
348
349/// Tests if this call site is marked as a tail call.
// NOTE(review): doxygen line 350 (signature) was dropped.
351 if (auto *CI = dyn_cast<CallInst>(this))
352 return CI->isTailCall();
353 return false;
354}
355
// NOTE(review): doxygen lines 356 (signature) and 359 (the fallthrough
// return) were dropped. Returns the direct callee's intrinsic ID.
357 if (auto *F = getCalledFunction())
358 return F->getIntrinsicID();
360}
361
// NOTE(review): doxygen lines 362-363 were dropped (signature and the Mask
// initialization from the call-site attributes).
364
365 if (const Function *F = getCalledFunction())
366 Mask |= F->getAttributes().getRetNoFPClass();
367 return Mask;
368}
369
// NOTE(review): doxygen lines 370-371 were dropped (signature and Mask
// initialization); this is the parameter-index variant of the above.
372
373 if (const Function *F = getCalledFunction())
374 Mask |= F->getAttributes().getParamNoFPClass(i);
375 return Mask;
376}
377
378std::optional<ConstantRange> CallBase::getRange() const {
379 const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
380 if (RangeAttr.isValid())
381 return RangeAttr.getRange();
382 return std::nullopt;
383}
384
// NOTE(review): doxygen lines 385 (signature) and 390 (second half of the
// dereferenceable condition) were dropped by the extraction.
386 if (hasRetAttr(Attribute::NonNull))
387 return true;
388
389 if (getRetDereferenceableBytes() > 0 &&
391 return true;
392
393 return false;
394}
395
// NOTE(review): doxygen lines 396 (signature), 400, and 403 (the two return
// statements inside the attr-somewhere lookups) were dropped.
397 unsigned Index;
398
399 if (Attrs.hasAttrSomewhere(Kind, &Index))
401 if (const Function *F = getCalledFunction())
402 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
404
405 return nullptr;
406}
407
408/// Determine whether the argument or parameter has the given attribute.
409bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
410 assert(ArgNo < arg_size() && "Param index out of bounds!");
411
412 if (Attrs.hasParamAttr(ArgNo, Kind))
413 return true;
414
415 const Function *F = getCalledFunction();
416 if (!F)
417 return false;
418
419 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
420 return false;
421
422 // Take into account mod/ref by operand bundles.
423 switch (Kind) {
424 case Attribute::ReadNone:
// NOTE(review): doxygen lines 425 and 427 (the return statements for the
// ReadNone and ReadOnly cases) were dropped by the extraction.
426 case Attribute::ReadOnly:
428 case Attribute::WriteOnly:
429 return !hasReadingOperandBundles();
430 default:
431 return true;
432 }
433}
434
// NOTE(review): doxygen lines 435 (signature), 437, 444, and 446 were
// dropped; the surviving text shows a pointer-argument non-null query
// combining NonNull/NoUndef attributes with dereferenceability.
436 bool AllowUndefOrPoison) const {
438 "Argument must be a pointer");
439 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
440 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
441 return true;
442
443 if (getParamDereferenceableBytes(ArgNo) > 0 &&
445 getCaller(),
447 return true;
448
449 return false;
450}
451
452bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
453 if (auto *F = dyn_cast<Function>(getCalledOperand()))
454 return F->getAttributes().hasFnAttr(Kind);
455
456 return false;
457}
458
459bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
460 if (auto *F = dyn_cast<Function>(getCalledOperand()))
461 return F->getAttributes().hasFnAttr(Kind);
462
463 return false;
464}
465
466template <typename AK>
467Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
468 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
469 // getMemoryEffects() correctly combines memory effects from the call-site,
470 // operand bundles and function.
471 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
472 }
473
474 if (auto *F = dyn_cast<Function>(getCalledOperand()))
475 return F->getAttributes().getFnAttr(Kind);
476
477 return Attribute();
478}
479
480template Attribute
481CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
482template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
483
484template <typename AK>
485Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
486 AK Kind) const {
// NOTE(review): a doxygen line (the declaration of `V` from the called
// operand) was dropped by the extraction before line 489.
488
489 if (auto *F = dyn_cast<Function>(V))
490 return F->getAttributes().getParamAttr(ArgNo, Kind);
491
492 return Attribute();
493}
494template Attribute
495CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
496 Attribute::AttrKind Kind) const;
497template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
498 StringRef Kind) const;
499
// NOTE(review): doxygen lines 500-501 (the getOperandBundlesAsDefs
// signature) and 503 (the loop body) were dropped by the extraction.
502 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
504}
505
// NOTE(review): doxygen lines 506-507 (the populateBundleOperandInfos
// signature) were dropped. Copies all bundle inputs starting at BeginIndex
// and fills in the per-bundle tag/begin/end bookkeeping.
508 const unsigned BeginIndex) {
509 auto It = op_begin() + BeginIndex;
510 for (auto &B : Bundles)
511 It = std::copy(B.input_begin(), B.input_end(), It);
512
513 auto *ContextImpl = getContext().pImpl;
514 auto BI = Bundles.begin();
515 unsigned CurrentIndex = BeginIndex;
516
517 for (auto &BOI : bundle_op_infos()) {
518 assert(BI != Bundles.end() && "Incorrect allocation?");
519
520 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
521 BOI.Begin = CurrentIndex;
522 BOI.End = CurrentIndex + BI->input_size();
523 CurrentIndex = BOI.End;
524 BI++;
525 }
526
527 assert(BI == Bundles.end() && "Incorrect allocation?");
528
529 return It;
530}
531
// NOTE(review): doxygen lines 532 (the getBundleOpInfoForOperand signature),
// 536 (the small-bundle-count guard opening the linear-search branch), and
// 545, 553-554 (an assert operand and the Begin/End iterator declarations)
// were dropped by the extraction.
533 /// When there isn't many bundles, we do a simple linear search.
534 /// Else fallback to a binary-search that use the fact that bundles usually
535 /// have similar number of argument to get faster convergence.
537 for (auto &BOI : bundle_op_infos())
538 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
539 return BOI;
540
541 llvm_unreachable("Did not find operand bundle for operand!");
542 }
543
544 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
546 OpIdx < std::prev(bundle_op_info_end())->End &&
547 "The Idx isn't in the operand bundle");
548
549 /// We need a decimal number below and to prevent using floating point numbers
550 /// we use an intergal value multiplied by this constant.
551 constexpr unsigned NumberScaling = 1024;
552
555 bundle_op_iterator Current = Begin;
556
// Interpolation search over the bundle-op-info array, using the average
// number of operands per bundle to pick a probe point each iteration.
557 while (Begin != End) {
558 unsigned ScaledOperandPerBundle =
559 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
560 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
561 ScaledOperandPerBundle);
562 if (Current >= End)
563 Current = std::prev(End);
564 assert(Current < End && Current >= Begin &&
565 "the operand bundle doesn't cover every value in the range");
566 if (OpIdx >= Current->Begin && OpIdx < Current->End)
567 break;
568 if (OpIdx >= Current->End)
569 Begin = Current + 1;
570 else
571 End = Current;
572 }
573
574 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
575 "the operand bundle doesn't cover every value in the range");
576 return *Current;
577}
578
// NOTE(review): doxygen lines 579-580 (the addOperandBundle signature) and
// 585 (the local bundle vector declaration) were dropped by the extraction.
// Returns CB unchanged if a bundle with this ID already exists; otherwise
// rebuilds the call with OB appended.
581 InsertPosition InsertPt) {
582 if (CB->getOperandBundle(ID))
583 return CB;
584
586 CB->getOperandBundlesAsDefs(Bundles);
587 Bundles.push_back(OB);
588 return Create(CB, Bundles, InsertPt);
589}
590
// NOTE(review): doxygen lines 591 (the removeOperandBundle signature) and
// 593 (the local bundle vector declaration) were dropped. Rebuilds the call
// without any bundle matching ID; returns CB unchanged when none matched.
592 InsertPosition InsertPt) {
594 bool CreateNew = false;
595
596 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
597 auto Bundle = CB->getOperandBundleAt(I);
598 if (Bundle.getTagID() == ID) {
599 CreateNew = true;
600 continue;
601 }
602 Bundles.emplace_back(Bundle);
603 }
604
605 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
606}
607
// NOTE(review): doxygen lines 608 (the hasReadingOperandBundles signature)
// and 612-613 (the first half of its return expression) were dropped.
609 // Implementation note: this is a conservative implementation of operand
610 // bundle semantics, where *any* non-assume operand bundle (other than
611 // ptrauth) forces a callsite to be at least readonly.
614 getIntrinsicID() != Intrinsic::assume;
615}
616
// NOTE(review): doxygen lines 617-620 (the hasClobberingOperandBundles
// signature and the first half of its return expression) were dropped.
621 getIntrinsicID() != Intrinsic::assume;
622}
623
// NOTE(review): doxygen lines 624-625 (the getMemoryEffects signature and
// the initialization of ME from the call-site attributes), 630 and 632 (the
// reading/clobbering bundle guards), and 639-640 (the setMemoryEffects
// definition) were dropped by the extraction.
626 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
627 MemoryEffects FnME = Fn->getMemoryEffects();
628 if (hasOperandBundles()) {
629 // TODO: Add a method to get memory effects for operand bundles instead.
631 FnME |= MemoryEffects::readOnly();
633 FnME |= MemoryEffects::writeOnly();
634 }
635 ME &= FnME;
636 }
637 return ME;
638}
641}
642
// NOTE(review): this whole region is a run of short getter/setter pairs for
// CallBase memory effects whose bodies were almost entirely dropped by the
// extraction — only doc comments and closing braces survive.
643/// Determine if the function does not access memory.
646}
649}
650
651/// Determine if the function does not access or only reads memory.
654}
657}
658
659/// Determine if the function does not access or only writes memory.
662}
665}
666
667/// Determine if the call can access memmory only using pointers based
668/// on its arguments.
671}
674}
675
676/// Determine if the function may only access memory that is
677/// inaccessible from the IR.
680}
683}
684
685/// Determine if the function may only access memory that is
686/// either inaccessible from the IR or pointed to by its arguments.
689}
693}
694
// NOTE(review): doxygen lines 695 (the getCaptureInfo signature) and 702
// (the initialization of CI from the call-site attributes) were dropped.
696 if (OpNo < arg_size()) {
697 // If the argument is passed byval, the callee does not have access to the
698 // original pointer and thus cannot capture it.
699 if (isByValArgument(OpNo))
700 return CaptureInfo::none();
701
703 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
704 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
705 return CI;
706 }
707
708 // deopt operand bundles are captures(none)
709 auto &BOI = getBundleOpInfoForOperand(OpNo);
710 auto OBU = operandBundleFromBundleOpInfo(BOI);
711 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
712}
713
714//===----------------------------------------------------------------------===//
715// CallInst Implementation
716//===----------------------------------------------------------------------===//
717
718void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
719 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
720 this->FTy = FTy;
721 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
722 "NumOperands not set up?");
723
724#ifndef NDEBUG
725 assert((Args.size() == FTy->getNumParams() ||
726 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
727 "Calling a function with bad signature!");
728
729 for (unsigned i = 0; i != Args.size(); ++i)
730 assert((i >= FTy->getNumParams() ||
731 FTy->getParamType(i) == Args[i]->getType()) &&
732 "Calling a function with a bad signature!");
733#endif
734
735 // Set operands in order of their index to match use-list-order
736 // prediction.
737 llvm::copy(Args, op_begin());
738 setCalledOperand(Func);
739
740 auto It = populateBundleOperandInfos(Bundles, Args.size());
741 (void)It;
742 assert(It + 1 == op_end() && "Should add up!");
743
744 setName(NameStr);
745}
746
// Zero-argument variant of init: just the callee and name.
747void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
748 this->FTy = FTy;
749 assert(getNumOperands() == 1 && "NumOperands not set up?");
750 setCalledOperand(Func);
751
752 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
753
754 setName(NameStr);
755}
756
757CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
758 AllocInfo AllocInfo, InsertPosition InsertBefore)
759 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
760 InsertBefore) {
761 init(Ty, Func, Name);
762}
763
764CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
765 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
// NOTE(review): doxygen lines 766, 769, and 773-774 were dropped by the
// extraction (an assert operand, a setter call, and the tail of the second
// std::copy plus a trailing assignment).
767 "Wrong number of operands allocated");
768 setTailCallKind(CI.getTailCallKind());
770
771 std::copy(CI.op_begin(), CI.op_end(), op_begin());
772 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
775}
776
// NOTE(review): doxygen line 777 (the CallInst::Create signature) was
// dropped. Clones CI with the given bundle list, copying call metadata.
778 InsertPosition InsertPt) {
779 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
780
781 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
782 Args, OpB, CI->getName(), InsertPt);
783 NewCI->setTailCallKind(CI->getTailCallKind());
784 NewCI->setCallingConv(CI->getCallingConv());
785 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
786 NewCI->setAttributes(CI->getAttributes());
787 NewCI->setDebugLoc(CI->getDebugLoc());
788 return NewCI;
789}
790
791// Update profile weight for call instruction by scaling it using the ratio
792// of S/T. The meaning of "branch_weights" meta data for call instruction is
793// transfered to represent call count.
// NOTE(review): doxygen line 794 (the updateProfWeight signature) was
// dropped by the extraction.
795 if (T == 0) {
796 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
797 "div by 0. Ignoring. Likely the function "
798 << getParent()->getParent()->getName()
799 << " has 0 entry count, and contains call instructions "
800 "with non-zero prof info.");
801 return;
802 }
803 scaleProfData(*this, S, T);
804}
805
806//===----------------------------------------------------------------------===//
807// InvokeInst Implementation
808//===----------------------------------------------------------------------===//
809
810void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
811 BasicBlock *IfException, ArrayRef<Value *> Args,
// NOTE(review): doxygen lines 812 (the Bundles parameter), 816 (the first
// half of the operand-count assert), and 836 (setting the called operand)
// were dropped by the extraction.
813 const Twine &NameStr) {
814 this->FTy = FTy;
815
817 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
818 "NumOperands not set up?");
819
820#ifndef NDEBUG
821 assert(((Args.size() == FTy->getNumParams()) ||
822 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
823 "Invoking a function with bad signature");
824
825 for (unsigned i = 0, e = Args.size(); i != e; i++)
826 assert((i >= FTy->getNumParams() ||
827 FTy->getParamType(i) == Args[i]->getType()) &&
828 "Invoking a function with a bad signature!");
829#endif
830
831 // Set operands in order of their index to match use-list-order
832 // prediction.
833 llvm::copy(Args, op_begin());
834 setNormalDest(IfNormal);
835 setUnwindDest(IfException);
837
838 auto It = populateBundleOperandInfos(Bundles, Args.size());
839 (void)It;
840 assert(It + 3 == op_end() && "Should add up!");
841
842 setName(NameStr);
843}
844
845InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
846 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
847 assert(getNumOperands() == II.getNumOperands() &&
848 "Wrong number of operands allocated");
849 setCallingConv(II.getCallingConv());
850 std::copy(II.op_begin(), II.op_end(), op_begin());
// NOTE(review): doxygen line 852 (the destination iterator of the second
// std::copy) was dropped by the extraction.
851 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
853 SubclassOptionalData = II.SubclassOptionalData;
854}
855
// NOTE(review): doxygen line 856 (the InvokeInst::Create signature) was
// dropped. Clones II with the given bundle list, copying call metadata.
857 InsertPosition InsertPt) {
858 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
859
860 auto *NewII = InvokeInst::Create(
861 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
862 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
863 NewII->setCallingConv(II->getCallingConv());
864 NewII->SubclassOptionalData = II->SubclassOptionalData;
865 NewII->setAttributes(II->getAttributes());
866 NewII->setDebugLoc(II->getDebugLoc());
867 return NewII;
868}
869
// NOTE(review): doxygen line 870 (the getLandingPadInst signature) was
// dropped; the unwind destination's first non-PHI instruction is returned.
871 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
872}
873
// NOTE(review): doxygen line 874 (the updateProfWeight signature) was
// dropped; this mirrors CallInst::updateProfWeight.
875 if (T == 0) {
876 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
877 "div by 0. Ignoring. Likely the function "
878 << getParent()->getParent()->getName()
879 << " has 0 entry count, and contains call instructions "
880 "with non-zero prof info.");
881 return;
882 }
883 scaleProfData(*this, S, T);
884}
885
886//===----------------------------------------------------------------------===//
887// CallBrInst Implementation
888//===----------------------------------------------------------------------===//
889
890void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
891 ArrayRef<BasicBlock *> IndirectDests,
// NOTE(review): doxygen lines 892-893 (the Args and Bundles parameters) and
// 920 (setting the called operand) were dropped by the extraction.
894 const Twine &NameStr) {
895 this->FTy = FTy;
896
897 assert(getNumOperands() == ComputeNumOperands(Args.size(),
898 IndirectDests.size(),
899 CountBundleInputs(Bundles)) &&
900 "NumOperands not set up?");
901
902#ifndef NDEBUG
903 assert(((Args.size() == FTy->getNumParams()) ||
904 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
905 "Calling a function with bad signature");
906
907 for (unsigned i = 0, e = Args.size(); i != e; i++)
908 assert((i >= FTy->getNumParams() ||
909 FTy->getParamType(i) == Args[i]->getType()) &&
910 "Calling a function with a bad signature!");
911#endif
912
913 // Set operands in order of their index to match use-list-order
914 // prediction.
915 std::copy(Args.begin(), Args.end(), op_begin());
916 NumIndirectDests = IndirectDests.size();
917 setDefaultDest(Fallthrough);
918 for (unsigned i = 0; i != NumIndirectDests; ++i)
919 setIndirectDest(i, IndirectDests[i]);
921
922 auto It = populateBundleOperandInfos(Bundles, Args.size());
923 (void)It;
924 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
925
926 setName(NameStr);
927}
928
929CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
930 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
931 AllocInfo) {
// NOTE(review): doxygen lines 932, 934, and 937-938 were dropped by the
// extraction (an assert operand, a setter call, and the tail of the second
// std::copy).
933 "Wrong number of operands allocated");
935 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
936 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
939 NumIndirectDests = CBI.NumIndirectDests;
940}
941
// NOTE(review): doxygen line 942 (the CallBrInst::Create signature) was
// dropped. Clones CBI with the given bundle list, copying call metadata.
943 InsertPosition InsertPt) {
944 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
945
946 auto *NewCBI = CallBrInst::Create(
947 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
948 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
949 NewCBI->setCallingConv(CBI->getCallingConv());
950 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
951 NewCBI->setAttributes(CBI->getAttributes());
952 NewCBI->setDebugLoc(CBI->getDebugLoc());
953 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
954 return NewCBI;
955}
956
957//===----------------------------------------------------------------------===//
958// ReturnInst Implementation
959//===----------------------------------------------------------------------===//
960
961ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
962 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
963 AllocInfo) {
// NOTE(review): doxygen lines 964 (the first half of an assert) and 968
// were dropped by the extraction.
965 "Wrong number of operands allocated");
966 if (RI.getNumOperands())
967 Op<0>() = RI.Op<0>();
969}
970
// Constructs a `ret` with an optional return value (null for `ret void`).
971ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
972 InsertPosition InsertBefore)
973 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
974 InsertBefore) {
975 if (retVal)
976 Op<0>() = retVal;
977}
978
979//===----------------------------------------------------------------------===//
980// ResumeInst Implementation
981//===----------------------------------------------------------------------===//
982
983ResumeInst::ResumeInst(const ResumeInst &RI)
984 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
985 AllocMarker) {
986 Op<0>() = RI.Op<0>();
987}
988
989ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
990 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
991 AllocMarker, InsertBefore) {
992 Op<0>() = Exn;
993}
994
995//===----------------------------------------------------------------------===//
996// CleanupReturnInst Implementation
997//===----------------------------------------------------------------------===//
998
999CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
// NOTE(review): doxygen lines 1000 (the AllocInfo parameter), 1002 (an
// assert operand), and 1005 (the setSubclassData argument) were dropped by
// the extraction.
1001 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1003 "Wrong number of operands allocated");
1004 setSubclassData<Instruction::OpaqueField>(
1006 Op<0>() = CRI.Op<0>();
1007 if (CRI.hasUnwindDest())
1008 Op<1>() = CRI.Op<1>();
1009}
1010
1011void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1012 if (UnwindBB)
1013 setSubclassData<UnwindDestField>(true);
1014
1015 Op<0>() = CleanupPad;
1016 if (UnwindBB)
1017 Op<1>() = UnwindBB;
1018}
1019
1020CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
// NOTE(review): doxygen line 1021 (the AllocInfo parameter) was dropped by
// the extraction.
1022 InsertPosition InsertBefore)
1023 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1024 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1025 init(CleanupPad, UnwindBB);
1026}
1027
1028//===----------------------------------------------------------------------===//
1029// CatchReturnInst Implementation
1030//===----------------------------------------------------------------------===//
1031void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1032 Op<0>() = CatchPad;
1033 Op<1>() = BB;
1034}
1035
1036CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1037 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1038 AllocMarker) {
1039 Op<0>() = CRI.Op<0>();
1040 Op<1>() = CRI.Op<1>();
1041}
1042
1043CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1044 InsertPosition InsertBefore)
1045 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1046 AllocMarker, InsertBefore) {
1047 init(CatchPad, BB);
1048}
1049
1050//===----------------------------------------------------------------------===//
1051// CatchSwitchInst Implementation
1052//===----------------------------------------------------------------------===//
1053
1054CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1055 unsigned NumReservedValues,
1056 const Twine &NameStr,
1057 InsertPosition InsertBefore)
1058 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1059 InsertBefore) {
1060 if (UnwindDest)
1061 ++NumReservedValues;
1062 init(ParentPad, UnwindDest, NumReservedValues + 1);
1063 setName(NameStr);
1064}
1065
// Copy-construct a catchswitch: re-run init() for the parent pad / unwind
// dest, then mirror every remaining hung-off operand from the source.
// NOTE(review): one line appears to have been dropped between the
// initializer list and the init() call in this extraction; verify against
// upstream.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Slot 0 (the parent pad) was already populated by init(); copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1076
// Set up a catchswitch's hung-off operand storage: reserve NumReservedValues
// slots, store the parent pad in slot 0, and record the unwind destination
// (if any) both as an operand and in the subclass-data flag.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Operand count starts at 1 (parent pad) or 2 (parent pad + unwind dest).
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // Flag the presence of an unwind dest so accessors know slot 1 is used.
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1091
1092/// growOperands - grow operands - This grows the operand list in response to a
1093/// push_back style of operation. This grows the number of ops by 2 times.
1094void CatchSwitchInst::growOperands(unsigned Size) {
1095 unsigned NumOperands = getNumOperands();
1096 assert(NumOperands >= 1);
1097 if (ReservedSpace >= NumOperands + Size)
1098 return;
1099 ReservedSpace = (NumOperands + Size / 2) * 2;
1100 growHungoffUses(ReservedSpace);
1101}
1102
  // Append Handler as a new catchswitch handler operand, growing the
  // hung-off use list if necessary.
  // NOTE(review): the enclosing signature (CatchSwitchInst::addHandler) and
  // the operand-count bump (setNumHungOffUseOperands) appear to have been
  // dropped in extraction; verify against upstream.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  getOperandList()[OpNo] = Handler;
}
1110
  // NOTE(review): the enclosing removeHandler signature and the trailing
  // operand-count decrement appear to have been dropped in extraction.
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

}
1121
1122//===----------------------------------------------------------------------===//
1123// FuncletPadInst Implementation
1124//===----------------------------------------------------------------------===//
1125void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1126 const Twine &NameStr) {
1127 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1128 llvm::copy(Args, op_begin());
1129 setParentPad(ParentPad);
1130 setName(NameStr);
1131}
1132
// Copy-construct a funclet pad (catchpad/cleanuppad), duplicating all
// operands from FPI.
// NOTE(review): the first line of an assert and a trailing setParentPad call
// appear to have been dropped in extraction; verify against upstream.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}
1140
// Primary funclet-pad constructor: Op selects catchpad vs cleanuppad; the
// result type is the parent pad's token type.
// NOTE(review): the `ArrayRef<Value *> Args` parameter line appears to have
// been dropped from this signature in extraction.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1148
1149//===----------------------------------------------------------------------===//
1150// UnreachableInst Implementation
1151//===----------------------------------------------------------------------===//
1152
1154 InsertPosition InsertBefore)
1155 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1156 AllocMarker, InsertBefore) {}
1157
1158//===----------------------------------------------------------------------===//
1159// BranchInst Implementation
1160//===----------------------------------------------------------------------===//
1161
1162void BranchInst::AssertOK() {
1163 if (isConditional())
1164 assert(getCondition()->getType()->isIntegerTy(1) &&
1165 "May only branch on boolean predicates!");
1166}
1167
// Construct an unconditional branch to IfTrue. The sole operand lives in the
// last slot so conditional and unconditional branches share accessors.
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1175
// Construct a conditional branch: operands are stored back-to-front as
// (cond, false-dest, true-dest) in slots -3/-2/-1.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1188
// Copy-construct a branch (1 operand if unconditional, 3 if conditional).
// NOTE(review): the first line of an assert and a trailing
// SubclassOptionalData copy appear to have been dropped in extraction;
// verify against upstream.
BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}
1203
         "Cannot swap successors of an unconditional branch");
  // Exchange the true/false destinations (slots -1 and -2).
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
}
// NOTE(review): this is BranchInst::swapSuccessors(); its opening
// signature/assert and the profile-metadata update call appear to have been
// dropped in extraction.
1213
1214//===----------------------------------------------------------------------===//
1215// AllocaInst Implementation
1216//===----------------------------------------------------------------------===//
1217
1218static Value *getAISize(LLVMContext &Context, Value *Amt) {
1219 if (!Amt)
1220 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1221 else {
1222 assert(!isa<BasicBlock>(Amt) &&
1223 "Passed basic block into allocation size parameter! Use other ctor");
1224 assert(Amt->getType()->isIntegerTy() &&
1225 "Allocation array size is not an integer!");
1226 }
1227 return Amt;
1228}
1229
  // Default alloca alignment: the preferred alignment for Ty, read from the
  // DataLayout of the function we are inserting into.
  // NOTE(review): the enclosing signature (likely `static Align
  // computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)`) appears to
  // have been dropped in extraction.
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}
1239
// Delegating constructor: no explicit array size (defaults to one element).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1243
// Delegating constructor: derive the default (preferred) alignment from the
// insertion point's DataLayout.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1249
// Primary alloca constructor: the result is a pointer in AddrSpace; a null
// ArraySize is normalized to a constant 1 by getAISize().
// NOTE(review): a setAlignment(Align) call appears to have been dropped in
// extraction; verify against upstream.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1260
  // True unless the array-size operand is provably the constant 1.
  // NOTE(review): the enclosing signature (bool
  // AllocaInst::isArrayAllocation() const) appears to have been dropped in
  // extraction.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  // Non-constant sizes are conservatively treated as array allocations.
  return true;
}
1266
/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
// NOTE(review): the signature line (bool AllocaInst::isStaticAlloca() const)
// appears to have been dropped in extraction.
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  // inalloca allocas are lowered specially and are never "static".
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1278
1279//===----------------------------------------------------------------------===//
1280// LoadInst Implementation
1281//===----------------------------------------------------------------------===//
1282
// Debug-only sanity check: a load's operand must be pointer-typed.
// NOTE(review): the first line of the assert appears to have been dropped in
// extraction.
void LoadInst::AssertOK() {
         "Ptr must have pointer type.");
}
1287
  // Default load/store alignment: the ABI alignment for Ty from the
  // inserting function's DataLayout.
  // NOTE(review): the enclosing signature (likely `static Align
  // computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)`) appears to
  // have been dropped in extraction.
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getABITypeAlign(Ty);
}
1297
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
// NOTE(review): the leading signature line (LoadInst::LoadInst(Type *Ty,
// Value *Ptr, const Twine &Name,) appears to have been dropped in
// extraction; this is the non-volatile delegating constructor.
1301
// Delegating constructor: pick the default ABI alignment from the insertion
// point's DataLayout.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1306
// Delegating constructor: non-atomic load (NotAtomic ordering, system
// scope).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1311
// Primary load constructor: records ordering/scope via setAtomic and
// validates the pointer operand.
// NOTE(review): a middle signature line (Align/AtomicOrdering/SSID
// parameters) and the setVolatile/setAlignment calls appear to have been
// dropped in extraction; verify against upstream.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1322
1323//===----------------------------------------------------------------------===//
1324// StoreInst Implementation
1325//===----------------------------------------------------------------------===//
1326
// Debug-only sanity check on a store's two operands.
// NOTE(review): the first line of the pointer-type assert appears to have
// been dropped in extraction.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
         "Ptr must have pointer type!");
}
1332
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
// NOTE(review): the leading signature line (StoreInst::StoreInst(Value *val,
// Value *addr, InsertPosition InsertBefore)) appears to have been dropped in
// extraction; this is the non-volatile delegating constructor.
1335
// Delegating constructor: the default alignment is the stored value type's
// ABI alignment at the insertion point.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}
1341
// Delegating constructor: non-atomic store (NotAtomic ordering, system
// scope).
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1346
// Primary store constructor: stores are void-typed; operand 0 is the value,
// operand 1 the address.
// NOTE(review): setVolatile/setAlignment calls appear to have been dropped
// in extraction; verify against upstream.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}
1359
1360//===----------------------------------------------------------------------===//
1361// AtomicCmpXchgInst Implementation
1362//===----------------------------------------------------------------------===//
1363
// Shared initializer for cmpxchg: wires the three operands and records
// orderings, scope, and alignment, then validates the operand types.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
// NOTE(review): the first line of the pointer-type assert appears to have
// been dropped in extraction.
1383
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, AllocMarker, InsertBefore) {
  // Result is {old value, success flag}; operand setup is shared with Init().
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
// NOTE(review): the leading signature line (AtomicCmpXchgInst::
// AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,) appears to have
// been dropped in extraction.
1395
1396//===----------------------------------------------------------------------===//
1397// AtomicRMWInst Implementation
1398//===----------------------------------------------------------------------===//
1399
// Shared initializer for atomicrmw: wires the pointer/value operands and
// records ordering, scope, and alignment.
// NOTE(review): a setOperation(Operation) call and the first line of the
// pointer-type assert appear to have been dropped in extraction; verify
// against upstream.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}
1420
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
  // The result type matches the operand value; setup is shared with Init().
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
// NOTE(review): the leading signature line (AtomicRMWInst::AtomicRMWInst(
// BinOp Operation, Value *Ptr, Value *Val,) appears to have been dropped in
// extraction.
1427
  // Map an atomicrmw BinOp to its textual mnemonic (as printed in IR).
  // NOTE(review): many `case AtomicRMWInst::...:` label lines (Xchg, Nand,
  // UMax, UMin, FAdd, FSub, FMax, FMin, UIncWrap, UDecWrap, USubCond,
  // USubSat, BAD_BINOP) and the enclosing signature appear to have been
  // dropped in extraction; verify against upstream.
  switch (Op) {
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
    return "umax";
    return "umin";
    return "fadd";
    return "fsub";
    return "fmax";
    return "fmin";
    return "uinc_wrap";
    return "udec_wrap";
    return "usub_cond";
    return "usub_sat";
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1474
1475//===----------------------------------------------------------------------===//
1476// FenceInst Implementation
1477//===----------------------------------------------------------------------===//
1478
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
  // A fence has no operands; it only carries an ordering and a sync scope.
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
// NOTE(review): the leading signature line (FenceInst::FenceInst(LLVMContext
// &C, AtomicOrdering Ordering,) appears to have been dropped in extraction.
1485
1486//===----------------------------------------------------------------------===//
1487// GetElementPtrInst Implementation
1488//===----------------------------------------------------------------------===//
1489
1490void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1491 const Twine &Name) {
1492 assert(getNumOperands() == 1 + IdxList.size() &&
1493 "NumOperands not initialized?");
1494 Op<0>() = Ptr;
1495 llvm::copy(IdxList, op_begin() + 1);
1496 setName(Name);
1497}
1498
// Copy-construct a GEP, duplicating the operands and the cached
// source/result element types.
// NOTE(review): the second signature line (likely `AllocInfo AllocInfo)`)
// and a trailing SubclassOptionalData copy appear to have been dropped in
// extraction; verify against upstream.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
    : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  assert(getNumOperands() == GEPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}
1509
  // Given an aggregate/vector type and a GEP index value, return the type
  // the index steps into, or null if the index is invalid for Ty.
  // NOTE(review): the enclosing signature (likely `static Type
  // *getTypeAtIndex(Type *Ty, Value *Idx)`) appears to have been dropped in
  // extraction.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    // Struct indices must be valid constant indices.
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1524
  // Integer-index overload: return the type an index steps into, or null if
  // the index is out of range for a struct.
  // NOTE(review): the enclosing signature (likely `static Type
  // *getTypeAtIndex(Type *Ty, uint64_t Idx)`) appears to have been dropped
  // in extraction.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1537
// Walk a GEP index list (skipping the first, pointer-stepping index) and
// return the innermost indexed type, or null on an invalid index.
// NOTE(review): the function's signature line and the loop body's
// re-assignment of Ty (via getTypeAtIndex) appear to have been dropped in
// extraction; verify against upstream.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}
1549
  // Public wrappers: resolve the type a GEP with these indices would
  // address.
  // NOTE(review): each wrapper's leading signature line (getIndexedType over
  // ArrayRef<Value *>, ArrayRef<Constant *>, and ArrayRef<uint64_t>) appears
  // to have been dropped in extraction.
  return getIndexedTypeInternal(Ty, IdxList);
}

                                          ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

  return getIndexedTypeInternal(Ty, IdxList);
}
1562
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
// NOTE(review): the signature line (bool
// GetElementPtrInst::hasAllZeroIndices() const) appears to have been dropped
// in extraction.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      // A non-constant index cannot be known to be zero.
      return false;
    }
  }
  return true;
}
1576
/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
// NOTE(review): the signature line (bool
// GetElementPtrInst::hasAllConstantIndices() const) appears to have been
// dropped in extraction.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}
1587
1590}
1591
1593 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1594 if (B)
1596 else
1597 NW = NW.withoutInBounds();
1598 setNoWrapFlags(NW);
1599}
1600
1602 return cast<GEPOperator>(this)->getNoWrapFlags();
1603}
1604
1606 return cast<GEPOperator>(this)->isInBounds();
1607}
1608
1610 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1611}
1612
1614 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1615}
1616
1618 APInt &Offset) const {
1619 // Delegate to the generic GEPOperator implementation.
1620 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1621}
1622
1624 const DataLayout &DL, unsigned BitWidth,
1625 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1626 APInt &ConstantOffset) const {
1627 // Delegate to the generic GEPOperator implementation.
1628 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1629 ConstantOffset);
1630}
1631
1632//===----------------------------------------------------------------------===//
1633// ExtractElementInst Implementation
1634//===----------------------------------------------------------------------===//
1635
1636ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1637 const Twine &Name,
1638 InsertPosition InsertBef)
1639 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1640 ExtractElement, AllocMarker, InsertBef) {
1641 assert(isValidOperands(Val, Index) &&
1642 "Invalid extractelement instruction operands!");
1643 Op<0>() = Val;
1644 Op<1>() = Index;
1645 setName(Name);
1646}
1647
1648bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1649 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1650 return false;
1651 return true;
1652}
1653
1654//===----------------------------------------------------------------------===//
1655// InsertElementInst Implementation
1656//===----------------------------------------------------------------------===//
1657
1658InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1659 const Twine &Name,
1660 InsertPosition InsertBef)
1661 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1662 assert(isValidOperands(Vec, Elt, Index) &&
1663 "Invalid insertelement instruction operands!");
1664 Op<0>() = Vec;
1665 Op<1>() = Elt;
1666 Op<2>() = Index;
1667 setName(Name);
1668}
1669
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
  return true;
}
// NOTE(review): the leading signature line (bool
// InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,)
// appears to have been dropped in extraction.
1682
1683//===----------------------------------------------------------------------===//
1684// ShuffleVectorInst Implementation
1685//===----------------------------------------------------------------------===//
1686
  // Placeholder helper: a poison value standing in for an absent shuffle
  // operand.
  // NOTE(review): the enclosing helper signature and the two delegating
  // ShuffleVectorInst constructor signatures below appear to have been
  // dropped in extraction; verify against upstream.
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

                                     InsertPosition InsertBefore)
                        InsertBefore) {}

                                     const Twine &Name,
                                     InsertPosition InsertBefore)
                        InsertBefore) {}
1702
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask once and store it as a list of integers.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
// NOTE(review): the leading signature line (ShuffleVectorInst::
// ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,) appears to have been
// dropped in extraction.
1720
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
// NOTE(review): the leading signature line (ShuffleVectorInst::
// ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,) appears to
// have been dropped in extraction.
1735
  // Swap the two shuffle sources and rewrite the mask so the shuffle's
  // result is unchanged.
  // NOTE(review): the enclosing signature (void ShuffleVectorInst::commute())
  // appears to have been dropped in extraction.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    // Re-point each lane at the same element of the *other* source.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
1753
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  // Scalable vectors only support zero-splat or all-poison masks.
  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}
// NOTE(review): the leading signature line (bool
// ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,)
// appears to have been dropped in extraction.
1773
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  // NOTE: Through vector ConstantInt we have the potential to support more
  // than just zero splat masks but that requires a LangRef change.
  if (isa<ScalableVectorType>(MaskTy))
    return false;

  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

  // A splatted ConstantInt is valid when it indexes within both sources.
  if (const auto *CI = dyn_cast<ConstantInt>(Mask))
    return !CI->uge(V1Size * 2);

  // Every defined element of a ConstantVector mask must be in range.
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  // Same range check for the packed ConstantDataSequential form.
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  return false;
}
// NOTE(review): the leading signature line (bool
// ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,)
// appears to have been dropped in extraction.
1823
                                       SmallVectorImpl<int> &Result) {
  // Decode a constant shuffle mask into a vector of ints (-1 for undef
  // lanes).
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    // Scalable masks are restricted to zeroinitializer or undef.
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
// NOTE(review): the leading signature line (void
// ShuffleVectorInst::getShuffleMask(const Constant *Mask,) appears to have
// been dropped in extraction.
1857
  ShuffleMask.assign(Mask.begin(), Mask.end());
  // Keep the constant form in sync for bitcode writing and printing.
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}
// NOTE(review): the signature line (void
// ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask)) appears to have
// been dropped in extraction.
1862
                                                      Type *ResultTy) {
  // Re-encode an integer shuffle mask as an i32 vector Constant.
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    // Scalable masks must be splats: all-zero or all-poison.
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return PoisonValue::get(VecTy);
  }
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
      MaskConst.push_back(PoisonValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
// NOTE(review): the leading signature line and the declaration of MaskConst
// (likely `SmallVector<Constant *, 16> MaskConst;`) appear to have been
// dropped in extraction.
1882
1883static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1884 assert(!Mask.empty() && "Shuffle mask must contain elements");
1885 bool UsesLHS = false;
1886 bool UsesRHS = false;
1887 for (int I : Mask) {
1888 if (I == -1)
1889 continue;
1890 assert(I >= 0 && I < (NumOpElts * 2) &&
1891 "Out-of-bounds shuffle mask element");
1892 UsesLHS |= (I < NumOpElts);
1893 UsesRHS |= (I >= NumOpElts);
1894 if (UsesLHS && UsesRHS)
1895 return false;
1896 }
1897 // Allow for degenerate case: completely undef mask means neither source is used.
1898 return UsesLHS || UsesRHS;
1899}
1900
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
1906
1907static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1908 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1909 return false;
1910 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1911 if (Mask[i] == -1)
1912 continue;
1913 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1914 return false;
1915 }
1916 return true;
1917}
1918
  // Identity requires one mask element per source element.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
1926
  // A reverse mask selects source elements in back-to-front order from a
  // single source.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
1946
  // A zero-element splat broadcasts lane 0 of a single source into every
  // lane.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    // Lane 0 of either source (index 0 or NumSrcElts).
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
1960
  // A select mask keeps each lane in place but may take it from either
  // source.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
1975
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
// NOTE(review): the signature line (bool
// ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts))
// appears to have been dropped in extraction.
2011
                                     int &Index) {
  // A splice selects a contiguous run of elements starting at Index from the
  // concatenation of the two sources.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
// NOTE(review): the leading signature line (bool
// ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,)
// appears to have been dropped in extraction.
2045
                                                int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // All defined lanes must agree on a single starting offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
// NOTE(review): the leading signature line (bool
// ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,) appears to
// have been dropped in extraction.
2074
                                               int NumSrcElts, int &NumSubElts,
                                               int &Index) {
  // Recognize a mask that inserts a contiguous subvector of one source into
  // the other; on success reports the subvector length and insert position.
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
// NOTE(review): the leading signature line (bool
// ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,) appears to
// have been dropped in extraction.
2147
// Returns true if this shuffle copies operand 0 unchanged and then pads the
// result out to a wider vector with undef elements.
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isIdentityWithPadding() const) was dropped in extraction.
 2149   // FIXME: Not currently possible to express a shuffle mask for a scalable
 2150   // vector for this case.
 2151   if (isa<ScalableVectorType>(getType()))
 2152     return false;
 2153 
 2154   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
 2155   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
 2156   if (NumMaskElts <= NumOpElts)
 2157     return false;
 2158 
 2159   // The first part of the mask must choose elements from exactly 1 source op.
// NOTE(review): a line was dropped here in extraction — presumably
// "ArrayRef<int> Mask = getShuffleMask();" given the use of Mask below.
 2161   if (!isIdentityMaskImpl(Mask, NumOpElts))
 2162     return false;
 2163 
 2164   // All extending must be with undef elements.
 2165   for (int i = NumOpElts; i < NumMaskElts; ++i)
 2166     if (Mask[i] != -1)
 2167       return false;
 2168 
 2169   return true;
 2170 }
2171
// Returns true if this shuffle extracts the leading elements of operand 0
// unchanged into a narrower result vector.
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isIdentityWithExtract() const) was dropped in extraction.
 2173   // FIXME: Not currently possible to express a shuffle mask for a scalable
 2174   // vector for this case.
 2175   if (isa<ScalableVectorType>(getType()))
 2176     return false;
 2177 
 2178   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
 2179   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
 2180   if (NumMaskElts >= NumOpElts)
 2181     return false;
 2182 
 2183   return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
 2184 }
2185
// Returns true if this shuffle concatenates its two (non-undef) operands into
// a result twice as wide as each input.
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isConcat() const) was dropped in extraction.
 2187   // Vector concatenation is differentiated from identity with padding.
 2188   if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
 2189     return false;
 2190 
 2191   // FIXME: Not currently possible to express a shuffle mask for a scalable
 2192   // vector for this case.
 2193   if (isa<ScalableVectorType>(getType()))
 2194     return false;
 2195 
 2196   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
 2197   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
 2198   if (NumMaskElts != NumOpElts * 2)
 2199     return false;
 2200 
 2201   // Use the mask length rather than the operands' vector lengths here. We
 2202   // already know that the shuffle returns a vector twice as long as the inputs,
 2203   // and neither of the inputs are undef vectors. If the mask picks consecutive
 2204   // elements from both inputs, then this is a concatenation of the inputs.
 2205   return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
 2206 }
2207
// Validate that Mask replicates each of VF source elements ReplicationFactor
// times in order (<0,0,...,1,1,...>), allowing poison lanes anywhere.
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isReplicationMaskWithParams(ArrayRef<int> Mask, ...))
// was dropped in extraction.
 2209                                                    int ReplicationFactor, int VF) {
 2210   assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
 2211          "Unexpected mask size.");
 2212 
 2213   for (int CurrElt : seq(VF)) {
 2214     ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
 2215     assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
 2216            "Run out of mask?");
 2217     Mask = Mask.drop_front(ReplicationFactor);
 2218     if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
 2219           return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
 2220         }))
 2221       return false;
 2222   }
 2223   assert(Mask.empty() && "Did not consume the whole mask?");
 2224 
 2225   return true;
 2226 }
2227
// Determine whether Mask is a replication mask for SOME (ReplicationFactor,
// VF) pair, reporting the pair found. Without poison lanes the answer is
// direct; with poison lanes all candidate factorizations are enumerated.
// NOTE(review): the signature line (presumably the static
// ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask, ...)) was dropped
// in extraction.
 2229                                           int &ReplicationFactor, int &VF) {
 2230   // undef-less case is trivial.
 2231   if (!llvm::is_contained(Mask, PoisonMaskElem)) {
 2232     ReplicationFactor =
 2233         Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
 2234     if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
 2235       return false;
 2236     VF = Mask.size() / ReplicationFactor;
 2237     return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
 2238   }
 2239 
 2240   // However, if the mask contains undef's, we have to enumerate possible tuples
 2241   // and pick one. There are bounds on replication factor: [1, mask size]
 2242   // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
 2243   // Additionally, mask size is a replication factor multiplied by vector size,
 2244   // which further significantly reduces the search space.
 2245 
 2246   // Before doing that, let's perform basic correctness checking first.
 2247   int Largest = -1;
 2248   for (int MaskElt : Mask) {
 2249     if (MaskElt == PoisonMaskElem)
 2250       continue;
 2251     // Elements must be in non-decreasing order.
 2252     if (MaskElt < Largest)
 2253       return false;
 2254     Largest = std::max(Largest, MaskElt);
 2255   }
 2256 
 2257   // Prefer larger replication factor if all else equal.
 2258   for (int PossibleReplicationFactor :
 2259        reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
 2260     if (Mask.size() % PossibleReplicationFactor != 0)
 2261       continue;
 2262     int PossibleVF = Mask.size() / PossibleReplicationFactor;
 2263     if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
 2264                                      PossibleVF))
 2265       continue;
 2266     ReplicationFactor = PossibleReplicationFactor;
 2267     VF = PossibleVF;
 2268     return true;
 2269   }
 2270 
 2271   return false;
 2272 }
2273
2274bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2275 int &VF) const {
2276 // Not possible to express a shuffle mask for a scalable vector for this
2277 // case.
2278 if (isa<ScalableVectorType>(getType()))
2279 return false;
2280
2281 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2282 if (ShuffleMask.size() % VF != 0)
2283 return false;
2284 ReplicationFactor = ShuffleMask.size() / VF;
2285
2286 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2287}
2288
// Check that every VF-sized chunk of Mask either is entirely poison or uses
// each of the VF source lanes exactly once (a permutation of one source use).
// NOTE(review): the signature line (presumably the static
// ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF))
// was dropped in extraction.
 2290   if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
 2291       Mask.size() % VF != 0)
 2292     return false;
 2293   for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
 2294     ArrayRef<int> SubMask = Mask.slice(K, VF);
 2295     if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
 2296       continue;
 2297     SmallBitVector Used(VF, false);
 2298     for (int Idx : SubMask) {
 2299       if (Idx != PoisonMaskElem && Idx < VF)
 2300         Used.set(Idx);
 2301     }
 2302     if (!Used.all())
 2303       return false;
 2304   }
 2305   return true;
 2306 }
2307
 2308/// Return true if this shuffle mask is a replication mask.
// NOTE(review): the doc comment above looks stale — the body below checks
// the single-source/one-use-single-source predicates, not replication; the
// signature line (presumably ShuffleVectorInst::isOneUseSingleSourceMask(
// int VF) const) was dropped in extraction. Confirm against the header.
 2310   // Not possible to express a shuffle mask for a scalable vector for this
 2311   // case.
 2312   if (isa<ScalableVectorType>(getType()))
 2313     return false;
 2314   if (!isSingleSourceMask(ShuffleMask, VF))
 2315     return false;
 2316 
 2317   return isOneUseSingleSourceMask(ShuffleMask, VF);
 2318 }
2319
2320bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2321 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2322 // shuffle_vector can only interleave fixed length vectors - for scalable
2323 // vectors, see the @llvm.vector.interleave2 intrinsic
2324 if (!OpTy)
2325 return false;
2326 unsigned OpNumElts = OpTy->getNumElements();
2327
2328 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2329}
2330
// Match Mask as an interleave of Factor lanes of length NumElts/Factor and,
// on success, record each lane's starting source index in StartIndexes.
// Poison/undef lanes are tolerated as long as the defined elements remain
// consecutive within each lane.
// NOTE(review): the signature line (presumably the static
// ShuffleVectorInst::isInterleaveMask(...)) was dropped in extraction.
 2332     ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
 2333     SmallVectorImpl<unsigned> &StartIndexes) {
 2334   unsigned NumElts = Mask.size();
 2335   if (NumElts % Factor)
 2336     return false;
 2337 
 2338   unsigned LaneLen = NumElts / Factor;
 2339   if (!isPowerOf2_32(LaneLen))
 2340     return false;
 2341 
 2342   StartIndexes.resize(Factor);
 2343 
 2344   // Check whether each element matches the general interleaved rule.
 2345   // Ignore undef elements, as long as the defined elements match the rule.
 2346   // Outer loop processes all factors (x, y, z in the above example)
 2347   unsigned I = 0, J;
 2348   for (; I < Factor; I++) {
 2349     unsigned SavedLaneValue;
 2350     unsigned SavedNoUndefs = 0;
 2351 
 2352     // Inner loop processes consecutive accesses (x, x+1... in the example)
 2353     for (J = 0; J < LaneLen - 1; J++) {
 2354       // Lane computes x's position in the Mask
 2355       unsigned Lane = J * Factor + I;
 2356       unsigned NextLane = Lane + Factor;
 2357       int LaneValue = Mask[Lane];
 2358       int NextLaneValue = Mask[NextLane];
 2359 
 2360       // If both are defined, values must be sequential
 2361       if (LaneValue >= 0 && NextLaneValue >= 0 &&
 2362           LaneValue + 1 != NextLaneValue)
 2363         break;
 2364 
 2365       // If the next value is undef, save the current one as reference
 2366       if (LaneValue >= 0 && NextLaneValue < 0) {
 2367         SavedLaneValue = LaneValue;
 2368         SavedNoUndefs = 1;
 2369       }
 2370 
 2371       // Undefs are allowed, but defined elements must still be consecutive:
 2372       // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
 2373       // Verify this by storing the last non-undef followed by an undef
 2374       // Check that following non-undef masks are incremented with the
 2375       // corresponding distance.
 2376       if (SavedNoUndefs > 0 && LaneValue < 0) {
 2377         SavedNoUndefs++;
 2378         if (NextLaneValue >= 0 &&
 2379             SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
 2380           break;
 2381       }
 2382     }
 2383 
 2384     if (J < LaneLen - 1)
 2385       return false;
 2386 
 2387     int StartMask = 0;
 2388     if (Mask[I] >= 0) {
 2389       // Check that the start of the I range (J=0) is greater than 0
 2390       StartMask = Mask[I];
 2391     } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
 2392       // StartMask defined by the last value in lane
 2393       StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
 2394     } else if (SavedNoUndefs > 0) {
 2395       // StartMask defined by some non-zero value in the j loop
 2396       StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
 2397     }
 2398     // else StartMask remains set to 0, i.e. all elements are undefs
 2399 
 2400     if (StartMask < 0)
 2401       return false;
 2402     // We must stay within the vectors; This case can happen with undefs.
 2403     if (StartMask + LaneLen > NumInputElts)
 2404       return false;
 2405 
 2406     StartIndexes[I] = StartMask;
 2407   }
 2408 
 2409   return true;
 2410 }
2411
 2412/// Check if the mask is a DE-interleave mask of the given factor
 2413/// \p Factor like:
 2414///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, ...))
// was dropped in extraction.
 2416                                                unsigned Factor,
 2417                                                unsigned &Index) {
 2418   // Check all potential start indices from 0 to (Factor - 1).
 2419   for (unsigned Idx = 0; Idx < Factor; Idx++) {
 2420     unsigned I = 0;
 2421 
 2422     // Check that elements are in ascending order by Factor. Ignore undef
 2423     // elements.
 2424     for (; I < Mask.size(); I++)
 2425       if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
 2426         break;
 2427 
 2428     if (I == Mask.size()) {
 2429       Index = Idx;
 2430       return true;
 2431     }
 2432   }
 2433 
 2434   return false;
 2435 }
2436
2437/// Try to lower a vector shuffle as a bit rotation.
2438///
2439/// Look for a repeated rotation pattern in each sub group.
2440/// Returns an element-wise left bit rotation amount or -1 if failed.
2441static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2442 int NumElts = Mask.size();
2443 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2444
2445 int RotateAmt = -1;
2446 for (int i = 0; i != NumElts; i += NumSubElts) {
2447 for (int j = 0; j != NumSubElts; ++j) {
2448 int M = Mask[i + j];
2449 if (M < 0)
2450 continue;
2451 if (M < i || M >= i + NumSubElts)
2452 return -1;
2453 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2454 if (0 <= RotateAmt && Offset != RotateAmt)
2455 return -1;
2456 RotateAmt = Offset;
2457 }
2458 }
2459 return RotateAmt;
2460}
2461
// Search power-of-two subgroup sizes in [MinSubElts, MaxSubElts] for a shuffle
// that acts as a bit rotation; on success reports the subgroup size and the
// rotate amount scaled to bits.
// NOTE(review): the signature line (presumably
// ShuffleVectorInst::isBitRotateMask(...)) was dropped in extraction.
 2463     ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
 2464     unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
 2465   for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
 2466     int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
 2467     if (EltRotateAmt < 0)
 2468       continue;
 2469     RotateAmt = EltRotateAmt * EltSizeInBits;
 2470     return true;
 2471   }
 2472 
 2473   return false;
 2474 }
2475
2476//===----------------------------------------------------------------------===//
2477// InsertValueInst Class
2478//===----------------------------------------------------------------------===//
2479
// Shared constructor body: validates the aggregate/value/index combination,
// wires up the two operands, and records the index path.
 2480void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
 2481                            const Twine &Name) {
 2482   assert(getNumOperands() == 2 && "NumOperands not initialized?");
 2483 
 2484   // There's no fundamental reason why we require at least one index
 2485   // (other than weirdness with &*IdxBegin being invalid; see
 2486   // getelementptr's init routine for example). But there's no
 2487   // present need to support it.
 2488   assert(!Idxs.empty() && "InsertValueInst must have at least one index");
 2489 
// NOTE(review): a line was dropped here in extraction — presumably the start
// of the assert comparing ExtractValueInst::getIndexedType(Agg->getType(),
// Idxs) against Val->getType(); confirm against upstream sources.
 2491          Val->getType() && "Inserted value must match indexed type!");
 2492   Op<0>() = Agg;
 2493   Op<1>() = Val;
 2494 
 2495   Indices.append(Idxs.begin(), Idxs.end());
 2496   setName(Name);
 2497 }
2498
// Copy constructor: duplicates operands and the index path of IVI.
 2499InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
 2500     : Instruction(IVI.getType(), InsertValue, AllocMarker),
 2501       Indices(IVI.Indices) {
 2502   Op<0>() = IVI.getOperand(0);
 2503   Op<1>() = IVI.getOperand(1);
// NOTE(review): a line was dropped here in extraction — presumably the copy
// of SubclassOptionalData from IVI; confirm against upstream sources.
 2505 }
2506
2507//===----------------------------------------------------------------------===//
2508// ExtractValueInst Class
2509//===----------------------------------------------------------------------===//
2510
2511void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2512 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2513
2514 // There's no fundamental reason why we require at least one index.
2515 // But there's no present need to support it.
2516 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2517
2518 Indices.append(Idxs.begin(), Idxs.end());
2519 setName(Name);
2520}
2521
// Copy constructor: duplicates the aggregate operand and index path of EVI.
 2522ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
 2523     : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
 2524                        (BasicBlock *)nullptr),
 2525       Indices(EVI.Indices) {
// NOTE(review): a line was dropped here in extraction — presumably the copy
// of SubclassOptionalData from EVI; confirm against upstream sources.
 2527 }
2528
 2529// getIndexedType - Returns the type of the element that would be extracted
 2530// with an extractvalue instruction with the specified parameters.
 2531//
 2532// A null type is returned if the indices are invalid for the specified
 2533// pointer type.
 2534//
// NOTE(review): the signature line (presumably
// Type *ExtractValueInst::getIndexedType(Type *Agg,) was dropped in
// extraction.
 2536                                        ArrayRef<unsigned> Idxs) {
 2537   for (unsigned Index : Idxs) {
 2538     // We can't use CompositeType::indexValid(Index) here.
 2539     // indexValid() always returns true for arrays because getelementptr allows
 2540     // out-of-bounds indices. Since we don't allow those for extractvalue and
 2541     // insertvalue we need to check array indexing manually.
 2542     // Since the only other types we can index into are struct types it's just
 2543     // as easy to check those manually as well.
 2544     if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
 2545       if (Index >= AT->getNumElements())
 2546         return nullptr;
 2547       Agg = AT->getElementType();
 2548     } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
 2549       if (Index >= ST->getNumElements())
 2550         return nullptr;
 2551       Agg = ST->getElementType(Index);
 2552     } else {
 2553       // Not a valid type to index into.
 2554       return nullptr;
 2555     }
 2556   }
 2557   return const_cast<Type*>(Agg);
 2558 }
2559
2560//===----------------------------------------------------------------------===//
2561// UnaryOperator Class
2562//===----------------------------------------------------------------------===//
2563
// Constructor: wires up the single operand, names the result, and runs the
// debug-build type checks.
// NOTE(review): the signature line (presumably
// UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,) was
// dropped in extraction.
 2565                              const Twine &Name, InsertPosition InsertBefore)
 2566     : UnaryInstruction(Ty, iType, S, InsertBefore) {
 2567   Op<0>() = S;
 2568   setName(Name);
 2569   AssertOK();
 2570 }
2571
// Factory: the result type of a unary operator is the operand's type.
// NOTE(review): the signature line (presumably
// UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine
// &Name,) was dropped in extraction.
 2573                                      InsertPosition InsertBefore) {
 2574   return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
 2575 }
2576
2577void UnaryOperator::AssertOK() {
2578 Value *LHS = getOperand(0);
2579 (void)LHS; // Silence warnings.
2580#ifndef NDEBUG
2581 switch (getOpcode()) {
2582 case FNeg:
2583 assert(getType() == LHS->getType() &&
2584 "Unary operation should return same type as operand!");
2585 assert(getType()->isFPOrFPVectorTy() &&
2586 "Tried to create a floating-point operation on a "
2587 "non-floating-point type!");
2588 break;
2589 default: llvm_unreachable("Invalid opcode provided");
2590 }
2591#endif
2592}
2593
2594//===----------------------------------------------------------------------===//
2595// BinaryOperator Class
2596//===----------------------------------------------------------------------===//
2597
// Constructor: wires up both operands, names the result, and runs the
// debug-build type checks.
// NOTE(review): the signature line (presumably
// BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
// Type *Ty,) was dropped in extraction.
 2599                                const Twine &Name, InsertPosition InsertBefore)
 2600     : Instruction(Ty, iType, AllocMarker, InsertBefore) {
 2601   Op<0>() = S1;
 2602   Op<1>() = S2;
 2603   setName(Name);
 2604   AssertOK();
 2605 }
2606
// Debug-build sanity checks for a freshly constructed binary operator:
// operand types must match each other and the result, and the opcode's
// int-vs-float category must agree with the type. Compiles to (almost)
// nothing in NDEBUG builds.
 2607void BinaryOperator::AssertOK() {
 2608   Value *LHS = getOperand(0), *RHS = getOperand(1);
 2609   (void)LHS; (void)RHS; // Silence warnings.
 2610   assert(LHS->getType() == RHS->getType() &&
 2611          "Binary operator operand types must match!");
 2612 #ifndef NDEBUG
 2613   switch (getOpcode()) {
 2614   case Add: case Sub:
 2615   case Mul:
 2616     assert(getType() == LHS->getType() &&
 2617            "Arithmetic operation should return same type as operands!");
 2618     assert(getType()->isIntOrIntVectorTy() &&
 2619            "Tried to create an integer operation on a non-integer type!");
 2620     break;
 2621   case FAdd: case FSub:
 2622   case FMul:
 2623     assert(getType() == LHS->getType() &&
 2624            "Arithmetic operation should return same type as operands!");
 2625     assert(getType()->isFPOrFPVectorTy() &&
 2626            "Tried to create a floating-point operation on a "
 2627            "non-floating-point type!");
 2628     break;
 2629   case UDiv:
 2630   case SDiv:
 2631     assert(getType() == LHS->getType() &&
 2632            "Arithmetic operation should return same type as operands!");
 2633     assert(getType()->isIntOrIntVectorTy() &&
 2634            "Incorrect operand type (not integer) for S/UDIV");
 2635     break;
 2636   case FDiv:
 2637     assert(getType() == LHS->getType() &&
 2638            "Arithmetic operation should return same type as operands!");
 2639     assert(getType()->isFPOrFPVectorTy() &&
 2640            "Incorrect operand type (not floating point) for FDIV");
 2641     break;
 2642   case URem:
 2643   case SRem:
 2644     assert(getType() == LHS->getType() &&
 2645            "Arithmetic operation should return same type as operands!");
 2646     assert(getType()->isIntOrIntVectorTy() &&
 2647            "Incorrect operand type (not integer) for S/UREM");
 2648     break;
 2649   case FRem:
 2650     assert(getType() == LHS->getType() &&
 2651            "Arithmetic operation should return same type as operands!");
 2652     assert(getType()->isFPOrFPVectorTy() &&
 2653            "Incorrect operand type (not floating point) for FREM");
 2654     break;
 2655   case Shl:
 2656   case LShr:
 2657   case AShr:
 2658     assert(getType() == LHS->getType() &&
 2659            "Shift operation should return same type as operands!");
 2660     assert(getType()->isIntOrIntVectorTy() &&
 2661            "Tried to create a shift operation on a non-integral type!");
 2662     break;
 2663   case And: case Or:
 2664   case Xor:
 2665     assert(getType() == LHS->getType() &&
 2666            "Logical operation should return same type as operands!");
 2667     assert(getType()->isIntOrIntVectorTy() &&
 2668            "Tried to create a logical operation on a non-integral type!");
 2669     break;
 2670   default: llvm_unreachable("Invalid opcode provided");
 2671   }
 2672 #endif
 2673 }
2674
// Factory: both operands must have the same type, which is also the result
// type.
// NOTE(review): the signature line (presumably
// BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,)
// was dropped in extraction.
 2676                                        const Twine &Name,
 2677                                        InsertPosition InsertBefore) {
 2678   assert(S1->getType() == S2->getType() &&
 2679          "Cannot create binary operator with two operands of differing type!");
 2680   return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
 2681 }
2682
// Negation is materialized as (0 - Op).
// NOTE(review): the signature line (presumably
// BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,)
// was dropped in extraction.
 2684                                           InsertPosition InsertBefore) {
 2685   Value *Zero = ConstantInt::get(Op->getType(), 0);
 2686   return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
 2687                             InsertBefore);
 2688 }
2689
// No-signed-wrap negation, materialized as (0 -nsw Op).
// NOTE(review): the signature line (presumably
// BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,)
// was dropped in extraction.
 2691                                              InsertPosition InsertBefore) {
 2692   Value *Zero = ConstantInt::get(Op->getType(), 0);
 2693   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
 2694 }
2695
// Bitwise NOT is materialized as (Op xor -1).
// NOTE(review): the signature line (presumably
// BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,)
// was dropped in extraction.
 2697                                           InsertPosition InsertBefore) {
 2698   Constant *C = Constant::getAllOnesValue(Op->getType());
 2699   return new BinaryOperator(Instruction::Xor, Op, C,
 2700                             Op->getType(), Name, InsertBefore);
 2701 }
2702
 2703// Exchange the two operands to this instruction. This instruction is safe to
 2704// use on any binary instruction and does not modify the semantics of the
 2705// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
 2706// is changed.
// Returns true on failure (non-commutative opcode), false on success.
// NOTE(review): the signature line (presumably
// bool BinaryOperator::swapOperands() {) was dropped in extraction.
 2708   if (!isCommutative())
 2709     return true; // Can't commute operands
 2710   Op<0>().swap(Op<1>());
 2711   return false;
 2712 }
2713
2714//===----------------------------------------------------------------------===//
2715// FPMathOperator Class
2716//===----------------------------------------------------------------------===//
2717
// Read the required accuracy (in ULPs) from !fpmath metadata; 0.0 means no
// accuracy requirement was specified.
// NOTE(review): the signature line (presumably
// float FPMathOperator::getFPAccuracy() const {) was dropped in extraction.
 2719   const MDNode *MD =
 2720       cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
 2721   if (!MD)
 2722     return 0.0;
 2723   ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
 2724   return Accuracy->getValueAPF().convertToFloat();
 2725 }
2726
2727//===----------------------------------------------------------------------===//
2728// CastInst Class
2729//===----------------------------------------------------------------------===//
2730
 2731// Just determine if this cast only deals with integral->integral conversion.
// NOTE(review): the signature line (presumably
// bool CastInst::isIntegerCast() const {) was dropped in extraction.
 2733   switch (getOpcode()) {
 2734     default: return false;
 2735     case Instruction::ZExt:
 2736     case Instruction::SExt:
 2737     case Instruction::Trunc:
 2738       return true;
 2739     case Instruction::BitCast:
 2740       // A bitcast only counts when both sides are (scalar) integers.
 2741       return getOperand(0)->getType()->isIntegerTy() &&
 2742         getType()->isIntegerTy();
 2743   }
 2744 }
2744
 2745/// This function determines if the CastInst does not require any bits to be
 2746/// changed in order to effect the cast. Essentially, it identifies cases where
 2747/// no code gen is necessary for the cast, hence the name no-op cast.  For
 2748/// example, the following are all no-op casts:
 2749/// # bitcast i32* %x to i8*
 2750/// # bitcast <2 x i32> %x to <4 x i16>
 2751/// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
 2752/// Determine if the described cast is a no-op.
// NOTE(review): the signature line (presumably
// bool CastInst::isNoopCast(Instruction::CastOps Opcode,) was dropped in
// extraction.
 2754                           Type *SrcTy,
 2755                           Type *DestTy,
 2756                           const DataLayout &DL) {
 2757   assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
 2758   switch (Opcode) {
 2759     default: llvm_unreachable("Invalid CastOp");
 2760     case Instruction::Trunc:
 2761     case Instruction::ZExt:
 2762     case Instruction::SExt:
 2763     case Instruction::FPTrunc:
 2764     case Instruction::FPExt:
 2765     case Instruction::UIToFP:
 2766     case Instruction::SIToFP:
 2767     case Instruction::FPToUI:
 2768     case Instruction::FPToSI:
 2769     case Instruction::AddrSpaceCast:
 2770       // TODO: Target informations may give a more accurate answer here.
 2771       return false;
 2772     case Instruction::BitCast:
 2773       return true;  // BitCast never modifies bits.
 2774     case Instruction::PtrToInt:
 2775       // No-op only when the integer is exactly pointer-sized.
 2776       return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
 2777              DestTy->getScalarSizeInBits();
 2778     case Instruction::IntToPtr:
 2779       return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
 2780              SrcTy->getScalarSizeInBits();
 2781   }
 2782 }
2782
// Member convenience wrapper over the static isNoopCast overload above.
// NOTE(review): the signature line (presumably
// bool CastInst::isNoopCast(const DataLayout &DL) const {) was dropped in
// extraction.
 2784   return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
 2785 }
2786
 2787/// This function determines if a pair of casts can be eliminated and what
 2788/// opcode should be used in the elimination. This assumes that there are two
 2789/// instructions like this:
 2790/// *  %F = firstOpcode SrcTy %x to MidTy
 2791/// *  %S = secondOpcode MidTy %F to DstTy
 2792/// The function returns a resultOpcode so these two casts can be replaced with:
 2793/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
 2794/// If no such cast is permitted, the function returns 0.
// NOTE(review): the signature lines (presumably
// unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
// Instruction::CastOps secondOp, ...)) were dropped in extraction.
 2797     Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
 2798     Type *DstIntPtrTy) {
 2799   // Define the 144 possibilities for these two cast instructions. The values
 2800   // in this matrix determine what to do in a given situation and select the
 2801   // case in the switch below.  The rows correspond to firstOp, the columns
 2802   // correspond to secondOp.  In looking at the table below, keep in mind
 2803   // the following cast properties:
 2804   //
 2805   //          Size Compare       Source               Destination
 2806   // Operator  Src ? Size   Type       Sign         Type       Sign
 2807   // -------- ------------ -------------------   ---------------------
 2808   // TRUNC         >       Integer      Any        Integral     Any
 2809   // ZEXT          <       Integral   Unsigned     Integer      Any
 2810   // SEXT          <       Integral    Signed      Integer      Any
 2811   // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
 2812   // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
 2813   // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
 2814   // SITOFP       n/a      Integral    Signed      FloatPt      n/a
 2815   // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
 2816   // FPEXT         <       FloatPt      n/a        FloatPt      n/a
 2817   // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
 2818   // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
 2819   // BITCAST       =       FirstClass   n/a        FirstClass   n/a
 2820   // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
 2821   //
 2822   // NOTE: some transforms are safe, but we consider them to be non-profitable.
 2823   // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
 2824   // into "fptoui double to i64", but this loses information about the range
 2825   // of the produced value (we no longer know the top-part is all zeros).
 2826   // Further this conversion is often much more expensive for typical hardware,
 2827   // and causes issues when building libgcc.  We disallow fptosi+sext for the
 2828   // same reason.
 2829   const unsigned numCastOps =
 2830     Instruction::CastOpsEnd - Instruction::CastOpsBegin;
 2831   static const uint8_t CastResults[numCastOps][numCastOps] = {
 2832     // T        F  F  U  S  F  F  P  I  B  A  -+
 2833     // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
 2834     // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
 2835     // N  X  X  U  S  F  F  N  X  N  2  V  V   |
 2836     // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
 2837     {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
 2838     {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
 2839     {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
 2840     {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
 2841     {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
 2842     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
 2843     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
 2844     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
 2845     { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
 2846     {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
 2847     { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
 2848     {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast        |
 2849     {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
 2850   };
 2851 
 2852   // TODO: This logic could be encoded into the table above and handled in the
 2853   // switch below.
 2854   // If either of the casts are a bitcast from scalar to vector, disallow the
 2855   // merging. However, any pair of bitcasts are allowed.
 2856   bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
 2857   bool IsSecondBitcast = (secondOp == Instruction::BitCast);
 2858   bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
 2859 
 2860   // Check if any of the casts convert scalars <-> vectors.
 2861   if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
 2862       (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
 2863     if (!AreBothBitcasts)
 2864       return 0;
 2865 
 2866   int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
 2867                             [secondOp-Instruction::CastOpsBegin];
 2868   switch (ElimCase) {
 2869     case 0:
 2870       // Categorically disallowed.
 2871       return 0;
 2872     case 1:
 2873       // Allowed, use first cast's opcode.
 2874       return firstOp;
 2875     case 2:
 2876       // Allowed, use second cast's opcode.
 2877       return secondOp;
 2878     case 3:
 2879       // No-op cast in second op implies firstOp as long as the DestTy
 2880       // is integer and we are not converting between a vector and a
 2881       // non-vector type.
 2882       if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
 2883         return firstOp;
 2884       return 0;
 2885     case 4:
 2886       // No-op cast in second op implies firstOp as long as the DestTy
 2887       // matches MidTy.
 2888       if (DstTy == MidTy)
 2889         return firstOp;
 2890       return 0;
 2891     case 5:
 2892       // No-op cast in first op implies secondOp as long as the SrcTy
 2893       // is an integer.
 2894       if (SrcTy->isIntegerTy())
 2895         return secondOp;
 2896       return 0;
 2897     case 7: {
 2898       // Disable inttoptr/ptrtoint optimization if enabled.
 2899       if (DisableI2pP2iOpt)
 2900         return 0;
 2901 
 2902       // Cannot simplify if address spaces are different!
 2903       if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
 2904         return 0;
 2905 
 2906       unsigned MidSize = MidTy->getScalarSizeInBits();
 2907       // We can still fold this without knowing the actual sizes as long we
 2908       // know that the intermediate pointer is the largest possible
 2909       // pointer size.
 2910       // FIXME: Is this always true?
 2911       if (MidSize == 64)
 2912         return Instruction::BitCast;
 2913 
 2914       // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
 2915       if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
 2916         return 0;
 2917       unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
 2918       if (MidSize >= PtrSize)
 2919         return Instruction::BitCast;
 2920       return 0;
 2921     }
 2922     case 8: {
 2923       // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
 2923       // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
 2925       // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
 2926       unsigned SrcSize = SrcTy->getScalarSizeInBits();
 2927       unsigned DstSize = DstTy->getScalarSizeInBits();
 2928       if (SrcTy == DstTy)
 2929         return Instruction::BitCast;
 2930       if (SrcSize < DstSize)
 2931         return firstOp;
 2932       if (SrcSize > DstSize)
 2933         return secondOp;
 2934       return 0;
 2935     }
 2936     case 9:
 2937       // zext, sext -> zext, because sext can't sign extend after zext
 2938       return Instruction::ZExt;
 2939     case 11: {
 2940       // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
 2941       if (!MidIntPtrTy)
 2942         return 0;
 2943       unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
 2944       unsigned SrcSize = SrcTy->getScalarSizeInBits();
 2945       unsigned DstSize = DstTy->getScalarSizeInBits();
 2946       if (SrcSize <= PtrSize && SrcSize == DstSize)
 2947         return Instruction::BitCast;
 2948       return 0;
 2949     }
 2950     case 12:
 2951       // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
 2952       // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
 2953       if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
 2954         return Instruction::AddrSpaceCast;
 2955       return Instruction::BitCast;
 2956     case 13:
 2957       // FIXME: this state can be merged with (1), but the following assert
 2958       // is useful to check the correcteness of the sequence due to semantic
 2959       // change of bitcast.
 2960       assert(
 2961         SrcTy->isPtrOrPtrVectorTy() &&
 2962         MidTy->isPtrOrPtrVectorTy() &&
 2963         DstTy->isPtrOrPtrVectorTy() &&
 2964         SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
 2965         MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
 2966         "Illegal addrspacecast, bitcast sequence!");
 2967       // Allowed, use first cast's opcode
 2968       return firstOp;
 2969     case 14:
 2970       // bitcast, addrspacecast -> addrspacecast
 2971       return Instruction::AddrSpaceCast;
 2972     case 15:
 2973       // FIXME: this state can be merged with (1), but the following assert
 2974       // is useful to check the correcteness of the sequence due to semantic
 2975       // change of bitcast.
 2976       assert(
 2977         SrcTy->isIntOrIntVectorTy() &&
 2978         MidTy->isPtrOrPtrVectorTy() &&
 2979         DstTy->isPtrOrPtrVectorTy() &&
 2980         MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
 2981         "Illegal inttoptr, bitcast sequence!");
 2982       // Allowed, use first cast's opcode
 2983       return firstOp;
 2984     case 16:
 2985       // FIXME: this state can be merged with (2), but the following assert
 2986       // is useful to check the correcteness of the sequence due to semantic
 2987       // change of bitcast.
 2988       assert(
 2989         SrcTy->isPtrOrPtrVectorTy() &&
 2990         MidTy->isPtrOrPtrVectorTy() &&
 2991         DstTy->isIntOrIntVectorTy() &&
 2992         SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
 2993         "Illegal bitcast, ptrtoint sequence!");
 2994       // Allowed, use second cast's opcode
 2995       return secondOp;
 2996     case 17:
 2997       // (sitofp (zext x)) -> (uitofp x)
 2998       return Instruction::UIToFP;
 2999     case 99:
 3000       // Cast combination can't happen (error in input). This is for all cases
 3001       // where the MidTy is not the same for the two cast instructions.
 3002       llvm_unreachable("Invalid Cast Combination");
 3003     default:
 3004       llvm_unreachable("Error in CastResults table!!!");
 3005   }
 3006 }
3007
3009 const Twine &Name, InsertPosition InsertBefore) {
3010 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3011 // Construct and return the appropriate CastInst subclass
3012 switch (op) {
3013 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3014 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3015 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3016 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3017 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3018 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3019 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3020 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3021 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3022 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3023 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3024 case BitCast:
3025 return new BitCastInst(S, Ty, Name, InsertBefore);
3026 case AddrSpaceCast:
3027 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3028 default:
3029 llvm_unreachable("Invalid opcode provided");
3030 }
3031}
3032
3034 InsertPosition InsertBefore) {
3035 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3036 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3037 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3038}
3039
3041 InsertPosition InsertBefore) {
3042 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3043 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3044 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3045}
3046
3048 InsertPosition InsertBefore) {
3049 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3050 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3051 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3052}
3053
3054/// Create a BitCast or a PtrToInt cast instruction
3056 InsertPosition InsertBefore) {
3057 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3058 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3059 "Invalid cast");
3060 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3061 assert((!Ty->isVectorTy() ||
3062 cast<VectorType>(Ty)->getElementCount() ==
3063 cast<VectorType>(S->getType())->getElementCount()) &&
3064 "Invalid cast");
3065
3066 if (Ty->isIntOrIntVectorTy())
3067 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3068
3069 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3070}
3071
3073 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3074 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3075 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3076
3078 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3079
3080 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3081}
3082
3084 const Twine &Name,
3085 InsertPosition InsertBefore) {
3086 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3087 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3088 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3089 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3090
3091 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3092}
3093
3095 const Twine &Name,
3096 InsertPosition InsertBefore) {
3097 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3098 "Invalid integer cast");
3099 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3100 unsigned DstBits = Ty->getScalarSizeInBits();
3101 Instruction::CastOps opcode =
3102 (SrcBits == DstBits ? Instruction::BitCast :
3103 (SrcBits > DstBits ? Instruction::Trunc :
3104 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3105 return Create(opcode, C, Ty, Name, InsertBefore);
3106}
3107
3109 InsertPosition InsertBefore) {
3110 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3111 "Invalid cast");
3112 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3113 unsigned DstBits = Ty->getScalarSizeInBits();
3114 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3115 Instruction::CastOps opcode =
3116 (SrcBits == DstBits ? Instruction::BitCast :
3117 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3118 return Create(opcode, C, Ty, Name, InsertBefore);
3119}
3120
3121bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3122 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3123 return false;
3124
3125 if (SrcTy == DestTy)
3126 return true;
3127
3128 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3129 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3130 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3131 // An element by element cast. Valid if casting the elements is valid.
3132 SrcTy = SrcVecTy->getElementType();
3133 DestTy = DestVecTy->getElementType();
3134 }
3135 }
3136 }
3137
3138 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3139 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3140 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3141 }
3142 }
3143
3144 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3145 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3146
3147 // Could still have vectors of pointers if the number of elements doesn't
3148 // match
3149 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3150 return false;
3151
3152 if (SrcBits != DestBits)
3153 return false;
3154
3155 return true;
3156}
3157
3159 const DataLayout &DL) {
3160 // ptrtoint and inttoptr are not allowed on non-integral pointers
3161 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3162 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3163 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3164 !DL.isNonIntegralPointerType(PtrTy));
3165 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3166 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3167 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3168 !DL.isNonIntegralPointerType(PtrTy));
3169
3170 return isBitCastable(SrcTy, DestTy);
3171}
3172
3173// Provide a way to get a "cast" where the cast opcode is inferred from the
3174// types and size of the operand. This, basically, is a parallel of the
3175// logic in the castIsValid function below. This axiom should hold:
3176// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3177// should not assert in castIsValid. In other words, this produces a "correct"
3178// casting opcode for the arguments passed to it.
3181 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3182 Type *SrcTy = Src->getType();
3183
3184 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3185 "Only first class types are castable!");
3186
3187 if (SrcTy == DestTy)
3188 return BitCast;
3189
3190 // FIXME: Check address space sizes here
3191 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3192 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3193 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3194 // An element by element cast. Find the appropriate opcode based on the
3195 // element types.
3196 SrcTy = SrcVecTy->getElementType();
3197 DestTy = DestVecTy->getElementType();
3198 }
3199
3200 // Get the bit sizes, we'll need these
3201 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3202 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3203
3204 // Run through the possibilities ...
3205 if (DestTy->isIntegerTy()) { // Casting to integral
3206 if (SrcTy->isIntegerTy()) { // Casting from integral
3207 if (DestBits < SrcBits)
3208 return Trunc; // int -> smaller int
3209 else if (DestBits > SrcBits) { // its an extension
3210 if (SrcIsSigned)
3211 return SExt; // signed -> SEXT
3212 else
3213 return ZExt; // unsigned -> ZEXT
3214 } else {
3215 return BitCast; // Same size, No-op cast
3216 }
3217 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3218 if (DestIsSigned)
3219 return FPToSI; // FP -> sint
3220 else
3221 return FPToUI; // FP -> uint
3222 } else if (SrcTy->isVectorTy()) {
3223 assert(DestBits == SrcBits &&
3224 "Casting vector to integer of different width");
3225 return BitCast; // Same size, no-op cast
3226 } else {
3227 assert(SrcTy->isPointerTy() &&
3228 "Casting from a value that is not first-class type");
3229 return PtrToInt; // ptr -> int
3230 }
3231 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3232 if (SrcTy->isIntegerTy()) { // Casting from integral
3233 if (SrcIsSigned)
3234 return SIToFP; // sint -> FP
3235 else
3236 return UIToFP; // uint -> FP
3237 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3238 if (DestBits < SrcBits) {
3239 return FPTrunc; // FP -> smaller FP
3240 } else if (DestBits > SrcBits) {
3241 return FPExt; // FP -> larger FP
3242 } else {
3243 return BitCast; // same size, no-op cast
3244 }
3245 } else if (SrcTy->isVectorTy()) {
3246 assert(DestBits == SrcBits &&
3247 "Casting vector to floating point of different width");
3248 return BitCast; // same size, no-op cast
3249 }
3250 llvm_unreachable("Casting pointer or non-first class to float");
3251 } else if (DestTy->isVectorTy()) {
3252 assert(DestBits == SrcBits &&
3253 "Illegal cast to vector (wrong type or size)");
3254 return BitCast;
3255 } else if (DestTy->isPointerTy()) {
3256 if (SrcTy->isPointerTy()) {
3257 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3258 return AddrSpaceCast;
3259 return BitCast; // ptr -> ptr
3260 } else if (SrcTy->isIntegerTy()) {
3261 return IntToPtr; // int -> ptr
3262 }
3263 llvm_unreachable("Casting pointer to other than pointer or int");
3264 }
3265 llvm_unreachable("Casting to type that is not first-class");
3266}
3267
3268//===----------------------------------------------------------------------===//
3269// CastInst SubClass Constructors
3270//===----------------------------------------------------------------------===//
3271
3272/// Check that the construction parameters for a CastInst are correct. This
3273/// could be broken out into the separate constructors but it is useful to have
3274/// it in one place and to eliminate the redundant code for getting the sizes
3275/// of the types involved.
3276bool
3278 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3279 SrcTy->isAggregateType() || DstTy->isAggregateType())
3280 return false;
3281
3282 // Get the size of the types in bits, and whether we are dealing
3283 // with vector types, we'll need this later.
3284 bool SrcIsVec = isa<VectorType>(SrcTy);
3285 bool DstIsVec = isa<VectorType>(DstTy);
3286 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3287 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3288
3289 // If these are vector types, get the lengths of the vectors (using zero for
3290 // scalar types means that checking that vector lengths match also checks that
3291 // scalars are not being converted to vectors or vectors to scalars).
3292 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3294 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3296
3297 // Switch on the opcode provided
3298 switch (op) {
3299 default: return false; // This is an input error
3300 case Instruction::Trunc:
3301 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3302 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3303 case Instruction::ZExt:
3304 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3305 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3306 case Instruction::SExt:
3307 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3308 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3309 case Instruction::FPTrunc:
3310 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3311 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3312 case Instruction::FPExt:
3313 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3314 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3315 case Instruction::UIToFP:
3316 case Instruction::SIToFP:
3317 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3318 SrcEC == DstEC;
3319 case Instruction::FPToUI:
3320 case Instruction::FPToSI:
3321 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3322 SrcEC == DstEC;
3323 case Instruction::PtrToInt:
3324 if (SrcEC != DstEC)
3325 return false;
3326 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3327 case Instruction::IntToPtr:
3328 if (SrcEC != DstEC)
3329 return false;
3330 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3331 case Instruction::BitCast: {
3332 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3333 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3334
3335 // BitCast implies a no-op cast of type only. No bits change.
3336 // However, you can't cast pointers to anything but pointers.
3337 if (!SrcPtrTy != !DstPtrTy)
3338 return false;
3339
3340 // For non-pointer cases, the cast is okay if the source and destination bit
3341 // widths are identical.
3342 if (!SrcPtrTy)
3343 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3344
3345 // If both are pointers then the address spaces must match.
3346 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3347 return false;
3348
3349 // A vector of pointers must have the same number of elements.
3350 if (SrcIsVec && DstIsVec)
3351 return SrcEC == DstEC;
3352 if (SrcIsVec)
3353 return SrcEC == ElementCount::getFixed(1);
3354 if (DstIsVec)
3355 return DstEC == ElementCount::getFixed(1);
3356
3357 return true;
3358 }
3359 case Instruction::AddrSpaceCast: {
3360 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3361 if (!SrcPtrTy)
3362 return false;
3363
3364 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3365 if (!DstPtrTy)
3366 return false;
3367
3368 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3369 return false;
3370
3371 return SrcEC == DstEC;
3372 }
3373 }
3374}
3375
3377 InsertPosition InsertBefore)
3378 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3379 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3380}
3381
3383 InsertPosition InsertBefore)
3384 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3385 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3386}
3387
3389 InsertPosition InsertBefore)
3390 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3391 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3392}
3393
3395 InsertPosition InsertBefore)
3396 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3397 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3398}
3399
3401 InsertPosition InsertBefore)
3402 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3403 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3404}
3405
3407 InsertPosition InsertBefore)
3408 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3409 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3410}
3411
3413 InsertPosition InsertBefore)
3414 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3415 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3416}
3417
3419 InsertPosition InsertBefore)
3420 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3421 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3422}
3423
3425 InsertPosition InsertBefore)
3426 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3427 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3428}
3429
3431 InsertPosition InsertBefore)
3432 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3433 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3434}
3435
3437 InsertPosition InsertBefore)
3438 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3439 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3440}
3441
3443 InsertPosition InsertBefore)
3444 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3445 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3446}
3447
3449 InsertPosition InsertBefore)
3450 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3451 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3452}
3453
3454//===----------------------------------------------------------------------===//
3455// CmpInst Classes
3456//===----------------------------------------------------------------------===//
3457
3459 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3460 Instruction *FlagsSource)
3461 : Instruction(ty, op, AllocMarker, InsertBefore) {
3462 Op<0>() = LHS;
3463 Op<1>() = RHS;
3464 setPredicate((Predicate)predicate);
3465 setName(Name);
3466 if (FlagsSource)
3467 copyIRFlags(FlagsSource);
3468}
3469
3471 const Twine &Name, InsertPosition InsertBefore) {
3472 if (Op == Instruction::ICmp) {
3473 if (InsertBefore.isValid())
3474 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3475 S1, S2, Name);
3476 else
3477 return new ICmpInst(CmpInst::Predicate(predicate),
3478 S1, S2, Name);
3479 }
3480
3481 if (InsertBefore.isValid())
3482 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3483 S1, S2, Name);
3484 else
3485 return new FCmpInst(CmpInst::Predicate(predicate),
3486 S1, S2, Name);
3487}
3488
3490 Value *S2,
3491 const Instruction *FlagsSource,
3492 const Twine &Name,
3493 InsertPosition InsertBefore) {
3494 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3495 Inst->copyIRFlags(FlagsSource);
3496 return Inst;
3497}
3498
3500 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3501 IC->swapOperands();
3502 else
3503 cast<FCmpInst>(this)->swapOperands();
3504}
3505
3507 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3508 return IC->isCommutative();
3509 return cast<FCmpInst>(this)->isCommutative();
3510}
3511
3514 return ICmpInst::isEquality(P);
3516 return FCmpInst::isEquality(P);
3517 llvm_unreachable("Unsupported predicate kind");
3518}
3519
3520// Returns true if either operand of CmpInst is a provably non-zero
3521// floating-point constant.
3522static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3523 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3524 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3525 if (auto *Const = LHS ? LHS : RHS) {
3526 using namespace llvm::PatternMatch;
3527 return match(Const, m_NonZeroNotDenormalFP());
3528 }
3529 return false;
3530}
3531
3532// Floating-point equality is not an equivalence when comparing +0.0 with
3533// -0.0, when comparing NaN with another value, or when flushing
3534// denormals-to-zero.
3535bool CmpInst::isEquivalence(bool Invert) const {
3536 switch (Invert ? getInversePredicate() : getPredicate()) {
3538 return true;
3540 if (!hasNoNaNs())
3541 return false;
3542 [[fallthrough]];
3544 return hasNonZeroFPOperands(this);
3545 default:
3546 return false;
3547 }
3548}
3549
3551 switch (pred) {
3552 default: llvm_unreachable("Unknown cmp predicate!");
3553 case ICMP_EQ: return ICMP_NE;
3554 case ICMP_NE: return ICMP_EQ;
3555 case ICMP_UGT: return ICMP_ULE;
3556 case ICMP_ULT: return ICMP_UGE;
3557 case ICMP_UGE: return ICMP_ULT;
3558 case ICMP_ULE: return ICMP_UGT;
3559 case ICMP_SGT: return ICMP_SLE;
3560 case ICMP_SLT: return ICMP_SGE;
3561 case ICMP_SGE: return ICMP_SLT;
3562 case ICMP_SLE: return ICMP_SGT;
3563
3564 case FCMP_OEQ: return FCMP_UNE;
3565 case FCMP_ONE: return FCMP_UEQ;
3566 case FCMP_OGT: return FCMP_ULE;
3567 case FCMP_OLT: return FCMP_UGE;
3568 case FCMP_OGE: return FCMP_ULT;
3569 case FCMP_OLE: return FCMP_UGT;
3570 case FCMP_UEQ: return FCMP_ONE;
3571 case FCMP_UNE: return FCMP_OEQ;
3572 case FCMP_UGT: return FCMP_OLE;
3573 case FCMP_ULT: return FCMP_OGE;
3574 case FCMP_UGE: return FCMP_OLT;
3575 case FCMP_ULE: return FCMP_OGT;
3576 case FCMP_ORD: return FCMP_UNO;
3577 case FCMP_UNO: return FCMP_ORD;
3578 case FCMP_TRUE: return FCMP_FALSE;
3579 case FCMP_FALSE: return FCMP_TRUE;
3580 }
3581}
3582
3584 switch (Pred) {
3585 default: return "unknown";
3586 case FCmpInst::FCMP_FALSE: return "false";
3587 case FCmpInst::FCMP_OEQ: return "oeq";
3588 case FCmpInst::FCMP_OGT: return "ogt";
3589 case FCmpInst::FCMP_OGE: return "oge";
3590 case FCmpInst::FCMP_OLT: return "olt";
3591 case FCmpInst::FCMP_OLE: return "ole";
3592 case FCmpInst::FCMP_ONE: return "one";
3593 case FCmpInst::FCMP_ORD: return "ord";
3594 case FCmpInst::FCMP_UNO: return "uno";
3595 case FCmpInst::FCMP_UEQ: return "ueq";
3596 case FCmpInst::FCMP_UGT: return "ugt";
3597 case FCmpInst::FCMP_UGE: return "uge";
3598 case FCmpInst::FCMP_ULT: return "ult";
3599 case FCmpInst::FCMP_ULE: return "ule";
3600 case FCmpInst::FCMP_UNE: return "une";
3601 case FCmpInst::FCMP_TRUE: return "true";
3602 case ICmpInst::ICMP_EQ: return "eq";
3603 case ICmpInst::ICMP_NE: return "ne";
3604 case ICmpInst::ICMP_SGT: return "sgt";
3605 case ICmpInst::ICMP_SGE: return "sge";
3606 case ICmpInst::ICMP_SLT: return "slt";
3607 case ICmpInst::ICMP_SLE: return "sle";
3608 case ICmpInst::ICMP_UGT: return "ugt";
3609 case ICmpInst::ICMP_UGE: return "uge";
3610 case ICmpInst::ICMP_ULT: return "ult";
3611 case ICmpInst::ICMP_ULE: return "ule";
3612 }
3613}
3614
3617 return OS;
3618}
3619
3621 switch (pred) {
3622 default: llvm_unreachable("Unknown icmp predicate!");
3623 case ICMP_EQ: case ICMP_NE:
3624 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3625 return pred;
3626 case ICMP_UGT: return ICMP_SGT;
3627 case ICMP_ULT: return ICMP_SLT;
3628 case ICMP_UGE: return ICMP_SGE;
3629 case ICMP_ULE: return ICMP_SLE;
3630 }
3631}
3632
3634 switch (pred) {
3635 default: llvm_unreachable("Unknown icmp predicate!");
3636 case ICMP_EQ: case ICMP_NE:
3637 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3638 return pred;
3639 case ICMP_SGT: return ICMP_UGT;
3640 case ICMP_SLT: return ICMP_ULT;
3641 case ICMP_SGE: return ICMP_UGE;
3642 case ICMP_SLE: return ICMP_ULE;
3643 }
3644}
3645
3647 switch (pred) {
3648 default: llvm_unreachable("Unknown cmp predicate!");
3649 case ICMP_EQ: case ICMP_NE:
3650 return pred;
3651 case ICMP_SGT: return ICMP_SLT;
3652 case ICMP_SLT: return ICMP_SGT;
3653 case ICMP_SGE: return ICMP_SLE;
3654 case ICMP_SLE: return ICMP_SGE;
3655 case ICMP_UGT: return ICMP_ULT;
3656 case ICMP_ULT: return ICMP_UGT;
3657 case ICMP_UGE: return ICMP_ULE;
3658 case ICMP_ULE: return ICMP_UGE;
3659
3660 case FCMP_FALSE: case FCMP_TRUE:
3661 case FCMP_OEQ: case FCMP_ONE:
3662 case FCMP_UEQ: case FCMP_UNE:
3663 case FCMP_ORD: case FCMP_UNO:
3664 return pred;
3665 case FCMP_OGT: return FCMP_OLT;
3666 case FCMP_OLT: return FCMP_OGT;
3667 case FCMP_OGE: return FCMP_OLE;
3668 case FCMP_OLE: return FCMP_OGE;
3669 case FCMP_UGT: return FCMP_ULT;
3670 case FCMP_ULT: return FCMP_UGT;
3671 case FCMP_UGE: return FCMP_ULE;
3672 case FCMP_ULE: return FCMP_UGE;
3673 }
3674}
3675
3677 switch (pred) {
3678 case ICMP_SGE:
3679 case ICMP_SLE:
3680 case ICMP_UGE:
3681 case ICMP_ULE:
3682 case FCMP_OGE:
3683 case FCMP_OLE:
3684 case FCMP_UGE:
3685 case FCMP_ULE:
3686 return true;
3687 default:
3688 return false;
3689 }
3690}
3691
3693 switch (pred) {
3694 case ICMP_SGT:
3695 case ICMP_SLT:
3696 case ICMP_UGT:
3697 case ICMP_ULT:
3698 case FCMP_OGT:
3699 case FCMP_OLT:
3700 case FCMP_UGT:
3701 case FCMP_ULT:
3702 return true;
3703 default:
3704 return false;
3705 }
3706}
3707
3709 switch (pred) {
3710 case ICMP_SGE:
3711 return ICMP_SGT;
3712 case ICMP_SLE:
3713 return ICMP_SLT;
3714 case ICMP_UGE:
3715 return ICMP_UGT;
3716 case ICMP_ULE:
3717 return ICMP_ULT;
3718 case FCMP_OGE:
3719 return FCMP_OGT;
3720 case FCMP_OLE:
3721 return FCMP_OLT;
3722 case FCMP_UGE:
3723 return FCMP_UGT;
3724 case FCMP_ULE:
3725 return FCMP_ULT;
3726 default:
3727 return pred;
3728 }
3729}
3730
3732 switch (pred) {
3733 case ICMP_SGT:
3734 return ICMP_SGE;
3735 case ICMP_SLT:
3736 return ICMP_SLE;
3737 case ICMP_UGT:
3738 return ICMP_UGE;
3739 case ICMP_ULT:
3740 return ICMP_ULE;
3741 case FCMP_OGT:
3742 return FCMP_OGE;
3743 case FCMP_OLT:
3744 return FCMP_OLE;
3745 case FCMP_UGT:
3746 return FCMP_UGE;
3747 case FCMP_ULT:
3748 return FCMP_ULE;
3749 default:
3750 return pred;
3751 }
3752}
3753
3755 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3756
3760 return getStrictPredicate(pred);
3761
3762 llvm_unreachable("Unknown predicate!");
3763}
3764
3766 switch (predicate) {
3767 default: return false;
3769 case ICmpInst::ICMP_UGE: return true;
3770 }
3771}
3772
3774 switch (predicate) {
3775 default: return false;
3777 case ICmpInst::ICMP_SGE: return true;
3778 }
3779}
3780
3781bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3782 ICmpInst::Predicate Pred) {
3783 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3784 switch (Pred) {
3786 return LHS.eq(RHS);
3788 return LHS.ne(RHS);
3790 return LHS.ugt(RHS);
3792 return LHS.uge(RHS);
3794 return LHS.ult(RHS);
3796 return LHS.ule(RHS);
3798 return LHS.sgt(RHS);
3800 return LHS.sge(RHS);
3802 return LHS.slt(RHS);
3804 return LHS.sle(RHS);
3805 default:
3806 llvm_unreachable("Unexpected non-integer predicate.");
3807 };
3808}
3809
3810bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3811 FCmpInst::Predicate Pred) {
3812 APFloat::cmpResult R = LHS.compare(RHS);
3813 switch (Pred) {
3814 default:
3815 llvm_unreachable("Invalid FCmp Predicate");
3817 return false;
3819 return true;
3820 case FCmpInst::FCMP_UNO:
3821 return R == APFloat::cmpUnordered;
3822 case FCmpInst::FCMP_ORD:
3823 return R != APFloat::cmpUnordered;
3824 case FCmpInst::FCMP_UEQ:
3825 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3826 case FCmpInst::FCMP_OEQ:
3827 return R == APFloat::cmpEqual;
3828 case FCmpInst::FCMP_UNE:
3829 return R != APFloat::cmpEqual;
3830 case FCmpInst::FCMP_ONE:
3832 case FCmpInst::FCMP_ULT:
3833 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3834 case FCmpInst::FCMP_OLT:
3835 return R == APFloat::cmpLessThan;
3836 case FCmpInst::FCMP_UGT:
3838 case FCmpInst::FCMP_OGT:
3839 return R == APFloat::cmpGreaterThan;
3840 case FCmpInst::FCMP_ULE:
3841 return R != APFloat::cmpGreaterThan;
3842 case FCmpInst::FCMP_OLE:
3843 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3844 case FCmpInst::FCMP_UGE:
3845 return R != APFloat::cmpLessThan;
3846 case FCmpInst::FCMP_OGE:
3847 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3848 }
3849}
3850
3851std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3852 const KnownBits &RHS,
3853 ICmpInst::Predicate Pred) {
3854 switch (Pred) {
3855 case ICmpInst::ICMP_EQ:
3856 return KnownBits::eq(LHS, RHS);
3857 case ICmpInst::ICMP_NE:
3858 return KnownBits::ne(LHS, RHS);
3859 case ICmpInst::ICMP_UGE:
3860 return KnownBits::uge(LHS, RHS);
3861 case ICmpInst::ICMP_UGT:
3862 return KnownBits::ugt(LHS, RHS);
3863 case ICmpInst::ICMP_ULE:
3864 return KnownBits::ule(LHS, RHS);
3865 case ICmpInst::ICMP_ULT:
3866 return KnownBits::ult(LHS, RHS);
3867 case ICmpInst::ICMP_SGE:
3868 return KnownBits::sge(LHS, RHS);
3869 case ICmpInst::ICMP_SGT:
3870 return KnownBits::sgt(LHS, RHS);
3871 case ICmpInst::ICMP_SLE:
3872 return KnownBits::sle(LHS, RHS);
3873 case ICmpInst::ICMP_SLT:
3874 return KnownBits::slt(LHS, RHS);
3875 default:
3876 llvm_unreachable("Unexpected non-integer predicate.");
3877 }
3878}
3879
3882 return pred;
3883 if (isSigned(pred))
3884 return getUnsignedPredicate(pred);
3885 if (isUnsigned(pred))
3886 return getSignedPredicate(pred);
3887
3888 llvm_unreachable("Unknown predicate!");
3889}
3890
3892 switch (predicate) {
3893 default: return false;
3896 case FCmpInst::FCMP_ORD: return true;
3897 }
3898}
3899
3901 switch (predicate) {
3902 default: return false;
3905 case FCmpInst::FCMP_UNO: return true;
3906 }
3907}
3908
3910 switch(predicate) {
3911 default: return false;
3912 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3913 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3914 }
3915}
3916
3918 switch(predicate) {
3919 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3920 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3921 default: return false;
3922 }
3923}
3924
3926 // If the predicates match, then we know the first condition implies the
3927 // second is true.
3928 if (CmpPredicate::getMatching(Pred1, Pred2))
3929 return true;
3930
3931 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3933 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3935
3936 switch (Pred1) {
3937 default:
3938 break;
3939 case CmpInst::ICMP_EQ:
3940 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3941 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3942 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3943 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3944 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3945 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3946 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3947 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3948 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3949 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3950 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3951 }
3952 return false;
3953}
3954
3956 CmpPredicate Pred2) {
3957 return isImpliedTrueByMatchingCmp(Pred1,
3959}
3960
3962 CmpPredicate Pred2) {
3963 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
3964 return true;
3965 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
3966 return false;
3967 return std::nullopt;
3968}
3969
3970//===----------------------------------------------------------------------===//
3971// CmpPredicate Implementation
3972//===----------------------------------------------------------------------===//
3973
3974std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
3975 CmpPredicate B) {
3976 if (A.Pred == B.Pred)
3977 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
3979 return {};
3980 if (A.HasSameSign &&
3982 return B.Pred;
3983 if (B.HasSameSign &&
3985 return A.Pred;
3986 return {};
3987}
3988
3990 return HasSameSign ? ICmpInst::getSignedPredicate(Pred) : Pred;
3991}
3992
3994 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
3995 return ICI->getCmpPredicate();
3996 return Cmp->getPredicate();
3997}
3998
4000 return {CmpInst::getSwappedPredicate(P), P.hasSameSign()};
4001}
4002
4004 return getSwapped(get(Cmp));
4005}
4006
4007//===----------------------------------------------------------------------===//
4008// SwitchInst Implementation
4009//===----------------------------------------------------------------------===//
4010
4011void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4012 assert(Value && Default && NumReserved);
4013 ReservedSpace = NumReserved;
4015 allocHungoffUses(ReservedSpace);
4016
4017 Op<0>() = Value;
4018 Op<1>() = Default;
4019}
4020
4021/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4022/// switch on and a default destination. The number of additional cases can
4023/// be specified here to make memory allocation more efficient. This
4024/// constructor can also autoinsert before another instruction.
4025SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4026 InsertPosition InsertBefore)
4027 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4028 AllocMarker, InsertBefore) {
4029 init(Value, Default, 2+NumCases*2);
4030}
4031
4032SwitchInst::SwitchInst(const SwitchInst &SI)
4033 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4034 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4035 setNumHungOffUseOperands(SI.getNumOperands());
4036 Use *OL = getOperandList();
4037 const Use *InOL = SI.getOperandList();
4038 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4039 OL[i] = InOL[i];
4040 OL[i+1] = InOL[i+1];
4041 }
4042 SubclassOptionalData = SI.SubclassOptionalData;
4043}
4044
4045/// addCase - Add an entry to the switch instruction...
4046///
// NOTE(review): the signature line (original 4047) and the hung-off operand
// count bump (original 4054) are elided in this listing.
4048 unsigned NewCaseIdx = getNumCases();
4049 unsigned OpNo = getNumOperands();
4050 if (OpNo+2 > ReservedSpace)
4051 growOperands(); // Get more space!
4052 // Initialize some new operands.
4053 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
 // Write the new (value, successor) pair through a CaseHandle view.
4055 CaseHandle Case(this, NewCaseIdx);
4056 Case.setValue(OnVal);
4057 Case.setSuccessor(Dest);
4058}
4059
4060/// removeCase - This method removes the specified case and its successor
4061/// from the switch instruction.
// NOTE(review): the signature line (original 4062) is elided in this listing.
// Removal is swap-with-last: case order is NOT preserved.  The returned
// iterator points at the slot that now holds the case moved into the gap
// (or at end() if the last case was removed).
4063 unsigned idx = I->getCaseIndex();
4064
4065 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4066
4067 unsigned NumOps = getNumOperands();
4068 Use *OL = getOperandList();
4069
4070 // Overwrite this case with the end of the list.
4071 if (2 + (idx + 1) * 2 != NumOps) {
4072 OL[2 + idx * 2] = OL[NumOps - 2];
4073 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4074 }
4075
4076 // Nuke the last value.
4077 OL[NumOps-2].set(nullptr);
4078 OL[NumOps-2+1].set(nullptr);
4079 setNumHungOffUseOperands(NumOps-2);
4080
4081 return CaseIt(this, idx);
4082}
4083
4084/// growOperands - grow operands - This grows the operand list in response
4085/// to a push_back style of operation. This grows the number of ops by 3 times.
4086///
4087void SwitchInst::growOperands() {
4088 unsigned e = getNumOperands();
4089 unsigned NumOps = e*3;
4090
4091 ReservedSpace = NumOps;
4092 growHungoffUses(ReservedSpace);
4093}
4094
// Build a !prof branch_weights metadata node from the cached Weights, or
// return nullptr when there is nothing meaningful to attach: no cached
// weights, all-zero weights, or fewer than two successors.
// NOTE(review): the signature line (original 4095) is elided — presumably
// SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD; confirm upstream.
4096 assert(Changed && "called only if metadata has changed");
4097
4098 if (!Weights)
4099 return nullptr;
4100
4101 assert(SI.getNumSuccessors() == Weights->size() &&
4102 "num of prof branch_weights must accord with num of successors");
4103
4104 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4105
 // An all-zero or single-entry weight vector carries no information.
4106 if (AllZeroes || Weights->size() < 2)
4107 return nullptr;
4108
4109 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4110}
4111
// Populate the cached Weights vector from the switch's existing !prof
// branch_weights metadata, if present and well-formed.
// NOTE(review): the signature line (original 4112) and a local declaration
// (original 4122, presumably the SmallVector passed to extractBranchWeights)
// are elided in this listing.
4113 MDNode *ProfileData = getBranchWeightMDNode(SI);
4114 if (!ProfileData)
4115 return;
4116
 // Malformed metadata is a hard error, not a recoverable condition.
 // FIXME: "succesors" in the message below is misspelled ("successors").
4117 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4118 llvm_unreachable("number of prof branch_weights metadata operands does "
4119 "not correspond to number of succesors");
4120 }
4121
4123 if (!extractBranchWeights(ProfileData, Weights))
4124 return;
4125 this->Weights = std::move(Weights);
4126}
4127
// Remove a case while keeping the cached branch weights in sync with
// SwitchInst::removeCase's swap-with-last removal.  The weight index is the
// case index + 1, presumably because slot 0 holds the default successor's
// weight — confirm against the upstream signature (elided here, orig
// 4128-4129).
4130 if (Weights) {
4131 assert(SI.getNumSuccessors() == Weights->size() &&
4132 "num of prof branch_weights must accord with num of successors");
4133 Changed = true;
4134 // Copy the last case to the place of the removed one and shrink.
4135 // This is tightly coupled with the way SwitchInst::removeCase() removes
4136 // the cases in SwitchInst::removeCase(CaseIt).
4137 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4138 Weights->pop_back();
4139 }
4140 return SI.removeCase(I);
4141}
4142
// Add a case and record its (optional) branch weight W.  If no weights were
// cached yet and W is a nonzero value, a zero-filled vector is materialized
// first so every pre-existing successor gets weight 0.
// NOTE(review): the start of the signature (original 4143) and one
// intervening line (original 4145) are elided in this listing.
4144 ConstantInt *OnVal, BasicBlock *Dest,
4146 SI.addCase(OnVal, Dest);
4147
4148 if (!Weights && W && *W) {
 // First meaningful weight: create the vector and set the new case's slot.
4149 Changed = true;
4150 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4151 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4152 } else if (Weights) {
 // Weights already tracked: append W, defaulting a missing weight to 0.
4153 Changed = true;
4154 Weights->push_back(W.value_or(0));
4155 }
4156 if (Weights)
4157 assert(SI.getNumSuccessors() == Weights->size() &&
4158 "num of prof branch_weights must accord with num of successors");
4159}
4160
// Erase the wrapped switch instruction itself.  Clearing Changed/Weights
// first ensures the wrapper's destructor does not try to write metadata back
// onto the (now deleted) instruction.
// NOTE(review): the signature lines (original 4161-4162) are elided here.
4163 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4164 Changed = false;
4165 if (Weights)
4166 Weights->resize(0);
4167 return SI.eraseFromParent();
4168}
4169
// Return the cached weight for successor idx, or std::nullopt when no
// branch-weight metadata is being tracked.
// NOTE(review): the signature lines (original 4170-4171) are elided here.
4172 if (!Weights)
4173 return std::nullopt;
4174 return (*Weights)[idx];
4175}
4176
// Set successor idx's weight to *W (no-op when W is empty).  A zero-filled
// weight vector is lazily created the first time a nonzero weight is set;
// Changed is only flagged when the stored value actually differs.
// NOTE(review): the signature lines (original 4177-4178) are elided here.
4179 if (!W)
4180 return;
4181
4182 if (!Weights && *W)
4183 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4184
4185 if (Weights) {
4186 auto &OldW = (*Weights)[idx];
4187 if (*W != OldW) {
4188 Changed = true;
4189 OldW = *W;
4190 }
4191 }
4192}
4193
// Static variant: read successor idx's weight directly from the switch's
// !prof metadata without a wrapper.  Operand 0 of the metadata node is the
// "branch_weights" tag, hence the +1 offsets when indexing and when
// comparing operand count against the successor count.
// NOTE(review): the opening of the signature (original 4194-4195) is elided
// in this listing.
4196 unsigned idx) {
4197 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4198 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4199 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4200 ->getValue()
4201 .getZExtValue();
4202
4203 return std::nullopt;
4204}
4205
4206//===----------------------------------------------------------------------===//
4207// IndirectBrInst Implementation
4208//===----------------------------------------------------------------------===//
4209
// Shared constructor logic for indirectbr: operand 0 is the jump address;
// one extra slot per anticipated destination is reserved so addDestination()
// can append without immediately reallocating.
// NOTE(review): original line 4214 (presumably the operand-count update) is
// elided in this listing.
4210void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4211 assert(Address && Address->getType()->isPointerTy() &&
4212 "Address of indirectbr must be a pointer");
4213 ReservedSpace = 1+NumDests;
 // Operand storage lives off-instruction ("hung-off" uses).
4215 allocHungoffUses(ReservedSpace);
4216
4217 Op<0>() = Address;
4218}
4219
4220
4221/// growOperands - grow operands - This grows the operand list in response
4222/// to a push_back style of operation. This grows the number of ops by 2 times.
4223///
4224void IndirectBrInst::growOperands() {
4225 unsigned e = getNumOperands();
4226 unsigned NumOps = e*2;
4227
4228 ReservedSpace = NumOps;
4229 growHungoffUses(ReservedSpace);
4230}
4231
4232IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4233 InsertPosition InsertBefore)
4234 : Instruction(Type::getVoidTy(Address->getContext()),
4235 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4236 init(Address, NumCases);
4237}
4238
4239IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4240 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4241 AllocMarker) {
4242 NumUserOperands = IBI.NumUserOperands;
4243 allocHungoffUses(IBI.getNumOperands());
4244 Use *OL = getOperandList();
4245 const Use *InOL = IBI.getOperandList();
4246 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4247 OL[i] = InOL[i];
4248 SubclassOptionalData = IBI.SubclassOptionalData;
4249}
4250
4251/// addDestination - Add a destination.
4252///
// NOTE(review): the signature line (original 4253) and the operand-count
// bump (original 4259) are elided in this listing.
4254 unsigned OpNo = getNumOperands();
4255 if (OpNo+1 > ReservedSpace)
4256 growOperands(); // Get more space!
4257 // Initialize some new operands.
4258 assert(OpNo < ReservedSpace && "Growing didn't work!");
 // Append the new destination block at the end of the operand list.
4260 getOperandList()[OpNo] = DestBB;
4261}
4262
4263/// removeDestination - This method removes the specified successor from the
4264/// indirectbr instruction.
// NOTE(review): the signature line (original 4265) is elided in this
// listing.  Removal is swap-with-last (operand 0 is the address, so
// successor idx lives at operand idx+1); successor order is NOT preserved.
4266 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4267
4268 unsigned NumOps = getNumOperands();
4269 Use *OL = getOperandList();
4270
4271 // Replace this value with the last one.
4272 OL[idx+1] = OL[NumOps-1];
4273
4274 // Nuke the last value.
4275 OL[NumOps-1].set(nullptr);
4276 setNumHungOffUseOperands(NumOps-1);
4277}
4278
4279//===----------------------------------------------------------------------===//
4280// FreezeInst Implementation
4281//===----------------------------------------------------------------------===//
4282
// FreezeInst constructor body: a unary instruction whose result type equals
// its operand's type.
// NOTE(review): the signature line (original 4283) is elided in this listing.
4284 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4285 setName(Name);
4286}
4287
4288//===----------------------------------------------------------------------===//
4289// cloneImpl() implementations
4290//===----------------------------------------------------------------------===//
4291
4292// Define these methods here so vtables don't get emitted into every translation
4293// unit that uses these classes.
4294
4297 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4298}
4299
4301 return Create(getOpcode(), Op<0>());
4302}
4303
4305 return Create(getOpcode(), Op<0>(), Op<1>());
4306}
4307
4309 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4310}
4311
4313 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4314}
4315
4317 return new ExtractValueInst(*this);
4318}
4319
4321 return new InsertValueInst(*this);
4322}
4323
4326 getOperand(0), getAlign());
4327 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4328 Result->setSwiftError(isSwiftError());
4329 return Result;
4330}
4331
4333 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4335}
4336
4338 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4340}
4341
4346 Result->setVolatile(isVolatile());
4347 Result->setWeak(isWeak());
4348 return Result;
4349}
4350
4352 AtomicRMWInst *Result =
4355 Result->setVolatile(isVolatile());
4356 return Result;
4357}
4358
4360 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4361}
4362
4364 return new TruncInst(getOperand(0), getType());
4365}
4366
4368 return new ZExtInst(getOperand(0), getType());
4369}
4370
4372 return new SExtInst(getOperand(0), getType());
4373}
4374
4376 return new FPTruncInst(getOperand(0), getType());
4377}
4378
4380 return new FPExtInst(getOperand(0), getType());
4381}
4382
4384 return new UIToFPInst(getOperand(0), getType());
4385}
4386
4388 return new SIToFPInst(getOperand(0), getType());
4389}
4390
4392 return new FPToUIInst(getOperand(0), getType());
4393}
4394
4396 return new FPToSIInst(getOperand(0), getType());
4397}
4398
4400 return new PtrToIntInst(getOperand(0), getType());
4401}
4402
4404 return new IntToPtrInst(getOperand(0), getType());
4405}
4406
4408 return new BitCastInst(getOperand(0), getType());
4409}
4410
4412 return new AddrSpaceCastInst(getOperand(0), getType());
4413}
4414
4416 if (hasOperandBundles()) {
4420 return new (AllocMarker) CallInst(*this, AllocMarker);
4421 }
4423 return new (AllocMarker) CallInst(*this, AllocMarker);
4424}
4425
4428}
4429
4431 return new VAArgInst(getOperand(0), getType());
4432}
4433
4436}
4437
4440}
4441
4444}
4445
4446PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4447
4449 return new LandingPadInst(*this);
4450}
4451
4454 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4455}
4456
4459 return new (AllocMarker) BranchInst(*this, AllocMarker);
4460}
4461
4462SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4463
4465 return new IndirectBrInst(*this);
4466}
4467
4469 if (hasOperandBundles()) {
4473 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4474 }
4476 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4477}
4478
4480 if (hasOperandBundles()) {
4484 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4485 }
4487 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4488}
4489
4491 return new (AllocMarker) ResumeInst(*this);
4492}
4493
4496 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4497}
4498
4500 return new (AllocMarker) CatchReturnInst(*this);
4501}
4502
4504 return new CatchSwitchInst(*this);
4505}
4506
4509 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4510}
4511
4513 LLVMContext &Context = getContext();
4514 return new UnreachableInst(Context);
4515}
4516
4518 return new FreezeInst(getOperand(0));
4519}
static const LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
#define LLVM_DEBUG(...)
Definition: Debug.h:106
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
hexagon gen pred
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:5553
Class for arbitrary precision integers.
Definition: APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1330
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1618
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1577
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:200
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:139
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:104
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:128
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:157
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
iterator begin() const
Definition: ArrayRef.h:156
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:198
Class to represent array types.
Definition: DerivedTypes.h:395
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:555
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:594
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:544
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:562
void setAlignment(Align Align)
Definition: Instructions.h:548
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:620
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:827
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:837
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
@ Add
*p = old + v
Definition: Instructions.h:720
@ FAdd
*p = old + v
Definition: Instructions.h:741
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:764
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:734
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:768
@ FSub
*p = old - v
Definition: Instructions.h:744
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:756
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:732
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:738
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:752
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:736
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:748
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:760
@ Nand
*p = ~(old & v)
Definition: Instructions.h:726
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:866
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:852
void setOperation(BinOp Operation)
Definition: Instructions.h:821
BinOp getOperation() const
Definition: Instructions.h:805
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:861
void setAlignment(Align Align)
Definition: Instructions.h:831
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:847
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:834
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
CaptureInfo getCaptureInfo() const
const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:506
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:281
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:209
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:593
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:220
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1408
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
Definition: InstrTypes.h:1580
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1403
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2220
bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1474
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2028
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
Definition: InstrTypes.h:2165
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2059
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1341
void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
Definition: InstrTypes.h:1428
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1573
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:1972
uint64_t getParamDereferenceableBytes(unsigned i) const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:1825
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1399
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2237
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1261
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1685
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2253
void setOnlyReadsMemory()
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
Value * getCalledOperand() const
Definition: InstrTypes.h:1334
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1126
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2133
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
FunctionType * FTy
Definition: InstrTypes.h:1127
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:1816
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1267
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1199
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2288
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1377
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1284
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1417
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:1977
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition: ModRef.h:318
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition: ModRef.h:331
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition: ModRef.h:334
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:608
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:856
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:913
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:766
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:946
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:676
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:690
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:702
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:703
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:679
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:688
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:677
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:678
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:697
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:696
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:700
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:687
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:681
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:684
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:685
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:680
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:682
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:701
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:689
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:699
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:686
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:675
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:683
bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition: InstrTypes.h:928
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:940
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:869
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition: InstrTypes.h:850
bool isFPPredicate() const
Definition: InstrTypes.h:780
void swapOperands()
This is just a convenience that dispatches to the subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:787
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
bool isStrictPredicate() const
Definition: InstrTypes.h:841
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:891
bool isIntPredicate() const
Definition: InstrTypes.h:781
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition: InstrTypes.h:934
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:924
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
static std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
Definition: CmpPredicate.h:28
static CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:42
static CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
const APFloat & getValueAPF() const
Definition: Constants.h:314
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1421
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:311
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:424
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:460
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:455
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:449
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:563
unsigned getNumElements() const
Definition: DerivedTypes.h:606
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2366
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2365
FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:105
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:144
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:137
bool isVarArg() const
Definition: DerivedTypes.h:125
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition: Instruction.h:61
BasicBlock * getBasicBlock()
Definition: Instruction.h:62
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:1063
bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:511
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:94
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:310
This class represents a cast from an integer to a pointer.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:69
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
An instruction for reading from memory.
Definition: Instructions.h:176
void setAlignment(Align Align)
Definition: Instructions.h:215
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:241
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:220
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:208
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:230
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1073
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1434
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:198
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1065
StringRef getTag() const
Definition: InstrTypes.h:1088
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:670
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:703
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:610
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:298
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:704
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:342
Align getAlign() const
Definition: Instructions.h:333
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:328
void setAlignment(Align Align)
Definition: Instructions.h:337
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:353
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:325
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:364
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Class to represent struct types.
Definition: DerivedTypes.h:218
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition: TypeSize.h:342
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:243
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition: Type.h:289
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:303
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:267
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:225
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition: InstrTypes.h:153
This function has undefined behavior.
UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:886
const Use * getOperandList() const
Definition: User.h:221
op_range operands()
Definition: User.h:288
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:280
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:261
Use & Op()
Definition: User.h:192
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
unsigned NumUserOperands
Definition: Value.h:108
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:665
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
size_type size() const
Definition: DenseSet.h:81
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:193
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
Definition: PatternMatch.h:801
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:262
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:292
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1187
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:256
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:548
constexpr int PoisonMaskElem
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1778
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:303
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1841
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2087
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:292
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2144
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
Definition: InstrTypes.h:2155
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Definition: InstrTypes.h:2151
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
Definition: KnownBits.cpp:488
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
Definition: KnownBits.cpp:496
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
Definition: KnownBits.cpp:536
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
Definition: KnownBits.cpp:502
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
Definition: KnownBits.cpp:542
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
Definition: KnownBits.cpp:518
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
Definition: KnownBits.cpp:522
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
Definition: KnownBits.cpp:546
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
Definition: KnownBits.cpp:526
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Definition: KnownBits.cpp:512
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Indicates this User has operands co-allocated.
Definition: User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition: User.h:66