//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains implementations for different VPlan recipes.
///
//===----------------------------------------------------------------------===//
13
15#include "VPlan.h"
16#include "VPlanAnalysis.h"
17#include "VPlanHelpers.h"
18#include "VPlanPatternMatch.h"
19#include "VPlanUtils.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Twine.h"
26#include "llvm/IR/BasicBlock.h"
27#include "llvm/IR/IRBuilder.h"
28#include "llvm/IR/Instruction.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/IR/Type.h"
32#include "llvm/IR/Value.h"
35#include "llvm/Support/Debug.h"
40#include <cassert>
41
42using namespace llvm;
43
45
46#define LV_NAME "loop-vectorize"
47#define DEBUG_TYPE LV_NAME
48
bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
  case VPInstructionSC:
    return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
  case VPInterleaveEVLSC:
  case VPInterleaveSC:
    return cast<VPInterleaveBase>(this)->getNumStoreOperands() > 0;
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyReadsMemory();
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
  case VPCanonicalIVPHISC:
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPReductionPHISC:
  case VPScalarIVStepsSC:
  case VPPredInstPHISC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
  case VPInstructionSC:
    return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyWritesMemory();
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPScalarIVStepsSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    // FIXME: Return false if the recipe represents an interleaved store.
    return true;
  }
}

bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayHaveSideEffects();
  case VPDerivedIVSC:
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPVectorEndPointerSC:
    return false;
  case VPInstructionSC:
    return mayWriteToMemory();
  case VPWidenCallSC: {
    Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
    return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
  }
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPScalarIVStepsSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenPointerInductionSC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPInterleaveEVLSC:
  case VPInterleaveSC:
    return mayWriteToMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    assert(
        cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
            mayWriteToMemory() &&
        "mayHaveSideEffects result for ingredient differs from this "
        "implementation");
    return mayWriteToMemory();
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

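// Illustrative usage (not part of this file): the three queries above are
// deliberately conservative, so a transform can combine them to decide
// whether a recipe may be moved. `isSafeToHoist` is a hypothetical helper
// sketching that pattern.
//
//   static bool isSafeToHoist(const VPRecipeBase &R) {
//     // Unknown recipe kinds answer true to all three queries, which keeps
//     // this check sound as new recipes are added.
//     return !R.mayReadFromMemory() && !R.mayWriteToMemory() &&
//            !R.mayHaveSideEffects();
//   }
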
void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, InsertPos->getIterator());
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  BB.insert(this, I);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

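// Usage sketch (hypothetical recipes A and B): the move helpers compose the
// unlink/relink primitives above, so relocating B directly after A is simply:
//
//   B->moveAfter(A);
//   assert(B->getParent() == A->getParent() && "B now lives in A's block");
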
InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
  // Get the underlying instruction for the recipe, if there is one. It is
  // used to
  //  * decide if cost computation should be skipped for this recipe,
  //  * apply forced target instruction cost.
  Instruction *UI = nullptr;
  if (auto *S = dyn_cast<VPSingleDefRecipe>(this))
    UI = dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
  else if (auto *IG = dyn_cast<VPInterleaveBase>(this))
    UI = IG->getInsertPos();
  else if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(this))
    UI = &WidenMem->getIngredient();

  InstructionCost RecipeCost;
  if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
    RecipeCost = 0;
  } else {
    RecipeCost = computeCost(VF, Ctx);
    if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
        RecipeCost.isValid())
      RecipeCost = InstructionCost(ForceTargetInstructionCost);
  }

  LLVM_DEBUG({
    dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
    dump();
  });
  return RecipeCost;
}

InstructionCost VPRecipeBase::computeCost(ElementCount VF,
                                          VPCostContext &Ctx) const {
  llvm_unreachable("subclasses should implement computeCost");
}

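// The forced-cost override above can be exercised from the command line,
// e.g.:
//
//   opt -passes=loop-vectorize -force-target-instruction-cost=1 ...
//
// which replaces every valid cost of a recipe with an underlying instruction
// by the given constant; useful for isolating cost-model effects in tests.
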
bool VPRecipeBase::isPhi() const {
  return (getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC) ||
         isa<VPPhi, VPIRPhi>(this);
}

bool VPRecipeBase::isScalarCast() const {
  auto *VPI = dyn_cast<VPInstruction>(this);
  return VPI && Instruction::isCast(VPI->getOpcode());
}

InstructionCost
VPPartialReductionRecipe::computeCost(ElementCount VF,
                                      VPCostContext &Ctx) const {
  std::optional<unsigned> Opcode;
  VPValue *Op = getOperand(0);
  VPRecipeBase *OpR = Op->getDefiningRecipe();

  // If the partial reduction is predicated, a select will be operand 0.
  using namespace llvm::VPlanPatternMatch;
  if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) {
    OpR = Op->getDefiningRecipe();
  }

  Type *InputTypeA = nullptr, *InputTypeB = nullptr;
  TTI::PartialReductionExtendKind ExtAType = TTI::PR_None,
                                  ExtBType = TTI::PR_None;

  auto GetExtendKind = [](VPRecipeBase *R) {
    if (!R)
      return TTI::PR_None;
    auto *WidenCastR = dyn_cast<VPWidenCastRecipe>(R);
    if (!WidenCastR)
      return TTI::PR_None;
    if (WidenCastR->getOpcode() == Instruction::CastOps::ZExt)
      return TTI::PR_ZeroExtend;
    if (WidenCastR->getOpcode() == Instruction::CastOps::SExt)
      return TTI::PR_SignExtend;
    return TTI::PR_None;
  };

  // Pick out opcode, type/ext information and use sub side effects from a
  // widen recipe.
  auto HandleWiden = [&](VPWidenRecipe *Widen) {
    if (match(Widen, m_Sub(m_SpecificInt(0), m_VPValue(Op)))) {
      Widen = dyn_cast<VPWidenRecipe>(Op->getDefiningRecipe());
    }
    Opcode = Widen->getOpcode();
    VPRecipeBase *ExtAR = Widen->getOperand(0)->getDefiningRecipe();
    VPRecipeBase *ExtBR = Widen->getOperand(1)->getDefiningRecipe();
    InputTypeA = Ctx.Types.inferScalarType(ExtAR ? ExtAR->getOperand(0)
                                                 : Widen->getOperand(0));
    InputTypeB = Ctx.Types.inferScalarType(ExtBR ? ExtBR->getOperand(0)
                                                 : Widen->getOperand(1));
    ExtAType = GetExtendKind(ExtAR);
    ExtBType = GetExtendKind(ExtBR);
  };

  if (isa<VPWidenCastRecipe>(OpR)) {
    InputTypeA = Ctx.Types.inferScalarType(OpR->getOperand(0));
    ExtAType = GetExtendKind(OpR);
  } else if (isa<VPReductionPHIRecipe>(OpR)) {
    auto RedPhiOp1R = getOperand(1)->getDefiningRecipe();
    if (isa<VPWidenCastRecipe>(RedPhiOp1R)) {
      InputTypeA = Ctx.Types.inferScalarType(RedPhiOp1R->getOperand(0));
      ExtAType = GetExtendKind(RedPhiOp1R);
    } else if (auto Widen = dyn_cast<VPWidenRecipe>(RedPhiOp1R))
      HandleWiden(Widen);
  } else if (auto Widen = dyn_cast<VPWidenRecipe>(OpR)) {
    HandleWiden(Widen);
  } else if (auto Reduction = dyn_cast<VPPartialReductionRecipe>(OpR)) {
    return Reduction->computeCost(VF, Ctx);
  }
  auto *PhiType = Ctx.Types.inferScalarType(getOperand(1));
  return Ctx.TTI.getPartialReductionCost(getOpcode(), InputTypeA, InputTypeB,
                                         PhiType, VF, ExtAType, ExtBType,
                                         Opcode, Ctx.CostKind);
}

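// For example, a dot-product style reduction
//
//   acc += (int32_t)a[i] * (int32_t)b[i]   // a, b are int8_t arrays
//
// reaches this hook as mul(sext, sext) feeding the partial reduction:
// InputTypeA/InputTypeB are i8, both extend kinds are TTI::PR_SignExtend, and
// Opcode is Instruction::Mul, letting a target price the whole pattern as a
// single dot-product style instruction (e.g. AArch64 sdot) rather than as
// separate extends, a multiply, and an add.
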
void VPPartialReductionRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;

  assert(getOpcode() == Instruction::Add &&
         "Unhandled partial reduction opcode");

  Value *BinOpVal = State.get(getOperand(1));
  Value *PhiVal = State.get(getOperand(0));
  assert(PhiVal && BinOpVal && "Phi and Mul must be set");

  Type *RetTy = PhiVal->getType();

  CallInst *V = Builder.CreateIntrinsic(
      RetTy, Intrinsic::experimental_vector_partial_reduce_add,
      {PhiVal, BinOpVal}, nullptr, "partial.reduce");

  State.set(this, V);
}

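// The emitted IR has the following shape, e.g. for VF=16 accumulating into a
// 4-element result (concrete types shown for illustration only):
//
//   %partial.reduce = call <4 x i32>
//       @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(
//           <4 x i32> %phi, <16 x i32> %bin)
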
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPartialReductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                     VPSlotTracker &SlotTracker) const {
  O << Indent << "PARTIAL-REDUCE ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(getOpcode()) << " ";
  printOperands(O, SlotTracker);
}
#endif

FastMathFlags VPIRFlags::getFastMathFlags() const {
  assert(OpType == OperationType::FPMathOp &&
         "recipe doesn't have fast math flags");
  FastMathFlags Res;
  Res.setAllowReassoc(FMFs.AllowReassoc);
  Res.setNoNaNs(FMFs.NoNaNs);
  Res.setNoInfs(FMFs.NoInfs);
  Res.setNoSignedZeros(FMFs.NoSignedZeros);
  Res.setAllowReciprocal(FMFs.AllowReciprocal);
  Res.setAllowContract(FMFs.AllowContract);
  Res.setApproxFunc(FMFs.ApproxFunc);
  return Res;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPIRFlags::dump() const { printFlags(dbgs()); }
#endif

template <unsigned PartOpIdx>
VPValue *
VPUnrollPartAccessor<PartOpIdx>::getUnrollPartOperand(const VPUser &U) const {
  if (U.getNumOperands() == PartOpIdx + 1)
    return U.getOperand(PartOpIdx);
  return nullptr;
}

template <unsigned PartOpIdx>
unsigned VPUnrollPartAccessor<PartOpIdx>::getUnrollPart(const VPUser &U) const {
  if (auto *UnrollPartOp = getUnrollPartOperand(U))
    return cast<ConstantInt>(UnrollPartOp->getLiveInIRValue())->getZExtValue();
  return 0;
}

namespace llvm {
template class VPUnrollPartAccessor<1>;
template class VPUnrollPartAccessor<2>;
template class VPUnrollPartAccessor<3>;
} // namespace llvm

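// Example: after unrolling by UF=4, a recipe using VPUnrollPartAccessor<2>
// normally has two operands; the unroller appends a trailing constant operand
// holding the part number. getUnrollPart() then yields 0..3, while recipes
// still carrying only two operands implicitly belong to part 0.
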
VPInstruction::VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands,
                             const VPIRFlags &Flags, DebugLoc DL,
                             const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, Flags, DL),
      VPIRMetadata(), Opcode(Opcode), Name(Name.str()) {
  assert(flagsValidForOpcode(getOpcode()) &&
         "Set flags not supported for the provided opcode");
  assert((getNumOperandsForOpcode(Opcode) == -1u ||
          getNumOperandsForOpcode(Opcode) == getNumOperands()) &&
         "number of operands does not match opcode");
}

#ifndef NDEBUG
unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
  if (Instruction::isUnaryOp(Opcode) || Instruction::isCast(Opcode))
    return 1;

  if (Instruction::isBinaryOp(Opcode))
    return 2;

  switch (Opcode) {
  case VPInstruction::StepVector:
  case VPInstruction::VScale:
    return 0;
  case Instruction::Alloca:
  case Instruction::ExtractValue:
  case Instruction::Freeze:
  case Instruction::Load:
    return 1;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
    return 2;
  case Instruction::Select:
    return 3;
  case VPInstruction::ComputeFindIVResult:
    return 4;
  case Instruction::Call:
  case Instruction::GetElementPtr:
  case Instruction::PHI:
  case Instruction::Switch:
    // Cannot determine the number of operands from the opcode.
    return -1u;
  }
  llvm_unreachable("all cases should be handled above");
}
#endif

bool VPInstruction::doesGeneratePerAllLanes() const {
  return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
}

bool VPInstruction::canGenerateScalarForFirstLane() const {
  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
    return true;
  if (isSingleScalar() || isVectorToScalar())
    return true;
  switch (Opcode) {
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::PHI:
  case Instruction::Select:
    return true;
  default:
    return false;
  }
}

Value *VPInstruction::generatePerLane(VPTransformState &State,
                                      const VPLane &Lane) {
  IRBuilderBase &Builder = State.Builder;

  assert(getOpcode() == VPInstruction::PtrAdd &&
         "only PtrAdd opcodes are supported for now");
  return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
                              State.get(getOperand(1), Lane), Name);
}

/// Create a conditional branch using \p Cond branching to the successors of \p
/// VPBB. Note that the first successor is always forward (i.e. not created yet)
/// while the second successor may already have been created (if it is a header
/// block and VPBB is a latch).
static BranchInst *createCondBranch(Value *Cond, VPBasicBlock *VPBB,
                                    VPTransformState &State) {
  // Replace the temporary unreachable terminator with a new conditional
  // branch, hooking it up to backward destination (header) for latch blocks
  // now, and to forward destination(s) later when they are created.
  // Second successor may be backwards - iff it is already in VPBB2IRBB.
  VPBasicBlock *SecondVPSucc = cast<VPBasicBlock>(VPBB->getSuccessors()[1]);
  BasicBlock *SecondIRSucc = State.CFG.VPBB2IRBB.lookup(SecondVPSucc);
  BasicBlock *IRBB = State.CFG.VPBB2IRBB[VPBB];
  BranchInst *CondBr = State.Builder.CreateCondBr(Cond, IRBB, SecondIRSucc);
  // First successor is always forward, reset it to nullptr.
  CondBr->setSuccessor(0, nullptr);
  IRBB->getTerminator()->eraseFromParent();
  return CondBr;
}

Value *VPInstruction::generate(VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;

  if (Instruction::isBinaryOp(getOpcode())) {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
    auto *Res =
        Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
    if (auto *I = dyn_cast<Instruction>(Res))
      applyFlags(*I);
    return Res;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    return Builder.CreateNot(A, Name);
  }
  case Instruction::ExtractElement: {
    assert(State.VF.isVector() && "Only extract elements from vectors");
    if (getOperand(1)->isLiveIn()) {
      unsigned IdxToExtract =
          cast<ConstantInt>(getOperand(1)->getLiveInIRValue())->getZExtValue();
      return State.get(getOperand(0), VPLane(IdxToExtract));
    }
    Value *Vec = State.get(getOperand(0));
    Value *Idx = State.get(getOperand(1), /*IsScalar=*/true);
    return Builder.CreateExtractElement(Vec, Idx, Name);
  }
  case Instruction::Freeze: {
    Value *Op = State.get(getOperand(0), vputils::onlyFirstLaneUsed(this));
    return Builder.CreateFreeze(Op, Name);
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
    return Builder.CreateCmp(getPredicate(), A, B, Name);
  }
  case Instruction::PHI: {
    llvm_unreachable("should be handled by VPPhi::execute");
  }
  case Instruction::Select: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *Cond = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *Op1 = State.get(getOperand(1), OnlyFirstLaneUsed);
    Value *Op2 = State.get(getOperand(2), OnlyFirstLaneUsed);
    return Builder.CreateSelect(Cond, Op1, Op2, Name);
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), VPLane(0));

    // If this part of the active lane mask is scalar, generate the CMP
    // directly to avoid unnecessary extracts.
    if (State.VF.isScalar())
      return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
                               Name);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto PredTy = VectorType::get(
        Int1Ty, State.VF * cast<ConstantInt>(getOperand(2)->getLiveInIRValue())
                               ->getZExtValue());
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {PredTy, ScalarTC->getType()},
                                   {VIVElem0, ScalarTC}, nullptr, Name);
  }
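  // The ActiveLaneMask case above emits, e.g. for VF=4, one part, and an i64
  // IV and trip count:
  //
  //   %active.lane.mask = call <4 x i1>
  //       @llvm.get.active.lane.mask.v4i1.i64(i64 %iv, i64 %tc)
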
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    // vector.ph:
    //   v_init = vector(..., ..., ..., a[-1])
    //   br vector.body
    //
    // vector.body
    //   i = phi [0, vector.ph], [i+4, vector.body]
    //   v1 = phi [v_init, vector.ph], [v2, vector.body]
    //   v2 = a[i, i+1, i+2, i+3];
    //   v3 = vector(v1(3), v2(0, 1, 2))

    auto *V1 = State.get(getOperand(0));
    if (!V1->getType()->isVectorTy())
      return V1;
    Value *V2 = State.get(getOperand(1));
    return Builder.CreateVectorSplice(V1, V2, -1, Name);
  }
  case VPInstruction::CalculateTripCountMinusVF: {
    unsigned UF = getParent()->getPlan()->getUF();
    Value *ScalarTC = State.get(getOperand(0), VPLane(0));
    Value *Step = createStepForVF(Builder, ScalarTC->getType(), State.VF, UF);
    Value *Sub = Builder.CreateSub(ScalarTC, Step);
    Value *Cmp =
        Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
    Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
    return Builder.CreateSelect(Cmp, Sub, Zero);
  }
  case VPInstruction::ExplicitVectorLength: {
    // TODO: Restructure this code with an explicit remainder loop, vsetvli can
    // be outside of the main loop.
    Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
    // Compute EVL.
    assert(AVL->getType()->isIntegerTy() &&
           "Requested vector length should be an integer.");

    assert(State.VF.isScalable() && "Expected scalable vector factor.");
    Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());

    Value *EVL = State.Builder.CreateIntrinsic(
        State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
        {AVL, VFArg, State.Builder.getTrue()});
    return EVL;
  }
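  // On RISC-V the get.vector.length intrinsic above typically lowers to a
  // vsetvli; the returned EVL then feeds the VP (vector-predicated)
  // intrinsics emitted for the loop body, covering the tail without a scalar
  // remainder loop.
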
  case VPInstruction::CanonicalIVIncrementForPart: {
    unsigned Part = getUnrollPart(*this);
    auto *IV = State.get(getOperand(0), VPLane(0));
    assert(Part != 0 && "Must have a positive part");
    // The canonical IV is incremented by the vectorization factor (num of
    // SIMD elements) times the unroll part.
    Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
    return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
                             hasNoSignedWrap());
  }
  case VPInstruction::BranchOnCond: {
    Value *Cond = State.get(getOperand(0), VPLane(0));
    auto *Br = createCondBranch(Cond, getParent(), State);
    applyMetadata(*Br);
    return Br;
  }
  case VPInstruction::BranchOnCount: {
    // First create the compare.
    Value *IV = State.get(getOperand(0), /*IsScalar*/ true);
    Value *TC = State.get(getOperand(1), /*IsScalar*/ true);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);
    return createCondBranch(Cond, getParent(), State);
  }
  case VPInstruction::Broadcast: {
    return Builder.CreateVectorSplat(
        State.VF, State.get(getOperand(0), /*IsScalar*/ true), "broadcast");
  }
  case VPInstruction::BuildStructVector: {
    // For struct types, we need to build a new 'wide' struct type, where each
    // element is widened, i.e., we create a struct of vectors.
    auto *StructTy =
        cast<StructType>(State.TypeAnalysis.inferScalarType(getOperand(0)));
    Value *Res = PoisonValue::get(toVectorizedTy(StructTy, State.VF));
    for (const auto &[LaneIndex, Op] : enumerate(operands())) {
      for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
           FieldIndex++) {
        Value *ScalarValue =
            Builder.CreateExtractValue(State.get(Op, true), FieldIndex);
        Value *VectorValue = Builder.CreateExtractValue(Res, FieldIndex);
        VectorValue =
            Builder.CreateInsertElement(VectorValue, ScalarValue, LaneIndex);
        Res = Builder.CreateInsertValue(Res, VectorValue, FieldIndex);
      }
    }
    return Res;
  }
  case VPInstruction::BuildVector: {
    auto *ScalarTy = State.TypeAnalysis.inferScalarType(getOperand(0));
    auto NumOfElements = ElementCount::getFixed(getNumOperands());
    Value *Res = PoisonValue::get(toVectorizedTy(ScalarTy, NumOfElements));
    for (const auto &[Idx, Op] : enumerate(operands()))
      Res = State.Builder.CreateInsertElement(Res, State.get(Op, true),
                                              State.Builder.getInt32(Idx));
    return Res;
  }
  case VPInstruction::ReductionStartVector: {
    if (State.VF.isScalar())
      return State.get(getOperand(0), true);
    // If this start vector is scaled then it should produce a vector with
    // fewer elements than the VF.
    ElementCount VF = State.VF.divideCoefficientBy(
        cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue());
    auto *Iden = Builder.CreateVectorSplat(VF, State.get(getOperand(1), true));
    Constant *Zero = Builder.getInt32(0);
    return Builder.CreateInsertElement(Iden, State.get(getOperand(0), true),
                                       Zero);
  }
  case VPInstruction::ComputeAnyOfResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
    Value *ReducedPartRdx = State.get(getOperand(2));
    for (unsigned Idx = 3; Idx < getNumOperands(); ++Idx)
      ReducedPartRdx = Builder.CreateBinOp(
          (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(
              RecurKind::AnyOf),
          State.get(getOperand(Idx)), ReducedPartRdx, "bin.rdx");
    return createAnyOfReduction(Builder, ReducedPartRdx,
                                State.get(getOperand(1), VPLane(0)), OrigPhi);
  }
  case VPInstruction::ComputeFindIVResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    // Get its reduction variable descriptor.
    RecurKind RK = PhiR->getRecurrenceKind();
    assert(RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
           "Unexpected reduction kind");
    assert(!PhiR->isInLoop() &&
           "In-loop FindLastIV reduction is not supported yet");

    // The recipe's operands are the reduction phi, the start value, the
    // sentinel value, followed by one operand for each part of the reduction.
    unsigned UF = getNumOperands() - 3;
    Value *ReducedPartRdx = State.get(getOperand(3));
    RecurKind MinMaxKind;
    bool IsSigned = RecurrenceDescriptor::isSignedRecurrenceKind(RK);
    if (RecurrenceDescriptor::isFindLastIVRecurrenceKind(RK))
      MinMaxKind = IsSigned ? RecurKind::SMax : RecurKind::UMax;
    else
      MinMaxKind = IsSigned ? RecurKind::SMin : RecurKind::UMin;
    for (unsigned Part = 1; Part < UF; ++Part)
      ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
                                      State.get(getOperand(3 + Part)));

    Value *Start = State.get(getOperand(1), true);
    Value *Sentinel = getOperand(2)->getLiveInIRValue();
    return createFindLastIVReduction(Builder, ReducedPartRdx, RK, Start,
                                     Sentinel);
  }
  case VPInstruction::ComputeReductionResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    // Get its reduction variable descriptor.
    RecurKind RK = PhiR->getRecurrenceKind();
    assert(!RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
           "should be handled by ComputeFindIVResult");

    // The recipe's operands are the reduction phi, followed by one operand for
    // each part of the reduction.
    unsigned UF = getNumOperands() - 1;
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part)
      RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());

    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    if (hasFastMathFlags())
      Builder.setFastMathFlags(getFastMathFlags());

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    if (PhiR->isOrdered()) {
      ReducedPartRdx = RdxParts[UF - 1];
    } else {
      // Floating-point operations should have some FMF to enable the
      // reduction.
      for (unsigned Part = 1; Part < UF; ++Part) {
        Value *RdxPart = RdxParts[Part];
        if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK))
          ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
        else {
          Instruction::BinaryOps Opcode;
          // For sub-recurrences, each UF's reduction variable is already
          // negative, we need to do: reduce.add(-acc_uf0 + -acc_uf1)
          if (RK == RecurKind::Sub)
            Opcode = Instruction::Add;
          else
            Opcode =
                (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(RK);
          ReducedPartRdx =
              Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
        }
      }
    }

    // Create the reduction after the loop. Note that in-loop reductions create
    // the target reduction in the loop using a Reduction recipe.
    if (State.VF.isVector() && !PhiR->isInLoop()) {
      // TODO: Support in-order reductions based on the recurrence descriptor.
      // All ops in the reduction inherit fast-math-flags from the recurrence
      // descriptor.
      ReducedPartRdx = createSimpleReduction(Builder, ReducedPartRdx, RK);
    }

    return ReducedPartRdx;
  }
  case VPInstruction::ExtractLastElement:
  case VPInstruction::ExtractPenultimateElement: {
    unsigned Offset = getOpcode() == VPInstruction::ExtractLastElement ? 1 : 2;
    Value *Res;
    if (State.VF.isVector()) {
      assert(Offset <= State.VF.getKnownMinValue() &&
             "invalid offset to extract from");
      // Extract lane VF - Offset from the operand.
      Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
    } else {
      assert(Offset <= 1 && "invalid offset to extract from");
      Res = State.get(getOperand(0));
    }
    if (isa<ExtractElementInst>(Res))
      Res->setName(Name);
    return Res;
  }
  case VPInstruction::LogicalAnd: {
    Value *A = State.get(getOperand(0));
    Value *B = State.get(getOperand(1));
    return Builder.CreateLogicalAnd(A, B, Name);
  }
  case VPInstruction::PtrAdd: {
    assert(vputils::onlyFirstLaneUsed(this) &&
           "can only generate first lane for PtrAdd");
    Value *Ptr = State.get(getOperand(0), VPLane(0));
    Value *Addend = State.get(getOperand(1), VPLane(0));
    return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
  }
  case VPInstruction::WidePtrAdd: {
    Value *Ptr =
        State.get(getOperand(0), vputils::isSingleScalar(getOperand(0)));
    Value *Addend = State.get(getOperand(1));
    return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
  }
  case VPInstruction::AnyOf: {
    Value *Res = Builder.CreateFreeze(State.get(getOperand(0)));
    for (VPValue *Op : drop_begin(operands()))
      Res = Builder.CreateOr(Res, Builder.CreateFreeze(State.get(Op)));
    return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res);
  }
  case VPInstruction::ExtractLane: {
    Value *LaneToExtract = State.get(getOperand(0), true);
    Type *IdxTy = State.TypeAnalysis.inferScalarType(getOperand(0));
    Value *Res = nullptr;
    Value *RuntimeVF = getRuntimeVF(State.Builder, IdxTy, State.VF);

    for (unsigned Idx = 1; Idx != getNumOperands(); ++Idx) {
      Value *VectorStart =
          Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
      Value *VectorIdx = Idx == 1
                             ? LaneToExtract
                             : Builder.CreateSub(LaneToExtract, VectorStart);
      Value *Ext = State.VF.isScalar()
                       ? State.get(getOperand(Idx))
                       : Builder.CreateExtractElement(
                             State.get(getOperand(Idx)), VectorIdx);
      if (Res) {
        Value *Cmp = Builder.CreateICmpUGE(LaneToExtract, VectorStart);
        Res = Builder.CreateSelect(Cmp, Ext, Res);
      } else {
        Res = Ext;
      }
    }
    return Res;
  }
  case VPInstruction::FirstActiveLane: {
    if (getNumOperands() == 1) {
      Value *Mask = State.get(getOperand(0));
      return Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(), Mask,
                                                  true, Name);
    }
    // If there are multiple operands, create a chain of selects to pick the
    // first operand with an active lane and add the number of lanes of the
    // preceding operands.
    Value *RuntimeVF =
        getRuntimeVF(State.Builder, State.Builder.getInt64Ty(), State.VF);
    unsigned LastOpIdx = getNumOperands() - 1;
    Value *Res = nullptr;
    for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
      Value *TrailingZeros =
          State.VF.isScalar()
              ? Builder.CreateZExt(
                    Builder.CreateICmpEQ(State.get(getOperand(Idx)),
                                         Builder.getFalse()),
                    Builder.getInt64Ty())
              : Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(),
                                                     State.get(getOperand(Idx)),
                                                     true, Name);
      Value *Current = Builder.CreateAdd(
          Builder.CreateMul(RuntimeVF, Builder.getInt64(Idx)), TrailingZeros);
      if (Res) {
        Value *Cmp = Builder.CreateICmpNE(TrailingZeros, RuntimeVF);
        Res = Builder.CreateSelect(Cmp, Current, Res);
      } else {
        Res = Current;
      }
    }

    return Res;
  }
  case VPInstruction::ResumeForEpilogue:
    return State.get(getOperand(0), true);
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

/// Compute the cost for \p Opcode at \p VF for recipes whose opcode maps
/// directly to an IR instruction; returns std::nullopt for opcodes not
/// handled here.
std::optional<InstructionCost> VPRecipeWithIRFlags::getCostForRecipeWithOpcode(
    unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const {
  Type *ScalarTy = Ctx.Types.inferScalarType(this);
  Type *ResultTy = VF.isVector() ? toVectorTy(ScalarTy, VF) : ScalarTy;
  switch (Opcode) {
  case Instruction::FNeg:
    return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    TargetTransformInfo::OperandValueInfo RHSInfo = {
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};

    if (VF.isVector()) {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand. One example of this are shifts on x86.
      VPValue *RHS = getOperand(1);
      RHSInfo = Ctx.getOperandInfo(RHS);

      if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
          getOperand(1)->isDefinedOutsideLoopRegions())
        RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
    }

    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
    SmallVector<const Value *, 4> Operands;
    if (CtxI)
      Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
    return Ctx.TTI.getArithmeticInstrCost(
        Opcode, ResultTy, Ctx.CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        RHSInfo, Operands, CtxI, &Ctx.TLI);
  }
  case Instruction::Freeze:
    // This opcode is unknown. Assume that it is the same as 'mul'.
    return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, ResultTy,
                                          Ctx.CostKind);
  case Instruction::ExtractValue:
    return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
                                             Ctx.CostKind);
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ScalarOpTy = Ctx.Types.inferScalarType(getOperand(0));
    Type *OpTy = VF.isVector() ? toVectorTy(ScalarOpTy, VF) : ScalarOpTy;
    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
    return Ctx.TTI.getCmpSelInstrCost(
        Opcode, OpTy, CmpInst::makeCmpResultType(OpTy), getPredicate(),
        Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
        {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
  }
  }
  return std::nullopt;
}

InstructionCost VPInstruction::computeCost(ElementCount VF,
                                           VPCostContext &Ctx) const {
  if (Instruction::isBinaryOp(getOpcode())) {
    if (!getUnderlyingValue() && getOpcode() != Instruction::FMul) {
      // TODO: Compute cost for VPInstructions without underlying values once
      // the legacy cost model has been retired.
      return 0;
    }

    assert(!doesGeneratePerAllLanes() &&
           "Should only generate a vector value or single scalar, not scalars "
           "for all lanes.");
    return *getCostForRecipeWithOpcode(
        getOpcode(),
        vputils::onlyFirstLaneUsed(this) ? ElementCount::getFixed(1) : VF, Ctx);
  }

  switch (getOpcode()) {
  case Instruction::Select: {
    // TODO: It may be possible to improve this by analyzing where the
    // condition operand comes from.
    CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
    auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
    auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
    if (!vputils::onlyFirstLaneUsed(this)) {
      CondTy = toVectorTy(CondTy, VF);
      VecTy = toVectorTy(VecTy, VF);
    }
    return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
                                      Ctx.CostKind);
  }
  case Instruction::ExtractElement:
  case VPInstruction::ExtractLane: {
    if (VF.isScalar()) {
      // ExtractLane with VF=1 takes care of handling extracting across
      // multiple parts.
      return 0;
    }

    // Add on the cost of extracting the element.
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
    return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                      Ctx.CostKind);
  }
  case VPInstruction::AnyOf: {
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
    return Ctx.TTI.getArithmeticReductionCost(
        Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
  }
  case VPInstruction::FirstActiveLane: {
    Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
    if (VF.isScalar())
      return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
                                        CmpInst::makeCmpResultType(ScalarTy),
                                        CmpInst::ICMP_EQ, Ctx.CostKind);
    // Calculate the cost of determining the lane index.
    auto *PredTy = toVectorTy(ScalarTy, VF);
    IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts,
                                  Type::getInt64Ty(Ctx.LLVMCtx),
                                  {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?");
    SmallVector<int> Mask(VF.getKnownMinValue());
    std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);

    return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
                                  cast<VectorType>(VectorTy),
                                  cast<VectorType>(VectorTy), Mask,
                                  Ctx.CostKind, VF.getKnownMinValue() - 1);
  }
  case VPInstruction::ActiveLaneMask: {
    Type *ArgTy = Ctx.Types.inferScalarType(getOperand(0));
    unsigned Multiplier =
        cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue();
    Type *RetTy = toVectorTy(Type::getInt1Ty(Ctx.LLVMCtx), VF * Multiplier);
    IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
                                  {ArgTy, ArgTy});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::ExplicitVectorLength: {
    Type *Arg0Ty = Ctx.Types.inferScalarType(getOperand(0));
    Type *I32Ty = Type::getInt32Ty(Ctx.LLVMCtx);
    Type *I1Ty = Type::getInt1Ty(Ctx.LLVMCtx);
    IntrinsicCostAttributes Attrs(Intrinsic::experimental_get_vector_length,
                                  I32Ty, {Arg0Ty, I32Ty, I1Ty});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::ExtractLastElement: {
    // Add on the cost of extracting the element.
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
    return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
                                                    VecTy, Ctx.CostKind, 0);
  }
  case VPInstruction::ExtractPenultimateElement:
    if (VF == ElementCount::getScalable(1))
      return InstructionCost::getInvalid();
    [[fallthrough]];
  default:
    // TODO: Compute cost for other VPInstructions once the legacy cost model
    // has been retired.
    assert(!getUnderlyingValue() &&
           "unexpected VPInstruction with underlying value");
    return 0;
  }
}

bool VPInstruction::isVectorToScalar() const {
  return getOpcode() == VPInstruction::ExtractLastElement ||
         getOpcode() == VPInstruction::ExtractPenultimateElement ||
         getOpcode() == Instruction::ExtractElement ||
         getOpcode() == VPInstruction::ExtractLane ||
         getOpcode() == VPInstruction::FirstActiveLane ||
         getOpcode() == VPInstruction::ComputeAnyOfResult ||
         getOpcode() == VPInstruction::ComputeFindIVResult ||
         getOpcode() == VPInstruction::ComputeReductionResult ||
         getOpcode() == VPInstruction::AnyOf;
}

bool VPInstruction::isSingleScalar() const {
  switch (getOpcode()) {
  case Instruction::PHI:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::ResumeForEpilogue:
    return true;
  default:
    return isScalarCast();
  }
}

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Lane && "VPInstruction executing a Lane");
  assert(flagsValidForOpcode(getOpcode()) &&
         "Set flags not supported for the provided opcode");
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());
  bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
                                   (vputils::onlyFirstLaneUsed(this) ||
                                    isVectorToScalar() || isSingleScalar());
  bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
  if (GeneratesPerAllLanes) {
    for (unsigned Lane = 0, NumLanes = State.VF.getFixedValue();
         Lane != NumLanes; ++Lane) {
      Value *GeneratedValue = generatePerLane(State, VPLane(Lane));
      assert(GeneratedValue && "generatePerLane must produce a value");
      State.set(this, GeneratedValue, VPLane(Lane));
    }
    return;
  }

  Value *GeneratedValue = generate(State);
  if (!hasResult())
    return;
  assert(GeneratedValue && "generate must produce a value");
  assert((((GeneratedValue->getType()->isVectorTy() ||
            GeneratedValue->getType()->isStructTy()) ==
           !GeneratesPerFirstLaneOnly) ||
          State.VF.isScalar()) &&
         "scalar value but not only first lane defined");
  State.set(this, GeneratedValue,
            /*IsScalar*/ GeneratesPerFirstLaneOnly);
}

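// Note the shape contract checked above: whether a VPInstruction produces a
// vector, one scalar per lane, or a single first-lane scalar is decided here
// at execute() time. E.g. a PtrAdd whose users only need lane 0 is emitted as
// one scalar ptradd rather than VF separate ones.
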
bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
    return false;
  switch (getOpcode()) {
  case Instruction::ExtractElement:
  case Instruction::Freeze:
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::PHI:
  case VPInstruction::Not:
    return false;
  default:
    return true;
  }
}

bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
    return vputils::onlyFirstLaneUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ExtractElement:
    return Op == getOperand(1);
  case Instruction::PHI:
    return true;
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Or:
  case Instruction::Freeze:
  case VPInstruction::Not:
    // TODO: Cover additional opcodes.
    return vputils::onlyFirstLaneUsed(this);
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
  case VPInstruction::Broadcast:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::ResumeForEpilogue:
    return true;
  case VPInstruction::PtrAdd:
    return Op == getOperand(0) || vputils::onlyFirstLaneUsed(this);
  case VPInstruction::WidePtrAdd:
    return Op == getOperand(0);
  case VPInstruction::ComputeAnyOfResult:
  case VPInstruction::ComputeFindIVResult:
    return Op == getOperand(1);
  case VPInstruction::ExtractLane:
    return Op == getOperand(0);
  }
  llvm_unreachable("switch should return");
}

bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstPartUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
    return vputils::onlyFirstPartUsed(this);
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
  case VPInstruction::CanonicalIVIncrementForPart:
    return true;
  }
  llvm_unreachable("switch should return");
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::ExplicitVectorLength:
    O << "EXPLICIT-VECTOR-LENGTH";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::BranchOnCond:
    O << "branch-on-cond";
    break;
  case VPInstruction::CalculateTripCountMinusVF:
    O << "TC > VF ? TC - VF : 0";
    break;
  case VPInstruction::CanonicalIVIncrementForPart:
    O << "VF * Part +";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count";
    break;
  case VPInstruction::Broadcast:
    O << "broadcast";
    break;
  case VPInstruction::BuildStructVector:
    O << "buildstructvector";
    break;
  case VPInstruction::BuildVector:
    O << "buildvector";
    break;
  case VPInstruction::ExtractLane:
    O << "extract-lane";
    break;
  case VPInstruction::ExtractLastElement:
    O << "extract-last-element";
    break;
  case VPInstruction::ExtractPenultimateElement:
    O << "extract-penultimate-element";
    break;
  case VPInstruction::ComputeAnyOfResult:
    O << "compute-anyof-result";
    break;
  case VPInstruction::ComputeFindIVResult:
    O << "compute-find-iv-result";
    break;
  case VPInstruction::ComputeReductionResult:
    O << "compute-reduction-result";
    break;
  case VPInstruction::LogicalAnd:
    O << "logical-and";
    break;
  case VPInstruction::PtrAdd:
    O << "ptradd";
    break;
  case VPInstruction::WidePtrAdd:
    O << "wide-ptradd";
    break;
  case VPInstruction::AnyOf:
    O << "any-of";
    break;
  case VPInstruction::FirstActiveLane:
    O << "first-active-lane";
    break;
  case VPInstruction::ReductionStartVector:
    O << "reduction-start-vector";
    break;
  case VPInstruction::ResumeForEpilogue:
    O << "resume-for-epilogue";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  printFlags(O);
  printOperands(O, SlotTracker);

  if (auto DL = getDebugLoc()) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

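// In a VPlan dump the printed form looks like, e.g.:
//
//   EMIT vp<%index.next> = add nuw vp<%index>, vp<%0>
//   EMIT branch-on-count vp<%index.next>, vp<%1>
//
// where the vp<%...> slot names are assigned by the VPSlotTracker passed in.
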
void VPInstructionWithType::execute(VPTransformState &State) {
  if (isScalarCast()) {
    Value *Op = State.get(getOperand(0), VPLane(0));
    Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
                                           Op, ResultTy);
    State.set(this, Cast, VPLane(0));
    return;
  }
  switch (getOpcode()) {
  case VPInstruction::StepVector: {
    Value *StepVector =
        State.Builder.CreateStepVector(VectorType::get(ResultTy, State.VF));
    State.set(this, StepVector);
    break;
  }
  case VPInstruction::VScale: {
    Value *VScale = State.Builder.CreateVScale(ResultTy);
    State.set(this, VScale, true);
    break;
  }

  default:
    llvm_unreachable("opcode not implemented yet");
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstructionWithType::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
  printAsOperand(O, SlotTracker);
  O << " = ";

  switch (getOpcode()) {
  case VPInstruction::WideIVStep:
    O << "wide-iv-step ";
    printOperands(O, SlotTracker);
    break;
  case VPInstruction::StepVector:
    O << "step-vector " << *ResultTy;
    break;
  case VPInstruction::VScale:
    O << "vscale " << *ResultTy;
    break;
  default:
    assert(Instruction::isCast(getOpcode()) && "unhandled opcode");
    O << Instruction::getOpcodeName(getOpcode()) << " ";
    printOperands(O, SlotTracker);
    O << " to " << *ResultTy;
  }
}
#endif

void VPPhi::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  PHINode *NewPhi = State.Builder.CreatePHI(
      State.TypeAnalysis.inferScalarType(this), 2, getName());
  unsigned NumIncoming = getNumIncoming();
  if (getParent() != getParent()->getPlan()->getScalarPreheader()) {
    // TODO: Fixup all incoming values of header phis once recipes defining
    // them are introduced.
    NumIncoming = 1;
  }
  for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
    Value *IncV = State.get(getIncomingValue(Idx), VPLane(0));
    BasicBlock *PredBB = State.CFG.VPBB2IRBB.at(getIncomingBlock(Idx));
    NewPhi->addIncoming(IncV, PredBB);
  }
  State.set(this, NewPhi, VPLane(0));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPhi::print(raw_ostream &O, const Twine &Indent,
                  VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printPhiOperands(O, SlotTracker);
}
#endif

VPIRInstruction *VPIRInstruction::create(Instruction &I) {
  if (auto *Phi = dyn_cast<PHINode>(&I))
    return new VPIRPhi(*Phi);
  return new VPIRInstruction(I);
}

void VPIRInstruction::execute(VPTransformState &State) {
  assert(!isa<VPIRPhi>(this) && getNumOperands() == 0 &&
         "PHINodes must be handled by VPIRPhi");
  // Advance the insert point after the wrapped IR instruction. This allows
  // interleaving VPIRInstructions and other recipes.
  State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
}

InstructionCost VPIRInstruction::computeCost(ElementCount VF,
                                             VPCostContext &Ctx) const {
  // The recipe wraps an existing IR instruction on the border of VPlan's
  // scope, hence it does not contribute to the cost-modeling for the VPlan.
  return 0;
}

void VPIRInstruction::extractLastLaneOfFirstOperand(VPBuilder &Builder) {
  assert(isa<PHINode>(getInstruction()) &&
         "can only update exiting operands to phi nodes");
  assert(getNumOperands() > 0 && "must have at least one operand");
  VPValue *Exiting = getOperand(0);
  if (Exiting->isLiveIn())
    return;

  Exiting = Builder.createNaryOp(VPInstruction::ExtractLastElement, {Exiting});
  setOperand(0, Exiting);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPIRInstruction::print(raw_ostream &O, const Twine &Indent,
                            VPSlotTracker &SlotTracker) const {
  O << Indent << "IR " << I;
}
#endif

void VPIRPhi::execute(VPTransformState &State) {
  PHINode *Phi = &getIRPhi();
  for (const auto &[Idx, Op] : enumerate(operands())) {
    VPValue *ExitValue = Op;
    auto Lane = vputils::isSingleScalar(ExitValue)
                    ? VPLane::getFirstLane()
                    : VPLane::getLastLaneForVF(State.VF);
    VPBlockBase *Pred = getParent()->getPredecessors()[Idx];
    auto *PredVPBB = Pred->getExitingBasicBlock();
    BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
    // Set insertion point in PredBB in case an extract needs to be generated.
    // TODO: Model extracts explicitly.
    State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
    Value *V = State.get(ExitValue, VPLane(Lane));
    // If there is no existing block for PredBB in the phi, add a new incoming
    // value. Otherwise update the existing incoming value for PredBB.
    if (Phi->getBasicBlockIndex(PredBB) == -1)
      Phi->addIncoming(V, PredBB);
    else
      Phi->setIncomingValueForBlock(PredBB, V);
  }

  // Advance the insert point after the wrapped IR instruction. This allows
  // interleaving VPIRInstructions and other recipes.
  State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
}

void VPPhiAccessors::removeIncomingValueFor(VPBasicBlock *IncomingBlock) const {
  VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
  assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
         "Number of phi operands must match number of predecessors");
  unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
  R->removeOperand(Position);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPhiAccessors::printPhiOperands(raw_ostream &O,
                                      VPSlotTracker &SlotTracker) const {
  interleaveComma(enumerate(getAsRecipe()->operands()), O,
                  [this, &O, &SlotTracker](auto Op) {
                    O << "[ ";
                    Op.value()->printAsOperand(O, SlotTracker);
                    O << ", ";
                    getIncomingBlock(Op.index())->printAsOperand(O);
                    O << " ]";
                  });
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPIRPhi::print(raw_ostream &O, const Twine &Indent,
                    VPSlotTracker &SlotTracker) const {
  VPIRInstruction::print(O, Indent, SlotTracker);

  if (getNumOperands() != 0) {
    O << " (extra operand" << (getNumOperands() > 1 ? "s" : "") << ": ";
    interleaveComma(zip(operands(), getParent()->getPredecessors()), O,
                    [&O, &SlotTracker](auto Op) {
                      std::get<0>(Op)->printAsOperand(O, SlotTracker);
                      O << " from ";
                      std::get<1>(Op)->printAsOperand(O);
                    });
    O << ")";
  }
}
#endif

VPIRMetadata::VPIRMetadata(Instruction &I, LoopVersioning *LVer)
    : VPIRMetadata(I) {
  if (!LVer || !isa<LoadInst, StoreInst>(&I))
    return;
  const auto &[AliasScopeMD, NoAliasMD] = LVer->getNoAliasMetadataFor(&I);
  if (AliasScopeMD)
    Metadata.emplace_back(LLVMContext::MD_alias_scope, AliasScopeMD);
  if (NoAliasMD)
    Metadata.emplace_back(LLVMContext::MD_noalias, NoAliasMD);
}

void VPIRMetadata::applyMetadata(Instruction &I) const {
  for (const auto &[Kind, Node] : Metadata)
    I.setMetadata(Kind, Node);
}

void VPIRMetadata::intersect(const VPIRMetadata &Other) {
  SmallVector<std::pair<unsigned, MDNode *>> MetadataIntersection;
  for (const auto &[KindA, MDA] : Metadata) {
    for (const auto &[KindB, MDB] : Other.Metadata) {
      if (KindA == KindB && MDA == MDB) {
        MetadataIntersection.emplace_back(KindA, MDA);
        break;
      }
    }
  }
  Metadata = std::move(MetadataIntersection);
}

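// intersect() supports transforms that merge two instructions into one (e.g.
// hoisting identical loads out of both sides of a diamond): only metadata
// present on both inputs, such as a shared !noalias scope, remains valid on
// the merged instruction, so everything else is dropped.
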
void VPWidenCallRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  assert(Variant != nullptr && "Can't create vector function.");

  FunctionType *VFTy = Variant->getFunctionType();
  // Add return type if intrinsic is overloaded on it.
  SmallVector<Value *, 4> Args;
  for (const auto &I : enumerate(args())) {
    Value *Arg;
    // Some vectorized function variants may also take a scalar argument,
    // e.g. linear parameters for pointers. This needs to be the scalar value
    // from the start of the respective part when interleaving.
    if (!VFTy->getParamType(I.index())->isVectorTy())
      Arg = State.get(I.value(), VPLane(0));
    else
      Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));
    Args.push_back(Arg);
  }

  auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
  SmallVector<OperandBundleDef, 1> OpBundles;
  if (CI)
    CI->getOperandBundlesAsDefs(OpBundles);

  CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
  applyFlags(*V);
  applyMetadata(*V);
  V->setCallingConv(Variant->getCallingConv());

  if (!V->getType()->isVoidTy())
    State.set(this, V);
}

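// For example, a scalar call to `sinf` vectorized at VF=4 against a vector
// library variant becomes a single wide call (mangled name illustrative,
// following the x86 vector function ABI):
//
//   %wide = call <4 x float> @_ZGVbN4v_sinf(<4 x float> %ops)
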
InstructionCost VPWidenCallRecipe::computeCost(ElementCount VF,
                                               VPCostContext &Ctx) const {
  return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
                                  Variant->getFunctionType()->params(),
                                  Ctx.CostKind);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  Function *CalledFn = getCalledScalarFunction();
  if (CalledFn->getReturnType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call";
  printFlags(O);
  O << " @" << CalledFn->getName() << "(";
  interleaveComma(args(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";

  O << " (using library function";
  if (Variant->hasName())
    O << ": " << Variant->getName();
  O << ")";
}
#endif

void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");

  SmallVector<Type *, 2> TysForDecl;
  // Add return type if intrinsic is overloaded on it.
  if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1, State.TTI))
    TysForDecl.push_back(VectorType::get(getResultType(), State.VF));
  SmallVector<Value *, 4> Args;
  for (const auto &I : enumerate(operands())) {
    // Some intrinsics have a scalar argument - don't replace it with a
    // vector.
    Value *Arg;
    if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
                                           State.TTI))
      Arg = State.get(I.value(), VPLane(0));
    else
      Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));
    if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
                                               State.TTI))
      TysForDecl.push_back(Arg->getType());
    Args.push_back(Arg);
  }

  // Use vector version of the intrinsic.
  Module *M = State.Builder.GetInsertBlock()->getModule();
  Function *VectorF =
      Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
  assert(VectorF &&
         "Can't retrieve vector intrinsic or vector-predication intrinsics.");

  auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
  SmallVector<OperandBundleDef, 1> OpBundles;
  if (CI)
    CI->getOperandBundlesAsDefs(OpBundles);

  CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);

  applyFlags(*V);
  applyMetadata(*V);

  if (!V->getType()->isVoidTy())
    State.set(this, V);
}

/// Compute the cost for the intrinsic \p ID with \p Operands, produced by
/// \p R.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID,
                                            ArrayRef<const VPValue *> Operands,
                                            const VPRecipeWithIRFlags &R,
                                            ElementCount VF,
                                            VPCostContext &Ctx) {
  // Some backends analyze intrinsic arguments to determine cost. Use the
  // underlying value for the operand if it has one. Otherwise try to use the
  // operand of the underlying call instruction, if there is one. Otherwise
  // clear Arguments.
  // TODO: Rework TTI interface to be independent of concrete IR values.
  SmallVector<const Value *> Arguments;
  for (const auto &[Idx, Op] : enumerate(Operands)) {
    auto *V = Op->getUnderlyingValue();
    if (!V) {
      if (auto *UI = dyn_cast_or_null<CallBase>(R.getUnderlyingValue())) {
        Arguments.push_back(UI->getArgOperand(Idx));
        continue;
      }
      Arguments.clear();
      break;
    }
    Arguments.push_back(V);
  }

  Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
  Type *RetTy = VF.isVector() ? toVectorizedTy(ScalarRetTy, VF) : ScalarRetTy;
  SmallVector<Type *> ParamTys;
  for (const VPValue *Op : Operands) {
    ParamTys.push_back(VF.isVector()
                           ? toVectorizedTy(Ctx.Types.inferScalarType(Op), VF)
                           : Ctx.Types.inferScalarType(Op));
  }

  // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
  FastMathFlags FMF =
      R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
  IntrinsicCostAttributes CostAttrs(
      ID, RetTy, Arguments, ParamTys, FMF,
      dyn_cast_or_null<IntrinsicInst>(R.getUnderlyingValue()),
      InstructionCost::getInvalid(), &Ctx.TLI);
  return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
}

InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
                                                    VPCostContext &Ctx) const {
  SmallVector<const VPValue *> ArgOps(operands());
  return getCostForIntrinsics(VectorIntrinsicID, ArgOps, *this, VF, Ctx);
}

StringRef VPWidenIntrinsicRecipe::getIntrinsicName() const {
  return Intrinsic::getBaseName(VectorIntrinsicID);
}

bool VPWidenIntrinsicRecipe::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  return all_of(enumerate(operands()), [this, &Op](const auto &X) {
    auto [Idx, V] = X;
    return V != Op || isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID,
                                                         Idx, nullptr);
  });
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenIntrinsicRecipe::print(raw_ostream &O, const Twine &Indent,
                                   VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INTRINSIC ";
  if (ResultTy->isVoidTy()) {
    O << "void ";
  } else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call";
  printFlags(O);
  O << getIntrinsicName() << "(";

  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";
}
#endif

void VPHistogramRecipe::execute(VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;

  Value *Address = State.get(getOperand(0));
  Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
  VectorType *VTy = cast<VectorType>(Address->getType());

  // The histogram intrinsic requires a mask even if the recipe doesn't;
  // if the mask operand was omitted then all lanes should be executed and
  // we just need to synthesize an all-true mask.
  Value *Mask = nullptr;
  if (VPValue *VPMask = getMask())
    Mask = State.get(VPMask);
  else
    Mask =
        Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));

  // If this is a subtract, we want to invert the increment amount. We may
  // add a separate intrinsic in future, but for now we'll try this.
  if (Opcode == Instruction::Sub)
    IncAmt = Builder.CreateNeg(IncAmt);
  else
    assert(Opcode == Instruction::Add && "only add or sub supported for now");

  State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
                                {VTy, IncAmt->getType()},
                                {Address, IncAmt, Mask});
}

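// The generated sequence has the form (e.g. VF=4, i32 buckets, increment 1):
//
//   call void @llvm.experimental.vector.histogram.add.v4p0.i32(
//       <4 x ptr> %bucket.ptrs, i32 1, <4 x i1> %mask)
//
// which correctly handles several lanes hitting the same bucket, something a
// plain gather/add/scatter sequence would get wrong.
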
InstructionCost VPHistogramRecipe::computeCost(ElementCount VF,
                                               VPCostContext &Ctx) const {
  // FIXME: Take the gather and scatter into account as well. For now we're
  //        generating the same cost as the fallback path, but we'll likely
  //        need to create a new TTI method for determining the cost, including
  //        whether we can use base + vec-of-smaller-indices or just
  //        vec-of-pointers.
  assert(VF.isVector() && "Invalid VF for histogram cost");
  Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
  VPValue *IncAmt = getOperand(1);
  Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
  VectorType *VTy = VectorType::get(IncTy, VF);

  // Assume that a non-constant update value (or a constant != 1) requires
  // a multiply, and add that into the cost.
  InstructionCost MulCost =
      Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
  if (IncAmt->isLiveIn()) {
    ConstantInt *CI = dyn_cast<ConstantInt>(IncAmt->getLiveInIRValue());

    if (CI && CI->getZExtValue() == 1)
      MulCost = TTI::TCC_Free;
  }

  // Find the cost of the histogram operation itself.
  Type *PtrTy = VectorType::get(AddressTy, VF);
  Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
  IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
                              Type::getVoidTy(Ctx.LLVMCtx),
                              {PtrTy, IncTy, MaskTy});

  // Add the costs together with the add/sub operation.
  return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
         Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPHistogramRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-HISTOGRAM buckets: ";
  getOperand(0)->printAsOperand(O, SlotTracker);

  if (Opcode == Instruction::Sub)
    O << ", dec: ";
  else {
    assert(Opcode == Instruction::Add);
    O << ", inc: ";
  }
  getOperand(1)->printAsOperand(O, SlotTracker);

  if (VPValue *Mask = getMask()) {
    O << ", mask: ";
    Mask->printAsOperand(O, SlotTracker);
  }
}

1894
1896 VPSlotTracker &SlotTracker) const {
1897 O << Indent << "WIDEN-SELECT ";
1899 O << " = select ";
1900 printFlags(O);
1902 O << ", ";
1904 O << ", ";
1906 O << (isInvariantCond() ? " (condition is loop invariant)" : "");
1907}
1908#endif
1909
1911 // The condition can be loop invariant but still defined inside the
1912 // loop. This means that we can't just use the original 'cond' value.
1913 // We have to take the 'vectorized' value and pick the first lane.
1914 // Instcombine will make this a no-op.
1915 auto *InvarCond =
1916 isInvariantCond() ? State.get(getCond(), VPLane(0)) : nullptr;
1917
1918 Value *Cond = InvarCond ? InvarCond : State.get(getCond());
1919 Value *Op0 = State.get(getOperand(1));
1920 Value *Op1 = State.get(getOperand(2));
1921 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
1922 State.set(this, Sel);
1923 if (auto *I = dyn_cast<Instruction>(Sel)) {
1924 if (isa<FPMathOperator>(I))
1925 applyFlags(*I);
1926 applyMetadata(*I);
1927 }
1928}
1929
1931 VPCostContext &Ctx) const {
1932 SelectInst *SI = cast<SelectInst>(getUnderlyingValue());
1933 bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
1934 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1935 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1936
1937 VPValue *Op0, *Op1;
1938 using namespace llvm::VPlanPatternMatch;
1939 if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
1940 (match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) ||
1941 match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) {
1942 // select x, y, false --> x & y
1943 // select x, true, y --> x | y
1944 const auto [Op1VK, Op1VP] = Ctx.getOperandInfo(Op0);
1945 const auto [Op2VK, Op2VP] = Ctx.getOperandInfo(Op1);
1946
1947    SmallVector<const Value *, 4> Operands;
1948    if (all_of(operands(),
1949 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
1950 Operands.append(SI->op_begin(), SI->op_end());
1951 bool IsLogicalOr = match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1)));
1952 return Ctx.TTI.getArithmeticInstrCost(
1953 IsLogicalOr ? Instruction::Or : Instruction::And, VectorTy,
1954 Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
1955 }
1956
1957 Type *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1958 if (!ScalarCond)
1959 CondTy = VectorType::get(CondTy, VF);
1960
1961  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
1962  if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
1963 Pred = Cmp->getPredicate();
1964 return Ctx.TTI.getCmpSelInstrCost(
1965 Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
1966 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
1967}
1968
1969VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
1970 AllowReassoc = FMF.allowReassoc();
1971 NoNaNs = FMF.noNaNs();
1972 NoInfs = FMF.noInfs();
1973 NoSignedZeros = FMF.noSignedZeros();
1974 AllowReciprocal = FMF.allowReciprocal();
1975 AllowContract = FMF.allowContract();
1976 ApproxFunc = FMF.approxFunc();
1977}
1978
1979#if !defined(NDEBUG)
1980bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
1981 switch (OpType) {
1982 case OperationType::OverflowingBinOp:
1983 return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
1984 Opcode == Instruction::Mul ||
1985           Opcode == VPInstruction::CanonicalIVIncrementForPart;
1986 case OperationType::Trunc:
1987 return Opcode == Instruction::Trunc;
1988 case OperationType::DisjointOp:
1989 return Opcode == Instruction::Or;
1990 case OperationType::PossiblyExactOp:
1991 return Opcode == Instruction::AShr;
1992 case OperationType::GEPOp:
1993 return Opcode == Instruction::GetElementPtr ||
1994 Opcode == VPInstruction::PtrAdd ||
1995 Opcode == VPInstruction::WidePtrAdd;
1996 case OperationType::FPMathOp:
1997 return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
1998 Opcode == Instruction::FSub || Opcode == Instruction::FNeg ||
1999 Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
2000 Opcode == Instruction::FCmp || Opcode == Instruction::Select ||
2002           Opcode == VPInstruction::WideIVStep ||
2003           Opcode == VPInstruction::ReductionStartVector;
2004  case OperationType::NonNegOp:
2005    return Opcode == Instruction::ZExt;
2007 case OperationType::Cmp:
2008 return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
2009 case OperationType::Other:
2010 return true;
2011 }
2012 llvm_unreachable("Unknown OperationType enum");
2013}
2014#endif
2015
2016#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2017void VPIRFlags::printFlags(raw_ostream &O) const {
2018  switch (OpType) {
2019  case OperationType::Cmp:
2020    O << " " << CmpInst::getPredicateName(getPredicate());
2021    break;
2022  case OperationType::DisjointOp:
2023    if (DisjointFlags.IsDisjoint)
2024      O << " disjoint";
2025 break;
2026 case OperationType::PossiblyExactOp:
2027 if (ExactFlags.IsExact)
2028 O << " exact";
2029 break;
2030 case OperationType::OverflowingBinOp:
2031 if (WrapFlags.HasNUW)
2032 O << " nuw";
2033 if (WrapFlags.HasNSW)
2034 O << " nsw";
2035 break;
2036 case OperationType::Trunc:
2037 if (TruncFlags.HasNUW)
2038 O << " nuw";
2039 if (TruncFlags.HasNSW)
2040 O << " nsw";
2041 break;
2042  case OperationType::FPMathOp:
2043    getFastMathFlags().print(O);
2044    break;
2045 case OperationType::GEPOp:
2046 if (GEPFlags.isInBounds())
2047      O << " inbounds";
2048    else if (GEPFlags.hasNoUnsignedSignedWrap())
2049      O << " nusw";
2050    if (GEPFlags.hasNoUnsignedWrap())
2051      O << " nuw";
2052 break;
2053 case OperationType::NonNegOp:
2054 if (NonNegFlags.NonNeg)
2055 O << " nneg";
2056 break;
2057 case OperationType::Other:
2058 break;
2059 }
2060 O << " ";
2061}
2062#endif
2063
2064void VPWidenRecipe::execute(VPTransformState &State) {
2065  auto &Builder = State.Builder;
2066 switch (Opcode) {
2067 case Instruction::Call:
2068 case Instruction::Br:
2069 case Instruction::PHI:
2070 case Instruction::GetElementPtr:
2071 case Instruction::Select:
2072 llvm_unreachable("This instruction is handled by a different recipe.");
2073 case Instruction::UDiv:
2074 case Instruction::SDiv:
2075 case Instruction::SRem:
2076 case Instruction::URem:
2077 case Instruction::Add:
2078 case Instruction::FAdd:
2079 case Instruction::Sub:
2080 case Instruction::FSub:
2081 case Instruction::FNeg:
2082 case Instruction::Mul:
2083 case Instruction::FMul:
2084 case Instruction::FDiv:
2085 case Instruction::FRem:
2086 case Instruction::Shl:
2087 case Instruction::LShr:
2088 case Instruction::AShr:
2089 case Instruction::And:
2090 case Instruction::Or:
2091 case Instruction::Xor: {
2092    // Just widen unops and binops.
2093    SmallVector<Value *, 2> Ops;
2094    for (VPValue *VPOp : operands())
2095 Ops.push_back(State.get(VPOp));
2096
2097 Value *V = Builder.CreateNAryOp(Opcode, Ops);
2098
2099 if (auto *VecOp = dyn_cast<Instruction>(V)) {
2100 applyFlags(*VecOp);
2101 applyMetadata(*VecOp);
2102 }
2103
2104 // Use this vector value for all users of the original instruction.
2105 State.set(this, V);
2106 break;
2107 }
2108 case Instruction::ExtractValue: {
2109 assert(getNumOperands() == 2 && "expected single level extractvalue");
2110 Value *Op = State.get(getOperand(0));
2111 auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
2112 Value *Extract = Builder.CreateExtractValue(Op, CI->getZExtValue());
2113 State.set(this, Extract);
2114 break;
2115 }
2116 case Instruction::Freeze: {
2117 Value *Op = State.get(getOperand(0));
2118 Value *Freeze = Builder.CreateFreeze(Op);
2119 State.set(this, Freeze);
2120 break;
2121 }
2122 case Instruction::ICmp:
2123 case Instruction::FCmp: {
2124 // Widen compares. Generate vector compares.
2125 bool FCmp = Opcode == Instruction::FCmp;
2126 Value *A = State.get(getOperand(0));
2127 Value *B = State.get(getOperand(1));
2128 Value *C = nullptr;
2129 if (FCmp) {
2130 // Propagate fast math flags.
2131 C = Builder.CreateFCmpFMF(
2132 getPredicate(), A, B,
2133 dyn_cast_or_null<Instruction>(getUnderlyingValue()));
2134 } else {
2135 C = Builder.CreateICmp(getPredicate(), A, B);
2136 }
2137 if (auto *I = dyn_cast<Instruction>(C))
2138 applyMetadata(*I);
2139 State.set(this, C);
2140 break;
2141 }
2142 default:
2143 // This instruction is not vectorized by simple widening.
2144 LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
2145 << Instruction::getOpcodeName(Opcode));
2146 llvm_unreachable("Unhandled instruction!");
2147 } // end of switch.
2148
2149#if !defined(NDEBUG)
2150 // Verify that VPlan type inference results agree with the type of the
2151 // generated values.
2152  assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
2153             State.get(this)->getType() &&
2154 "inferred type and type from generated instructions do not match");
2155#endif
2156}
2157
2158InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
2159                                           VPCostContext &Ctx) const {
2160 switch (Opcode) {
2161 case Instruction::UDiv:
2162 case Instruction::SDiv:
2163 case Instruction::SRem:
2164 case Instruction::URem:
2165 // If the div/rem operation isn't safe to speculate and requires
2166 // predication, then the only way we can even create a vplan is to insert
2167 // a select on the second input operand to ensure we use the value of 1
2168 // for the inactive lanes. The select will be costed separately.
2169 case Instruction::FNeg:
2170 case Instruction::Add:
2171 case Instruction::FAdd:
2172 case Instruction::Sub:
2173 case Instruction::FSub:
2174 case Instruction::Mul:
2175 case Instruction::FMul:
2176 case Instruction::FDiv:
2177 case Instruction::FRem:
2178 case Instruction::Shl:
2179 case Instruction::LShr:
2180 case Instruction::AShr:
2181 case Instruction::And:
2182 case Instruction::Or:
2183 case Instruction::Xor:
2184 case Instruction::Freeze:
2185 case Instruction::ExtractValue:
2186 case Instruction::ICmp:
2187 case Instruction::FCmp:
2188 return *getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2189 default:
2190 llvm_unreachable("Unsupported opcode for instruction");
2191 }
2192}
2193
2194#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2195void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
2196                          VPSlotTracker &SlotTracker) const {
2197  O << Indent << "WIDEN ";
2198  printAsOperand(O, SlotTracker);
2199  O << " = " << Instruction::getOpcodeName(Opcode);
2200  printFlags(O);
2201  printOperands(O, SlotTracker);
2202}
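// For example, a widened add carrying both wrap flags prints roughly as
// (illustrative): WIDEN ir<%add> = add nuw nsw ir<%a>, ir<%b>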
2203#endif
2204
2205void VPWidenCastRecipe::execute(VPTransformState &State) {
2206  auto &Builder = State.Builder;
2207  // Vectorize casts.
2208 assert(State.VF.isVector() && "Not vectorizing?");
2209 Type *DestTy = VectorType::get(getResultType(), State.VF);
2210 VPValue *Op = getOperand(0);
2211 Value *A = State.get(Op);
2212 Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
2213 State.set(this, Cast);
2214 if (auto *CastOp = dyn_cast<Instruction>(Cast)) {
2215 applyFlags(*CastOp);
2216 applyMetadata(*CastOp);
2217 }
2218}
2219
2220InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
2221                                               VPCostContext &Ctx) const {
2222 // TODO: In some cases, VPWidenCastRecipes are created but not considered in
2223 // the legacy cost model, including truncates/extends when evaluating a
2224 // reduction in a smaller type.
2225 if (!getUnderlyingValue())
2226 return 0;
2227  // Computes the CastContextHint from a recipe that may access memory.
2228  auto ComputeCCH = [&](const VPRecipeBase *R) -> TTI::CastContextHint {
2229    if (VF.isScalar())
2230      return TTI::CastContextHint::Normal;
2231    if (isa<VPInterleaveBase>(R))
2232      return TTI::CastContextHint::Interleave;
2233    if (const auto *ReplicateRecipe = dyn_cast<VPReplicateRecipe>(R))
2234      return ReplicateRecipe->isPredicated() ? TTI::CastContextHint::Masked
2235                                             : TTI::CastContextHint::Normal;
2236    const auto *WidenMemoryRecipe = dyn_cast<VPWidenMemoryRecipe>(R);
2237    if (WidenMemoryRecipe == nullptr)
2238      return TTI::CastContextHint::None;
2239    if (!WidenMemoryRecipe->isConsecutive())
2240      return TTI::CastContextHint::GatherScatter;
2241    if (WidenMemoryRecipe->isReverse())
2242      return TTI::CastContextHint::Reversed;
2243    if (WidenMemoryRecipe->isMasked())
2244      return TTI::CastContextHint::Masked;
2245    return TTI::CastContextHint::Normal;
2246  };
2247
2248  VPValue *Operand = getOperand(0);
2249  TTI::CastContextHint CCH = TTI::CastContextHint::None;
2250  // For Trunc/FPTrunc, get the context from the only user.
2251  if ((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
2252      !hasMoreThanOneUniqueUser() && getNumUsers() > 0) {
2253    if (auto *StoreRecipe = dyn_cast<VPRecipeBase>(*user_begin()))
2254      CCH = ComputeCCH(StoreRecipe);
2255  }
2256 // For Z/Sext, get the context from the operand.
2257 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
2258 Opcode == Instruction::FPExt) {
2259    if (Operand->isLiveIn())
2260      CCH = TTI::CastContextHint::Normal;
2261    else if (Operand->getDefiningRecipe())
2262      CCH = ComputeCCH(Operand->getDefiningRecipe());
2263 }
2264
2265 auto *SrcTy =
2266 cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
2267 auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
2268 // Arm TTI will use the underlying instruction to determine the cost.
2269 return Ctx.TTI.getCastInstrCost(
2270 Opcode, DestTy, SrcTy, CCH, Ctx.CostKind,
2271 dyn_cast_if_present<Instruction>(getUnderlyingValue()));
2272}
2273
2274#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2275void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
2276                              VPSlotTracker &SlotTracker) const {
2277  O << Indent << "WIDEN-CAST ";
2278  printAsOperand(O, SlotTracker);
2279  O << " = " << Instruction::getOpcodeName(Opcode);
2280  printFlags(O);
2281  printOperands(O, SlotTracker);
2282  O << " to " << *getResultType();
2283}
2284#endif
2285
2286InstructionCost VPHeaderPHIRecipe::computeCost(ElementCount VF,
2287                                               VPCostContext &Ctx) const {
2288 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2289}
2290
2291/// A helper function that returns an integer or floating-point constant with
2292/// value C.
2293static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
2294  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
2295 : ConstantFP::get(Ty, C);
2296}
2297
2298#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2299void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2300                                          VPSlotTracker &SlotTracker) const {
2301  O << Indent;
2302  printAsOperand(O, SlotTracker);
2303  O << " = WIDEN-INDUCTION ";
2304  printOperands(O, SlotTracker);
2305
2306 if (auto *TI = getTruncInst())
2307 O << " (truncated to " << *TI->getType() << ")";
2308}
2309#endif
2310
2311bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
2312  // The step may be defined by a recipe in the preheader (e.g. if it requires
2313  // SCEV expansion), but for the canonical induction the step is required to be
2314  // 1, which is represented as live-in.
2315  if (!getStepValue()->isLiveIn())
2316    return false;
2317 auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
2318 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
2319 auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
2320 return StartC && StartC->isZero() && StepC && StepC->isOne() &&
2321 getScalarType() == CanIV->getScalarType();
2322}
2323
2324#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2325void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
2326                              VPSlotTracker &SlotTracker) const {
2327  O << Indent;
2328  printAsOperand(O, SlotTracker);
2329  O << " = DERIVED-IV ";
2330  getStartValue()->printAsOperand(O, SlotTracker);
2331  O << " + ";
2332  getOperand(1)->printAsOperand(O, SlotTracker);
2333  O << " * ";
2334  getStepValue()->printAsOperand(O, SlotTracker);
2335}
2336#endif
2337
2338void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
2339  // Fast-math-flags propagate from the original induction instruction.
2340  IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
2341  if (hasFastMathFlags())
2342    State.Builder.setFastMathFlags(getFastMathFlags());
2343
2344  // Compute scalar induction steps: the base induction value is taken from
2345  // operand 0 and the step size from the step operand.
2346
2347 Value *BaseIV = State.get(getOperand(0), VPLane(0));
2348 Value *Step = State.get(getStepValue(), VPLane(0));
2349 IRBuilderBase &Builder = State.Builder;
2350
2351 // Ensure step has the same type as that of scalar IV.
2352 Type *BaseIVTy = BaseIV->getType()->getScalarType();
2353 assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
2354
2355 // We build scalar steps for both integer and floating-point induction
2356  // variables. Here, we determine the kind of arithmetic we will perform.
2357  Instruction::BinaryOps AddOp;
2358  Instruction::BinaryOps MulOp;
2359  if (BaseIVTy->isIntegerTy()) {
2360 AddOp = Instruction::Add;
2361 MulOp = Instruction::Mul;
2362 } else {
2363 AddOp = InductionOpcode;
2364 MulOp = Instruction::FMul;
2365 }
2366
2367 // Determine the number of scalars we need to generate for each unroll
2368 // iteration.
2369 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
2370 // Compute the scalar steps and save the results in State.
2371 Type *IntStepTy =
2372 IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
2373 Type *VecIVTy = nullptr;
2374 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2375 if (!FirstLaneOnly && State.VF.isScalable()) {
2376 VecIVTy = VectorType::get(BaseIVTy, State.VF);
2377 UnitStepVec =
2378 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2379 SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2380 SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
2381 }
2382
2383 unsigned StartLane = 0;
2384 unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2385 if (State.Lane) {
2386 StartLane = State.Lane->getKnownLane();
2387 EndLane = StartLane + 1;
2388 }
2389 Value *StartIdx0;
2390 if (getUnrollPart(*this) == 0)
2391 StartIdx0 = ConstantInt::get(IntStepTy, 0);
2392 else {
2393 StartIdx0 = State.get(getOperand(2), true);
2394 if (getUnrollPart(*this) != 1) {
2395 StartIdx0 =
2396 Builder.CreateMul(StartIdx0, ConstantInt::get(StartIdx0->getType(),
2397 getUnrollPart(*this)));
2398 }
2399 StartIdx0 = Builder.CreateSExtOrTrunc(StartIdx0, IntStepTy);
2400 }
2401
2402 if (!FirstLaneOnly && State.VF.isScalable()) {
2403 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2404 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2405 if (BaseIVTy->isFloatingPointTy())
2406 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2407 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2408 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2409 State.set(this, Add);
2410 // It's useful to record the lane values too for the known minimum number
2411 // of elements so we do those below. This improves the code quality when
2412 // trying to extract the first element, for example.
2413 }
2414
2415 if (BaseIVTy->isFloatingPointTy())
2416 StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
2417
2418 for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
2419 Value *StartIdx = Builder.CreateBinOp(
2420 AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
2421 // The step returned by `createStepForVF` is a runtime-evaluated value
2422 // when VF is scalable. Otherwise, it should be folded into a Constant.
2423 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2424 "Expected StartIdx to be folded to a constant when VF is not "
2425 "scalable");
2426 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2427 auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
2428 State.set(this, Add, VPLane(Lane));
2429 }
2430}
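// Illustrative example (invented names): for a base IV %iv, step 2, fixed
// VF=4 and unroll part 0 (StartIdx0 == 0), the per-lane scalar steps are
//   lane L: %iv + (0 + L) * 2   for L = 0..3,
// i.e. %iv, %iv + 2, %iv + 4 and %iv + 6.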
2431
2432#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2433void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
2434                                  VPSlotTracker &SlotTracker) const {
2435  O << Indent;
2436  printAsOperand(O, SlotTracker);
2437  O << " = SCALAR-STEPS ";
2438  printOperands(O, SlotTracker);
2439}
2440#endif
2441
2442void VPWidenGEPRecipe::execute(VPTransformState &State) {
2443  assert(State.VF.isVector() && "not widening");
2444 auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
2445 // Construct a vector GEP by widening the operands of the scalar GEP as
2446 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
2447 // results in a vector of pointers when at least one operand of the GEP
2448 // is vector-typed. Thus, to keep the representation compact, we only use
2449 // vector-typed operands for loop-varying values.
2450
2451 if (areAllOperandsInvariant()) {
2452 // If we are vectorizing, but the GEP has only loop-invariant operands,
2453 // the GEP we build (by only using vector-typed operands for
2454 // loop-varying values) would be a scalar pointer. Thus, to ensure we
2455 // produce a vector of pointers, we need to either arbitrarily pick an
2456 // operand to broadcast, or broadcast a clone of the original GEP.
2457 // Here, we broadcast a clone of the original.
2458 //
2459 // TODO: If at some point we decide to scalarize instructions having
2460 // loop-invariant operands, this special case will no longer be
2461 // required. We would add the scalarization decision to
2462 // collectLoopScalars() and teach getVectorValue() to broadcast
2463    //       the lane-zero scalar value.
2464    SmallVector<Value *> Ops;
2465    for (unsigned I = 0, E = getNumOperands(); I != E; I++)
2466 Ops.push_back(State.get(getOperand(I), VPLane(0)));
2467
2468 auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
2469                                           ArrayRef(Ops).drop_front(), "",
2470                                           getGEPNoWrapFlags());
2471    Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
2472 State.set(this, Splat);
2473 } else {
2474 // If the GEP has at least one loop-varying operand, we are sure to
2475 // produce a vector of pointers unless VF is scalar.
2476 // The pointer operand of the new GEP. If it's loop-invariant, we
2477 // won't broadcast it.
2478 auto *Ptr = isPointerLoopInvariant() ? State.get(getOperand(0), VPLane(0))
2479 : State.get(getOperand(0));
2480
2481 // Collect all the indices for the new GEP. If any index is
2482    // loop-invariant, we won't broadcast it.
2483    SmallVector<Value *, 4> Indices;
2484    for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
2485 VPValue *Operand = getOperand(I);
2486 if (isIndexLoopInvariant(I - 1))
2487 Indices.push_back(State.get(Operand, VPLane(0)));
2488 else
2489 Indices.push_back(State.get(Operand));
2490 }
2491
2492 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
2493 // but it should be a vector, otherwise.
2494 auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
2495 Indices, "", getGEPNoWrapFlags());
2496 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
2497 "NewGEP is not a pointer vector");
2498 State.set(this, NewGEP);
2499 }
2500}
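// Illustrative example (invented names): with an invariant base pointer and
// one widened index, the emitted GEP yields a vector of pointers, e.g.
//   %vgep = getelementptr inbounds i32, ptr %base, <4 x i64> %vec.ind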
2501
2502#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2503void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
2504                             VPSlotTracker &SlotTracker) const {
2505 O << Indent << "WIDEN-GEP ";
2506 O << (isPointerLoopInvariant() ? "Inv" : "Var");
2507 for (size_t I = 0; I < getNumOperands() - 1; ++I)
2508 O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
2509
2510  O << " ";
2511  printAsOperand(O, SlotTracker);
2512  O << " = getelementptr";
2513  printFlags(O);
2514  printOperands(O, SlotTracker);
2515}
2516#endif
2517
2518static Type *getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride,
2519 unsigned CurrentPart, IRBuilderBase &Builder) {
2520 // Use i32 for the gep index type when the value is constant,
2521 // or query DataLayout for a more suitable index type otherwise.
2522 const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
2523 return !IsUnitStride || (IsScalable && (IsReverse || CurrentPart > 0))
2524 ? DL.getIndexType(Builder.getPtrTy(0))
2525 : Builder.getInt32Ty();
2526}
2527
2528void VPVectorEndPointerRecipe::execute(VPTransformState &State) {
2529  auto &Builder = State.Builder;
2530 unsigned CurrentPart = getUnrollPart(*this);
2531 bool IsUnitStride = Stride == 1 || Stride == -1;
2532 Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ true,
2533 IsUnitStride, CurrentPart, Builder);
2534
2535 // The wide store needs to start at the last vector element.
2536 Value *RunTimeVF = State.get(getVFValue(), VPLane(0));
2537 if (IndexTy != RunTimeVF->getType())
2538 RunTimeVF = Builder.CreateZExtOrTrunc(RunTimeVF, IndexTy);
2539 // NumElt = Stride * CurrentPart * RunTimeVF
2540 Value *NumElt = Builder.CreateMul(
2541 ConstantInt::get(IndexTy, Stride * (int64_t)CurrentPart), RunTimeVF);
2542 // LastLane = Stride * (RunTimeVF - 1)
2543 Value *LastLane = Builder.CreateSub(RunTimeVF, ConstantInt::get(IndexTy, 1));
2544 if (Stride != 1)
2545 LastLane = Builder.CreateMul(ConstantInt::get(IndexTy, Stride), LastLane);
2546 Value *Ptr = State.get(getOperand(0), VPLane(0));
2547 Value *ResultPtr =
2548 Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", getGEPNoWrapFlags());
2549  ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
2550                                getGEPNoWrapFlags());
2551
2552 State.set(this, ResultPtr, /*IsScalar*/ true);
2553}
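// Illustrative example: for a unit-stride reverse access (Stride == -1) with
// fixed VF=4 and unroll part 0, NumElt = -1 * 0 * 4 = 0 and
// LastLane = -1 * (4 - 1) = -3, so the result points at &base[-3], the lowest
// address touched by the reversed vector.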
2554
2555#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2556void VPVectorEndPointerRecipe::print(raw_ostream &O, const Twine &Indent,
2557                                     VPSlotTracker &SlotTracker) const {
2558  O << Indent;
2559  printAsOperand(O, SlotTracker);
2560  O << " = vector-end-pointer";
2561  printFlags(O);
2562  printOperands(O, SlotTracker);
2563}
2564#endif
2565
2566void VPVectorPointerRecipe::execute(VPTransformState &State) {
2567  auto &Builder = State.Builder;
2568 unsigned CurrentPart = getUnrollPart(*this);
2569 Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
2570 /*IsUnitStride*/ true, CurrentPart, Builder);
2571 Value *Ptr = State.get(getOperand(0), VPLane(0));
2572
2573 Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
2574 Value *ResultPtr =
2575 Builder.CreateGEP(IndexedTy, Ptr, Increment, "", getGEPNoWrapFlags());
2576
2577 State.set(this, ResultPtr, /*IsScalar*/ true);
2578}
2579
2580#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2581void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
2582                                  VPSlotTracker &SlotTracker) const {
2583  O << Indent;
2584  printAsOperand(O, SlotTracker);
2585  O << " = vector-pointer ";
2586
2587  printOperands(O, SlotTracker);
2588}
2589#endif
2590
2591InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
2592                                           VPCostContext &Ctx) const {
2593 // Handle cases where only the first lane is used the same way as the legacy
2594 // cost model.
2595  if (vputils::onlyFirstLaneUsed(this))
2596    return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2597
2598 Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
2599 Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
2600 return (getNumIncomingValues() - 1) *
2601         Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
2602                                    CmpInst::BAD_ICMP_PREDICATE, Ctx.CostKind);
2603}
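// Illustrative example (invented names): a blend of three incoming values is
// costed as getNumIncomingValues() - 1 == 2 chained vector selects, e.g.
//   %b0 = select <4 x i1> %m1, <4 x i32> %v1, <4 x i32> %v0
//   %b1 = select <4 x i1> %m2, <4 x i32> %v2, <4 x i32> %b0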
2604
2605#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2606void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
2607                          VPSlotTracker &SlotTracker) const {
2608  O << Indent << "BLEND ";
2609  printAsOperand(O, SlotTracker);
2610  O << " =";
2611 if (getNumIncomingValues() == 1) {
2612 // Not a User of any mask: not really blending, this is a
2613 // single-predecessor phi.
2614    O << " ";
2615    getIncomingValue(0)->printAsOperand(O, SlotTracker);
2616  } else {
2617 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
2618      O << " ";
2619      getIncomingValue(I)->printAsOperand(O, SlotTracker);
2620      if (I == 0)
2621        continue;
2622      O << "/";
2623      getMask(I)->printAsOperand(O, SlotTracker);
2624    }
2625 }
2626}
2627#endif
2628
2629void VPReductionRecipe::execute(VPTransformState &State) {
2630  assert(!State.Lane && "Reduction being replicated.");
2631  Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2632  RecurKind Kind = getRecurrenceKind();
2633  assert(!RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
2634         "In-loop AnyOf reductions aren't currently supported");
2635  // Propagate the fast-math flags carried by the underlying instruction.
2636  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
2637  State.Builder.setFastMathFlags(getFastMathFlags());
2638  Value *NewVecOp = State.get(getVecOp());
2639 if (VPValue *Cond = getCondOp()) {
2640 Value *NewCond = State.get(Cond, State.VF.isScalar());
2641 VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
2642 Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
2643
2644 Value *Start = getRecurrenceIdentity(Kind, ElementTy, getFastMathFlags());
2645 if (State.VF.isVector())
2646 Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
2647
2648 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
2649 NewVecOp = Select;
2650 }
2651 Value *NewRed;
2652 Value *NextInChain;
2653 if (IsOrdered) {
2654 if (State.VF.isVector())
2655 NewRed =
2656 createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain);
2657 else
2658      NewRed = State.Builder.CreateBinOp(
2659          (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
2660          PrevInChain, NewVecOp);
2661 PrevInChain = NewRed;
2662 NextInChain = NewRed;
2663 } else {
2664 PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2665    NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind);
2666    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2667      NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
2668    else
2669      NextInChain = State.Builder.CreateBinOp(
2670          (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
2671          PrevInChain, NewRed);
2672 }
2673 State.set(this, NextInChain, /*IsScalar*/ true);
2674}
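// Illustrative example (invented names): a conditional in-loop add reduction
// with VF=4 substitutes the identity (0) for masked-off lanes before
// reducing into the scalar chain:
//   %sel  = select <4 x i1> %mask, <4 x i32> %vec, <4 x i32> zeroinitializer
//   %red  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sel)
//   %next = add i32 %chain, %red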
2675
2676void VPReductionEVLRecipe::execute(VPTransformState &State) {
2677  assert(!State.Lane && "Reduction being replicated.");
2678
2679  auto &Builder = State.Builder;
2680  // Propagate the fast-math flags carried by the underlying instruction.
2681  IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2682  Builder.setFastMathFlags(getFastMathFlags());
2683
2684  RecurKind Kind = getRecurrenceKind();
2685 Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
2686 Value *VecOp = State.get(getVecOp());
2687 Value *EVL = State.get(getEVL(), VPLane(0));
2688
2689 Value *Mask;
2690 if (VPValue *CondOp = getCondOp())
2691 Mask = State.get(CondOp);
2692 else
2693 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2694
2695 Value *NewRed;
2696 if (isOrdered()) {
2697 NewRed = createOrderedReduction(Builder, Kind, VecOp, Prev, Mask, EVL);
2698 } else {
2699    NewRed = createSimpleReduction(Builder, VecOp, Kind, Mask, EVL);
2700    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2701      NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2702    else
2703      NewRed = Builder.CreateBinOp(
2704          (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), NewRed,
2705          Prev);
2706 }
2707 State.set(this, NewRed, /*IsScalar*/ true);
2708}
2709
2710InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
2711                                               VPCostContext &Ctx) const {
2712 RecurKind RdxKind = getRecurrenceKind();
2713 Type *ElementTy = Ctx.Types.inferScalarType(this);
2714 auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
2715  unsigned Opcode = RecurrenceDescriptor::getOpcode(RdxKind);
2716  FastMathFlags FMFs = getFastMathFlags();
2717  std::optional<FastMathFlags> OptionalFMF =
2718 ElementTy->isFloatingPointTy() ? std::make_optional(FMFs) : std::nullopt;
2719
2720 // TODO: Support any-of reductions.
2721  assert(
2722      (!RecurrenceDescriptor::isAnyOfRecurrenceKind(RdxKind) ||
2723       ForceTargetInstructionCost.getNumOccurrences() > 0) &&
2724 "Any-of reduction not implemented in VPlan-based cost model currently.");
2725
2726 // Note that TTI should model the cost of moving result to the scalar register
2727  // and the BinOp cost in the getMinMaxReductionCost().
2728  if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind)) {
2729    Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind);
2730    return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
2731 }
2732
2733 // Note that TTI should model the cost of moving result to the scalar register
2734 // and the BinOp cost in the getArithmeticReductionCost().
2735 return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
2736 Ctx.CostKind);
2737}
2738
2739VPExpressionRecipe::VPExpressionRecipe(
2740    ExpressionTypes ExpressionType,
2741 ArrayRef<VPSingleDefRecipe *> ExpressionRecipes)
2742 : VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}),
2743 ExpressionRecipes(SetVector<VPSingleDefRecipe *>(
2744 ExpressionRecipes.begin(), ExpressionRecipes.end())
2745 .takeVector()),
2746 ExpressionType(ExpressionType) {
2747 assert(!ExpressionRecipes.empty() && "Nothing to combine?");
2748 assert(
2749 none_of(ExpressionRecipes,
2750 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2751 "expression cannot contain recipes with side-effects");
2752
2753 // Maintain a copy of the expression recipes as a set of users.
2754 SmallPtrSet<VPUser *, 4> ExpressionRecipesAsSetOfUsers;
2755 for (auto *R : ExpressionRecipes)
2756 ExpressionRecipesAsSetOfUsers.insert(R);
2757
2758 // Recipes in the expression, except the last one, must only be used by
2759 // (other) recipes inside the expression. If there are other users, external
2760 // to the expression, use a clone of the recipe for external users.
2761 for (VPSingleDefRecipe *R : ExpressionRecipes) {
2762 if (R != ExpressionRecipes.back() &&
2763 any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
2764 return !ExpressionRecipesAsSetOfUsers.contains(U);
2765 })) {
2766      // There are users outside of the expression. Clone the recipe and use
2767      // the clone for those external users.
2768 VPSingleDefRecipe *CopyForExtUsers = R->clone();
2769 R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
2770 VPUser &U, unsigned) {
2771 return !ExpressionRecipesAsSetOfUsers.contains(&U);
2772 });
2773 CopyForExtUsers->insertBefore(R);
2774 }
2775 if (R->getParent())
2776 R->removeFromParent();
2777 }
2778
2779 // Internalize all external operands to the expression recipes. To do so,
2780 // create new temporary VPValues for all operands defined by a recipe outside
2781 // the expression. The original operands are added as operands of the
2782 // VPExpressionRecipe itself.
2783 for (auto *R : ExpressionRecipes) {
2784 for (const auto &[Idx, Op] : enumerate(R->operands())) {
2785 auto *Def = Op->getDefiningRecipe();
2786 if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
2787 continue;
2788 addOperand(Op);
2789 LiveInPlaceholders.push_back(new VPValue());
2790 R->setOperand(Idx, LiveInPlaceholders.back());
2791 }
2792 }
2793}
2794
2795void VPExpressionRecipe::decompose() {
2796  for (auto *R : ExpressionRecipes)
2797 R->insertBefore(this);
2798
2799 for (const auto &[Idx, Op] : enumerate(operands()))
2800 LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
2801
2802 replaceAllUsesWith(ExpressionRecipes.back());
2803 ExpressionRecipes.clear();
2804}
2805
2806InstructionCost VPExpressionRecipe::computeCost(ElementCount VF,
2807                                                VPCostContext &Ctx) const {
2808  Type *RedTy = Ctx.Types.inferScalarType(this);
2809  auto *SrcVecTy = cast<VectorType>(
2810      toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF));
2811  assert(RedTy->isIntegerTy() &&
2812 "VPExpressionRecipe only supports integer types currently.");
2813 unsigned Opcode = RecurrenceDescriptor::getOpcode(
2814 cast<VPReductionRecipe>(ExpressionRecipes.back())->getRecurrenceKind());
2815 switch (ExpressionType) {
2816 case ExpressionTypes::ExtendedReduction: {
2817 return Ctx.TTI.getExtendedReductionCost(
2818 Opcode,
2819 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
2820 Instruction::ZExt,
2821 RedTy, SrcVecTy, std::nullopt, Ctx.CostKind);
2822 }
2823 case ExpressionTypes::MulAccReduction:
2824 return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
2825 Ctx.CostKind);
2826
2827 case ExpressionTypes::ExtMulAccReduction:
2828 return Ctx.TTI.getMulAccReductionCost(
2829 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
2830 Instruction::ZExt,
2831 Opcode, RedTy, SrcVecTy, Ctx.CostKind);
2832 }
2833 llvm_unreachable("Unknown VPExpressionRecipe::ExpressionTypes enum");
2834}
2835
2836bool VPExpressionRecipe::mayReadOrWriteMemory() const {
2837  return any_of(ExpressionRecipes, [](VPSingleDefRecipe *R) {
2838 return R->mayReadFromMemory() || R->mayWriteToMemory();
2839 });
2840}
2841
2842bool VPExpressionRecipe::mayHaveSideEffects() const {
2843  assert(
2844 none_of(ExpressionRecipes,
2845 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2846 "expression cannot contain recipes with side-effects");
2847 return false;
2848}
2849
2850#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2851
2852void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent,
2853                               VPSlotTracker &SlotTracker) const {
2854  O << Indent << "EXPRESSION ";
2855  printAsOperand(O, SlotTracker);
2856  O << " = ";
2857 auto *Red = cast<VPReductionRecipe>(ExpressionRecipes.back());
2858 unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
2859
2860 switch (ExpressionType) {
2861  case ExpressionTypes::ExtendedReduction: {
2862    getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
2863    O << " +";
2864    O << " reduce." << Instruction::getOpcodeName(Opcode) << " (";
2865    getOperand(0)->printAsOperand(O, SlotTracker);
2866    Red->printFlags(O);
2867
2868 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2869 O << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2870 << *Ext0->getResultType();
2871 if (Red->isConditional()) {
2872 O << ", ";
2873 Red->getCondOp()->printAsOperand(O, SlotTracker);
2874 }
2875 O << ")";
2876 break;
2877 }
2878 case ExpressionTypes::MulAccReduction:
2879  case ExpressionTypes::ExtMulAccReduction: {
2880    getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
2881    O << " + ";
2882    O << "reduce."
2883      << Instruction::getOpcodeName(
2884             RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
2885      << " (";
2886 O << "mul";
2887 bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
2888 auto *Mul = cast<VPWidenRecipe>(IsExtended ? ExpressionRecipes[2]
2889 : ExpressionRecipes[0]);
2890 Mul->printFlags(O);
2891 if (IsExtended)
2892      O << "(";
2893    getOperand(0)->printAsOperand(O, SlotTracker);
2894    if (IsExtended) {
2895 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2896 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2897 << *Ext0->getResultType() << "), (";
2898 } else {
2899 O << ", ";
2900    }
2901    getOperand(1)->printAsOperand(O, SlotTracker);
2902    if (IsExtended) {
2903 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
2904 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
2905 << *Ext1->getResultType() << ")";
2906 }
2907 if (Red->isConditional()) {
2908 O << ", ";
2909 Red->getCondOp()->printAsOperand(O, SlotTracker);
2910 }
2911 O << ")";
2912 break;
2913 }
2914 }
2915}
2916
2917void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
2918                              VPSlotTracker &SlotTracker) const {
2919  O << Indent << "REDUCE ";
2920  printAsOperand(O, SlotTracker);
2921  O << " = ";
2922  getChainOp()->printAsOperand(O, SlotTracker);
2923  O << " +";
2924 printFlags(O);
2925  O << " reduce."
2926    << Instruction::getOpcodeName(
2927           RecurrenceDescriptor::getOpcode(getRecurrenceKind()))
2928    << " (";
2929  getVecOp()->printAsOperand(O, SlotTracker);
2930  if (isConditional()) {
2931    O << ", ";
2932    getCondOp()->printAsOperand(O, SlotTracker);
2933  }
2934 O << ")";
2935}
2936
2937void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2938                                 VPSlotTracker &SlotTracker) const {
2939  O << Indent << "REDUCE ";
2940  printAsOperand(O, SlotTracker);
2941  O << " = ";
2942  getChainOp()->printAsOperand(O, SlotTracker);
2943  O << " +";
2944 printFlags(O);
2945  O << " vp.reduce."
2946    << Instruction::getOpcodeName(
2947           RecurrenceDescriptor::getOpcode(getRecurrenceKind()))
2948    << " (";
2949  getVecOp()->printAsOperand(O, SlotTracker);
2950  O << ", ";
2951  getEVL()->printAsOperand(O, SlotTracker);
2952  if (isConditional()) {
2953    O << ", ";
2954    getCondOp()->printAsOperand(O, SlotTracker);
2955  }
2956 O << ")";
2957}
2958
2959#endif
2960
2961/// A helper function to scalarize a single Instruction in the innermost loop.
2962/// Generates a sequence of scalar instances for lane \p Lane. Uses the VPValue
2963/// operands from \p RepRecipe instead of \p Instr's operands.
2964static void scalarizeInstruction(const Instruction *Instr,
2965 VPReplicateRecipe *RepRecipe,
2966 const VPLane &Lane, VPTransformState &State) {
2967 assert((!Instr->getType()->isAggregateType() ||
2968 canVectorizeTy(Instr->getType())) &&
2969 "Expected vectorizable or non-aggregate type.");
2970
2971 // Does this instruction return a value ?
2972 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2973
2974 Instruction *Cloned = Instr->clone();
2975 if (!IsVoidRetTy) {
2976 Cloned->setName(Instr->getName() + ".cloned");
2977 Type *ResultTy = State.TypeAnalysis.inferScalarType(RepRecipe);
2978 // The operands of the replicate recipe may have been narrowed, resulting in
2979 // a narrower result type. Update the type of the cloned instruction to the
2980 // correct type.
2981 if (ResultTy != Cloned->getType())
2982 Cloned->mutateType(ResultTy);
2983 }
2984
2985 RepRecipe->applyFlags(*Cloned);
2986 RepRecipe->applyMetadata(*Cloned);
2987
2988 if (RepRecipe->hasPredicate())
2989 cast<CmpInst>(Cloned)->setPredicate(RepRecipe->getPredicate());
2990
2991 if (auto DL = RepRecipe->getDebugLoc())
2992 State.setDebugLocFrom(DL);
2993
2994 // Replace the operands of the cloned instructions with their scalar
2995 // equivalents in the new loop.
2996 for (const auto &I : enumerate(RepRecipe->operands())) {
2997 auto InputLane = Lane;
2998 VPValue *Operand = I.value();
2999 if (vputils::isSingleScalar(Operand))
3000 InputLane = VPLane::getFirstLane();
3001 Cloned->setOperand(I.index(), State.get(Operand, InputLane));
3002 }
3003
3004 // Place the cloned scalar in the new loop.
3005 State.Builder.Insert(Cloned);
3006
3007 State.set(RepRecipe, Cloned, Lane);
3008
3009 // If we just cloned a new assumption, add it the assumption cache.
3010 if (auto *II = dyn_cast<AssumeInst>(Cloned))
3011 State.AC->registerAssumption(II);
3012
3013 assert(
3014 (RepRecipe->getParent()->getParent() ||
3015 !RepRecipe->getParent()->getPlan()->getVectorLoopRegion() ||
3016 all_of(RepRecipe->operands(),
3017 [](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
3018 "Expected a recipe is either within a region or all of its operands "
3019 "are defined outside the vectorized region.");
3020}
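// Illustrative example (invented names): replicating %mul = mul i32 %a, %b
// for lane 2 clones the instruction with that lane's scalar operands, e.g.
//   %mul.cloned = mul i32 %a.lane2, %b.lane2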
3021
3022void VPReplicateRecipe::execute(VPTransformState &State) {
3023  Instruction *UI = getUnderlyingInstr();
3024
3025 if (!State.Lane) {
3026 assert(IsSingleScalar && "VPReplicateRecipes outside replicate regions "
3027 "must have already been unrolled");
3028 scalarizeInstruction(UI, this, VPLane(0), State);
3029 return;
3030 }
3031
3032 assert((State.VF.isScalar() || !isSingleScalar()) &&
3033 "uniform recipe shouldn't be predicated");
3034 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
3035 scalarizeInstruction(UI, this, *State.Lane, State);
3036 // Insert scalar instance packing it into a vector.
3037 if (State.VF.isVector() && shouldPack()) {
3038 Value *WideValue =
3039        State.Lane->isFirstLane()
3040            ? PoisonValue::get(VectorType::get(UI->getType(), State.VF))
3041            : State.get(this);
3042 State.set(this, State.packScalarIntoVectorizedValue(this, WideValue,
3043 *State.Lane));
3044 }
3045}
3046
3048 // Find if the recipe is used by a widened recipe via an intervening
3049 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
3050 return any_of(users(), [](const VPUser *U) {
3051 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
3052 return !vputils::onlyScalarValuesUsed(PredR);
3053 return false;
3054 });
3055}
3056
3057InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
3058                                               VPCostContext &Ctx) const {
3059 Instruction *UI = cast<Instruction>(getUnderlyingValue());
3060 // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
3061 // transform, avoid computing their cost multiple times for now.
3062 Ctx.SkipCostComputation.insert(UI);
3063
3064 switch (UI->getOpcode()) {
3065 case Instruction::GetElementPtr:
3066 // We mark this instruction as zero-cost because the cost of GEPs in
3067 // vectorized code depends on whether the corresponding memory instruction
3068 // is scalarized or not. Therefore, we handle GEPs with the memory
3069 // instruction cost.
3070 return 0;
3071 case Instruction::Call: {
3072 auto *CalledFn =
3073 cast<Function>(getOperand(getNumOperands() - 1)->getLiveInIRValue());
3074
3075    SmallVector<const VPValue *> ArgOps(drop_end(operands()));
3076    SmallVector<Type *> Tys;
3077 for (const VPValue *ArgOp : ArgOps)
3078 Tys.push_back(Ctx.Types.inferScalarType(ArgOp));
3079
3080 if (CalledFn->isIntrinsic())
3081 // Various pseudo-intrinsics with costs of 0 are scalarized instead of
3082 // vectorized via VPWidenIntrinsicRecipe. Return 0 for them early.
3083 switch (CalledFn->getIntrinsicID()) {
3084 case Intrinsic::assume:
3085 case Intrinsic::lifetime_end:
3086 case Intrinsic::lifetime_start:
3087 case Intrinsic::sideeffect:
3088 case Intrinsic::pseudoprobe:
3089 case Intrinsic::experimental_noalias_scope_decl: {
3090 assert(getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3091 ElementCount::getFixed(1), Ctx) == 0 &&
3092 "scalarizing intrinsic should be free");
3093 return InstructionCost(0);
3094 }
3095 default:
3096 break;
3097 }
3098
3099 Type *ResultTy = Ctx.Types.inferScalarType(this);
3100 InstructionCost ScalarCallCost =
3101 Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
3102 if (isSingleScalar()) {
3103 if (CalledFn->isIntrinsic())
3104 ScalarCallCost = std::min(
3105 ScalarCallCost,
3106 getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3107 ElementCount::getFixed(1), Ctx));
3108 return ScalarCallCost;
3109 }
3110
3111    if (VF.isScalable())
3112      return InstructionCost::getInvalid();
3113
3114 // Compute the cost of scalarizing the result and operands if needed.
3115 InstructionCost ScalarizationCost = 0;
3116 if (VF.isVector()) {
3117 if (!ResultTy->isVoidTy()) {
3118 for (Type *VectorTy :
3119 to_vector(getContainedTypes(toVectorizedTy(ResultTy, VF)))) {
3120 ScalarizationCost += Ctx.TTI.getScalarizationOverhead(
3121 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
3122 /*Insert=*/true,
3123 /*Extract=*/false, Ctx.CostKind);
3124 }
3125 }
3126 // Skip operands that do not require extraction/scalarization and do not
3127 // incur any overhead.
3128 SmallPtrSet<const VPValue *, 4> UniqueOperands;
3129 Tys.clear();
3130 for (auto *Op : ArgOps) {
3131 if (Op->isLiveIn() || isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op) ||
3132 !UniqueOperands.insert(Op).second)
3133          continue;
3134        Tys.push_back(toVectorizedTy(Ctx.Types.inferScalarType(Op), VF));
3135      }
3136      ScalarizationCost +=
3137          Ctx.TTI.getOperandsScalarizationOverhead(Tys, Ctx.CostKind);
3138    }
3139
3140 return ScalarCallCost * VF.getFixedValue() + ScalarizationCost;
3141 }
3142 case Instruction::Add:
3143 case Instruction::Sub:
3144 case Instruction::FAdd:
3145 case Instruction::FSub:
3146 case Instruction::Mul:
3147 case Instruction::FMul:
3148 case Instruction::FDiv:
3149 case Instruction::FRem:
3150 case Instruction::Shl:
3151 case Instruction::LShr:
3152 case Instruction::AShr:
3153 case Instruction::And:
3154 case Instruction::Or:
3155 case Instruction::Xor:
3156 case Instruction::ICmp:
3157  case Instruction::FCmp:
3158    return *getCostForRecipeWithOpcode(UI->getOpcode(),
3159                                       ElementCount::getFixed(1), Ctx) *
3160           (isSingleScalar() ? 1 : VF.getFixedValue());
3161 }
3162
3163 return Ctx.getLegacyCost(UI, VF);
3164}
3165
3166#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3168 VPSlotTracker &SlotTracker) const {
3169 O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
3170
3171  if (!getUnderlyingInstr()->getType()->isVoidTy()) {
3172    printAsOperand(O, SlotTracker);
3173    O << " = ";
3174 }
3175 if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
3176 O << "call";
3177 printFlags(O);
3178    O << "@" << CB->getCalledFunction()->getName() << "(";
3179    interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
3180                    O, [&O, &SlotTracker](VPValue *Op) {
3181 Op->printAsOperand(O, SlotTracker);
3182 });
3183 O << ")";
3184  } else {
3185    O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
3186    printFlags(O);
3187    printOperands(O, SlotTracker);
3188  }
3189
3190 if (shouldPack())
3191 O << " (S->V)";
3192}
3193#endif
3194
3195void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
3196  assert(State.Lane && "Branch on Mask works only on single instance.");
3197
3198 VPValue *BlockInMask = getOperand(0);
3199 Value *ConditionBit = State.get(BlockInMask, *State.Lane);
3200
3201 // Replace the temporary unreachable terminator with a new conditional branch,
3202 // whose two destinations will be set later when they are created.
3203 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
3204 assert(isa<UnreachableInst>(CurrentTerminator) &&
3205 "Expected to replace unreachable terminator with conditional branch.");
3206 auto CondBr =
3207 State.Builder.CreateCondBr(ConditionBit, State.CFG.PrevBB, nullptr);
3208 CondBr->setSuccessor(0, nullptr);
3209 CurrentTerminator->eraseFromParent();
3210}
3211
3212InstructionCost VPBranchOnMaskRecipe::computeCost(ElementCount VF,
3213                                                  VPCostContext &Ctx) const {
3214 // The legacy cost model doesn't assign costs to branches for individual
3215 // replicate regions. Match the current behavior in the VPlan cost model for
3216 // now.
3217 return 0;
3218}
3219
3220void VPPredInstPHIRecipe::execute(VPTransformState &State) {
3221  assert(State.Lane && "Predicated instruction PHI works per instance.");
3222 Instruction *ScalarPredInst =
3223 cast<Instruction>(State.get(getOperand(0), *State.Lane));
3224 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
3225 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
3226 assert(PredicatingBB && "Predicated block has no single predecessor.");
3227 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
3228 "operand must be VPReplicateRecipe");
3229
3230 // By current pack/unpack logic we need to generate only a single phi node: if
3231 // a vector value for the predicated instruction exists at this point it means
3232 // the instruction has vector users only, and a phi for the vector value is
3233 // needed. In this case the recipe of the predicated instruction is marked to
3234 // also do that packing, thereby "hoisting" the insert-element sequence.
3235 // Otherwise, a phi node for the scalar value is needed.
3236 if (State.hasVectorValue(getOperand(0))) {
3237 Value *VectorValue = State.get(getOperand(0));
3238 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
3239 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
3240 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
3241 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
3242 if (State.hasVectorValue(this))
3243 State.reset(this, VPhi);
3244 else
3245 State.set(this, VPhi);
3246 // NOTE: Currently we need to update the value of the operand, so the next
3247 // predicated iteration inserts its generated value in the correct vector.
3248 State.reset(getOperand(0), VPhi);
3249 } else {
3250 if (vputils::onlyFirstLaneUsed(this) && !State.Lane->isFirstLane())
3251 return;
3252
3253 Type *PredInstType = State.TypeAnalysis.inferScalarType(getOperand(0));
3254 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
3255 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
3256 PredicatingBB);
3257 Phi->addIncoming(ScalarPredInst, PredicatedBB);
3258 if (State.hasScalarValue(this, *State.Lane))
3259 State.reset(this, Phi, *State.Lane);
3260 else
3261 State.set(this, Phi, *State.Lane);
3262 // NOTE: Currently we need to update the value of the operand, so the next
3263 // predicated iteration inserts its generated value in the correct vector.
3264 State.reset(getOperand(0), Phi, *State.Lane);
3265 }
3266}
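// Illustrative example (invented names): when the predicated instruction has
// vector users, the generated phi merges the unmodified vector with the one
// holding the newly inserted lane:
//   pred.bb: %v.new = insertelement <4 x i32> %v, i32 %d, i64 %lane
//   next.bb: %phi = phi <4 x i32> [ %v, %if.bb ], [ %v.new, %pred.bb ]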
3267
3268#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3269void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3270                                VPSlotTracker &SlotTracker) const {
3271  O << Indent << "PHI-PREDICATED-INSTRUCTION ";
3272  printAsOperand(O, SlotTracker);
3273  O << " = ";
3274  printOperands(O, SlotTracker);
3275}
3276#endif
3277
3278InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
3279                                                 VPCostContext &Ctx) const {
3280  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3281  const Align Alignment = getLoadStoreAlignment(&Ingredient);
3282 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3283 ->getAddressSpace();
3284 unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
3285 ? Instruction::Load
3286 : Instruction::Store;
3287
3288 if (!Consecutive) {
3289 // TODO: Using the original IR may not be accurate.
3290 // Currently, ARM will use the underlying IR to calculate gather/scatter
3291 // instruction cost.
3292 assert(!Reverse &&
3293 "Inconsecutive memory access should not have the order.");
3294
3295    const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
3296    Type *PtrTy = Ptr->getType();
3297
3298 // If the address value is uniform across all lanes, then the address can be
3299    // calculated with scalar type and broadcast.
3300    if (!vputils::isSingleScalar(getAddr()))
3301      PtrTy = toVectorTy(PtrTy, VF);
3302
3303 return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
3304 Ctx.CostKind) +
3305 Ctx.TTI.getGatherScatterOpCost(Opcode, Ty, Ptr, IsMasked, Alignment,
3306 Ctx.CostKind, &Ingredient);
3307 }
3308
3309  InstructionCost Cost = 0;
3310 if (IsMasked) {
3311 Cost +=
3312 Ctx.TTI.getMaskedMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind);
3313 } else {
3314    TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo(
3315        isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this) ? getOperand(0)
3316 : getOperand(1));
3317 Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
3318 OpInfo, &Ingredient);
3319 }
3320 if (!Reverse)
3321 return Cost;
3322
3323 return Cost += Ctx.TTI.getShuffleCost(
3324 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3325 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3326}
3327
3328void VPWidenLoadRecipe::execute(VPTransformState &State) {
3329  Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3330 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3331 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3332 bool CreateGather = !isConsecutive();
3333
3334 auto &Builder = State.Builder;
3335 Value *Mask = nullptr;
3336 if (auto *VPMask = getMask()) {
3337 // Mask reversal is only needed for non-all-one (null) masks, as reverse
3338 // of a null all-one mask is a null mask.
3339 Mask = State.get(VPMask);
3340 if (isReverse())
3341 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3342 }
3343
3344 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
3345 Value *NewLI;
3346 if (CreateGather) {
3347 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
3348 "wide.masked.gather");
3349 } else if (Mask) {
3350 NewLI =
3351 Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
3352 PoisonValue::get(DataTy), "wide.masked.load");
3353 } else {
3354 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
3355 }
3356 applyMetadata(*cast<Instruction>(NewLI));
3357 if (Reverse)
3358 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
3359 State.set(this, NewLI);
3360}
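// Illustrative example (invented names): a consecutive masked reverse load
// reverses the mask, performs the masked load, then reverses the data:
//   %rmask = shufflevector <4 x i1> %mask, <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %wide  = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr, i32 4,
//                <4 x i1> %rmask, <4 x i32> poison)
//   %rev   = shufflevector <4 x i32> %wide, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>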
3361
3362#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3363void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
3364                              VPSlotTracker &SlotTracker) const {
3365  O << Indent << "WIDEN ";
3366  printAsOperand(O, SlotTracker);
3367  O << " = load ";
3368  printOperands(O, SlotTracker);
3369}
3370#endif
3371
3372/// Use all-true mask for reverse rather than actual mask, as it avoids a
3373/// dependence w/o affecting the result.
3374static Instruction *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
3375                                     Value *EVL, const Twine &Name) {
3376 VectorType *ValTy = cast<VectorType>(Operand->getType());
3377 Value *AllTrueMask =
3378 Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
3379 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
3380 {Operand, AllTrueMask, EVL}, nullptr, Name);
3381}
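// Illustrative example (invented names): reversing a value under EVL %evl
// emits an all-true-masked vp.reverse, e.g.
//   %rev = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(
//            <vscale x 4 x i32> %v, <vscale x 4 x i1> splat (i1 true), i32 %evl)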
3382
3383void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
3384  Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3385 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3386 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3387 bool CreateGather = !isConsecutive();
3388
3389 auto &Builder = State.Builder;
3390 CallInst *NewLI;
3391 Value *EVL = State.get(getEVL(), VPLane(0));
3392 Value *Addr = State.get(getAddr(), !CreateGather);
3393 Value *Mask = nullptr;
3394 if (VPValue *VPMask = getMask()) {
3395 Mask = State.get(VPMask);
3396 if (isReverse())
3397 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3398 } else {
3399 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3400 }
3401
3402 if (CreateGather) {
3403 NewLI =
3404 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
3405 nullptr, "wide.masked.gather");
3406 } else {
3407 NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
3408 {Addr, Mask, EVL}, nullptr, "vp.op.load");
3409 }
3410 NewLI->addParamAttr(
3411 0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
3412 applyMetadata(*NewLI);
3413 Instruction *Res = NewLI;
3414 if (isReverse())
3415 Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
3416 State.set(this, Res);
3417}
3418
3419InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
3420                                                  VPCostContext &Ctx) const {
3421 if (!Consecutive || IsMasked)
3422 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3423
3424  // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
3425  // here because the EVL recipes use EVL to replace the tail mask, while the
3426  // legacy model always calculates the cost of the mask.
3427  // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() when we
3428  // don't need to compare to the legacy cost model.
3429  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3430  const Align Alignment = getLoadStoreAlignment(&Ingredient);
3431  unsigned AS = getLoadStoreAddressSpace(&Ingredient);
3432  InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
3433      Instruction::Load, Ty, Alignment, AS, Ctx.CostKind);
3434 if (!Reverse)
3435 return Cost;
3436
3437 return Cost + Ctx.TTI.getShuffleCost(
3438 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3439 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3440}
3441
3442#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3443void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
3444                                 VPSlotTracker &SlotTracker) const {
3445  O << Indent << "WIDEN ";
3446  printAsOperand(O, SlotTracker);
3447  O << " = vp.load ";
3448  printOperands(O, SlotTracker);
3449}
3450#endif
3451
3452void VPWidenStoreRecipe::execute(VPTransformState &State) {
3453  VPValue *StoredVPValue = getStoredValue();
3454 bool CreateScatter = !isConsecutive();
3455 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3456
3457 auto &Builder = State.Builder;
3458
3459 Value *Mask = nullptr;
3460 if (auto *VPMask = getMask()) {
3461 // Mask reversal is only needed for non-all-one (null) masks, as reverse
3462 // of a null all-one mask is a null mask.
3463 Mask = State.get(VPMask);
3464 if (isReverse())
3465 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3466 }
3467
3468 Value *StoredVal = State.get(StoredVPValue);
3469 if (isReverse()) {
3470 // If we store to reverse consecutive memory locations, then we need
3471 // to reverse the order of elements in the stored value.
3472 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
3473 // We don't want to update the value in the map as it might be used in
3474 // another expression. So don't call resetVectorValue(StoredVal).
3475 }
3476 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
3477 Instruction *NewSI = nullptr;
3478 if (CreateScatter)
3479 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
3480 else if (Mask)
3481 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
3482 else
3483 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
3484 applyMetadata(*NewSI);
3485}
3486
3487#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3488void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
3489                               VPSlotTracker &SlotTracker) const {
3490  O << Indent << "WIDEN store ";
3491  printOperands(O, SlotTracker);
3492}
3493#endif
3494
3495void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
3496  VPValue *StoredValue = getStoredValue();
3497 bool CreateScatter = !isConsecutive();
3498 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3499
3500 auto &Builder = State.Builder;
3501
3502 CallInst *NewSI = nullptr;
3503 Value *StoredVal = State.get(StoredValue);
3504 Value *EVL = State.get(getEVL(), VPLane(0));
3505 if (isReverse())
3506 StoredVal = createReverseEVL(Builder, StoredVal, EVL, "vp.reverse");
3507 Value *Mask = nullptr;
3508 if (VPValue *VPMask = getMask()) {
3509 Mask = State.get(VPMask);
3510 if (isReverse())
3511 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3512 } else {
3513 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3514 }
3515 Value *Addr = State.get(getAddr(), !CreateScatter);
3516 if (CreateScatter) {
3517 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3518 Intrinsic::vp_scatter,
3519 {StoredVal, Addr, Mask, EVL});
3520 } else {
3521 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3522 Intrinsic::vp_store,
3523 {StoredVal, Addr, Mask, EVL});
3524 }
3525 NewSI->addParamAttr(
3526 1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
3527 applyMetadata(*NewSI);
3528}
3529
3530InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
3531                                                   VPCostContext &Ctx) const {
3532 if (!Consecutive || IsMasked)
3533 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3534
3535  // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
3536  // here because the EVL recipes use EVL to replace the tail mask, while the
3537  // legacy model always calculates the cost of the mask.
3538  // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() when we
3539  // don't need to compare to the legacy cost model.
3540  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3541  const Align Alignment = getLoadStoreAlignment(&Ingredient);
3542  unsigned AS = getLoadStoreAddressSpace(&Ingredient);
3543  InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
3544      Instruction::Store, Ty, Alignment, AS, Ctx.CostKind);
3545 if (!Reverse)
3546 return Cost;
3547
3548 return Cost + Ctx.TTI.getShuffleCost(
3549 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3550 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3551}
3552
3553#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3554void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
3555                                  VPSlotTracker &SlotTracker) const {
3556  O << Indent << "WIDEN vp.store ";
3557  printOperands(O, SlotTracker);
3558}
3559#endif
3560
3561static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
3562                                     VectorType *DstVTy, const DataLayout &DL) {
3563 // Verify that V is a vector type with same number of elements as DstVTy.
3564 auto VF = DstVTy->getElementCount();
3565 auto *SrcVecTy = cast<VectorType>(V->getType());
3566 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
3567 Type *SrcElemTy = SrcVecTy->getElementType();
3568 Type *DstElemTy = DstVTy->getElementType();
3569 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3570 "Vector elements must have same size");
3571
3572 // Do a direct cast if element types are castable.
3573 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3574 return Builder.CreateBitOrPointerCast(V, DstVTy);
3575 }
3576 // V cannot be directly casted to desired vector type.
3577 // May happen when V is a floating point vector but DstVTy is a vector of
3578 // pointers or vice-versa. Handle this using a two-step bitcast using an
3579 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3580 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3581 "Only one type should be a pointer type");
3582 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3583 "Only one type should be a floating point type");
3584 Type *IntTy =
3585 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3586 auto *VecIntTy = VectorType::get(IntTy, VF);
3587 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3588 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3589}
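// Illustrative example: casting <2 x double> to <2 x ptr> is not a single
// bitcast, so it goes through the integer vector of equal element width:
//   %i = bitcast <2 x double> %v to <2 x i64>
//   %p = inttoptr <2 x i64> %i to <2 x ptr>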
3590
3591/// Return a vector containing interleaved elements from multiple
3592/// smaller input vectors.
3593static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
3594                                const Twine &Name) {
3595 unsigned Factor = Vals.size();
3596 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
3597
3598 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
3599#ifndef NDEBUG
3600 for (Value *Val : Vals)
3601 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
3602#endif
3603
3604 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
3605 // must use intrinsics to interleave.
3606 if (VecTy->isScalableTy()) {
3607 assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
3608 return Builder.CreateVectorInterleave(Vals, Name);
3609 }
3610
3611 // Fixed length. Start by concatenating all vectors into a wide vector.
3612 Value *WideVec = concatenateVectors(Builder, Vals);
3613
3614 // Interleave the elements into the wide vector.
3615 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
3616 return Builder.CreateShuffleVector(
3617 WideVec, createInterleaveMask(NumElts, Factor), Name);
3618}
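// Illustrative example: interleaving two fixed <4 x i32> vectors first
// concatenates them, then applies createInterleaveMask(4, 2):
//   %cat = shufflevector <4 x i32> %a, <4 x i32> %b,
//          <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
//   %il  = shufflevector <8 x i32> %cat, <8 x i32> poison,
//          <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>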
3619
3620// Try to vectorize the interleave group that \p Instr belongs to.
3621//
3622// E.g. Translate following interleaved load group (factor = 3):
3623// for (i = 0; i < N; i+=3) {
3624// R = Pic[i]; // Member of index 0
3625// G = Pic[i+1]; // Member of index 1
3626// B = Pic[i+2]; // Member of index 2
3627// ... // do something to R, G, B
3628// }
3629// To:
3630// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
3631// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
3632// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
3633// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
3634//
3635// Or translate following interleaved store group (factor = 3):
3636// for (i = 0; i < N; i+=3) {
3637// ... do something to R, G, B
3638// Pic[i] = R; // Member of index 0
3639// Pic[i+1] = G; // Member of index 1
3640// Pic[i+2] = B; // Member of index 2
3641// }
3642// To:
3643// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
3644// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
3645// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
3646// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
3647// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
3648 void VPInterleaveRecipe::execute(VPTransformState &State) {
3649 assert(!State.Lane && "Interleave group being replicated.");
3650 assert((!needsMaskForGaps() || !State.VF.isScalable()) &&
3651 "Masking gaps for scalable vectors is not yet supported.");
3652 const InterleaveGroup<Instruction> *Group = getInterleaveGroup();
3653 Instruction *Instr = Group->getInsertPos();
3654
3655 // Prepare the vector type for the interleaved load/store.
3656 Type *ScalarTy = getLoadStoreType(Instr);
3657 unsigned InterleaveFactor = Group->getFactor();
3658 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
3659
3660 VPValue *BlockInMask = getMask();
3661 VPValue *Addr = getAddr();
3662 Value *ResAddr = State.get(Addr, VPLane(0));
3663
3664 auto CreateGroupMask = [&BlockInMask, &State,
3665 &InterleaveFactor](Value *MaskForGaps) -> Value * {
3666 if (State.VF.isScalable()) {
3667 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
3668 assert(InterleaveFactor <= 8 &&
3669 "Unsupported deinterleave factor for scalable vectors");
3670 auto *ResBlockInMask = State.get(BlockInMask);
3671 SmallVector<Value *> Ops(InterleaveFactor, ResBlockInMask);
3672 return interleaveVectors(State.Builder, Ops, "interleaved.mask");
3673 }
3674
3675 if (!BlockInMask)
3676 return MaskForGaps;
3677
3678 Value *ResBlockInMask = State.get(BlockInMask);
3679 Value *ShuffledMask = State.Builder.CreateShuffleVector(
3680 ResBlockInMask,
3681 createReplicatedMask(InterleaveFactor, State.VF.getFixedValue()),
3682 "interleaved.mask");
3683 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
3684 ShuffledMask, MaskForGaps)
3685 : ShuffledMask;
3686 };
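// For example, with a fixed VF of 4 and an interleave factor of 3, a block
// mask <m0, m1, m2, m3> is shuffled with createReplicatedMask(3, 4) into
// <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>, so all three members of
// tuple i are controlled by lane i of the original block mask.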
3687
3688 const DataLayout &DL = Instr->getDataLayout();
3689 // Vectorize the interleaved load group.
3690 if (isa<LoadInst>(Instr)) {
3691 Value *MaskForGaps = nullptr;
3692 if (needsMaskForGaps()) {
3693 MaskForGaps =
3694 createBitMaskForGaps(State.Builder, State.VF.getFixedValue(), *Group);
3695 assert(MaskForGaps && "Mask for Gaps is required but it is null");
3696 }
3697
3698 Instruction *NewLoad;
3699 if (BlockInMask || MaskForGaps) {
3700 Value *GroupMask = CreateGroupMask(MaskForGaps);
3701 Value *PoisonVec = PoisonValue::get(VecTy);
3702 NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
3703 Group->getAlign(), GroupMask,
3704 PoisonVec, "wide.masked.vec");
3705 } else
3706 NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
3707 Group->getAlign(), "wide.vec");
3708 applyMetadata(*NewLoad);
3709 // TODO: Also manage existing metadata using VPIRMetadata.
3710 Group->addMetadata(NewLoad);
3711
3712 ArrayRef<VPValue *> VPDefs = definedValues();
3713 if (VecTy->isScalableTy()) {
3714 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
3715 // so we must use intrinsics to deinterleave.
3716 assert(InterleaveFactor <= 8 &&
3717 "Unsupported deinterleave factor for scalable vectors");
3718 NewLoad = State.Builder.CreateIntrinsic(
3719 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
3720 NewLoad->getType(), NewLoad,
3721 /*FMFSource=*/nullptr, "strided.vec");
3722 }
3723
3724 auto CreateStridedVector = [&InterleaveFactor, &State,
3725 &NewLoad](unsigned Index) -> Value * {
3726 assert(Index < InterleaveFactor && "Illegal group index");
3727 if (State.VF.isScalable())
3728 return State.Builder.CreateExtractValue(NewLoad, Index);
3729
3730 // For fixed length VF, use shuffle to extract the sub-vectors from the
3731 // wide load.
3732 auto StrideMask =
3733 createStrideMask(Index, InterleaveFactor, State.VF.getFixedValue());
3734 return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
3735 "strided.vec");
3736 };
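// For example, with InterleaveFactor = 3 and a fixed VF of 4, member index 1
// uses createStrideMask(1, 3, 4) = <1, 4, 7, 10> to extract its elements
// from the 12-element wide load, matching the G elements in the example
// above.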
3737
3738 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
3739 Instruction *Member = Group->getMember(I);
3740
3741 // Skip the gaps in the group.
3742 if (!Member)
3743 continue;
3744
3745 Value *StridedVec = CreateStridedVector(I);
3746
3747 // If this member has a different type, cast the result to that type.
3748 if (Member->getType() != ScalarTy) {
3749 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
3750 StridedVec =
3751 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
3752 }
3753
3754 if (Group->isReverse())
3755 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
3756
3757 State.set(VPDefs[J], StridedVec);
3758 ++J;
3759 }
3760 return;
3761 }
3762
3763 // The sub-vector type for the current instruction.
3764 auto *SubVT = VectorType::get(ScalarTy, State.VF);
3765
3766 // Vectorize the interleaved store group.
3767 Value *MaskForGaps =
3768 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
3769 assert(((MaskForGaps != nullptr) == needsMaskForGaps()) &&
3770 "Mismatch between NeedsMaskForGaps and MaskForGaps");
3771 ArrayRef<VPValue *> StoredValues = getStoredValues();
3772 // Collect the stored vector from each member.
3773 SmallVector<Value *, 4> StoredVecs;
3774 unsigned StoredIdx = 0;
3775 for (unsigned i = 0; i < InterleaveFactor; i++) {
3776 assert((Group->getMember(i) || MaskForGaps) &&
3777 "Fail to get a member from an interleaved store group");
3778 Instruction *Member = Group->getMember(i);
3779
3780 // Skip the gaps in the group.
3781 if (!Member) {
3782 Value *Undef = PoisonValue::get(SubVT);
3783 StoredVecs.push_back(Undef);
3784 continue;
3785 }
3786
3787 Value *StoredVec = State.get(StoredValues[StoredIdx]);
3788 ++StoredIdx;
3789
3790 if (Group->isReverse())
3791 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
3792
3793 // If this member has a different type, cast it to the unified type.
3794
3795 if (StoredVec->getType() != SubVT)
3796 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
3797
3798 StoredVecs.push_back(StoredVec);
3799 }
3800
3801 // Interleave all the smaller vectors into one wider vector.
3802 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
3803 Instruction *NewStoreInstr;
3804 if (BlockInMask || MaskForGaps) {
3805 Value *GroupMask = CreateGroupMask(MaskForGaps);
3806 NewStoreInstr = State.Builder.CreateMaskedStore(
3807 IVec, ResAddr, Group->getAlign(), GroupMask);
3808 } else
3809 NewStoreInstr =
3810 State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
3811
3812 applyMetadata(*NewStoreInstr);
3813 // TODO: Also manage existing metadata using VPIRMetadata.
3814 Group->addMetadata(NewStoreInstr);
3815}
3816
3817#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3818 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
3819 VPSlotTracker &SlotTracker) const {
3821 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
3822 IG->getInsertPos()->printAsOperand(O, false);
3823 O << ", ";
3825 VPValue *Mask = getMask();
3826 if (Mask) {
3827 O << ", ";
3828 Mask->printAsOperand(O, SlotTracker);
3829 }
3830
3831 unsigned OpIdx = 0;
3832 for (unsigned i = 0; i < IG->getFactor(); ++i) {
3833 if (!IG->getMember(i))
3834 continue;
3835 if (getNumStoreOperands() > 0) {
3836 O << "\n" << Indent << " store ";
3838 O << " to index " << i;
3839 } else {
3840 O << "\n" << Indent << " ";
3842 O << " = load from index " << i;
3843 }
3844 ++OpIdx;
3845 }
3846}
3847#endif
3848
3849 void VPInterleaveEVLRecipe::execute(VPTransformState &State) {
3850 assert(!State.Lane && "Interleave group being replicated.");
3851 assert(State.VF.isScalable() &&
3852 "Only support scalable VF for EVL tail-folding.");
3854 "Masking gaps for scalable vectors is not yet supported.");
3856 Instruction *Instr = Group->getInsertPos();
3857
3858 // Prepare the vector type for the interleaved load/store.
3859 Type *ScalarTy = getLoadStoreType(Instr);
3860 unsigned InterleaveFactor = Group->getFactor();
3861 assert(InterleaveFactor <= 8 &&
3862 "Unsupported deinterleave/interleave factor for scalable vectors");
3863 ElementCount WideVF = State.VF * InterleaveFactor;
3864 auto *VecTy = VectorType::get(ScalarTy, WideVF);
3865
3866 VPValue *Addr = getAddr();
3867 Value *ResAddr = State.get(Addr, VPLane(0));
3868 Value *EVL = State.get(getEVL(), VPLane(0));
3869 Value *InterleaveEVL = State.Builder.CreateMul(
3870 EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
3871 /* NUW= */ true, /* NSW= */ true);
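// For example, with an interleave factor of 4 and EVL = 10, interleave.evl
// is 40: the wide vp.load/vp.store below then covers the first 10 complete
// tuples, i.e. the first 10 elements of each member. The product cannot
// wrap, since EVL * Factor never exceeds the wide vector's element count,
// hence the NUW/NSW flags.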
3872 LLVMContext &Ctx = State.Builder.getContext();
3873
3874 Value *GroupMask = nullptr;
3875 if (VPValue *BlockInMask = getMask()) {
3876 SmallVector<Value *> Ops(InterleaveFactor, State.get(BlockInMask));
3877 GroupMask = interleaveVectors(State.Builder, Ops, "interleaved.mask");
3878 } else {
3879 GroupMask =
3880 State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
3881 }
3882
3883 // Vectorize the interleaved load group.
3884 if (isa<LoadInst>(Instr)) {
3885 CallInst *NewLoad = State.Builder.CreateIntrinsic(
3886 VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
3887 "wide.vp.load");
3888 NewLoad->addParamAttr(0,
3889 Attribute::getWithAlignment(Ctx, Group->getAlign()));
3890
3891 applyMetadata(*NewLoad);
3892 // TODO: Also manage existing metadata using VPIRMetadata.
3893 Group->addMetadata(NewLoad);
3894
3895 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
3896 // so we must use intrinsics to deinterleave.
3897 NewLoad = State.Builder.CreateIntrinsic(
3898 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
3899 NewLoad->getType(), NewLoad,
3900 /*FMFSource=*/nullptr, "strided.vec");
3901
3902 const DataLayout &DL = Instr->getDataLayout();
3903 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
3904 Instruction *Member = Group->getMember(I);
3905 // Skip the gaps in the group.
3906 if (!Member)
3907 continue;
3908
3909 Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
3910 // If this member has a different type, cast the result to that type.
3911 if (Member->getType() != ScalarTy) {
3912 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
3913 StridedVec =
3914 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
3915 }
3916
3917 State.set(getVPValue(J), StridedVec);
3918 ++J;
3919 }
3920 return;
3921 } // End of interleaved-load handling.
3922
3923 // The sub-vector type for the current instruction.
3924 auto *SubVT = VectorType::get(ScalarTy, State.VF);
3925 // Vectorize the interleaved store group.
3926 ArrayRef<VPValue *> StoredValues = getStoredValues();
3927 // Collect the stored vector from each member.
3928 SmallVector<Value *, 4> StoredVecs;
3929 const DataLayout &DL = Instr->getDataLayout();
3930 for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
3931 Instruction *Member = Group->getMember(I);
3932 // Skip the gaps in the group.
3933 if (!Member) {
3934 StoredVecs.push_back(PoisonValue::get(SubVT));
3935 continue;
3936 }
3937
3938 Value *StoredVec = State.get(StoredValues[StoredIdx]);
3939 // If this member has a different type, cast it to the unified type.
3940 if (StoredVec->getType() != SubVT)
3941 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
3942
3943 StoredVecs.push_back(StoredVec);
3944 ++StoredIdx;
3945 }
3946
3947 // Interleave all the smaller vectors into one wider vector.
3948 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
3949 CallInst *NewStore =
3950 State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
3951 {IVec, ResAddr, GroupMask, InterleaveEVL});
3952 NewStore->addParamAttr(1,
3953 Attribute::getWithAlignment(Ctx, Group->getAlign()));
3954
3955 applyMetadata(*NewStore);
3956 // TODO: Also manage existing metadata using VPIRMetadata.
3957 Group->addMetadata(NewStore);
3958}
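// For a factor-2 load group with <vscale x 4 x i32> members, the code above
// emits, e.g.:
//   %interleave.evl = mul nuw nsw i32 %evl, 2
//   %wide.vp.load = call <vscale x 8 x i32> @llvm.vp.load(...)
//   %strided.vec = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
//       @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %wide.vp.load)
// with each member then extracted via extractvalue.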
3959
3960#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3961 void VPInterleaveEVLRecipe::print(raw_ostream &O, const Twine &Indent,
3962 VPSlotTracker &SlotTracker) const {
3964 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
3965 IG->getInsertPos()->printAsOperand(O, false);
3966 O << ", ";
3968 O << ", ";
3970 if (VPValue *Mask = getMask()) {
3971 O << ", ";
3972 Mask->printAsOperand(O, SlotTracker);
3973 }
3974
3975 unsigned OpIdx = 0;
3976 for (unsigned i = 0; i < IG->getFactor(); ++i) {
3977 if (!IG->getMember(i))
3978 continue;
3979 if (getNumStoreOperands() > 0) {
3980 O << "\n" << Indent << " vp.store ";
3982 O << " to index " << i;
3983 } else {
3984 O << "\n" << Indent << " ";
3986 O << " = vp.load from index " << i;
3987 }
3988 ++OpIdx;
3989 }
3990}
3991#endif
3992
3993 InstructionCost VPInterleaveBase::computeCost(ElementCount VF,
3994 VPCostContext &Ctx) const {
3995 Instruction *InsertPos = getInsertPos();
3996 // Find the VPValue index of the interleave group. We need to skip gaps.
3997 unsigned InsertPosIdx = 0;
3998 for (unsigned Idx = 0; Idx < IG->getFactor(); ++Idx)
3999 if (auto *Member = IG->getMember(Idx)) {
4000 if (Member == InsertPos)
4001 break;
4002 InsertPosIdx++;
4003 }
4004 Type *ValTy = Ctx.Types.inferScalarType(
4005 getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
4006 : getStoredValues()[InsertPosIdx]);
4007 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4008 unsigned AS = getLoadStoreAddressSpace(InsertPos);
4009
4010 unsigned InterleaveFactor = IG->getFactor();
4011 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4012
4013 // Holds the indices of existing members in the interleaved group.
4014 SmallVector<unsigned, 4> Indices;
4015 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4016 if (IG->getMember(IF))
4017 Indices.push_back(IF);
4018
4019 // Calculate the cost of the whole interleaved group.
4020 InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost(
4021 InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
4022 IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
4023
4024 if (!IG->isReverse())
4025 return Cost;
4026
4027 return Cost + IG->getNumMembers() *
4028 Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
4029 VectorTy, VectorTy, {}, Ctx.CostKind,
4030 0);
4031}
4032
4033#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4034 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4035 VPSlotTracker &SlotTracker) const {
4036 O << Indent << "EMIT ";
4038 O << " = CANONICAL-INDUCTION ";
4040}
4041#endif
4042
4043 bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
4044 return IsScalarAfterVectorization &&
4045 (!IsScalable || vputils::onlyFirstLaneUsed(this));
4046}
4047
4048#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4049 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
4050 VPSlotTracker &SlotTracker) const {
4051 assert((getNumOperands() == 3 || getNumOperands() == 5) &&
4052 "unexpected number of operands");
4053 O << Indent << "EMIT ";
4054 printAsOperand(O, SlotTracker);
4055 O << " = WIDEN-POINTER-INDUCTION ";
4056 getStartValue()->printAsOperand(O, SlotTracker);
4057 O << ", ";
4058 getStepValue()->printAsOperand(O, SlotTracker);
4059 O << ", ";
4060 getOperand(2)->printAsOperand(O, SlotTracker);
4061 if (getNumOperands() == 5) {
4062 O << ", ";
4063 getOperand(3)->printAsOperand(O, SlotTracker);
4064 O << ", ";
4065 getOperand(4)->printAsOperand(O, SlotTracker);
4066 }
4067}
4068
4069 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
4070 VPSlotTracker &SlotTracker) const {
4071 O << Indent << "EMIT ";
4072 printAsOperand(O, SlotTracker);
4073 O << " = EXPAND SCEV " << *Expr;
4074}
4075#endif
4076
4077 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
4078 Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
4079 Type *STy = CanonicalIV->getType();
4080 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
4081 ElementCount VF = State.VF;
4082 Value *VStart = VF.isScalar()
4083 ? CanonicalIV
4084 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
4085 Value *VStep = createStepForVF(Builder, STy, VF, getUnrollPart(*this));
4086 if (VF.isVector()) {
4087 VStep = Builder.CreateVectorSplat(VF, VStep);
4088 VStep =
4089 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
4090 }
4091 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
4092 State.set(this, CanonicalVectorIV);
4093}
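// For example, with a fixed VF of 4, canonical IV %iv, and unroll part 1,
// VStart is the splat <%iv, %iv, %iv, %iv>, VStep becomes
// splat(4) + <0, 1, 2, 3> = <4, 5, 6, 7>, and the result is
// vec.iv = <%iv + 4, %iv + 5, %iv + 6, %iv + 7>.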
4094
4095#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4096 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
4097 VPSlotTracker &SlotTracker) const {
4098 O << Indent << "EMIT ";
4100 O << " = WIDEN-CANONICAL-INDUCTION ";
4102}
4103#endif
4104
4105 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
4106 auto &Builder = State.Builder;
4107 // Create a vector from the initial value.
4108 auto *VectorInit = getStartValue()->getLiveInIRValue();
4109
4110 Type *VecTy = State.VF.isScalar()
4111 ? VectorInit->getType()
4112 : VectorType::get(VectorInit->getType(), State.VF);
4113
4114 BasicBlock *VectorPH =
4115 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4116 if (State.VF.isVector()) {
4117 auto *IdxTy = Builder.getInt32Ty();
4118 auto *One = ConstantInt::get(IdxTy, 1);
4119 IRBuilder<>::InsertPointGuard Guard(Builder);
4120 Builder.SetInsertPoint(VectorPH->getTerminator());
4121 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
4122 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4123 VectorInit = Builder.CreateInsertElement(
4124 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
4125 }
4126
4127 // Create a phi node for the new recurrence.
4128 PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
4129 Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
4130 Phi->addIncoming(VectorInit, VectorPH);
4131 State.set(this, Phi);
4132}
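// For example, with a fixed VF of 4 and initial scalar value %init, the
// preheader materializes vector.recur.init = <poison, poison, poison, %init>;
// only the last lane is meaningful, as it supplies the value of the
// recurrence entering the first vector iteration.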
4133
4134 InstructionCost
4135 VPFirstOrderRecurrencePHIRecipe::computeCost(ElementCount VF,
4136 VPCostContext &Ctx) const {
4137 if (VF.isScalar())
4138 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4139
4140 return 0;
4141}
4142
4143#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4144 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
4145 VPSlotTracker &SlotTracker) const {
4146 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
4148 O << " = phi ";
4150}
4151#endif
4152
4153 void VPReductionPHIRecipe::execute(VPTransformState &State) {
4154 // Reductions do not have to start at zero. They can start with
4155 // any loop invariant values.
4156 VPValue *StartVPV = getStartValue();
4157
4158 // In order to support recurrences we need to be able to vectorize Phi nodes.
4159 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4160 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4161 // this value when we vectorize all of the instructions that use the PHI.
4162 BasicBlock *VectorPH =
4163 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4164 bool ScalarPHI = State.VF.isScalar() || IsInLoop;
4165 Value *StartV = State.get(StartVPV, ScalarPHI);
4166 Type *VecTy = StartV->getType();
4167
4168 BasicBlock *HeaderBB = State.CFG.PrevBB;
4169 assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
4170 "recipe must be in the vector loop header");
4171 auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
4172 Phi->insertBefore(HeaderBB->getFirstInsertionPt());
4173 State.set(this, Phi, IsInLoop);
4174
4175 Phi->addIncoming(StartV, VectorPH);
4176}
4177
4178#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4179 void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4180 VPSlotTracker &SlotTracker) const {
4181 O << Indent << "WIDEN-REDUCTION-PHI ";
4182
4184 O << " = phi ";
4186 if (VFScaleFactor != 1)
4187 O << " (VF scaled by 1/" << VFScaleFactor << ")";
4188}
4189#endif
4190
4191 void VPWidenPHIRecipe::execute(VPTransformState &State) {
4192 Value *Op0 = State.get(getOperand(0));
4193 Type *VecTy = Op0->getType();
4194 Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
4195 State.set(this, VecPhi);
4196}
4197
4198#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4199 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4200 VPSlotTracker &SlotTracker) const {
4201 O << Indent << "WIDEN-PHI ";
4202
4204 O << " = phi ";
4206}
4207#endif
4208
4209// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
4210// remove VPActiveLaneMaskPHIRecipe.
4211 void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
4212 BasicBlock *VectorPH =
4213 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4214 Value *StartMask = State.get(getOperand(0));
4215 PHINode *Phi =
4216 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
4217 Phi->addIncoming(StartMask, VectorPH);
4218 State.set(this, Phi);
4219}
4220
4221#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4222 void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4223 VPSlotTracker &SlotTracker) const {
4224 O << Indent << "ACTIVE-LANE-MASK-PHI ";
4225
4227 O << " = phi ";
4229}
4230#endif
4231
4232#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4233 void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4234 VPSlotTracker &SlotTracker) const {
4235 O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
4236
4238 O << " = phi ";
4240}
4241#endif
static SDValue Widen(SelectionDAG *CurDAG, SDValue N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:404
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
std::string Name
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Hexagon Common GEP
loop Loop Strength Reduction
This file provides a LoopVectorizationPlanner class.
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition: Debug.h:119
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This file contains the declarations of different VPlan-related auxiliary helpers.
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID, ArrayRef< const VPValue * > Operands, const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx)
Compute the cost for the intrinsic ID with Operands, produced by R.
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
static Type * getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride, unsigned CurrentPart, IRBuilderBase &Builder)
static void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPLane &Lane, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
static Constant * getSignedIntOrFpConstant(Type *Ty, int64_t C)
A helper function that returns an integer or floating-point constant with value C.
static BranchInst * createCondBranch(Value *Cond, VPBasicBlock *VPBB, VPTransformState &State)
Create a conditional branch using Cond branching to the successors of VPBB.
This file contains the declarations of the Vectorization Plan base classes:
Value * RHS
static const uint32_t IV[8]
Definition: blake3_impl.h:83
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
Definition: Attributes.cpp:234
LLVM Basic Block Representation.
Definition: BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:393
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:337
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:437
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:252
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:248
Conditional or Unconditional Branch instruction.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
Definition: InstrTypes.h:1506
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:984
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:703
@ ICMP_EQ
equal
Definition: InstrTypes.h:699
static LLVM_ABI StringRef getPredicateName(Predicate P)
This is the shared class of boolean and integer constants.
Definition: Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:131
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:163
This is an important base class in LLVM.
Definition: Constant.h:43
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:124
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:327
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition: TypeSize.h:315
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:312
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:323
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:22
void setAllowContract(bool B=true)
Definition: FMF.h:90
bool noSignedZeros() const
Definition: FMF.h:67
bool noInfs() const
Definition: FMF.h:66
void setAllowReciprocal(bool B=true)
Definition: FMF.h:87
bool allowReciprocal() const
Definition: FMF.h:68
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
Definition: Operator.cpp:271
void setNoSignedZeros(bool B=true)
Definition: FMF.h:84
bool allowReassoc() const
Flag queries.
Definition: FMF.h:64
bool approxFunc() const
Definition: FMF.h:70
void setNoNaNs(bool B=true)
Definition: FMF.h:78
void setAllowReassoc(bool B=true)
Flag setters.
Definition: FMF.h:75
bool noNaNs() const
Definition: FMF.h:65
void setApproxFunc(bool B=true)
Definition: FMF.h:93
void setNoInfs(bool B=true)
Definition: FMF.h:81
bool allowContract() const
Definition: FMF.h:69
Class to represent function types.
Definition: DerivedTypes.h:105
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:137
ArrayRef< Type * > params() const
Definition: DerivedTypes.h:132
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:209
bool willReturn() const
Determine if the function will return.
Definition: Function.h:661
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:270
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition: Function.h:594
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:214
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
bool isInBounds() const
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:114
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Definition: IRBuilder.h:497
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2571
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2625
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2155
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2559
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1864
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:2100
LLVM_ABI Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
Definition: IRBuilder.cpp:1087
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1115
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2618
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:502
LLVM_ABI CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:488
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1005
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2637
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2036
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Definition: IRBuilder.h:2238
Value * CreateVScale(Type *Ty, const Twine &Name="")
Create a call to llvm.vscale.<Ty>().
Definition: IRBuilder.h:958
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:201
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:345
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:567
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Definition: IRBuilder.cpp:1071
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2333
Value * CreateFCmpFMF(CmpInst::Predicate P, Value *LHS, Value *RHS, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2457
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1923
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition: IRBuilder.h:527
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1781
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:378
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:834
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:522
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2286
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2463
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2494
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1805
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2329
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Definition: IRBuilder.h:172
Value * CreateCountTrailingZeroElems(Type *ResTy, Value *Mask, bool ZeroIsPoison=true, const Twine &Name="")
Create a call to llvm.experimental_cttz_elts.
Definition: IRBuilder.h:1134
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1420
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1197
LLVM_ABI Value * CreateNAryOp(unsigned Opc, ArrayRef< Value * > Ops, const Twine &Name="", MDNode *FPMathTag=nullptr)
Create either a UnaryOperator or BinaryOperator depending on Opc.
Definition: IRBuilder.cpp:922
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2082
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2593
LLVMContext & getContext() const
Definition: IRBuilder.h:203
LLVM_ABI CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Definition: IRBuilder.cpp:508
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1403
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition: IRBuilder.h:507
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2508
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:605
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1708
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1725
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2341
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:207
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1883
LLVM_ABI Value * CreateVectorInterleave(ArrayRef< Value * > Ops, const Twine &Name="")
Definition: IRBuilder.cpp:1135
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2439
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition: IRBuilder.h:1573
LLVM_ABI Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Definition: IRBuilder.cpp:137
Value * CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a SExt or Trunc from the integer value V to DestTy.
Definition: IRBuilder.h:2115
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1437
LLVM_ABI CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
Definition: IRBuilder.cpp:569
LLVM_ABI CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
Definition: IRBuilder.cpp:538
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2780
This instruction inserts a single (scalar) element into a VectorType value.
VectorType * getType() const
Overload to return most specific vector type.
static InstructionCost getInvalid(CostType Val=0)
bool isCast() const
Definition: Instruction.h:321
bool isBinaryOp() const
Definition: Instruction.h:317
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
Definition: Instruction.h:314
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:312
bool isUnaryOp() const
Definition: Instruction.h:316
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:319
The group of interleaved loads/stores sharing the same stride and close to each other.
Definition: VectorUtils.h:524
uint32_t getFactor() const
Definition: VectorUtils.h:540
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
Definition: VectorUtils.h:594
bool isReverse() const
Definition: VectorUtils.h:539
InstTy * getInsertPos() const
Definition: VectorUtils.h:610
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
Align getAlign() const
Definition: VectorUtils.h:541
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
BlockT * getHeader() const
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
std::pair< MDNode *, MDNode * > getNoAliasMetadataFor(const Instruction *OrigInst) const
Returns a pair containing the alias_scope and noalias metadata nodes for OrigInst,...
LLVM_ABI void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
Root of the metadata hierarchy.
Definition: Metadata.h:63
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1885
static bool isSignedRecurrenceKind(RecurKind Kind)
Returns true if recurrece kind is a signed redux kind.
unsigned getOpcode() const
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindLastIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
const SDValue & getOperand(unsigned Num) const
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition: SetVector.h:59
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition: SetVector.h:93
This class provides computation of slot numbers for LLVM Assembly writing.
Definition: AsmWriter.cpp:757
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:401
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:476
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:541
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:938
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:55
LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr) const
LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
LLVM_ABI InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing operands with the given types.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:273
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:267
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:261
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:352
value_op_iterator value_op_end()
Definition: User.h:313
void setOperand(unsigned i, Value *Val)
Definition: User.h:237
Value * getOperand(unsigned i) const
Definition: User.h:232
value_op_iterator value_op_begin()
Definition: User.h:310
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:3745
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition: VPlan.h:3798
iterator end()
Definition: VPlan.h:3782
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition: VPlan.h:3811
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition: VPlan.h:2419
VPValue * getMask(unsigned Idx) const
Return mask number Idx.
Definition: VPlan.h:2424
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
Definition: VPlan.h:2414
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:81
VPRegionBlock * getParent()
Definition: VPlan.h:173
const VPBasicBlock * getExitingBasicBlock() const
Definition: VPlan.cpp:180
const VPBlocksTy & getPredecessors() const
Definition: VPlan.h:204
VPlan * getPlan()
Definition: VPlan.cpp:155
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
Definition: VPlan.h:356
const VPBlocksTy & getSuccessors() const
Definition: VPlan.h:198
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
Definition: VPlanValue.h:300
void dump() const
Dump the VPDef to stderr (for debugging).
Definition: VPlan.cpp:116
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition: VPlanValue.h:422
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition: VPlanValue.h:417
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition: VPlanValue.h:395
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition: VPlanValue.h:407
unsigned getVPDefID() const
Definition: VPlanValue.h:427
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:3622
VPValue * getStartValue() const
Definition: VPlan.h:3621
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this header phi recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition: VPlan.h:2001
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
Definition: VPlan.h:1707
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Class to record and manage LLVM IR flags.
Definition: VPlan.h:600
FastMathFlagsTy FMFs
Definition: VPlan.h:664
bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
WrapFlagsTy WrapFlags
Definition: VPlan.h:658
void printFlags(raw_ostream &O) const
GEPNoWrapFlags GEPFlags
Definition: VPlan.h:662
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition: VPlan.h:816
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
TruncFlagsTy TruncFlags
Definition: VPlan.h:659
CmpInst::Predicate getPredicate() const
Definition: VPlan.h:798
ExactFlagsTy ExactFlags
Definition: VPlan.h:661
bool hasNoSignedWrap() const
Definition: VPlan.h:840
GEPNoWrapFlags getGEPNoWrapFlags() const
Definition: VPlan.h:810
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
Definition: VPlan.h:813
DisjointFlagsTy DisjointFlags
Definition: VPlan.h:660
bool hasNoUnsignedWrap() const
Definition: VPlan.h:829
NonNegFlagsTy NonNegFlags
Definition: VPlan.h:663
void applyFlags(Instruction &I) const
Apply the IR flags to I.
Definition: VPlan.h:761
A recipe to wrap on original IR instruction not to be modified during execution, except for PHIs.
Definition: VPlan.h:1342
Instruction & getInstruction() const
Definition: VPlan.h:1373
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void extractLastLaneOfFirstOperand(VPBuilder &Builder)
Update the recipes first operand to the last lane of the operand using Builder.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Helper to manage IR metadata for recipes.
Definition: VPlan.h:935
void intersect(const VPIRMetadata &MD)
Intersect this VPIRMetada object with MD, keeping only metadata nodes that are common to both.
void applyMetadata(Instruction &I) const
Add all metadata to I.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Definition: VPlan.h:1098
bool hasResult() const
Definition: VPlan.h:1137
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
Definition: VPlan.h:1053
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition: VPlan.h:1009
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
Definition: VPlan.h:1043
@ ExtractPenultimateElement
Definition: VPlan.h:1019
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition: VPlan.h:1056
@ FirstOrderRecurrenceSplice
Definition: VPlan.h:982
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition: VPlan.h:1047
@ BuildVector
Creates a fixed-width vector containing all operands.
Definition: VPlan.h:1006
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
Definition: VPlan.h:1003
@ VScale
Returns the value for vscale.
Definition: VPlan.h:1058
@ CanonicalIVIncrementForPart
Definition: VPlan.h:996
@ CalculateTripCountMinusVF
Definition: VPlan.h:994
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
Definition: VPlan.h:1177
unsigned getOpcode() const
Definition: VPlan.h:1117
bool onlyFirstPartUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scalar.
void execute(VPTransformState &State) override
Generate the instruction.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
Definition: VPlan.h:2523
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
Definition: VPlan.h:2527
const InterleaveGroup< Instruction > * getInterleaveGroup() const
Definition: VPlan.h:2525
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2517
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition: VPlan.h:2546
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:2511
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition: VPlan.h:2620
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition: VPlan.h:2639
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition: VPlan.h:2590
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer, whereas the term "output IR" refers to code that is generated by the vectorizer.
Definition: VPlanHelpers.h:125
static VPLane getLastLaneForVF(const ElementCount &VF)
Definition: VPlanHelpers.h:166
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
Definition: VPlanHelpers.h:152
static VPLane getFirstLane()
Definition: VPlanHelpers.h:150
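A short sketch of how these VPLane constructors combine with an ElementCount; the concrete VFs are assumed for illustration, and getLastLaneForVF is understood here as getLaneFromEnd with offset 1:

void vpLaneExamples() {
  ElementCount FixedVF = ElementCount::getFixed(4);       // VF = 4
  ElementCount ScalableVF = ElementCount::getScalable(4); // VF = vscale x 4
  VPLane First = VPLane::getFirstLane();                  // lane 0
  VPLane Last = VPLane::getLastLaneForVF(FixedVF);        // lane 3
  // For scalable VFs, lanes near the end are expressed as offsets from
  // the end, since the absolute lane number depends on vscale.
  VPLane Penultimate = VPLane::getLaneFromEnd(ScalableVF, 2);
  (void)First; (void)Last; (void)Penultimate;
}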
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPPartialReductionRecipe.
unsigned getOpcode() const
Get the binary op's opcode.
Definition: VPlan.h:2777
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, which also equals the number of incoming blocks.
Definition: VPlan.h:1263
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
Definition: VPlan.h:3889
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition: VPlan.h:1288
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
Definition: VPlan.h:1255
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition: VPlan.h:394
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legacy cost model as a fallback.
VPBasicBlock * getParent()
Definition: VPlan.h:415
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition: VPlan.h:482
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the ForceTargetInstructionCost flag.
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos lives in, right after MovePos.
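These list-manipulation members mirror the ilist API on IR instructions. A minimal sketch of the common replace-and-erase pattern built from them (the helper name is illustrative):

void replaceAndErase(VPRecipeBase &Old, VPSingleDefRecipe *New) {
  New->insertBefore(&Old);                     // link New right before Old
  Old.getVPSingleValue()->replaceAllUsesWith(New);
  Old.eraseFromParent();                       // unlink and delete Old
}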
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition: VPlan.h:2822
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition: VPlan.h:2719
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition: VPlan.h:2723
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition: VPlan.h:2725
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
Definition: VPlan.h:2715
bool isOrdered() const
Return true if the in-loop reduction is ordered.
Definition: VPlan.h:2717
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition: VPlan.h:2721
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
Definition: VPlan.h:2837
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
Definition: VPlan.h:2882
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
unsigned getOpcode() const
Definition: VPlan.h:2911
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:3687
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
VPSingleDef is a base class for recipes modeling a sequence of one or more output IR instructions that define a single result VPValue.
Definition: VPlan.h:521
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition: VPlan.h:586
LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
This class can be used to assign names to VPValues.
Definition: VPlanHelpers.h:382
LLVMContext & getContext()
Return the LLVMContext used by the analysis.
Definition: VPlanAnalysis.h:67
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
Definition: VPlan.h:923
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to their defs.
Definition: VPlanValue.h:197
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition: VPlan.cpp:1449
operand_range operands()
Definition: VPlanValue.h:265
void setOperand(unsigned I, VPValue *New)
Definition: VPlanValue.h:241
unsigned getNumOperands() const
Definition: VPlanValue.h:235
operand_iterator op_begin()
Definition: VPlanValue.h:261
VPValue * getOperand(unsigned N) const
Definition: VPlanValue.h:236
virtual bool onlyFirstLaneUsed(const VPValue *Op) const
Returns true if the VPUser only uses the first lane of operand Op.
Definition: VPlanValue.h:280
void addOperand(VPValue *Operand)
Definition: VPlanValue.h:230
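A sketch of the usual operand-rewriting loop over this interface (the helper name is illustrative):

void replaceOperand(VPUser &U, VPValue *From, VPValue *To) {
  for (unsigned I = 0, E = U.getNumOperands(); I != E; ++I)
    if (U.getOperand(I) == From)
      U.setOperand(I, To); // updates the inverse def-use edge as well
}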
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
Definition: VPlan.cpp:1403
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue, or nullptr if it is not defined by a recipe, i.e. it is a live-in.
Definition: VPlan.cpp:125
friend class VPExpressionRecipe
Definition: VPlanValue.h:53
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition: VPlan.cpp:1445
bool hasMoreThanOneUniqueUser() const
Returns true if the value has more than one unique user.
Definition: VPlanValue.h:140
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition: VPlanValue.h:174
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition: VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition: VPlan.cpp:1406
user_iterator user_begin()
Definition: VPlanValue.h:130
unsigned getNumUsers() const
Definition: VPlanValue.h:113
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
Definition: VPlanValue.h:169
user_range users()
Definition: VPlanValue.h:134
VPDef * Def
Pointer to the VPDef that defines this VPValue.
Definition: VPlanValue.h:65
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
operand_range args()
Definition: VPlan.h:1664
Function * getCalledScalarFunction() const
Definition: VPlan.h:1660
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1>, ...} and step = <VF*UF, VF*UF, ..., VF*UF>.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition: VPlan.h:1533
void execute(VPTransformState &State) override
Produce widened copies of the cast.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
VPValue * getStepValue()
Returns the step value of the induction.
Definition: VPlan.h:2057
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst, if it is one, or nullptr otherwise.
Definition: VPlan.h:2168
Type * getScalarType() const
Returns the scalar type of the induction.
Definition: VPlan.h:2177
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
Definition: VPlan.h:1598
StringRef getIntrinsicName() const
Return the name of the intrinsic as a string.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Return the scalar return type of the intrinsic.
Definition: VPlan.h:1601
void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
Definition: VPlan.h:3125
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
Definition: VPlan.h:3122
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition: VPlan.h:3162
Instruction & Ingredient
Definition: VPlan.h:3116
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
Definition: VPlan.h:3119
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:3176
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:3169
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
Definition: VPlan.h:3166
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition: VPlan.h:1437
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State....
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
Definition: VPlan.h:1470
unsigned getUF() const
Definition: VPlan.h:4265
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition: VPlan.cpp:1040
LLVM Value Representation.
Definition: Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:390
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1101
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition: Value.h:838
bool hasName() const
Definition: Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:322
Base class of all SIMD vector types.
Definition: DerivedTypes.h:430
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
Definition: DerivedTypes.h:695
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Type * getElementType() const
Definition: DerivedTypes.h:463
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:169
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
Definition: TypeSize.h:255
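Because ElementCount is polynomial in vscale, code in this file consistently branches on isScalable() before interpreting the minimum value. A small sketch; the vscale estimate is an assumed parameter, not anything this file computes:

unsigned estimateElements(ElementCount VF, unsigned AssumedVScale) {
  // <vscale x 4 x i32> has known-min 4; the real count is 4 * vscale.
  if (VF.isScalable())
    return VF.getKnownMinValue() * AssumedVScale;
  return VF.getFixedValue();
  // Related: VF.divideCoefficientBy(2) halves the coefficient,
  // e.g. <vscale x 4> -> <vscale x 2>.
}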
const ParentTy * getParent() const
Definition: ilist_node.h:34
self_iterator getIterator()
Definition: ilist_node.h:134
iterator erase(iterator where)
Definition: ilist.h:204
pointer remove(iterator &IT)
Definition: ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:126
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:751
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm....
Definition: Intrinsics.cpp:44
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
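These combinators compose into declarative IR checks. A self-contained sketch; the matched shape is purely illustrative:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isSelectOfSub(Value *V) {
  Value *Cond, *X, *Y;
  // Matches: select Cond, (X - Y), <anything>
  return match(V, m_Select(m_Value(Cond), m_Sub(m_Value(X), m_Value(Y)),
                           m_Value()));
}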
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or because only its first lane is used.
Definition: VPlanUtils.h:44
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
Definition: VPlanUtils.cpp:22
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
Definition: VPlanUtils.cpp:17
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
Definition: VPlanUtils.cpp:27
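These per-value predicates are thin wrappers over the per-user queries listed earlier; a plausible shape consistent with the briefs above (the function name marks it as a sketch):

bool onlyFirstLaneUsedSketch(VPValue *Def) {
  // Holds only if every user restricts itself to lane 0 of Def.
  return all_of(Def->users(),
                [Def](VPUser *U) { return U->onlyFirstLaneUsed(Def); });
}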
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:338
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
Definition: LoopUtils.cpp:1313
@ Offset
Definition: DWP.cpp:477
LLVM_ABI Value * createFindLastIVReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind, Value *Start, Value *Sentinel)
Create a reduction of the given vector Src for a reduction of the kind RecurKind::FindLastIV.
Definition: LoopUtils.cpp:1247
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1744
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
Definition: LoopUtils.cpp:1023
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item in the sequence and B, C, ... are the values from the input ranges.
Definition: STLExtras.h:2491
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition: STLExtras.h:2250
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
Definition: LoopUtils.cpp:1116
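A sketch of these reduction helpers in use (the function names are illustrative): createSimpleReduction folds a whole vector to a scalar, while createMinMaxOp emits one lane-wise min/max step.

Value *reduceAddToScalar(IRBuilderBase &B, Value *VecSrc) {
  // Emits a call to llvm.vector.reduce.add over VecSrc.
  return createSimpleReduction(B, VecSrc, RecurKind::Add);
}

Value *signedMax(IRBuilderBase &B, Value *L, Value *R) {
  // Expands the select(cmp()) / smax pattern for a signed-max recurrence.
  return createMinMaxOp(B, RecurKind::SMax, L, R);
}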
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1751
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
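A sketch of the mask helpers feeding a shufflevector, as the interleave-group code above does; fixed VF = 4 and interleave factor 2 are assumed for illustration:

Value *extractOddMembers(IRBuilderBase &B, Value *WideVec) {
  // createStrideMask(/*Start=*/1, /*Stride=*/2, /*VF=*/4) -> <1, 3, 5, 7>
  SmallVector<int, 16> Mask = createStrideMask(1, 2, 4);
  return B.CreateShuffleVector(WideVec, Mask, "strided.vec");
}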
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector containing copies of its elements.
Definition: SmallVector.h:1300
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:345
@ Other
Any other memory.
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elements are valid vector element types.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
RecurKind
These are the kinds of recurrences that we support.
Definition: IVDescriptors.h:34
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are integer type.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity value for the @llvm.vector.reduce.* intrinsic used to generate it.
Definition: LoopUtils.cpp:1305
DWARFExpression::Operation Op
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
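A sketch of createStepForVF driving an induction update; for scalable VFs the helper emits a vscale-scaled value rather than a plain constant (the function name is illustrative):

Value *advanceIndex(IRBuilderBase &B, Value *Index, ElementCount VF) {
  // Step = 1 * VF, i.e. the number of elements consumed per iteration.
  Value *Step = createStepForVF(B, Index->getType(), VF, /*Step=*/1);
  return B.CreateAdd(Index, Step, "index.next");
}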
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1916
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
Definition: LoopUtils.cpp:1366
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
InstructionCost Cost
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI Value * createAnyOfReduction(IRBuilderBase &B, Value *Src, Value *InitVal, PHINode *OrigPhi)
Create a reduction of the given vector Src for a reduction of kind RecurKind::AnyOf.
Definition: LoopUtils.cpp:1217
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Struct to hold various analysis needed for cost computations.
Definition: VPlanHelpers.h:344
LLVMContext & LLVMCtx
Definition: VPlanHelpers.h:348
TargetTransformInfo::OperandValueInfo getOperandInfo(VPValue *V) const
Returns the OperandInfo for V, if it is a live-in.
Definition: VPlan.cpp:1633
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. because it has already been pre-computed.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of all recipes migrates to VPlan.
TargetTransformInfo::TargetCostKind CostKind
Definition: VPlanHelpers.h:351
VPTypeAnalysis Types
Definition: VPlanHelpers.h:347
const TargetLibraryInfo & TLI
Definition: VPlanHelpers.h:346
const TargetTransformInfo & TTI
Definition: VPlanHelpers.h:345
SmallPtrSet< Instruction *, 8 > SkipCostComputation
Definition: VPlanHelpers.h:350
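A sketch of how a computeCost implementation typically routes a query through this context, pairing the type analysis with the TTI hooks; the opcode choice is illustrative:

InstructionCost widenedAddCost(ElementCount VF, VPCostContext &Ctx,
                               VPValue *Op) {
  Type *ScalarTy = Ctx.Types.inferScalarType(Op);
  Type *VectorTy = toVectorTy(ScalarTy, VF); // widen to the chosen VF
  return Ctx.TTI.getArithmeticInstrCost(Instruction::Add, VectorTy,
                                        Ctx.CostKind);
}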
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes, enabling convenient use of cast/dyn_cast/isa and execute() inherited from VPIRInstruction.
Definition: VPlan.h:1410
PHINode & getIRPhi()
Definition: VPlan.h:1418
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
Definition: VPlan.h:869
std::optional< InstructionCost > getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
BasicBlock * PrevBB
The previous IR BasicBlock created or used.
Definition: VPlanHelpers.h:303
SmallDenseMap< const VPBasicBlock *, BasicBlock * > VPBB2IRBB
A mapping of each VPBasicBlock to the corresponding BasicBlock.
Definition: VPlanHelpers.h:311
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
Definition: VPlanHelpers.h:205
void reset(const VPValue *Def, Value *V)
Reset an existing vector value for Def and a given Part.
Definition: VPlanHelpers.h:261
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
Definition: VPlanHelpers.h:337
struct llvm::VPTransformState::CFGState CFG
Value * get(const VPValue *Def, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def if IsScalar is false, otherwise return the generated scalar value.
Definition: VPlan.cpp:283
std::optional< VPLane > Lane
Hold the index to generate specific scalar instructions.
Definition: VPlanHelpers.h:219
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
Definition: VPlanHelpers.h:328
bool hasScalarValue(const VPValue *Def, VPLane Lane)
Definition: VPlanHelpers.h:240
const TargetTransformInfo * TTI
Target Transform Info.
Definition: VPlanHelpers.h:211
void set(const VPValue *Def, Value *V, bool IsScalar=false)
Set the generated vector Value for a given VPValue, if IsScalar is false.
Definition: VPlanHelpers.h:250
bool hasVectorValue(const VPValue *Def)
Definition: VPlanHelpers.h:236
ElementCount VF
The chosen Vectorization Factor of the loop being vectorized.
Definition: VPlanHelpers.h:214
Value * packScalarIntoVectorizedValue(const VPValue *Def, Value *WideValue, const VPLane &Lane)
Insert the scalar value of Def at Lane into Lane of WideValue and return the resulting value.
Definition: VPlan.cpp:393
AssumptionCache * AC
Hold a pointer to AssumptionCache to register new assumptions after replicating assume calls.
Definition: VPlanHelpers.h:325
void setDebugLocFrom(DebugLoc DL)
Set the debug location in the builder using the debug location DL.
Definition: VPlan.cpp:371
Loop * CurrentParentLoop
The parent loop object for the current scope, or nullptr.
Definition: VPlanHelpers.h:334
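Taken together, these members give execute() implementations everything they need. A sketch of the canonical shape, complementing the flags/metadata sketch earlier; the recipe and value names are illustrative:

void MyRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  Value *Wide = State.get(getOperand(0));    // per-lane (widened) operand
  Value *Uniform = State.get(getOperand(1), /*IsScalar=*/true);
  Value *Splat = State.Builder.CreateVectorSplat(State.VF, Uniform);
  Value *Res = State.Builder.CreateMul(Wide, Splat, "widened.mul");
  State.set(this, Res); // make the result visible to users of this recipe
}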
void execute(VPTransformState &State) override
Generate the wide load or gather.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition: VPlan.h:3249
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isInvariantCond() const
Definition: VPlan.h:1753
VPValue * getCond() const
Definition: VPlan.h:1749
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenSelectRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the select instruction.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition: VPlan.h:3330
void execute(VPTransformState &State) override
Generate the wide store or scatter.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition: VPlan.h:3333
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition: VPlan.h:3294
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.