//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains implementations for different VPlan recipes.
///
//===----------------------------------------------------------------------===//

#include "LoopVectorizationPlanner.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanHelpers.h"
#include "VPlanPatternMatch.h"
#include "VPlanUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/VectorBuilder.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>

using namespace llvm;

using VectorParts = SmallVector<Value *, 2>;
extern cl::opt<unsigned> ForceTargetInstructionCost;
#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

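// Conservative memory-effect queries for recipes: each switch below
// enumerates the recipe kinds whose behavior is known; anything that is not
// listed falls through to the default and is treated as touching memory.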
bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
  case VPInstructionSC:
    return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
  case VPInterleaveEVLSC:
  case VPInterleaveSC:
    return cast<VPInterleaveBase>(this)->getNumStoreOperands() > 0;
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyReadsMemory();
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory();
  case VPCanonicalIVPHISC:
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPReductionPHISC:
  case VPScalarIVStepsSC:
  case VPPredInstPHISC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
  case VPInstructionSC:
    return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyWritesMemory();
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory();
  case VPBranchOnMaskSC:
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPScalarIVStepsSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    // FIXME: Return false if the recipe represents an interleaved store.
    return true;
  }
}

bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPExpressionSC:
    return cast<VPExpressionRecipe>(this)->mayHaveSideEffects();
  case VPDerivedIVSC:
  case VPFirstOrderRecurrencePHISC:
  case VPPredInstPHISC:
  case VPVectorEndPointerSC:
    return false;
  case VPInstructionSC:
    return mayWriteToMemory();
  case VPWidenCallSC: {
    Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
    return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
  }
  case VPWidenIntrinsicSC:
    return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPScalarIVStepsSC:
  case VPVectorPointerSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenPointerInductionSC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPInterleaveEVLSC:
  case VPInterleaveSC:
    return mayWriteToMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    assert(
        cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
            mayWriteToMemory() &&
        "mayHaveSideEffects result for ingredient differs from this "
        "implementation");
    return mayWriteToMemory();
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

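// Helpers for linking recipes in and out of VPBasicBlocks. A recipe is in at
// most one block at a time; Parent tracks it and the asserts below enforce
// the invariant.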
void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, InsertPos->getIterator());
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  BB.insert(this, I);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

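// Cost computation entry point: skip recipes whose underlying instruction has
// already been costed, honor -force-target-instruction-cost, and otherwise
// defer to the recipe's computeCost implementation.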
InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
  // Get the underlying instruction for the recipe, if there is one. It is
  // used to
  // * decide if cost computation should be skipped for this recipe,
  // * apply forced target instruction cost.
  Instruction *UI = nullptr;
  if (auto *S = dyn_cast<VPSingleDefRecipe>(this))
    UI = dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
  else if (auto *IG = dyn_cast<VPInterleaveBase>(this))
    UI = IG->getInsertPos();
  else if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(this))
    UI = &WidenMem->getIngredient();

  InstructionCost RecipeCost;
  if (UI && Ctx.skipCostComputation(UI, VF.isVector())) {
    RecipeCost = 0;
  } else {
    RecipeCost = computeCost(VF, Ctx);
    if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
        RecipeCost.isValid())
      RecipeCost = InstructionCost(ForceTargetInstructionCost);
  }

  LLVM_DEBUG({
    dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
    dump();
  });
  return RecipeCost;
}

InstructionCost VPRecipeBase::computeCost(ElementCount VF,
                                          VPCostContext &Ctx) const {
  llvm_unreachable("subclasses should implement computeCost");
}

bool VPRecipeBase::isPhi() const {
  return (getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC) ||
         isa<VPIRPhi, VPPhi>(this);
}

bool VPRecipeBase::isScalarCast() const {
  auto *VPI = dyn_cast<VPInstruction>(this);
  return VPI && Instruction::isCast(VPI->getOpcode());
}

InstructionCost
VPPartialReductionRecipe::computeCost(ElementCount VF,
                                      VPCostContext &Ctx) const {
  std::optional<unsigned> Opcode;
  VPValue *Op = getOperand(0);
  VPRecipeBase *OpR = Op->getDefiningRecipe();

  // If the partial reduction is predicated, a select will be operand 0.
  using namespace llvm::VPlanPatternMatch;
  if (match(Op, m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) {
    OpR = Op->getDefiningRecipe();
  }

  Type *InputTypeA = nullptr, *InputTypeB = nullptr;
  TTI::PartialReductionExtendKind ExtAType = TTI::PR_None,
                                  ExtBType = TTI::PR_None;

  auto GetExtendKind = [](VPRecipeBase *R) {
    if (!R)
      return TTI::PR_None;
    auto *WidenCastR = dyn_cast<VPWidenCastRecipe>(R);
    if (!WidenCastR)
      return TTI::PR_None;
    if (WidenCastR->getOpcode() == Instruction::CastOps::ZExt)
      return TTI::PR_ZeroExtend;
    if (WidenCastR->getOpcode() == Instruction::CastOps::SExt)
      return TTI::PR_SignExtend;
    return TTI::PR_None;
  };

  // Pick out opcode, type/ext information and use sub side effects from a
  // widen recipe.
  auto HandleWiden = [&](VPWidenRecipe *Widen) {
    if (match(Widen, m_Sub(m_ZeroInt(), m_VPValue(Op)))) {
      Widen = dyn_cast<VPWidenRecipe>(Op->getDefiningRecipe());
    }
    Opcode = Widen->getOpcode();
    VPRecipeBase *ExtAR = Widen->getOperand(0)->getDefiningRecipe();
    VPRecipeBase *ExtBR = Widen->getOperand(1)->getDefiningRecipe();
    InputTypeA = Ctx.Types.inferScalarType(ExtAR ? ExtAR->getOperand(0)
                                                 : Widen->getOperand(0));
    InputTypeB = Ctx.Types.inferScalarType(ExtBR ? ExtBR->getOperand(0)
                                                 : Widen->getOperand(1));
    ExtAType = GetExtendKind(ExtAR);
    ExtBType = GetExtendKind(ExtBR);
  };

  if (isa<VPWidenCastRecipe>(OpR)) {
    InputTypeA = Ctx.Types.inferScalarType(OpR->getOperand(0));
    ExtAType = GetExtendKind(OpR);
  } else if (isa<VPReductionPHIRecipe>(OpR)) {
    auto RedPhiOp1R = getOperand(1)->getDefiningRecipe();
    if (isa<VPWidenCastRecipe>(RedPhiOp1R)) {
      InputTypeA = Ctx.Types.inferScalarType(RedPhiOp1R->getOperand(0));
      ExtAType = GetExtendKind(RedPhiOp1R);
    } else if (auto Widen = dyn_cast<VPWidenRecipe>(RedPhiOp1R))
      HandleWiden(Widen);
  } else if (auto Widen = dyn_cast<VPWidenRecipe>(OpR)) {
    HandleWiden(Widen);
  } else if (auto Reduction = dyn_cast<VPPartialReductionRecipe>(OpR)) {
    return Reduction->computeCost(VF, Ctx);
  }
  auto *PhiType = Ctx.Types.inferScalarType(getOperand(1));
  return Ctx.TTI.getPartialReductionCost(getOpcode(), InputTypeA, InputTypeB,
                                         PhiType, VF, ExtAType, ExtBType,
                                         Opcode, Ctx.CostKind);
}

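// The partial-reduction recipe lowers to the llvm.vector.partial.reduce.add
// intrinsic; illustrative IR for VF=16 accumulating into a 4-element phi:
//   %partial.reduce = call <4 x i32>
//       @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %phi,
//                                                    <16 x i32> %bin.op)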
void VPPartialReductionRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;

  assert(getOpcode() == Instruction::Add &&
         "Unhandled partial reduction opcode");

  Value *BinOpVal = State.get(getOperand(1));
  Value *PhiVal = State.get(getOperand(0));
  assert(PhiVal && BinOpVal && "Phi and Mul must be set");

  Type *RetTy = PhiVal->getType();

  CallInst *V =
      Builder.CreateIntrinsic(RetTy, Intrinsic::vector_partial_reduce_add,
                              {PhiVal, BinOpVal}, nullptr, "partial.reduce");

  State.set(this, V);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPartialReductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                     VPSlotTracker &SlotTracker) const {
  O << Indent << "PARTIAL-REDUCE ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(getOpcode()) << " ";
  printOperands(O, SlotTracker);
}
#endif

void VPIRFlags::intersectFlags(const VPIRFlags &Other) {
  assert(OpType == Other.OpType && "OpType must match");
  switch (OpType) {
  case OperationType::OverflowingBinOp:
    WrapFlags.HasNUW &= Other.WrapFlags.HasNUW;
    WrapFlags.HasNSW &= Other.WrapFlags.HasNSW;
    break;
  case OperationType::Trunc:
    TruncFlags.HasNUW &= Other.TruncFlags.HasNUW;
    TruncFlags.HasNSW &= Other.TruncFlags.HasNSW;
    break;
  case OperationType::DisjointOp:
    DisjointFlags.IsDisjoint &= Other.DisjointFlags.IsDisjoint;
    break;
  case OperationType::PossiblyExactOp:
    ExactFlags.IsExact &= Other.ExactFlags.IsExact;
    break;
  case OperationType::GEPOp:
    GEPFlags &= Other.GEPFlags;
    break;
  case OperationType::FPMathOp:
    FMFs.NoNaNs &= Other.FMFs.NoNaNs;
    FMFs.NoInfs &= Other.FMFs.NoInfs;
    break;
  case OperationType::NonNegOp:
    NonNegFlags.NonNeg &= Other.NonNegFlags.NonNeg;
    break;
  case OperationType::Cmp:
    assert(CmpPredicate == Other.CmpPredicate && "Cannot drop CmpPredicate");
    break;
  case OperationType::Other:
    assert(AllFlags == Other.AllFlags && "Cannot drop other flags");
    break;
  }
}

FastMathFlags VPIRFlags::getFastMathFlags() const {
  assert(OpType == OperationType::FPMathOp &&
         "recipe doesn't have fast math flags");
  FastMathFlags Res;
  Res.setAllowReassoc(FMFs.AllowReassoc);
  Res.setNoNaNs(FMFs.NoNaNs);
  Res.setNoInfs(FMFs.NoInfs);
  Res.setNoSignedZeros(FMFs.NoSignedZeros);
  Res.setAllowReciprocal(FMFs.AllowReciprocal);
  Res.setAllowContract(FMFs.AllowContract);
  Res.setApproxFunc(FMFs.ApproxFunc);
  return Res;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPSingleDefRecipe::dump() const { VPDef::dump(); }
#endif

template <unsigned PartOpIdx>
VPValue *
VPUnrollPartAccessor<PartOpIdx>::getUnrollPartOperand(const VPUser &U) const {
  if (U.getNumOperands() == PartOpIdx + 1)
    return U.getOperand(PartOpIdx);
  return nullptr;
}

template <unsigned PartOpIdx>
unsigned VPUnrollPartAccessor<PartOpIdx>::getUnrollPart(const VPUser &U) const {
  if (auto *UnrollPartOp = getUnrollPartOperand(U))
    return cast<ConstantInt>(UnrollPartOp->getLiveInIRValue())->getZExtValue();
  return 0;
}

namespace llvm {
template class VPUnrollPartAccessor<1>;
template class VPUnrollPartAccessor<2>;
template class VPUnrollPartAccessor<3>;
} // namespace llvm

VPInstruction::VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands,
                             const VPIRFlags &Flags, DebugLoc DL,
                             const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, Flags, DL),
      VPIRMetadata(), Opcode(Opcode), Name(Name.str()) {
  assert(flagsValidForOpcode(getOpcode()) &&
         "Set flags not supported for the provided opcode");
  assert((getNumOperandsForOpcode(Opcode) == -1u ||
          getNumOperandsForOpcode(Opcode) == getNumOperands()) &&
         "number of operands does not match opcode");
}

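// Expected operand count per opcode, used by the constructor's assert above;
// -1u means the count cannot be determined from the opcode alone.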
#ifndef NDEBUG
unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
  if (Instruction::isUnaryOp(Opcode) || Instruction::isCast(Opcode))
    return 1;

  if (Instruction::isBinaryOp(Opcode))
    return 2;

  switch (Opcode) {
  case VPInstruction::StepVector:
  case VPInstruction::VScale:
    return 0;
  case Instruction::Alloca:
  case Instruction::ExtractValue:
  case Instruction::Freeze:
  case Instruction::Load:
  case VPInstruction::AnyOf:
  case VPInstruction::BranchOnCond:
  case VPInstruction::Broadcast:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::ExtractLastElement:
  case VPInstruction::ExtractPenultimateElement:
  case VPInstruction::FirstActiveLane:
  case VPInstruction::Not:
  case VPInstruction::ResumeForEpilogue:
    return 1;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case VPInstruction::BranchOnCount:
  case VPInstruction::ExtractLane:
  case VPInstruction::FirstOrderRecurrenceSplice:
  case VPInstruction::LogicalAnd:
  case VPInstruction::PtrAdd:
  case VPInstruction::WidePtrAdd:
  case VPInstruction::WideIVStep:
    return 2;
  case Instruction::Select:
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::ComputeAnyOfResult:
  case VPInstruction::ReductionStartVector:
    return 3;
  case VPInstruction::ComputeFindIVResult:
    return 4;
  case Instruction::Call:
  case Instruction::GetElementPtr:
  case Instruction::PHI:
  case Instruction::Switch:
    // Cannot determine the number of operands from the opcode.
    return -1u;
  }
  llvm_unreachable("all cases should be handled above");
}
#endif

bool VPInstruction::canGenerateScalarForFirstLane() const {
  if (Instruction::isBinaryOp(getOpcode()))
    return true;
  if (isSingleScalar() || isVectorToScalar())
    return true;
  switch (Opcode) {
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::PHI:
  case Instruction::Select:
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::AnyOf:
  case VPInstruction::FirstActiveLane:
  case VPInstruction::PtrAdd:
    return true;
  default:
    return false;
  }
}

/// Create a conditional branch using \p Cond branching to the successors of \p
/// VPBB. Note that the first successor is always forward (i.e. not created yet)
/// while the second successor may already have been created (if it is a header
/// block and VPBB is a latch).
static BranchInst *createCondBranch(Value *Cond, VPBasicBlock *VPBB,
                                    VPTransformState &State) {
  // Replace the temporary unreachable terminator with a new conditional
  // branch, hooking it up to backward destination (header) for latch blocks
  // now, and to forward destination(s) later when they are created.
  // Second successor may be backwards - iff it is already in VPBB2IRBB.
  VPBasicBlock *SecondVPSucc = cast<VPBasicBlock>(VPBB->getSuccessors()[1]);
  BasicBlock *SecondIRSucc = State.CFG.VPBB2IRBB.lookup(SecondVPSucc);
  BasicBlock *IRBB = State.CFG.VPBB2IRBB[VPBB];
  BranchInst *CondBr = State.Builder.CreateCondBr(Cond, IRBB, SecondIRSucc);
  // First successor is always forward, reset it to nullptr.
  CondBr->setSuccessor(0, nullptr);
  IRBB->getTerminator()->eraseFromParent();
  return CondBr;
}

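// Code generation for a single VPInstruction. Binary opcodes are handled
// before the switch; the cases below implement the VPlan-specific opcodes.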
Value *VPInstruction::generate(VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;

  if (Instruction::isBinaryOp(getOpcode())) {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
    auto *Res =
        Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
    if (auto *I = dyn_cast<Instruction>(Res))
      applyFlags(*I);
    return Res;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    return Builder.CreateNot(A, Name);
  }
  case Instruction::ExtractElement: {
    assert(State.VF.isVector() && "Only extract elements from vectors");
    if (getOperand(1)->isLiveIn()) {
      unsigned IdxToExtract =
          cast<ConstantInt>(getOperand(1)->getLiveInIRValue())->getZExtValue();
      return State.get(getOperand(0), VPLane(IdxToExtract));
    }
    Value *Vec = State.get(getOperand(0));
    Value *Idx = State.get(getOperand(1), /*IsScalar=*/true);
    return Builder.CreateExtractElement(Vec, Idx, Name);
  }
  case Instruction::Freeze: {
    Value *Op = State.get(getOperand(0), vputils::onlyFirstLaneUsed(this));
    return Builder.CreateFreeze(Op, Name);
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
    return Builder.CreateCmp(getPredicate(), A, B, Name);
  }
  case Instruction::PHI: {
    llvm_unreachable("should be handled by VPPhi::execute");
  }
  case Instruction::Select: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *Cond = State.get(getOperand(0), OnlyFirstLaneUsed);
    Value *Op1 = State.get(getOperand(1), OnlyFirstLaneUsed);
    Value *Op2 = State.get(getOperand(2), OnlyFirstLaneUsed);
    return Builder.CreateSelect(Cond, Op1, Op2, Name);
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), VPLane(0));

    // If this part of the active lane mask is scalar, generate the CMP directly
    // to avoid unnecessary extracts.
    if (State.VF.isScalar())
      return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
                               Name);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto PredTy = VectorType::get(
        Int1Ty, State.VF * cast<ConstantInt>(getOperand(2)->getLiveInIRValue())
                               ->getZExtValue());
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {PredTy, ScalarTC->getType()},
                                   {VIVElem0, ScalarTC}, nullptr, Name);
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    // vector.ph:
    //   v_init = vector(..., ..., ..., a[-1])
    //   br vector.body
    //
    // vector.body
    //   i = phi [0, vector.ph], [i+4, vector.body]
    //   v1 = phi [v_init, vector.ph], [v2, vector.body]
    //   v2 = a[i, i+1, i+2, i+3];
    //   v3 = vector(v1(3), v2(0, 1, 2))

    auto *V1 = State.get(getOperand(0));
    if (!V1->getType()->isVectorTy())
      return V1;
    Value *V2 = State.get(getOperand(1));
    return Builder.CreateVectorSplice(V1, V2, -1, Name);
  }
  case VPInstruction::CalculateTripCountMinusVF: {
    unsigned UF = getParent()->getPlan()->getUF();
    Value *ScalarTC = State.get(getOperand(0), VPLane(0));
    Value *Step = createStepForVF(Builder, ScalarTC->getType(), State.VF, UF);
    Value *Sub = Builder.CreateSub(ScalarTC, Step);
    Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
    Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
    return Builder.CreateSelect(Cmp, Sub, Zero);
  }
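  // ExplicitVectorLength below lowers to llvm.experimental.get.vector.length;
  // illustrative IR for a scalable VF with a known minimum of 4:
  //   %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl,
  //                                                            i32 4, i1 true)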
  case VPInstruction::ExplicitVectorLength: {
    // TODO: Restructure this code with an explicit remainder loop, vsetvli can
    // be outside of the main loop.
    Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
    // Compute EVL.
    assert(AVL->getType()->isIntegerTy() &&
           "Requested vector length should be an integer.");

    assert(State.VF.isScalable() && "Expected scalable vector factor.");
    Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());

    Value *EVL = State.Builder.CreateIntrinsic(
        State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
        {AVL, VFArg, State.Builder.getTrue()});
    return EVL;
  }
  case VPInstruction::CanonicalIVIncrementForPart: {
    unsigned Part = getUnrollPart(*this);
    auto *IV = State.get(getOperand(0), VPLane(0));
    assert(Part != 0 && "Must have a positive part");
    // The canonical IV is incremented by the vectorization factor (num of
    // SIMD elements) times the unroll part.
    Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
    return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
                             hasNoSignedWrap());
  }
  case VPInstruction::BranchOnCond: {
    Value *Cond = State.get(getOperand(0), VPLane(0));
    auto *Br = createCondBranch(Cond, getParent(), State);
    applyMetadata(*Br);
    return Br;
  }
  case VPInstruction::BranchOnCount: {
    // First create the compare.
    Value *IV = State.get(getOperand(0), /*IsScalar*/ true);
    Value *TC = State.get(getOperand(1), /*IsScalar*/ true);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);
    return createCondBranch(Cond, getParent(), State);
  }
  case VPInstruction::Broadcast: {
    return Builder.CreateVectorSplat(
        State.VF, State.get(getOperand(0), /*IsScalar*/ true), "broadcast");
  }
  case VPInstruction::BuildStructVector: {
    // For struct types, we need to build a new 'wide' struct type, where each
    // element is widened, i.e., we create a struct of vectors.
    auto *StructTy =
        cast<StructType>(State.TypeAnalysis.inferScalarType(getOperand(0)));
    Value *Res = PoisonValue::get(toVectorizedTy(StructTy, State.VF));
    for (const auto &[LaneIndex, Op] : enumerate(operands())) {
      for (unsigned FieldIndex = 0; FieldIndex != StructTy->getNumElements();
           FieldIndex++) {
        Value *ScalarValue =
            Builder.CreateExtractValue(State.get(Op, true), FieldIndex);
        Value *VectorValue = Builder.CreateExtractValue(Res, FieldIndex);
        VectorValue =
            Builder.CreateInsertElement(VectorValue, ScalarValue, LaneIndex);
        Res = Builder.CreateInsertValue(Res, VectorValue, FieldIndex);
      }
    }
    return Res;
  }
  case VPInstruction::BuildVector: {
    auto *ScalarTy = State.TypeAnalysis.inferScalarType(getOperand(0));
    auto NumOfElements = ElementCount::getFixed(getNumOperands());
    Value *Res = PoisonValue::get(toVectorizedTy(ScalarTy, NumOfElements));
    for (const auto &[Idx, Op] : enumerate(operands()))
      Res = State.Builder.CreateInsertElement(Res, State.get(Op, true),
                                              State.Builder.getInt32(Idx));
    return Res;
  }
  case VPInstruction::ReductionStartVector: {
    if (State.VF.isScalar())
      return State.get(getOperand(0), true);
    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    Builder.setFastMathFlags(getFastMathFlags());
    // If this start vector is scaled then it should produce a vector with fewer
    // elements than the VF.
    ElementCount VF = State.VF.divideCoefficientBy(
        cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue());
    auto *Iden = Builder.CreateVectorSplat(VF, State.get(getOperand(1), true));
    Constant *Zero = Builder.getInt32(0);
    return Builder.CreateInsertElement(Iden, State.get(getOperand(0), true),
                                       Zero);
  }
  case VPInstruction::ComputeAnyOfResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
    Value *ReducedPartRdx = State.get(getOperand(2));
    for (unsigned Idx = 3; Idx < getNumOperands(); ++Idx)
      ReducedPartRdx = Builder.CreateBinOp(
          Instruction::Or,
          State.get(getOperand(Idx)), ReducedPartRdx, "bin.rdx");
    return createAnyOfReduction(Builder, ReducedPartRdx,
                                State.get(getOperand(1), VPLane(0)), OrigPhi);
  }
  case VPInstruction::ComputeFindIVResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    // Get its reduction variable descriptor.
    RecurKind RK = PhiR->getRecurrenceKind();
    assert(RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
           "Unexpected reduction kind");
    assert(!PhiR->isInLoop() &&
           "In-loop FindLastIV reduction is not supported yet");

    // The recipe's operands are the reduction phi, the start value, the
    // sentinel value, followed by one operand for each part of the reduction.
    unsigned UF = getNumOperands() - 3;
    Value *ReducedPartRdx = State.get(getOperand(3));
    bool IsSigned = RecurrenceDescriptor::isSignedRecurrenceKind(RK);
    RecurKind MinMaxKind;
    if (RecurrenceDescriptor::isFindLastIVRecurrenceKind(RK))
      MinMaxKind = IsSigned ? RecurKind::SMax : RecurKind::UMax;
    else
      MinMaxKind = IsSigned ? RecurKind::SMin : RecurKind::UMin;
    for (unsigned Part = 1; Part < UF; ++Part)
      ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
                                      State.get(getOperand(3 + Part)));

    Value *Start = State.get(getOperand(1), true);
    Value *Sentinel = State.get(getOperand(2), true);
    return createFindLastIVReduction(Builder, ReducedPartRdx, RK, Start,
                                     Sentinel);
  }
  case VPInstruction::ComputeReductionResult: {
    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    // Get its reduction variable descriptor.

    RecurKind RK = PhiR->getRecurrenceKind();
    assert(!RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
           "should be handled by ComputeFindIVResult");

    // The recipe's operands are the reduction phi, followed by one operand for
    // each part of the reduction.
    unsigned UF = getNumOperands() - 1;
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part)
      RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());

    IRBuilderBase::FastMathFlagGuard FMFG(Builder);
    if (hasFastMathFlags())
      Builder.setFastMathFlags(getFastMathFlags());

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    if (PhiR->isOrdered()) {
      ReducedPartRdx = RdxParts[UF - 1];
    } else {
      // Floating-point operations should have some FMF to enable the reduction.
      for (unsigned Part = 1; Part < UF; ++Part) {
        Value *RdxPart = RdxParts[Part];
        if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK))
          ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
        else {
          Instruction::BinaryOps Opcode;
          // For sub-recurrences, each UF's reduction variable is already
          // negative, we need to do: reduce.add(-acc_uf0 + -acc_uf1)
          if (RK == RecurKind::Sub)
            Opcode = Instruction::Add;
          else
            Opcode =
                (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(RK);
          ReducedPartRdx =
              Builder.CreateBinOp(Opcode, RdxPart, ReducedPartRdx, "bin.rdx");
        }
      }
    }

    // Create the reduction after the loop. Note that inloop reductions create
    // the target reduction in the loop using a Reduction recipe.
    if (State.VF.isVector() && !PhiR->isInLoop()) {
      // TODO: Support in-order reductions based on the recurrence descriptor.
      // All ops in the reduction inherit fast-math-flags from the recurrence
      // descriptor.
      ReducedPartRdx = createSimpleReduction(Builder, ReducedPartRdx, RK);
    }

    return ReducedPartRdx;
  }
  case VPInstruction::ExtractLastElement:
  case VPInstruction::ExtractPenultimateElement: {
    unsigned Offset = getOpcode() == VPInstruction::ExtractLastElement ? 1 : 2;
    Value *Res;
    if (State.VF.isVector()) {
      assert(Offset <= State.VF.getKnownMinValue() &&
             "invalid offset to extract from");
      // Extract lane VF - Offset from the operand.
      Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
    } else {
      assert(Offset <= 1 && "invalid offset to extract from");
      Res = State.get(getOperand(0));
    }
    if (isa<Instruction>(Res))
      Res->setName(Name);
    return Res;
  }
  case VPInstruction::LogicalAnd: {
    Value *A = State.get(getOperand(0));
    Value *B = State.get(getOperand(1));
    return Builder.CreateLogicalAnd(A, B, Name);
  }
  case VPInstruction::PtrAdd: {
    assert(vputils::onlyFirstLaneUsed(this) &&
           "can only generate first lane for PtrAdd");
    Value *Ptr = State.get(getOperand(0), VPLane(0));
    Value *Addend = State.get(getOperand(1), VPLane(0));
    return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
  }
  case VPInstruction::WidePtrAdd: {
    Value *Ptr =
        State.get(getOperand(0), vputils::isSingleScalar(getOperand(0)));
    Value *Addend = State.get(getOperand(1));
    return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
  }
  case VPInstruction::AnyOf: {
    Value *Res = Builder.CreateFreeze(State.get(getOperand(0)));
    for (VPValue *Op : drop_begin(operands()))
      Res = Builder.CreateOr(Res, Builder.CreateFreeze(State.get(Op)));
    return State.VF.isScalar() ? Res : Builder.CreateOrReduce(Res);
  }
  case VPInstruction::ExtractLane: {
    Value *LaneToExtract = State.get(getOperand(0), true);
    Type *IdxTy = State.TypeAnalysis.inferScalarType(getOperand(0));
    Value *Res = nullptr;
    Value *RuntimeVF = getRuntimeVF(State.Builder, IdxTy, State.VF);

    for (unsigned Idx = 1; Idx != getNumOperands(); ++Idx) {
      Value *VectorStart =
          Builder.CreateMul(RuntimeVF, ConstantInt::get(IdxTy, Idx - 1));
      Value *VectorIdx = Idx == 1
                             ? LaneToExtract
                             : Builder.CreateSub(LaneToExtract, VectorStart);
      Value *Ext = State.VF.isScalar()
                       ? State.get(getOperand(Idx))
                       : Builder.CreateExtractElement(
                             State.get(getOperand(Idx)), VectorIdx);
      if (Res) {
        Value *Cmp = Builder.CreateICmpUGE(LaneToExtract, VectorStart);
        Res = Builder.CreateSelect(Cmp, Ext, Res);
      } else {
        Res = Ext;
      }
    }
    return Res;
  }
  case VPInstruction::FirstActiveLane: {
    if (getNumOperands() == 1) {
      Value *Mask = State.get(getOperand(0));
      return Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(), Mask,
                                                  true, Name);
    }
    // If there are multiple operands, create a chain of selects to pick the
    // first operand with an active lane and add the number of lanes of the
    // preceding operands.
    Value *RuntimeVF =
        getRuntimeVF(State.Builder, State.Builder.getInt64Ty(), State.VF);
    unsigned LastOpIdx = getNumOperands() - 1;
    Value *Res = nullptr;
    for (int Idx = LastOpIdx; Idx >= 0; --Idx) {
      Value *TrailingZeros =
          State.VF.isScalar()
              ? Builder.CreateZExt(
                    Builder.CreateICmpEQ(State.get(getOperand(Idx)),
                                         Builder.getFalse()),
                    Builder.getInt64Ty())
              : Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(),
                                                     State.get(getOperand(Idx)),
                                                     true, Name);
      Value *Current = Builder.CreateAdd(
          Builder.CreateMul(RuntimeVF, Builder.getInt64(Idx)), TrailingZeros);
      if (Res) {
        Value *Cmp = Builder.CreateICmpNE(TrailingZeros, RuntimeVF);
        Res = Builder.CreateSelect(Cmp, Current, Res);
      } else {
        Res = Current;
      }
    }

    return Res;
  }
  case VPInstruction::ResumeForEpilogue:
    return State.get(getOperand(0), true);
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

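// Cost modeling for opcodes that mirror plain IR instructions (binary ops,
// compares, freeze, extractvalue), shared by the computeCost implementation
// below.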
InstructionCost VPInstruction::computeCostForRecipeWithOpcode(
    unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const {
  Type *ScalarTy = Ctx.Types.inferScalarType(this);
  Type *ResultTy = VF.isVector() ? toVectorTy(ScalarTy, VF) : ScalarTy;
  switch (Opcode) {
  case Instruction::FNeg:
    return Ctx.TTI.getArithmeticInstrCost(Opcode, ResultTy, Ctx.CostKind);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    TTI::OperandValueInfo RHSInfo = {TargetTransformInfo::OK_AnyValue,
                                     TargetTransformInfo::OP_None};

    if (VF.isVector()) {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand. One example of this are shifts on x86.
      VPValue *RHS = getOperand(1);
      RHSInfo = Ctx.getOperandInfo(RHS);

      if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
          RHS->isDefinedOutsideLoopRegions())
        RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
    }

    SmallVector<const Value *, 4> Operands;
    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
    if (CtxI)
      Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
    return Ctx.TTI.getArithmeticInstrCost(
        Opcode, ResultTy, Ctx.CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        RHSInfo, Operands, CtxI, &Ctx.TLI);
  }
  case Instruction::Freeze:
    // This opcode is unknown. Assume that it is the same as 'mul'.
    return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, ResultTy,
                                          Ctx.CostKind);
  case Instruction::ExtractValue:
    return Ctx.TTI.getInsertExtractValueCost(Instruction::ExtractValue,
                                             Ctx.CostKind);
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ScalarOpTy = Ctx.Types.inferScalarType(getOperand(0));
    Type *OpTy = VF.isVector() ? toVectorTy(ScalarOpTy, VF) : ScalarOpTy;
    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
    return Ctx.TTI.getCmpSelInstrCost(
        Opcode, OpTy, CmpInst::makeCmpResultType(OpTy), getPredicate(),
        Ctx.CostKind, {TTI::OK_AnyValue, TTI::OP_None},
        {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
  }
  }
  llvm_unreachable("called for unsupported opcode");
}

InstructionCost VPInstruction::computeCost(ElementCount VF,
                                           VPCostContext &Ctx) const {
  if (Instruction::isBinaryOp(getOpcode())) {
    if (!getUnderlyingValue() && getOpcode() != Instruction::FMul) {
      // TODO: Compute cost for VPInstructions without underlying values once
      // the legacy cost model has been retired.
      return 0;
    }

    assert(!doesGeneratePerAllLanes() &&
           "Should only generate a vector value or single scalar, not scalars "
           "for all lanes.");
    return computeCostForRecipeWithOpcode(
        getOpcode(),
        vputils::onlyFirstLaneUsed(this) ? ElementCount::getFixed(1) : VF, Ctx);
  }

  switch (getOpcode()) {
  case Instruction::Select: {
    // TODO: It may be possible to improve this by analyzing where the
    // condition operand comes from.
    CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
    auto *CondTy = Ctx.Types.inferScalarType(getOperand(0));
    auto *VecTy = Ctx.Types.inferScalarType(getOperand(1));
    if (!vputils::onlyFirstLaneUsed(this)) {
      CondTy = toVectorTy(CondTy, VF);
      VecTy = toVectorTy(VecTy, VF);
    }
    return Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, Pred,
                                      Ctx.CostKind);
  }
  case Instruction::ExtractElement:
  case VPInstruction::ExtractLane: {
    if (VF.isScalar()) {
      // ExtractLane with VF=1 takes care of handling extracting across multiple
      // parts.
      return 0;
    }

    // Add on the cost of extracting the element.
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
    return Ctx.TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                      Ctx.CostKind);
  }
  case VPInstruction::AnyOf: {
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
    return Ctx.TTI.getArithmeticReductionCost(
        Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
  }
  case VPInstruction::FirstActiveLane: {
    Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0));
    if (VF.isScalar())
      return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
                                        CmpInst::makeCmpResultType(ScalarTy),
                                        CmpInst::ICMP_EQ, Ctx.CostKind);
    // Calculate the cost of determining the lane index.
    auto *PredTy = toVectorTy(ScalarTy, VF);
    IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts,
                                  Type::getInt64Ty(Ctx.LLVMCtx),
                                  {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?");
    SmallVector<int> Mask(VF.getKnownMinValue());
    std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);

    return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
                                  cast<VectorType>(VectorTy),
                                  cast<VectorType>(VectorTy), Mask,
                                  Ctx.CostKind, VF.getKnownMinValue() - 1);
  }
  case VPInstruction::ActiveLaneMask: {
    Type *ArgTy = Ctx.Types.inferScalarType(getOperand(0));
    unsigned Multiplier =
        cast<ConstantInt>(getOperand(2)->getLiveInIRValue())->getZExtValue();
    Type *RetTy = toVectorTy(Type::getInt1Ty(Ctx.LLVMCtx), VF * Multiplier);
    IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
                                  {ArgTy, ArgTy});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::ExplicitVectorLength: {
    Type *Arg0Ty = Ctx.Types.inferScalarType(getOperand(0));
    Type *I32Ty = Type::getInt32Ty(Ctx.LLVMCtx);
    Type *I1Ty = Type::getInt1Ty(Ctx.LLVMCtx);
    IntrinsicCostAttributes Attrs(Intrinsic::experimental_get_vector_length,
                                  I32Ty, {Arg0Ty, I32Ty, I1Ty});
    return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
  }
  case VPInstruction::ExtractLastElement: {
    // Add on the cost of extracting the element.
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
    return Ctx.TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
                                                    VecTy, Ctx.CostKind, 0);
  }
  case VPInstruction::ExtractPenultimateElement:
    if (VF == ElementCount::getScalable(1))
      return InstructionCost::getInvalid();
    [[fallthrough]];
  default:
    // TODO: Compute cost for other VPInstructions once the legacy cost model
    // has been retired.
    assert(!getUnderlyingValue() &&
           "unexpected VPInstruction with underlying value");
    return 0;
  }
}

bool VPInstruction::isVectorToScalar() const {
  return getOpcode() == VPInstruction::ExtractLastElement ||
         getOpcode() == VPInstruction::ExtractPenultimateElement ||
         getOpcode() == VPInstruction::ExtractLane ||
         getOpcode() == VPInstruction::ComputeAnyOfResult ||
         getOpcode() == VPInstruction::ComputeFindIVResult ||
         getOpcode() == VPInstruction::ComputeReductionResult ||
         getOpcode() == VPInstruction::FirstActiveLane ||
         getOpcode() == VPInstruction::AnyOf;
}

bool VPInstruction::isSingleScalar() const {
  switch (getOpcode()) {
  case Instruction::PHI:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::ResumeForEpilogue:
  case VPInstruction::VScale:
    return true;
  default:
    return isScalarCast();
  }
}

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Lane && "VPInstruction executing a Lane");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  assert(flagsValidForOpcode(getOpcode()) &&
         "Set flags not supported for the provided opcode");
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());
  Value *GeneratedValue = generate(State);
  if (!hasResult())
    return;
  assert(GeneratedValue && "generate must produce a value");
  bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
                                   (vputils::onlyFirstLaneUsed(this) ||
                                    isVectorToScalar() || isSingleScalar());
  assert((((GeneratedValue->getType()->isVectorTy() ||
            GeneratedValue->getType()->isStructTy()) ==
           !GeneratesPerFirstLaneOnly) ||
          State.VF.isScalar()) &&
         "scalar value but not only first lane defined");
  State.set(this, GeneratedValue,
            /*IsScalar*/ GeneratesPerFirstLaneOnly);
}

bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
  if (Instruction::isBinaryOp(getOpcode()) || isScalarCast())
    return false;
  switch (getOpcode()) {
  case Instruction::ExtractElement:
  case Instruction::Freeze:
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::PHI:
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::AnyOf:
  case VPInstruction::Broadcast:
  case VPInstruction::BuildStructVector:
  case VPInstruction::BuildVector:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::ExtractLane:
  case VPInstruction::ExtractLastElement:
  case VPInstruction::ExtractPenultimateElement:
  case VPInstruction::FirstActiveLane:
  case VPInstruction::FirstOrderRecurrenceSplice:
  case VPInstruction::Not:
  case VPInstruction::LogicalAnd:
  case VPInstruction::PtrAdd:
  case VPInstruction::WidePtrAdd:
  case VPInstruction::ReductionStartVector:
  case VPInstruction::ResumeForEpilogue:
  case VPInstruction::StepVector:
    return false;
  default:
    return true;
  }
}

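// Operand-usage queries: report whether only the first lane or only the first
// part of operand Op is read, which lets transforms keep such operands scalar.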
bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()) || isScalarCast())
    return vputils::onlyFirstLaneUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ExtractElement:
    return Op == getOperand(1);
  case Instruction::PHI:
    return true;
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Or:
  case Instruction::Freeze:
  case VPInstruction::Not:
    // TODO: Cover additional opcodes.
    return vputils::onlyFirstLaneUsed(this);
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::Broadcast:
  case VPInstruction::ResumeForEpilogue:
    return true;
  case VPInstruction::BuildStructVector:
  case VPInstruction::BuildVector:
    // Before replicating by VF, Build(Struct)Vector uses all lanes of the
    // operand, after replicating its operands only the first lane is used.
    // Before replicating, it will have only a single operand.
    return getNumOperands() > 1;
  case VPInstruction::PtrAdd:
    return Op == getOperand(0) || vputils::onlyFirstLaneUsed(this);
  case VPInstruction::WidePtrAdd:
    return Op == getOperand(0);
  case VPInstruction::ComputeAnyOfResult:
  case VPInstruction::ComputeFindIVResult:
    return Op == getOperand(1);
  case VPInstruction::ExtractLane:
    return Op == getOperand(0);
  }
  llvm_unreachable("switch should return");
}

bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstPartUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
    return vputils::onlyFirstPartUsed(this);
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::CanonicalIVIncrementForPart:
    return true;
  }
  llvm_unreachable("switch should return");
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::ExplicitVectorLength:
    O << "EXPLICIT-VECTOR-LENGTH";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::BranchOnCond:
    O << "branch-on-cond";
    break;
  case VPInstruction::CalculateTripCountMinusVF:
    O << "TC > VF ? TC - VF : 0";
    break;
  case VPInstruction::CanonicalIVIncrementForPart:
    O << "VF * Part +";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count";
    break;
  case VPInstruction::Broadcast:
    O << "broadcast";
    break;
  case VPInstruction::BuildStructVector:
    O << "buildstructvector";
    break;
  case VPInstruction::BuildVector:
    O << "buildvector";
    break;
  case VPInstruction::ExtractLane:
    O << "extract-lane";
    break;
  case VPInstruction::ExtractLastElement:
    O << "extract-last-element";
    break;
  case VPInstruction::ExtractPenultimateElement:
    O << "extract-penultimate-element";
    break;
  case VPInstruction::ComputeAnyOfResult:
    O << "compute-anyof-result";
    break;
  case VPInstruction::ComputeFindIVResult:
    O << "compute-find-iv-result";
    break;
  case VPInstruction::ComputeReductionResult:
    O << "compute-reduction-result";
    break;
  case VPInstruction::LogicalAnd:
    O << "logical-and";
    break;
  case VPInstruction::PtrAdd:
    O << "ptradd";
    break;
  case VPInstruction::WidePtrAdd:
    O << "wide-ptradd";
    break;
  case VPInstruction::AnyOf:
    O << "any-of";
    break;
  case VPInstruction::FirstActiveLane:
    O << "first-active-lane";
    break;
  case VPInstruction::ReductionStartVector:
    O << "reduction-start-vector";
    break;
  case VPInstruction::ResumeForEpilogue:
    O << "resume-for-epilogue";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  printFlags(O);
  printOperands(O, SlotTracker);

  if (auto DL = getDebugLoc()) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

void VPInstructionWithType::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  if (isScalarCast()) {
    Value *Op = State.get(getOperand(0), VPLane(0));
    Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
                                           Op, ResultTy);
    State.set(this, Cast, VPLane(0));
    return;
  }
  switch (getOpcode()) {
  case VPInstruction::StepVector: {
    Value *StepVector =
        State.Builder.CreateStepVector(VectorType::get(ResultTy, State.VF));
    State.set(this, StepVector);
    break;
  }
  case VPInstruction::VScale: {
    Value *VScale = State.Builder.CreateVScale(ResultTy);
    State.set(this, VScale, true);
    break;
  }
  default:
    llvm_unreachable("opcode not implemented yet");
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstructionWithType::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
  printAsOperand(O, SlotTracker);
  O << " = ";

  switch (getOpcode()) {
  case VPInstruction::WideIVStep:
    O << "wide-iv-step ";
    printOperands(O, SlotTracker);
    break;
  case VPInstruction::StepVector:
    O << "step-vector " << *ResultTy;
    break;
  case VPInstruction::VScale:
    O << "vscale " << *ResultTy;
    break;
  default:
    assert(Instruction::isCast(getOpcode()) && "unhandled opcode");
    O << Instruction::getOpcodeName(getOpcode()) << " ";
    printOperands(O, SlotTracker);
    O << " to " << *ResultTy;
  }
}
#endif

void VPPhi::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  PHINode *NewPhi = State.Builder.CreatePHI(
      State.TypeAnalysis.inferScalarType(this), 2, getName());
  unsigned NumIncoming = getNumIncoming();
  if (getParent() != getParent()->getPlan()->getScalarPreheader()) {
    // TODO: Fixup all incoming values of header phis once recipes defining
    // them are introduced.
    NumIncoming = 1;
  }
  for (unsigned Idx = 0; Idx != NumIncoming; ++Idx) {
    Value *IncV = State.get(getIncomingValue(Idx), VPLane(0));
    BasicBlock *PredBB = State.CFG.VPBB2IRBB.at(getIncomingBlock(Idx));
    NewPhi->addIncoming(IncV, PredBB);
  }
  State.set(this, NewPhi, VPLane(0));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPhi::print(raw_ostream &O, const Twine &Indent,
                  VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT" << (isSingleScalar() ? "-SCALAR" : "") << " ";
  printAsOperand(O, SlotTracker);
  O << " = phi ";
  printPhiOperands(O, SlotTracker);
}
#endif

VPIRInstruction *VPIRInstruction::create(Instruction &I) {
  if (auto *Phi = dyn_cast<PHINode>(&I))
    return new VPIRPhi(*Phi);
  return new VPIRInstruction(I);
}

void VPIRInstruction::execute(VPTransformState &State) {
  assert(!isa<VPIRPhi>(this) && getNumOperands() == 0 &&
         "PHINodes must be handled by VPIRPhi");
  // Advance the insert point after the wrapped IR instruction. This allows
  // interleaving VPIRInstructions and other recipes.
  State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
}

InstructionCost VPIRInstruction::computeCost(ElementCount VF,
                                             VPCostContext &Ctx) const {
  // The recipe wraps an existing IR instruction on the border of VPlan's
  // scope, hence it does not contribute to the cost-modeling for the VPlan.
  return 0;
}

void VPIRInstruction::extractLastLaneOfFirstOperand(VPBuilder &Builder) {
  assert(isa<PHINode>(getInstruction()) &&
         "can only update exiting operands to phi nodes");
  assert(getNumOperands() > 0 && "must have at least one operand");
  VPValue *Exiting = getOperand(0);
  if (Exiting->isLiveIn())
    return;

  Exiting = Builder.createNaryOp(VPInstruction::ExtractLastElement, {Exiting});
  setOperand(0, Exiting);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPIRInstruction::print(raw_ostream &O, const Twine &Indent,
                            VPSlotTracker &SlotTracker) const {
  O << Indent << "IR " << I;
}
#endif

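// VPIRPhi wraps a phi that lives in an exit or scalar-preheader block;
// executing it fills in (or updates) the incoming value for each predecessor
// VPBasicBlock.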
void VPIRPhi::execute(VPTransformState &State) {
  PHINode *Phi = &getIRPhi();
  for (const auto &[Idx, Op] : enumerate(operands())) {
    VPValue *ExitValue = Op;
    auto Lane = vputils::isSingleScalar(ExitValue)
                    ? VPLane::getFirstLane()
                    : VPLane::getLastLaneForVF(State.VF);
    VPBlockBase *Pred = getParent()->getPredecessors()[Idx];
    auto *PredVPBB = Pred->getExitingBasicBlock();
    BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
    // Set insertion point in PredBB in case an extract needs to be generated.
    // TODO: Model extracts explicitly.
    State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
    Value *V = State.get(ExitValue, VPLane(Lane));
    // If there is no existing block for PredBB in the phi, add a new incoming
    // value. Otherwise update the existing incoming value for PredBB.
    if (Phi->getBasicBlockIndex(PredBB) == -1)
      Phi->addIncoming(V, PredBB);
    else
      Phi->setIncomingValueForBlock(PredBB, V);
  }

  // Advance the insert point after the wrapped IR instruction. This allows
  // interleaving VPIRInstructions and other recipes.
  State.Builder.SetInsertPoint(Phi->getParent(), std::next(Phi->getIterator()));
}

void VPPhiAccessors::removeIncomingValueFor(VPBlockBase *IncomingBlock) const {
  VPRecipeBase *R = const_cast<VPRecipeBase *>(getAsRecipe());
  assert(R->getNumOperands() == R->getParent()->getNumPredecessors() &&
         "Number of phi operands must match number of predecessors");
  unsigned Position = R->getParent()->getIndexForPredecessor(IncomingBlock);
  R->removeOperand(Position);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPPhiAccessors::printPhiOperands(raw_ostream &O,
                                      VPSlotTracker &SlotTracker) const {
  interleaveComma(enumerate(getAsRecipe()->operands()), O,
                  [this, &O, &SlotTracker](auto Op) {
                    O << "[ ";
                    Op.value()->printAsOperand(O, SlotTracker);
                    O << ", ";
                    getIncomingBlock(Op.index())->printAsOperand(O);
                    O << " ]";
                  });
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPIRPhi::print(raw_ostream &O, const Twine &Indent,
                    VPSlotTracker &SlotTracker) const {
  VPIRInstruction::print(O, Indent, SlotTracker);

  if (getNumOperands() != 0) {
    O << " (extra operand" << (getNumOperands() > 1 ? "s" : "") << ": ";
    interleaveComma(zip(operands(), getParent()->getPredecessors()), O,
                    [&O, &SlotTracker](auto Op) {
                      std::get<0>(Op)->printAsOperand(O, SlotTracker);
                      O << " from ";
                      std::get<1>(Op)->printAsOperand(O);
                    });
    O << ")";
  }
}
#endif

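// Metadata handling: a recipe records (kind, node) pairs taken from the
// underlying IR instruction, plus noalias metadata added by loop versioning,
// and re-applies them to the generated instructions.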
VPIRMetadata::VPIRMetadata(Instruction &I, LoopVersioning *LVer)
    : VPIRMetadata(I) {
  if (!LVer || !isa<LoadInst, StoreInst>(&I))
    return;
  const auto &[AliasScopeMD, NoAliasMD] = LVer->getNoAliasMetadataFor(&I);
  if (AliasScopeMD)
    Metadata.emplace_back(LLVMContext::MD_alias_scope, AliasScopeMD);
  if (NoAliasMD)
    Metadata.emplace_back(LLVMContext::MD_noalias, NoAliasMD);
}

void VPIRMetadata::applyMetadata(Instruction &I) const {
  for (const auto &[Kind, Node] : Metadata)
    I.setMetadata(Kind, Node);
}

void VPIRMetadata::intersect(const VPIRMetadata &Other) {
  SmallVector<std::pair<unsigned, MDNode *>> MetadataIntersection;
  for (const auto &[KindA, MDA] : Metadata) {
    for (const auto &[KindB, MDB] : Other.Metadata) {
      if (KindA == KindB && MDA == MDB) {
        MetadataIntersection.emplace_back(KindA, MDA);
        break;
      }
    }
  }
  Metadata = std::move(MetadataIntersection);
}

void VPWidenCallRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  assert(Variant != nullptr && "Can't create vector function.");

  FunctionType *VFTy = Variant->getFunctionType();
  SmallVector<Value *, 4> Args;
  for (const auto &I : enumerate(args())) {
    Value *Arg;
    // Some vectorized function variants may also take a scalar argument,
    // e.g. linear parameters for pointers. This needs to be the scalar value
    // from the start of the respective part when interleaving.
    if (!VFTy->getParamType(I.index())->isVectorTy())
      Arg = State.get(I.value(), VPLane(0));
    else
      Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));
    Args.push_back(Arg);
  }

  SmallVector<OperandBundleDef, 1> OpBundles;
  auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
  if (CI)
    CI->getOperandBundlesAsDefs(OpBundles);

  CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
  applyFlags(*V);
  applyMetadata(*V);
  V->setCallingConv(Variant->getCallingConv());

  if (!V->getType()->isVoidTy())
    State.set(this, V);
}

InstructionCost VPWidenCallRecipe::computeCost(ElementCount VF,
                                               VPCostContext &Ctx) const {
  return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
                                  Variant->getFunctionType()->params(),
                                  Ctx.CostKind);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  Function *CalledFn = getCalledScalarFunction();
  if (CalledFn->getReturnType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call";
  printFlags(O);
  O << " @" << CalledFn->getName() << "(";
  interleaveComma(args(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";

  O << " (using library function";
  if (Variant->hasName())
    O << ": " << Variant->getName();
  O << ")";
}
#endif

void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");

  SmallVector<Type *, 2> TysForDecl;
  // Add return type if intrinsic is overloaded on it.
  if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1, State.TTI))
    TysForDecl.push_back(VectorType::get(getResultType(), State.VF));
  SmallVector<Value *, 4> Args;
  for (const auto &I : enumerate(operands())) {
    // Some intrinsics have a scalar argument - don't replace it with a
    // vector.
    Value *Arg;
    if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
                                           State.TTI))
      Arg = State.get(I.value(), VPLane(0));
    else
      Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));
    if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index(),
                                               State.TTI))
      TysForDecl.push_back(Arg->getType());
    Args.push_back(Arg);
  }

  // Use vector version of the intrinsic.
  Module *M = State.Builder.GetInsertBlock()->getModule();
  Function *VectorF =
      Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
  assert(VectorF &&
         "Can't retrieve vector intrinsic or vector-predication intrinsics.");

  SmallVector<OperandBundleDef, 1> OpBundles;
  auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
  if (CI)
    CI->getOperandBundlesAsDefs(OpBundles);

  CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);

  applyFlags(*V);
  applyMetadata(*V);

  if (!V->getType()->isVoidTy())
    State.set(this, V);
}

/// Compute the cost for the intrinsic \p ID with \p Operands, produced by \p R.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID,
                                            ArrayRef<const VPValue *> Operands,
                                            const VPRecipeWithIRFlags &R,
                                            ElementCount VF,
                                            VPCostContext &Ctx) {
  // Some backends analyze intrinsic arguments to determine cost. Use the
  // underlying value for the operand if it has one. Otherwise try to use the
  // operand of the underlying call instruction, if there is one. Otherwise
  // clear Arguments.
  // TODO: Rework TTI interface to be independent of concrete IR values.
  SmallVector<const Value *> Arguments;
  for (const auto &[Idx, Op] : enumerate(Operands)) {
    auto *V = Op->getUnderlyingValue();
    if (!V) {
      if (auto *UI = dyn_cast_or_null<CallBase>(R.getUnderlyingValue())) {
        Arguments.push_back(UI->getArgOperand(Idx));
        continue;
      }
      Arguments.clear();
      break;
    }
    Arguments.push_back(V);
  }

  Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
  Type *RetTy = VF.isVector() ? toVectorizedTy(ScalarRetTy, VF) : ScalarRetTy;
  SmallVector<Type *> ParamTys;
  for (const VPValue *Op : Operands) {
    ParamTys.push_back(VF.isVector()
                           ? toVectorTy(Ctx.Types.inferScalarType(Op), VF)
                           : Ctx.Types.inferScalarType(Op));
  }

  // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
  FastMathFlags FMF =
      R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
  IntrinsicCostAttributes CostAttrs(
      ID, RetTy, Arguments, ParamTys, FMF,
      dyn_cast_or_null<IntrinsicInst>(R.getUnderlyingValue()),
      InstructionCost::getInvalid(), &Ctx.TLI);
  return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
}

InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
                                                    VPCostContext &Ctx) const {
  SmallVector<const VPValue *> ArgOps(operands());
  return getCostForIntrinsics(VectorIntrinsicID, ArgOps, *this, VF, Ctx);
}

StringRef VPWidenIntrinsicRecipe::getIntrinsicName() const {
  return Intrinsic::getBaseName(VectorIntrinsicID);
}

bool VPWidenIntrinsicRecipe::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  return all_of(enumerate(operands()), [this, &Op](const auto &X) {
    auto [Idx, V] = X;
    return V != Op || isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID,
                                                         Idx, nullptr);
  });
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenIntrinsicRecipe::print(raw_ostream &O, const Twine &Indent,
                                   VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INTRINSIC ";
  if (ResultTy->isVoidTy()) {
    O << "void ";
  } else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call";
  printFlags(O);
  O << getIntrinsicName() << "(";

  interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";
}
#endif

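// The histogram recipe lowers to llvm.experimental.vector.histogram.add;
// illustrative IR for VF=4 with an i32 increment:
//   call void @llvm.experimental.vector.histogram.add.v4p0.i32(
//       <4 x ptr> %buckets, i32 %inc, <4 x i1> %mask)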
void VPHistogramRecipe::execute(VPTransformState &State) {
  IRBuilderBase &Builder = State.Builder;

  Value *Address = State.get(getOperand(0));
  Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
  VectorType *VTy = cast<VectorType>(Address->getType());

  // The histogram intrinsic requires a mask even if the recipe doesn't;
  // if the mask operand was omitted then all lanes should be executed and
  // we just need to synthesize an all-true mask.
  Value *Mask = nullptr;
  if (VPValue *VPMask = getMask())
    Mask = State.get(VPMask);
  else
    Mask =
        Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));

  // If this is a subtract, we want to invert the increment amount. We may
  // add a separate intrinsic in future, but for now we'll try this.
  if (Opcode == Instruction::Sub)
    IncAmt = Builder.CreateNeg(IncAmt);
  else
    assert(Opcode == Instruction::Add && "only add or sub supported for now");

  State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
                                {VTy, IncAmt->getType()},
                                {Address, IncAmt, Mask});
}

InstructionCost VPHistogramRecipe::computeCost(ElementCount VF,
                                               VPCostContext &Ctx) const {
  // FIXME: Take the gather and scatter into account as well. For now we're
  //        generating the same cost as the fallback path, but we'll likely
  //        need to create a new TTI method for determining the cost, including
  //        whether we can use base + vec-of-smaller-indices or just
  //        vec-of-pointers.
  assert(VF.isVector() && "Invalid VF for histogram cost");
  Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
  VPValue *IncAmt = getOperand(1);
  Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
  VectorType *VTy = VectorType::get(IncTy, VF);

  // Assume that a non-constant update value (or a constant != 1) requires
  // a multiply, and add that into the cost.
  InstructionCost MulCost =
      Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy, Ctx.CostKind);
  if (IncAmt->isLiveIn()) {
    ConstantInt *CI = dyn_cast<ConstantInt>(IncAmt->getLiveInIRValue());

    if (CI && CI->getZExtValue() == 1)
      MulCost = TTI::TCC_Free;
  }

  // Find the cost of the histogram operation itself.
  Type *PtrTy = VectorType::get(AddressTy, VF);
  Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
  IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
                              Type::getVoidTy(Ctx.LLVMCtx),
                              {PtrTy, IncTy, MaskTy});

  // Add the costs together with the add/sub operation.
  return Ctx.TTI.getIntrinsicInstrCost(ICA, Ctx.CostKind) + MulCost +
         Ctx.TTI.getArithmeticInstrCost(Opcode, VTy, Ctx.CostKind);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPHistogramRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-HISTOGRAM buckets: ";
  getOperand(0)->printAsOperand(O, SlotTracker);

  if (Opcode == Instruction::Sub)
    O << ", dec: ";
  else {
    assert(Opcode == Instruction::Add);
    O << ", inc: ";
  }
  getOperand(1)->printAsOperand(O, SlotTracker);

  if (VPValue *Mask = getMask()) {
    O << ", mask: ";
    Mask->printAsOperand(O, SlotTracker);
  }
}

void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-SELECT ";
  printAsOperand(O, SlotTracker);
  O << " = select ";
  printFlags(O);
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(2)->printAsOperand(O, SlotTracker);
  O << (isInvariantCond() ? " (condition is loop invariant)" : "");
}
#endif

1933 // The condition can be loop invariant but still defined inside the
1934 // loop. This means that we can't just use the original 'cond' value.
1935 // We have to take the 'vectorized' value and pick the first lane.
1936 // Instcombine will make this a no-op.
1937 Value *Cond = State.get(getCond(), isInvariantCond());
1938
1939 Value *Op0 = State.get(getOperand(1));
1940 Value *Op1 = State.get(getOperand(2));
1941 Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
1942 State.set(this, Sel);
1943 if (auto *I = dyn_cast<Instruction>(Sel)) {
1945 applyFlags(*I);
1946 applyMetadata(*I);
1947 }
1948}
1949
1951 VPCostContext &Ctx) const {
1953 bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
1954 Type *ScalarTy = Ctx.Types.inferScalarType(this);
1955 Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
1956
1957 VPValue *Op0, *Op1;
1958 using namespace llvm::VPlanPatternMatch;
1959 if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 &&
1960 (match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) ||
1961 match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) {
1962 // select x, y, false --> x & y
1963 // select x, true, y --> x | y
1964 const auto [Op1VK, Op1VP] = Ctx.getOperandInfo(Op0);
1965 const auto [Op2VK, Op2VP] = Ctx.getOperandInfo(Op1);
1966
1967 SmallVector<const Value *, 4> Operands;
1968 if (all_of(operands(),
1969 [](VPValue *Op) { return Op->getUnderlyingValue(); }))
1970 Operands.append(SI->op_begin(), SI->op_end());
1971 bool IsLogicalOr = match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1)));
1972 return Ctx.TTI.getArithmeticInstrCost(
1973 IsLogicalOr ? Instruction::Or : Instruction::And, VectorTy,
1974 Ctx.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, SI);
1975 }
1976
1977 Type *CondTy = Ctx.Types.inferScalarType(getOperand(0));
1978 if (!ScalarCond)
1979 CondTy = VectorType::get(CondTy, VF);
1980
1981 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
1982 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
1983 Pred = Cmp->getPredicate();
1984 return Ctx.TTI.getCmpSelInstrCost(
1985 Instruction::Select, VectorTy, CondTy, Pred, Ctx.CostKind,
1986 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, SI);
1987}
1988
1989VPIRFlags::FastMathFlagsTy::FastMathFlagsTy(const FastMathFlags &FMF) {
1990 AllowReassoc = FMF.allowReassoc();
1991 NoNaNs = FMF.noNaNs();
1992 NoInfs = FMF.noInfs();
1993 NoSignedZeros = FMF.noSignedZeros();
1994 AllowReciprocal = FMF.allowReciprocal();
1995 AllowContract = FMF.allowContract();
1996 ApproxFunc = FMF.approxFunc();
1997}
1998
1999#if !defined(NDEBUG)
2000bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
2001 switch (OpType) {
2002 case OperationType::OverflowingBinOp:
2003 return Opcode == Instruction::Add || Opcode == Instruction::Sub ||
2004 Opcode == Instruction::Mul ||
2005 Opcode == VPInstruction::CanonicalIVIncrementForPart;
2006 case OperationType::Trunc:
2007 return Opcode == Instruction::Trunc;
2008 case OperationType::DisjointOp:
2009 return Opcode == Instruction::Or;
2010 case OperationType::PossiblyExactOp:
2011 return Opcode == Instruction::AShr;
2012 case OperationType::GEPOp:
2013 return Opcode == Instruction::GetElementPtr ||
2014 Opcode == VPInstruction::PtrAdd ||
2015 Opcode == VPInstruction::WidePtrAdd;
2016 case OperationType::FPMathOp:
2017 return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
2018 Opcode == Instruction::FSub || Opcode == Instruction::FNeg ||
2019 Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
2020 Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc ||
2021 Opcode == Instruction::FCmp || Opcode == Instruction::Select ||
2022 Opcode == VPInstruction::WideIVStep ||
2023 Opcode == VPInstruction::ReductionStartVector ||
2024 Opcode == VPInstruction::ComputeReductionResult;
2025 case OperationType::NonNegOp:
2026 return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
2027 case OperationType::Cmp:
2028 return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
2029 case OperationType::Other:
2030 return true;
2031 }
2032 llvm_unreachable("Unknown OperationType enum");
2033}
2034#endif
2035
2036#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2037void VPIRFlags::printFlags(raw_ostream &O) const {
2038 switch (OpType) {
2039 case OperationType::Cmp:
2040 O << " " << CmpInst::getPredicateName(getPredicate());
2041 break;
2042 case OperationType::DisjointOp:
2043 if (DisjointFlags.IsDisjoint)
2044 O << " disjoint";
2045 break;
2046 case OperationType::PossiblyExactOp:
2047 if (ExactFlags.IsExact)
2048 O << " exact";
2049 break;
2050 case OperationType::OverflowingBinOp:
2051 if (WrapFlags.HasNUW)
2052 O << " nuw";
2053 if (WrapFlags.HasNSW)
2054 O << " nsw";
2055 break;
2056 case OperationType::Trunc:
2057 if (TruncFlags.HasNUW)
2058 O << " nuw";
2059 if (TruncFlags.HasNSW)
2060 O << " nsw";
2061 break;
2062 case OperationType::FPMathOp:
2063 getFastMathFlags().print(O);
2064 break;
2065 case OperationType::GEPOp:
2066 if (GEPFlags.isInBounds())
2067 O << " inbounds";
2068 else if (GEPFlags.hasNoUnsignedSignedWrap())
2069 O << " nusw";
2070 if (GEPFlags.hasNoUnsignedWrap())
2071 O << " nuw";
2072 break;
2073 case OperationType::NonNegOp:
2074 if (NonNegFlags.NonNeg)
2075 O << " nneg";
2076 break;
2077 case OperationType::Other:
2078 break;
2079 }
2080 O << " ";
2081}
2082#endif
2083
2084void VPWidenRecipe::execute(VPTransformState &State) {
2085 auto &Builder = State.Builder;
2086 switch (Opcode) {
2087 case Instruction::Call:
2088 case Instruction::Br:
2089 case Instruction::PHI:
2090 case Instruction::GetElementPtr:
2091 case Instruction::Select:
2092 llvm_unreachable("This instruction is handled by a different recipe.");
2093 case Instruction::UDiv:
2094 case Instruction::SDiv:
2095 case Instruction::SRem:
2096 case Instruction::URem:
2097 case Instruction::Add:
2098 case Instruction::FAdd:
2099 case Instruction::Sub:
2100 case Instruction::FSub:
2101 case Instruction::FNeg:
2102 case Instruction::Mul:
2103 case Instruction::FMul:
2104 case Instruction::FDiv:
2105 case Instruction::FRem:
2106 case Instruction::Shl:
2107 case Instruction::LShr:
2108 case Instruction::AShr:
2109 case Instruction::And:
2110 case Instruction::Or:
2111 case Instruction::Xor: {
2112 // Just widen unops and binops.
2113 SmallVector<Value *, 2> Ops;
2114 for (VPValue *VPOp : operands())
2115 Ops.push_back(State.get(VPOp));
2116
2117 Value *V = Builder.CreateNAryOp(Opcode, Ops);
2118
2119 if (auto *VecOp = dyn_cast<Instruction>(V)) {
2120 applyFlags(*VecOp);
2121 applyMetadata(*VecOp);
2122 }
2123
2124 // Use this vector value for all users of the original instruction.
2125 State.set(this, V);
2126 break;
2127 }
2128 case Instruction::ExtractValue: {
2129 assert(getNumOperands() == 2 && "expected single level extractvalue");
2130 Value *Op = State.get(getOperand(0));
2131 auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
2132 Value *Extract = Builder.CreateExtractValue(Op, CI->getZExtValue());
2133 State.set(this, Extract);
2134 break;
2135 }
2136 case Instruction::Freeze: {
2137 Value *Op = State.get(getOperand(0));
2138 Value *Freeze = Builder.CreateFreeze(Op);
2139 State.set(this, Freeze);
2140 break;
2141 }
2142 case Instruction::ICmp:
2143 case Instruction::FCmp: {
2144 // Widen compares. Generate vector compares.
2145 bool FCmp = Opcode == Instruction::FCmp;
2146 Value *A = State.get(getOperand(0));
2147 Value *B = State.get(getOperand(1));
2148 Value *C = nullptr;
2149 if (FCmp) {
2150 // Propagate fast math flags.
2151 C = Builder.CreateFCmpFMF(
2152 getPredicate(), A, B,
2153 hasFastMathFlags() ? getFastMathFlags() : FastMathFlags());
2154 } else {
2155 C = Builder.CreateICmp(getPredicate(), A, B);
2156 }
2157 if (auto *I = dyn_cast<Instruction>(C))
2158 applyMetadata(*I);
2159 State.set(this, C);
2160 break;
2161 }
2162 default:
2163 // This instruction is not vectorized by simple widening.
2164 LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode: "
2165 << Instruction::getOpcodeName(Opcode));
2166 llvm_unreachable("Unhandled instruction!");
2167 } // end of switch.
2168
2169#if !defined(NDEBUG)
2170 // Verify that VPlan type inference results agree with the type of the
2171 // generated values.
2172 assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
2173 State.get(this)->getType() &&
2174 "inferred type and type from generated instructions do not match");
2175#endif
2176}
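// A sketch of the widening performed above (illustrative, VF=4): a scalar
//   %r = add nsw i32 %a, %b
// becomes
//   %r.vec = add nsw <4 x i32> %a.vec, %b.vec
// with wrapping/FP flags and metadata transferred via applyFlags and
// applyMetadata.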
2177
2178InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
2179 VPCostContext &Ctx) const {
2180 switch (Opcode) {
2181 case Instruction::UDiv:
2182 case Instruction::SDiv:
2183 case Instruction::SRem:
2184 case Instruction::URem:
2185 // If the div/rem operation isn't safe to speculate and requires
2186 // predication, then the only way we can even create a vplan is to insert
2187 // a select on the second input operand to ensure we use the value of 1
2188 // for the inactive lanes. The select will be costed separately.
2189 case Instruction::FNeg:
2190 case Instruction::Add:
2191 case Instruction::FAdd:
2192 case Instruction::Sub:
2193 case Instruction::FSub:
2194 case Instruction::Mul:
2195 case Instruction::FMul:
2196 case Instruction::FDiv:
2197 case Instruction::FRem:
2198 case Instruction::Shl:
2199 case Instruction::LShr:
2200 case Instruction::AShr:
2201 case Instruction::And:
2202 case Instruction::Or:
2203 case Instruction::Xor:
2204 case Instruction::Freeze:
2205 case Instruction::ExtractValue:
2206 case Instruction::ICmp:
2207 case Instruction::FCmp:
2208 return getCostForRecipeWithOpcode(getOpcode(), VF, Ctx);
2209 default:
2210 llvm_unreachable("Unsupported opcode for instruction");
2211 }
2212}
2213
2214#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2215void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
2216 VPSlotTracker &SlotTracker) const {
2217 O << Indent << "WIDEN ";
2218 printAsOperand(O, SlotTracker);
2219 O << " = " << Instruction::getOpcodeName(Opcode);
2220 printFlags(O);
2221 printOperands(O, SlotTracker);
2222}
2223#endif
2224
2225void VPWidenCastRecipe::execute(VPTransformState &State) {
2226 auto &Builder = State.Builder;
2227 // Vectorize casts.
2228 assert(State.VF.isVector() && "Not vectorizing?");
2229 Type *DestTy = VectorType::get(getResultType(), State.VF);
2230 VPValue *Op = getOperand(0);
2231 Value *A = State.get(Op);
2232 Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
2233 State.set(this, Cast);
2234 if (auto *CastOp = dyn_cast<Instruction>(Cast)) {
2235 applyFlags(*CastOp);
2236 applyMetadata(*CastOp);
2237 }
2238}
2239
2240InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
2241 VPCostContext &Ctx) const {
2242 // TODO: In some cases, VPWidenCastRecipes are created but not considered in
2243 // the legacy cost model, including truncates/extends when evaluating a
2244 // reduction in a smaller type.
2245 if (!getUnderlyingValue())
2246 return 0;
2247 // Computes the CastContextHint from a recipe that may access memory.
2248 auto ComputeCCH = [&](const VPRecipeBase *R) -> TTI::CastContextHint {
2249 if (VF.isScalar())
2250 return TTI::CastContextHint::Normal;
2251 if (isa<VPInterleaveBase>(R))
2252 return TTI::CastContextHint::Interleave;
2253 if (const auto *ReplicateRecipe = dyn_cast<VPReplicateRecipe>(R))
2254 return ReplicateRecipe->isPredicated() ? TTI::CastContextHint::Masked
2255 : TTI::CastContextHint::Normal;
2256 const auto *WidenMemoryRecipe = dyn_cast<VPWidenMemoryRecipe>(R);
2257 if (WidenMemoryRecipe == nullptr)
2258 return TTI::CastContextHint::None;
2259 if (!WidenMemoryRecipe->isConsecutive())
2260 return TTI::CastContextHint::GatherScatter;
2261 if (WidenMemoryRecipe->isReverse())
2262 return TTI::CastContextHint::Reversed;
2263 if (WidenMemoryRecipe->isMasked())
2264 return TTI::CastContextHint::Masked;
2265 return TTI::CastContextHint::Normal;
2266 };
2267
2268 VPValue *Operand = getOperand(0);
2269 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2270 // For Trunc/FPTrunc, get the context from the only user.
2271 if ((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
2272 !hasMoreThanOneUniqueUser() && getNumUsers() > 0) {
2273 if (auto *StoreRecipe = dyn_cast<VPRecipeBase>(*user_begin()))
2274 CCH = ComputeCCH(StoreRecipe);
2275 }
2276 // For Z/Sext, get the context from the operand.
2277 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
2278 Opcode == Instruction::FPExt) {
2279 if (Operand->isLiveIn())
2280 CCH = TTI::CastContextHint::Normal;
2281 else if (Operand->getDefiningRecipe())
2282 CCH = ComputeCCH(Operand->getDefiningRecipe());
2283 }
2284
2285 auto *SrcTy =
2286 cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
2287 auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
2288 // Arm TTI will use the underlying instruction to determine the cost.
2289 return Ctx.TTI.getCastInstrCost(
2290 Opcode, DestTy, SrcTy, CCH, Ctx.CostKind,
2291 dyn_cast_if_present<Instruction>(getUnderlyingValue()));
2292}
2293
2294#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2295void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
2296 VPSlotTracker &SlotTracker) const {
2297 O << Indent << "WIDEN-CAST ";
2298 printAsOperand(O, SlotTracker);
2299 O << " = " << Instruction::getOpcodeName(Opcode);
2300 printFlags(O);
2301 printOperands(O, SlotTracker);
2302 O << " to " << *getResultType();
2303}
2304#endif
2305
2306InstructionCost VPHeaderPHIRecipe::computeCost(ElementCount VF,
2307 VPCostContext &Ctx) const {
2308 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2309}
2310
2311/// A helper function that returns an integer or floating-point constant with
2312/// value C.
2313static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
2314 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
2315 : ConstantFP::get(Ty, C);
2316}
2317
2318#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2319void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2320 VPSlotTracker &SlotTracker) const {
2321 O << Indent;
2322 printAsOperand(O, SlotTracker);
2323 O << " = WIDEN-INDUCTION ";
2324 printOperands(O, SlotTracker);
2325
2326 if (auto *TI = getTruncInst())
2327 O << " (truncated to " << *TI->getType() << ")";
2328}
2329#endif
2330
2331bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
2332 // The step may be defined by a recipe in the preheader (e.g. if it requires
2333 // SCEV expansion), but for the canonical induction the step is required to be
2334 // 1, which is represented as live-in.
2335 if (getStepValue()->getDefiningRecipe())
2336 return false;
2337 auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
2338 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
2339 auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
2340 return StartC && StartC->isZero() && StepC && StepC->isOne() &&
2341 getScalarType() == CanIV->getScalarType();
2342}
2343
2344#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2345void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
2346 VPSlotTracker &SlotTracker) const {
2347 O << Indent;
2348 printAsOperand(O, SlotTracker);
2349 O << " = DERIVED-IV ";
2350 getStartValue()->printAsOperand(O, SlotTracker);
2351 O << " + ";
2352 getOperand(1)->printAsOperand(O, SlotTracker);
2353 O << " * ";
2354 getStepValue()->printAsOperand(O, SlotTracker);
2355}
2356#endif
2357
2358void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
2359 // Fast-math-flags propagate from the original induction instruction.
2360 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
2361 if (hasFastMathFlags())
2362 State.Builder.setFastMathFlags(getFastMathFlags());
2363
2364 // Compute scalar induction steps, using the scalar induction variable
2365 // (operand 0) as the base and getStepValue() as the size of each step.
2366
2367 Value *BaseIV = State.get(getOperand(0), VPLane(0));
2368 Value *Step = State.get(getStepValue(), VPLane(0));
2369 IRBuilderBase &Builder = State.Builder;
2370
2371 // Ensure step has the same type as that of scalar IV.
2372 Type *BaseIVTy = BaseIV->getType()->getScalarType();
2373 assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
2374
2375 // We build scalar steps for both integer and floating-point induction
2376 // variables. Here, we determine the kind of arithmetic we will perform.
2377 Instruction::BinaryOps AddOp;
2378 Instruction::BinaryOps MulOp;
2379 if (BaseIVTy->isIntegerTy()) {
2380 AddOp = Instruction::Add;
2381 MulOp = Instruction::Mul;
2382 } else {
2383 AddOp = InductionOpcode;
2384 MulOp = Instruction::FMul;
2385 }
2386
2387 // Determine the number of scalars we need to generate for each unroll
2388 // iteration.
2389 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
2390 // Compute the scalar steps and save the results in State.
2391 Type *IntStepTy =
2392 IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
2393 Type *VecIVTy = nullptr;
2394 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2395 if (!FirstLaneOnly && State.VF.isScalable()) {
2396 VecIVTy = VectorType::get(BaseIVTy, State.VF);
2397 UnitStepVec =
2398 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2399 SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2400 SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
2401 }
2402
2403 unsigned StartLane = 0;
2404 unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2405 if (State.Lane) {
2406 StartLane = State.Lane->getKnownLane();
2407 EndLane = StartLane + 1;
2408 }
2409 Value *StartIdx0;
2410 if (getUnrollPart(*this) == 0)
2411 StartIdx0 = ConstantInt::get(IntStepTy, 0);
2412 else {
2413 StartIdx0 = State.get(getOperand(2), true);
2414 if (getUnrollPart(*this) != 1) {
2415 StartIdx0 =
2416 Builder.CreateMul(StartIdx0, ConstantInt::get(StartIdx0->getType(),
2417 getUnrollPart(*this)));
2418 }
2419 StartIdx0 = Builder.CreateSExtOrTrunc(StartIdx0, IntStepTy);
2420 }
2421
2422 if (!FirstLaneOnly && State.VF.isScalable()) {
2423 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2424 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2425 if (BaseIVTy->isFloatingPointTy())
2426 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2427 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2428 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2429 State.set(this, Add);
2430 // It's useful to record the lane values too for the known minimum number
2431 // of elements so we do those below. This improves the code quality when
2432 // trying to extract the first element, for example.
2433 }
2434
2435 if (BaseIVTy->isFloatingPointTy())
2436 StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
2437
2438 for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
2439 Value *StartIdx = Builder.CreateBinOp(
2440 AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
2441 // The step returned by `createStepForVF` is a runtime-evaluated value
2442 // when VF is scalable. Otherwise, it should be folded into a Constant.
2443 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2444 "Expected StartIdx to be folded to a constant when VF is not "
2445 "scalable");
2446 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2447 auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
2448 State.set(this, Add, VPLane(Lane));
2449 }
2450}
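// Worked example (illustrative): for unroll part 0, fixed VF=4, base IV %iv
// and step %s, the per-lane loop above produces
//   lane0 = %iv + 0 * %s, lane1 = %iv + 1 * %s,
//   lane2 = %iv + 2 * %s, lane3 = %iv + 3 * %s
// For a later unroll part, StartIdx0 shifts every lane by the accumulated
// element count of the earlier parts.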
2451
2452#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2453void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
2454 VPSlotTracker &SlotTracker) const {
2455 O << Indent;
2456 printAsOperand(O, SlotTracker);
2457 O << " = SCALAR-STEPS ";
2458 printOperands(O, SlotTracker);
2459}
2460#endif
2461
2462void VPWidenGEPRecipe::execute(VPTransformState &State) {
2463 assert(State.VF.isVector() && "not widening");
2464 // Construct a vector GEP by widening the operands of the scalar GEP as
2465 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
2466 // results in a vector of pointers when at least one operand of the GEP
2467 // is vector-typed. Thus, to keep the representation compact, we only use
2468 // vector-typed operands for loop-varying values.
2469
2470 if (areAllOperandsInvariant()) {
2471 // If we are vectorizing, but the GEP has only loop-invariant operands,
2472 // the GEP we build (by only using vector-typed operands for
2473 // loop-varying values) would be a scalar pointer. Thus, to ensure we
2474 // produce a vector of pointers, we need to either arbitrarily pick an
2475 // operand to broadcast, or broadcast a clone of the original GEP.
2476 // Here, we broadcast a clone of the original.
2477 //
2478 // TODO: If at some point we decide to scalarize instructions having
2479 // loop-invariant operands, this special case will no longer be
2480 // required. We would add the scalarization decision to
2481 // collectLoopScalars() and teach getVectorValue() to broadcast
2482 // the lane-zero scalar value.
2483 SmallVector<Value *> Ops;
2484 for (unsigned I = 0, E = getNumOperands(); I != E; I++)
2485 Ops.push_back(State.get(getOperand(I), VPLane(0)));
2486
2487 auto *NewGEP =
2488 State.Builder.CreateGEP(getSourceElementType(), Ops[0], drop_begin(Ops),
2489 "", getGEPNoWrapFlags());
2490 Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
2491 State.set(this, Splat);
2492 } else {
2493 // If the GEP has at least one loop-varying operand, we are sure to
2494 // produce a vector of pointers unless VF is scalar.
2495 // The pointer operand of the new GEP. If it's loop-invariant, we
2496 // won't broadcast it.
2497 auto *Ptr = State.get(getOperand(0), isPointerLoopInvariant());
2498
2499 // Collect all the indices for the new GEP. If any index is
2500 // loop-invariant, we won't broadcast it.
2501 SmallVector<Value *, 4> Indices;
2502 for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
2503 VPValue *Operand = getOperand(I);
2504 Indices.push_back(State.get(Operand, isIndexLoopInvariant(I - 1)));
2505 }
2506
2507 // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
2508 // but it should be a vector, otherwise.
2509 auto *NewGEP = State.Builder.CreateGEP(getSourceElementType(), Ptr, Indices,
2510 "", getGEPNoWrapFlags());
2511 assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
2512 "NewGEP is not a pointer vector");
2513 State.set(this, NewGEP);
2514 }
2515}
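// For the all-invariant case above (illustrative, VF=4), a GEP such as
//   %g = getelementptr inbounds i32, ptr %base, i64 %inv
// stays scalar and is then broadcast:
//   %g.ins = insertelement <4 x ptr> poison, ptr %g, i64 0
//   %g.vec = shufflevector <4 x ptr> %g.ins, <4 x ptr> poison,
//            <4 x i32> zeroinitializer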
2516
2517#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2518void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
2519 VPSlotTracker &SlotTracker) const {
2520 O << Indent << "WIDEN-GEP ";
2521 O << (isPointerLoopInvariant() ? "Inv" : "Var");
2522 for (size_t I = 0; I < getNumOperands() - 1; ++I)
2523 O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
2524
2525 O << " ";
2526 printAsOperand(O, SlotTracker);
2527 O << " = getelementptr";
2528 printFlags(O);
2529 printOperands(O, SlotTracker);
2530}
2531#endif
2532
2533static Type *getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride,
2534 unsigned CurrentPart, IRBuilderBase &Builder) {
2535 // Use i32 for the gep index type when the value is constant,
2536 // or query DataLayout for a more suitable index type otherwise.
2537 const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
2538 return !IsUnitStride || (IsScalable && (IsReverse || CurrentPart > 0))
2539 ? DL.getIndexType(Builder.getPtrTy(0))
2540 : Builder.getInt32Ty();
2541}
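// E.g. (illustrative): a unit-stride access for part 0 of a fixed VF gets a
// constant i32 index, while non-unit strides, or scalable VFs that are
// reversed or beyond part 0, query DataLayout (typically i64 on 64-bit
// targets) so the runtime index arithmetic has a wide enough type.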
2542
2543void VPVectorEndPointerRecipe::execute(VPTransformState &State) {
2544 auto &Builder = State.Builder;
2545 unsigned CurrentPart = getUnrollPart(*this);
2546 bool IsUnitStride = Stride == 1 || Stride == -1;
2547 Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ true,
2548 IsUnitStride, CurrentPart, Builder);
2549
2550 // The wide store needs to start at the last vector element.
2551 Value *RunTimeVF = State.get(getVFValue(), VPLane(0));
2552 if (IndexTy != RunTimeVF->getType())
2553 RunTimeVF = Builder.CreateZExtOrTrunc(RunTimeVF, IndexTy);
2554 // NumElt = Stride * CurrentPart * RunTimeVF
2555 Value *NumElt = Builder.CreateMul(
2556 ConstantInt::get(IndexTy, Stride * (int64_t)CurrentPart), RunTimeVF);
2557 // LastLane = Stride * (RunTimeVF - 1)
2558 Value *LastLane = Builder.CreateSub(RunTimeVF, ConstantInt::get(IndexTy, 1));
2559 if (Stride != 1)
2560 LastLane = Builder.CreateMul(ConstantInt::get(IndexTy, Stride), LastLane);
2561 Value *Ptr = State.get(getOperand(0), VPLane(0));
2562 Value *ResultPtr =
2563 Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", getGEPNoWrapFlags());
2564 ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
2565 getGEPNoWrapFlags());
2566
2567 State.set(this, ResultPtr, /*IsScalar*/ true);
2568}
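// Worked example (illustrative): for a reverse, unit-stride access with
// Stride=-1, fixed VF=4 and CurrentPart=0, NumElt = -1 * 0 * 4 = 0 and
// LastLane = -1 * (4 - 1) = -3, so the emitted GEPs address the last element
// of the first vector:
//   %p0 = getelementptr i32, ptr %ptr, i32 0
//   %p  = getelementptr i32, ptr %p0, i32 -3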
2569
2570#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2571void VPVectorEndPointerRecipe::print(raw_ostream &O, const Twine &Indent,
2572 VPSlotTracker &SlotTracker) const {
2573 O << Indent;
2574 printAsOperand(O, SlotTracker);
2575 O << " = vector-end-pointer";
2576 printFlags(O);
2577 printOperands(O, SlotTracker);
2578}
2579#endif
2580
2581void VPVectorPointerRecipe::execute(VPTransformState &State) {
2582 auto &Builder = State.Builder;
2583 unsigned CurrentPart = getUnrollPart(*this);
2584 Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
2585 /*IsUnitStride*/ true, CurrentPart, Builder);
2586 Value *Ptr = State.get(getOperand(0), VPLane(0));
2587
2588 Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
2589 Value *ResultPtr = Builder.CreateGEP(getSourceElementType(), Ptr, Increment,
2590 "", getGEPNoWrapFlags());
2591
2592 State.set(this, ResultPtr, /*IsScalar*/ true);
2593}
2594
2595#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2596void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
2597 VPSlotTracker &SlotTracker) const {
2598 O << Indent;
2599 printAsOperand(O, SlotTracker);
2600 O << " = vector-pointer ";
2601
2602 printOperands(O, SlotTracker);
2603}
2604#endif
2605
2606InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
2607 VPCostContext &Ctx) const {
2608 // Handle cases where only the first lane is used the same way as the legacy
2609 // cost model.
2610 if (vputils::onlyFirstLaneUsed(this))
2611 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
2612
2613 Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
2614 Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
2615 return (getNumIncomingValues() - 1) *
2616 Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
2617 CmpInst::BAD_ICMP_PREDICATE, Ctx.CostKind);
2618}
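// A blend of N incoming values is modelled as N-1 chained selects, e.g.
// (illustrative, N=3):
//   %b1 = select <4 x i1> %m2, <4 x i32> %v2, <4 x i32> %v1
//   %b  = select <4 x i1> %m3, <4 x i32> %v3, <4 x i32> %b1
// which is where the (getNumIncomingValues() - 1) * select-cost term above
// comes from.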
2619
2620#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2621void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
2622 VPSlotTracker &SlotTracker) const {
2623 O << Indent << "BLEND ";
2624 printAsOperand(O, SlotTracker);
2625 O << " =";
2626 if (getNumIncomingValues() == 1) {
2627 // Not a User of any mask: not really blending, this is a
2628 // single-predecessor phi.
2629 O << " ";
2630 getIncomingValue(0)->printAsOperand(O, SlotTracker);
2631 } else {
2632 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
2633 O << " ";
2634 getIncomingValue(I)->printAsOperand(O, SlotTracker);
2635 if (I == 0)
2636 continue;
2637 O << "/";
2638 getMask(I)->printAsOperand(O, SlotTracker);
2639 }
2640 }
2641}
2642#endif
2643
2644void VPReductionRecipe::execute(VPTransformState &State) {
2645 assert(!State.Lane && "Reduction being replicated.");
2646 Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2647 RecurKind Kind = getRecurrenceKind();
2648 assert(!RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
2649 "In-loop AnyOf reductions aren't currently supported");
2650 // Propagate the fast-math flags carried by the underlying instruction.
2651 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
2652 State.Builder.setFastMathFlags(getFastMathFlags());
2653 Value *NewVecOp = State.get(getVecOp());
2654 if (VPValue *Cond = getCondOp()) {
2655 Value *NewCond = State.get(Cond, State.VF.isScalar());
2656 VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
2657 Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
2658
2659 Value *Start = getRecurrenceIdentity(Kind, ElementTy, getFastMathFlags());
2660 if (State.VF.isVector())
2661 Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
2662
2663 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
2664 NewVecOp = Select;
2665 }
2666 Value *NewRed;
2667 Value *NextInChain;
2668 if (IsOrdered) {
2669 if (State.VF.isVector())
2670 NewRed =
2671 createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain);
2672 else
2673 NewRed = State.Builder.CreateBinOp(
2674 (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
2675 PrevInChain, NewVecOp);
2676 PrevInChain = NewRed;
2677 NextInChain = NewRed;
2678 } else {
2679 PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
2680 NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind);
2681 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2682 NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
2683 else
2684 NextInChain = State.Builder.CreateBinOp(
2685 (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
2686 PrevInChain, NewRed);
2687 }
2688 State.set(this, NextInChain, /*IsScalar*/ true);
2689}
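// For a conditional in-loop add reduction (illustrative), the select above
// substitutes the identity for inactive lanes before reducing:
//   %sel  = select <4 x i1> %cond, <4 x i32> %vec, <4 x i32> zeroinitializer
//   %red  = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sel)
//   %next = add i32 %chain, %red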
2690
2691void VPReductionEVLRecipe::execute(VPTransformState &State) {
2692 assert(!State.Lane && "Reduction being replicated.");
2693
2694 auto &Builder = State.Builder;
2695 // Propagate the fast-math flags carried by the underlying instruction.
2696 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
2697 Builder.setFastMathFlags(getFastMathFlags());
2698
2699 RecurKind Kind = getRecurrenceKind();
2700 Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
2701 Value *VecOp = State.get(getVecOp());
2702 Value *EVL = State.get(getEVL(), VPLane(0));
2703
2704 Value *Mask;
2705 if (VPValue *CondOp = getCondOp())
2706 Mask = State.get(CondOp);
2707 else
2708 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2709
2710 Value *NewRed;
2711 if (isOrdered()) {
2712 NewRed = createOrderedReduction(Builder, Kind, VecOp, Prev, Mask, EVL);
2713 } else {
2714 NewRed = createSimpleReduction(Builder, VecOp, Kind, Mask, EVL);
2715 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2716 NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2717 else
2718 NewRed = Builder.CreateBinOp(
2719 (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), NewRed,
2720 Prev);
2721 }
2722 State.set(this, NewRed, /*IsScalar*/ true);
2723}
2724
2725InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
2726 VPCostContext &Ctx) const {
2727 RecurKind RdxKind = getRecurrenceKind();
2728 Type *ElementTy = Ctx.Types.inferScalarType(this);
2729 auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
2730 unsigned Opcode = RecurrenceDescriptor::getOpcode(RdxKind);
2731 FastMathFlags FMFs = getFastMathFlags();
2732 std::optional<FastMathFlags> OptionalFMF =
2733 ElementTy->isFloatingPointTy() ? std::make_optional(FMFs) : std::nullopt;
2734
2735 // TODO: Support any-of reductions.
2736 assert(
2737 (!RecurrenceDescriptor::isAnyOfRecurrenceKind(RdxKind) ||
2738 ForceTargetInstructionCost.getNumOccurrences() > 0) &&
2739 "Any-of reduction not implemented in VPlan-based cost model currently.");
2740
2741 // Note that TTI should model the cost of moving the result to the scalar
2742 // register and the BinOp cost in getMinMaxReductionCost().
2743 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind)) {
2744 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind);
2745 return Ctx.TTI.getMinMaxReductionCost(Id, VectorTy, FMFs, Ctx.CostKind);
2746 }
2747
2748 // Note that TTI should model the cost of moving the result to the scalar
2749 // register and the BinOp cost in getArithmeticReductionCost().
2750 return Ctx.TTI.getArithmeticReductionCost(Opcode, VectorTy, OptionalFMF,
2751 Ctx.CostKind);
2752}
2753
2754VPExpressionRecipe::VPExpressionRecipe(
2755 ExpressionTypes ExpressionType,
2756 ArrayRef<VPSingleDefRecipe *> ExpressionRecipes)
2757 : VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}),
2758 ExpressionRecipes(SetVector<VPSingleDefRecipe *>(
2759 ExpressionRecipes.begin(), ExpressionRecipes.end())
2760 .takeVector()),
2761 ExpressionType(ExpressionType) {
2762 assert(!ExpressionRecipes.empty() && "Nothing to combine?");
2763 assert(
2764 none_of(ExpressionRecipes,
2765 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2766 "expression cannot contain recipes with side-effects");
2767
2768 // Maintain a copy of the expression recipes as a set of users.
2769 SmallPtrSet<VPUser *, 4> ExpressionRecipesAsSetOfUsers;
2770 for (auto *R : ExpressionRecipes)
2771 ExpressionRecipesAsSetOfUsers.insert(R);
2772
2773 // Recipes in the expression, except the last one, must only be used by
2774 // (other) recipes inside the expression. If there are other users, external
2775 // to the expression, use a clone of the recipe for external users.
2776 for (VPSingleDefRecipe *R : ExpressionRecipes) {
2777 if (R != ExpressionRecipes.back() &&
2778 any_of(R->users(), [&ExpressionRecipesAsSetOfUsers](VPUser *U) {
2779 return !ExpressionRecipesAsSetOfUsers.contains(U);
2780 })) {
2781 // There are users outside of the expression. Clone the recipe and use the
2782 // clone for those external users.
2783 VPSingleDefRecipe *CopyForExtUsers = R->clone();
2784 R->replaceUsesWithIf(CopyForExtUsers, [&ExpressionRecipesAsSetOfUsers](
2785 VPUser &U, unsigned) {
2786 return !ExpressionRecipesAsSetOfUsers.contains(&U);
2787 });
2788 CopyForExtUsers->insertBefore(R);
2789 }
2790 if (R->getParent())
2791 R->removeFromParent();
2792 }
2793
2794 // Internalize all external operands to the expression recipes. To do so,
2795 // create new temporary VPValues for all operands defined by a recipe outside
2796 // the expression. The original operands are added as operands of the
2797 // VPExpressionRecipe itself.
2798 for (auto *R : ExpressionRecipes) {
2799 for (const auto &[Idx, Op] : enumerate(R->operands())) {
2800 auto *Def = Op->getDefiningRecipe();
2801 if (Def && ExpressionRecipesAsSetOfUsers.contains(Def))
2802 continue;
2803 addOperand(Op);
2804 LiveInPlaceholders.push_back(new VPValue());
2805 R->setOperand(Idx, LiveInPlaceholders.back());
2806 }
2807 }
2808}
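// For example (illustrative): bundling zext, mul and an add reduction into
// one expression internalizes the zext/mul results, replaces their external
// operands with placeholder VPValues, and records the originals as operands
// of the VPExpressionRecipe, so the whole chain can be costed as a single
// unit (and decomposed again if the target has no combined support).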
2809
2810void VPExpressionRecipe::decompose() {
2811 for (auto *R : ExpressionRecipes)
2812 R->insertBefore(this);
2813
2814 for (const auto &[Idx, Op] : enumerate(operands()))
2815 LiveInPlaceholders[Idx]->replaceAllUsesWith(Op);
2816
2817 replaceAllUsesWith(ExpressionRecipes.back());
2818 ExpressionRecipes.clear();
2819}
2820
2821InstructionCost VPExpressionRecipe::computeCost(ElementCount VF,
2822 VPCostContext &Ctx) const {
2823 Type *RedTy = Ctx.Types.inferScalarType(this);
2824 auto *SrcVecTy = cast<VectorType>(
2825 toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF));
2826 assert(RedTy->isIntegerTy() &&
2827 "VPExpressionRecipe only supports integer types currently.");
2828 unsigned Opcode = RecurrenceDescriptor::getOpcode(
2829 cast<VPReductionRecipe>(ExpressionRecipes.back())->getRecurrenceKind());
2830 switch (ExpressionType) {
2831 case ExpressionTypes::ExtendedReduction: {
2832 return Ctx.TTI.getExtendedReductionCost(
2833 Opcode,
2834 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
2835 Instruction::ZExt,
2836 RedTy, SrcVecTy, std::nullopt, Ctx.CostKind);
2837 }
2838 case ExpressionTypes::MulAccReduction:
2839 return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
2840 Ctx.CostKind);
2841
2842 case ExpressionTypes::ExtNegatedMulAccReduction:
2843 assert(Opcode == Instruction::Add && "Unexpected opcode");
2844 Opcode = Instruction::Sub;
2845 [[fallthrough]];
2846 case ExpressionTypes::ExtMulAccReduction: {
2847 return Ctx.TTI.getMulAccReductionCost(
2848 cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
2849 Instruction::ZExt,
2850 Opcode, RedTy, SrcVecTy, Ctx.CostKind);
2851 }
2852 }
2853 llvm_unreachable("Unknown VPExpressionRecipe::ExpressionTypes enum");
2854}
2855
2856bool VPExpressionRecipe::mayReadOrWriteMemory() const {
2857 return any_of(ExpressionRecipes, [](VPSingleDefRecipe *R) {
2858 return R->mayReadFromMemory() || R->mayWriteToMemory();
2859 });
2860}
2861
2862bool VPExpressionRecipe::mayHaveSideEffects() const {
2863 assert(
2864 none_of(ExpressionRecipes,
2865 [](VPSingleDefRecipe *R) { return R->mayHaveSideEffects(); }) &&
2866 "expression cannot contain recipes with side-effects");
2867 return false;
2868}
2869
2870#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2871
2872void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent,
2873 VPSlotTracker &SlotTracker) const {
2874 O << Indent << "EXPRESSION ";
2875 printAsOperand(O, SlotTracker);
2876 O << " = ";
2877 auto *Red = cast<VPReductionRecipe>(ExpressionRecipes.back());
2878 unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
2879
2880 switch (ExpressionType) {
2881 case ExpressionTypes::ExtendedReduction: {
2882 getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
2883 O << " +";
2884 O << " reduce." << Instruction::getOpcodeName(Opcode) << " (";
2885 getOperand(0)->printAsOperand(O, SlotTracker);
2886 Red->printFlags(O);
2887
2888 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2889 O << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2890 << *Ext0->getResultType();
2891 if (Red->isConditional()) {
2892 O << ", ";
2893 Red->getCondOp()->printAsOperand(O, SlotTracker);
2894 }
2895 O << ")";
2896 break;
2897 }
2898 case ExpressionTypes::ExtNegatedMulAccReduction: {
2899 getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
2900 O << " + reduce."
2901 << Instruction::getOpcodeName(
2902 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
2903 << " (sub (0, mul";
2904 auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
2905 Mul->printFlags(O);
2906 O << "(";
2907 getOperand(0)->printAsOperand(O, SlotTracker);
2908 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2909 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2910 << *Ext0->getResultType() << "), (";
2911 getOperand(1)->printAsOperand(O, SlotTracker);
2912 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
2913 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
2914 << *Ext1->getResultType() << ")";
2915 if (Red->isConditional()) {
2916 O << ", ";
2917 Red->getCondOp()->printAsOperand(O, SlotTracker);
2918 }
2919 O << "))";
2920 break;
2921 }
2922 case ExpressionTypes::MulAccReduction:
2923 case ExpressionTypes::ExtMulAccReduction: {
2924 getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
2925 O << " + ";
2926 O << "reduce."
2927 << Instruction::getOpcodeName(
2928 RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
2929 << " (";
2930 O << "mul";
2931 bool IsExtended = ExpressionType == ExpressionTypes::ExtMulAccReduction;
2932 auto *Mul = cast<VPWidenRecipe>(IsExtended ? ExpressionRecipes[2]
2933 : ExpressionRecipes[0]);
2934 Mul->printFlags(O);
2935 if (IsExtended)
2936 O << "(";
2937 getOperand(0)->printAsOperand(O, SlotTracker);
2938 if (IsExtended) {
2939 auto *Ext0 = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
2940 O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to "
2941 << *Ext0->getResultType() << "), (";
2942 } else {
2943 O << ", ";
2944 }
2945 getOperand(1)->printAsOperand(O, SlotTracker);
2946 if (IsExtended) {
2947 auto *Ext1 = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
2948 O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to "
2949 << *Ext1->getResultType() << ")";
2950 }
2951 if (Red->isConditional()) {
2952 O << ", ";
2953 Red->getCondOp()->printAsOperand(O, SlotTracker);
2954 }
2955 O << ")";
2956 break;
2957 }
2958 }
2959}
2960
2961void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
2962 VPSlotTracker &SlotTracker) const {
2963 O << Indent << "REDUCE ";
2964 printAsOperand(O, SlotTracker);
2965 O << " = ";
2966 getChainOp()->printAsOperand(O, SlotTracker);
2967 O << " +";
2968 printFlags(O);
2969 O << " reduce."
2970 << Instruction::getOpcodeName(
2971 RecurrenceDescriptor::getOpcode(getRecurrenceKind()))
2972 << " (";
2973 getVecOp()->printAsOperand(O, SlotTracker);
2974 if (isConditional()) {
2975 O << ", ";
2976 getCondOp()->printAsOperand(O, SlotTracker);
2977 }
2978 O << ")";
2979}
2980
2981void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2982 VPSlotTracker &SlotTracker) const {
2983 O << Indent << "REDUCE ";
2984 printAsOperand(O, SlotTracker);
2985 O << " = ";
2986 getChainOp()->printAsOperand(O, SlotTracker);
2987 O << " +";
2988 printFlags(O);
2989 O << " vp.reduce."
2990 << Instruction::getOpcodeName(
2991 RecurrenceDescriptor::getOpcode(getRecurrenceKind()))
2992 << " (";
2993 getVecOp()->printAsOperand(O, SlotTracker);
2994 O << ", ";
2995 getEVL()->printAsOperand(O, SlotTracker);
2996 if (isConditional()) {
2997 O << ", ";
2998 getCondOp()->printAsOperand(O, SlotTracker);
2999 }
3000 O << ")";
3001}
3002
3003#endif
3004
3005/// A helper function to scalarize a single Instruction in the innermost loop.
3006/// Generates a sequence of scalar instances for lane \p Lane. Uses the VPValue
3007/// operands from \p RepRecipe instead of \p Instr's operands.
3008static void scalarizeInstruction(const Instruction *Instr,
3009 VPReplicateRecipe *RepRecipe,
3010 const VPLane &Lane, VPTransformState &State) {
3011 assert((!Instr->getType()->isAggregateType() ||
3012 canVectorizeTy(Instr->getType())) &&
3013 "Expected vectorizable or non-aggregate type.");
3014
3015 // Does this instruction return a value?
3016 bool IsVoidRetTy = Instr->getType()->isVoidTy();
3017
3018 Instruction *Cloned = Instr->clone();
3019 if (!IsVoidRetTy) {
3020 Cloned->setName(Instr->getName() + ".cloned");
3021 Type *ResultTy = State.TypeAnalysis.inferScalarType(RepRecipe);
3022 // The operands of the replicate recipe may have been narrowed, resulting in
3023 // a narrower result type. Update the type of the cloned instruction to the
3024 // correct type.
3025 if (ResultTy != Cloned->getType())
3026 Cloned->mutateType(ResultTy);
3027 }
3028
3029 RepRecipe->applyFlags(*Cloned);
3030 RepRecipe->applyMetadata(*Cloned);
3031
3032 if (RepRecipe->hasPredicate())
3033 cast<CmpInst>(Cloned)->setPredicate(RepRecipe->getPredicate());
3034
3035 if (auto DL = RepRecipe->getDebugLoc())
3036 State.setDebugLocFrom(DL);
3037
3038 // Replace the operands of the cloned instructions with their scalar
3039 // equivalents in the new loop.
3040 for (const auto &I : enumerate(RepRecipe->operands())) {
3041 auto InputLane = Lane;
3042 VPValue *Operand = I.value();
3043 if (vputils::isSingleScalar(Operand))
3044 InputLane = VPLane::getFirstLane();
3045 Cloned->setOperand(I.index(), State.get(Operand, InputLane));
3046 }
3047
3048 // Place the cloned scalar in the new loop.
3049 State.Builder.Insert(Cloned);
3050
3051 State.set(RepRecipe, Cloned, Lane);
3052
3053 // If we just cloned a new assumption, add it the assumption cache.
3054 if (auto *II = dyn_cast<AssumeInst>(Cloned))
3055 State.AC->registerAssumption(II);
3056
3057 assert(
3058 (RepRecipe->getParent()->getParent() ||
3059 !RepRecipe->getParent()->getPlan()->getVectorLoopRegion() ||
3060 all_of(RepRecipe->operands(),
3061 [](VPValue *Op) { return Op->isDefinedOutsideLoopRegions(); })) &&
3062 "Expected the recipe to be within a region, or all of its operands "
3063 "to be defined outside the vectorized region.");
3064}
3065
3066void VPReplicateRecipe::execute(VPTransformState &State) {
3067 Instruction *UI = getUnderlyingInstr();
3068
3069 if (!State.Lane) {
3070 assert(IsSingleScalar && "VPReplicateRecipes outside replicate regions "
3071 "must have already been unrolled");
3072 scalarizeInstruction(UI, this, VPLane(0), State);
3073 return;
3074 }
3075
3076 assert((State.VF.isScalar() || !isSingleScalar()) &&
3077 "uniform recipe shouldn't be predicated");
3078 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
3079 scalarizeInstruction(UI, this, *State.Lane, State);
3080 // Insert scalar instance packing it into a vector.
3081 if (State.VF.isVector() && shouldPack()) {
3082 Value *WideValue =
3083 State.Lane->isFirstLane()
3084 ? PoisonValue::get(toVectorizedTy(UI->getType(), State.VF))
3085 : State.get(this);
3086 State.set(this, State.packScalarIntoVectorizedValue(this, WideValue,
3087 *State.Lane));
3088 }
3089}
3090
3091bool VPReplicateRecipe::shouldPack() const {
3092 // Find if the recipe is used by a widened recipe via an intervening
3093 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
3094 return any_of(users(), [](const VPUser *U) {
3095 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
3096 return !vputils::onlyScalarValuesUsed(PredR);
3097 return false;
3098 });
3099}
3100
3101/// Returns true if \p Ptr is a pointer computation for which the legacy cost
3102/// model computes a SCEV expression when computing the address cost.
3103static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) {
3104 auto *PtrR = Ptr->getDefiningRecipe();
3105 if (!PtrR || !((isa<VPReplicateRecipe>(PtrR) &&
3106 cast<VPReplicateRecipe>(PtrR)->getOpcode() ==
3107 Instruction::GetElementPtr) ||
3108 isa<VPWidenGEPRecipe>(PtrR)))
3109 return false;
3110
3111 // We are looking for a GEP where all indices are either loop invariant or
3112 // inductions.
3113 for (VPValue *Opd : drop_begin(PtrR->operands())) {
3114 if (!Opd->isDefinedOutsideLoopRegions() &&
3115 !isa<VPScalarIVStepsRecipe, VPWidenIntOrFpInductionRecipe>(Opd))
3116 return false;
3117 }
3118
3119 return true;
3120}
3121
3122/// Returns true if \p V is used as part of the address of another load or
3123/// store.
3124static bool isUsedByLoadStoreAddress(const VPUser *V) {
3125 SmallPtrSet<const VPUser *, 4> Seen;
3126 SmallVector<const VPUser *> WorkList = {V};
3127
3128 while (!WorkList.empty()) {
3129 auto *Cur = dyn_cast<VPSingleDefRecipe>(WorkList.pop_back_val());
3130 if (!Cur || !Seen.insert(Cur).second)
3131 continue;
3132
3133 for (VPUser *U : Cur->users()) {
3134 if (auto *InterleaveR = dyn_cast<VPInterleaveBase>(U))
3135 if (InterleaveR->getAddr() == Cur)
3136 return true;
3137 if (auto *RepR = dyn_cast<VPReplicateRecipe>(U)) {
3138 if (RepR->getOpcode() == Instruction::Load &&
3139 RepR->getOperand(0) == Cur)
3140 return true;
3141 if (RepR->getOpcode() == Instruction::Store &&
3142 RepR->getOperand(1) == Cur)
3143 return true;
3144 }
3145 if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) {
3146 if (MemR->getAddr() == Cur && MemR->isConsecutive())
3147 return true;
3148 }
3149 }
3150
3151 append_range(WorkList, cast<VPSingleDefRecipe>(Cur)->users());
3152 }
3153 return false;
3154}
3155
3156InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
3157 VPCostContext &Ctx) const {
3158 Instruction *UI = cast<Instruction>(getUnderlyingValue());
3159 // VPReplicateRecipes may be cloned as part of an existing VPlan-to-VPlan
3160 // transform; avoid computing their cost multiple times for now.
3161 Ctx.SkipCostComputation.insert(UI);
3162
3163 switch (UI->getOpcode()) {
3164 case Instruction::GetElementPtr:
3165 // We mark this instruction as zero-cost because the cost of GEPs in
3166 // vectorized code depends on whether the corresponding memory instruction
3167 // is scalarized or not. Therefore, we handle GEPs with the memory
3168 // instruction cost.
3169 return 0;
3170 case Instruction::Call: {
3171 auto *CalledFn =
3172 cast<Function>(getOperand(getNumOperands() - 1)->getLiveInIRValue());
3173
3174 SmallVector<const VPValue *> ArgOps(op_begin(), op_begin() + getNumOperands() - 1);
3175 SmallVector<Type *> Tys;
3176 for (const VPValue *ArgOp : ArgOps)
3177 Tys.push_back(Ctx.Types.inferScalarType(ArgOp));
3178
3179 if (CalledFn->isIntrinsic())
3180 // Various pseudo-intrinsics with costs of 0 are scalarized instead of
3181 // vectorized via VPWidenIntrinsicRecipe. Return 0 for them early.
3182 switch (CalledFn->getIntrinsicID()) {
3183 case Intrinsic::assume:
3184 case Intrinsic::lifetime_end:
3185 case Intrinsic::lifetime_start:
3186 case Intrinsic::sideeffect:
3187 case Intrinsic::pseudoprobe:
3188 case Intrinsic::experimental_noalias_scope_decl: {
3189 assert(getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3190 ElementCount::getFixed(1), Ctx) == 0 &&
3191 "scalarizing intrinsic should be free");
3192 return InstructionCost(0);
3193 }
3194 default:
3195 break;
3196 }
3197
3198 Type *ResultTy = Ctx.Types.inferScalarType(this);
3199 InstructionCost ScalarCallCost =
3200 Ctx.TTI.getCallInstrCost(CalledFn, ResultTy, Tys, Ctx.CostKind);
3201 if (isSingleScalar()) {
3202 if (CalledFn->isIntrinsic())
3203 ScalarCallCost = std::min(
3204 ScalarCallCost,
3205 getCostForIntrinsics(CalledFn->getIntrinsicID(), ArgOps, *this,
3206 ElementCount::getFixed(1), Ctx));
3207 return ScalarCallCost;
3208 }
3209
3210 if (VF.isScalable())
3211 return InstructionCost::getInvalid();
3212
3213 return ScalarCallCost * VF.getFixedValue() +
3214 Ctx.getScalarizationOverhead(ResultTy, ArgOps, VF);
3215 }
3216 case Instruction::Add:
3217 case Instruction::Sub:
3218 case Instruction::FAdd:
3219 case Instruction::FSub:
3220 case Instruction::Mul:
3221 case Instruction::FMul:
3222 case Instruction::FDiv:
3223 case Instruction::FRem:
3224 case Instruction::Shl:
3225 case Instruction::LShr:
3226 case Instruction::AShr:
3227 case Instruction::And:
3228 case Instruction::Or:
3229 case Instruction::Xor:
3230 case Instruction::ICmp:
3231 case Instruction::FCmp:
3232 return getCostForRecipeWithOpcode(UI->getOpcode(), ElementCount::getFixed(1),
3233 Ctx) *
3234 (isSingleScalar() ? 1 : VF.getFixedValue());
3235 case Instruction::SDiv:
3236 case Instruction::UDiv:
3237 case Instruction::SRem:
3238 case Instruction::URem: {
3239 InstructionCost ScalarCost =
3240 getCostForRecipeWithOpcode(UI->getOpcode(), ElementCount::getFixed(1), Ctx);
3241 if (isSingleScalar())
3242 return ScalarCost;
3243
3244 ScalarCost = ScalarCost * VF.getFixedValue() +
3245 Ctx.getScalarizationOverhead(Ctx.Types.inferScalarType(this),
3246 to_vector(operands()), VF);
3247 // If the recipe is not predicated (i.e. not in a replicate region), return
3248 // the scalar cost. Otherwise handle predicated cost.
3249 if (!getParent()->getParent()->isReplicator())
3250 return ScalarCost;
3251
3252 // Account for the phi nodes that we will create.
3253 ScalarCost += VF.getFixedValue() *
3254 Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
3255 // Scale the cost by the probability of executing the predicated blocks.
3256 // This assumes the predicated block for each vector lane is equally
3257 // likely.
3258 ScalarCost /= getPredBlockCostDivisor(Ctx.CostKind);
3259 return ScalarCost;
3260 }
3261 case Instruction::Load:
3262 case Instruction::Store: {
3263 if (VF.isScalable() && !isSingleScalar())
3264 return InstructionCost::getInvalid();
3265
3266 // TODO: See getMemInstScalarizationCost for how to handle replicating and
3267 // predicated cases.
3268 const VPRegionBlock *ParentRegion = getParent()->getParent();
3269 if (ParentRegion && ParentRegion->isReplicator())
3270 break;
3271
3272 bool IsLoad = UI->getOpcode() == Instruction::Load;
3273 const VPValue *PtrOp = getOperand(!IsLoad);
3274 // TODO: Handle cases where we need to pass a SCEV to
3275 // getAddressComputationCost.
3276 if (shouldUseAddressAccessSCEV(PtrOp))
3277 break;
3278
3279 Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0));
3280 Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp);
3281 const Align Alignment = getLoadStoreAlignment(UI);
3282 unsigned AS = getLoadStoreAddressSpace(UI);
3283 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0));
3284 InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost(
3285 UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
3286
3287 Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
3288
3289 InstructionCost ScalarCost =
3290 ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
3291 PtrTy, &Ctx.SE, nullptr, Ctx.CostKind);
3292 if (isSingleScalar())
3293 return ScalarCost;
3294
3295 SmallVector<const VPValue *> OpsToScalarize;
3296 Type *ResultTy = Type::getVoidTy(PtrTy->getContext());
3297 // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we
3298 // don't assign scalarization overhead in general, if the target prefers
3299 // vectorized addressing or the loaded value is used as part of an address
3300 // of another load or store.
3301 bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
3302 if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) {
3303 bool EfficientVectorLoadStore =
3304 Ctx.TTI.supportsEfficientVectorElementLoadStore();
3305 if (!(IsLoad && !PreferVectorizedAddressing) &&
3306 !(!IsLoad && EfficientVectorLoadStore))
3307 append_range(OpsToScalarize, operands());
3308
3309 if (!EfficientVectorLoadStore)
3310 ResultTy = Ctx.Types.inferScalarType(this);
3311 }
3312
3313 return (ScalarCost * VF.getFixedValue()) +
3314 Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true);
3315 }
3316 }
3317
3318 return Ctx.getLegacyCost(UI, VF);
3319}
3320
3321#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3322void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
3323 VPSlotTracker &SlotTracker) const {
3324 O << Indent << (IsSingleScalar ? "CLONE " : "REPLICATE ");
3325
3326 if (!getUnderlyingInstr()->getType()->isVoidTy()) {
3327 printAsOperand(O, SlotTracker);
3328 O << " = ";
3329 }
3330 if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
3331 O << "call";
3332 printFlags(O);
3333 O << "@" << CB->getCalledFunction()->getName() << "(";
3334 interleaveComma(make_range(op_begin(), op_begin() + getNumOperands() - 1),
3335 O, [&O, &SlotTracker](VPValue *Op) {
3336 Op->printAsOperand(O, SlotTracker);
3337 });
3338 O << ")";
3339 } else {
3340 O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
3341 printFlags(O);
3342 printOperands(O, SlotTracker);
3343 }
3344
3345 if (shouldPack())
3346 O << " (S->V)";
3347}
3348#endif
3349
3350void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
3351 assert(State.Lane && "Branch on Mask works only on single instance.");
3352
3353 VPValue *BlockInMask = getOperand(0);
3354 Value *ConditionBit = State.get(BlockInMask, *State.Lane);
3355
3356 // Replace the temporary unreachable terminator with a new conditional branch,
3357 // whose two destinations will be set later when they are created.
3358 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
3359 assert(isa<UnreachableInst>(CurrentTerminator) &&
3360 "Expected to replace unreachable terminator with conditional branch.");
3361 auto CondBr =
3362 State.Builder.CreateCondBr(ConditionBit, State.CFG.PrevBB, nullptr);
3363 CondBr->setSuccessor(0, nullptr);
3364 CurrentTerminator->eraseFromParent();
3365}
3366
3367InstructionCost VPBranchOnMaskRecipe::computeCost(ElementCount VF,
3368 VPCostContext &Ctx) const {
3369 // The legacy cost model doesn't assign costs to branches for individual
3370 // replicate regions. Match the current behavior in the VPlan cost model for
3371 // now.
3372 return 0;
3373}
3374
3375void VPPredInstPHIRecipe::execute(VPTransformState &State) {
3376 assert(State.Lane && "Predicated instruction PHI works per instance.");
3377 Instruction *ScalarPredInst =
3378 cast<Instruction>(State.get(getOperand(0), *State.Lane));
3379 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
3380 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
3381 assert(PredicatingBB && "Predicated block has no single predecessor.");
3382 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
3383 "operand must be VPReplicateRecipe");
3384
3385 // By current pack/unpack logic we need to generate only a single phi node: if
3386 // a vector value for the predicated instruction exists at this point it means
3387 // the instruction has vector users only, and a phi for the vector value is
3388 // needed. In this case the recipe of the predicated instruction is marked to
3389 // also do that packing, thereby "hoisting" the insert-element sequence.
3390 // Otherwise, a phi node for the scalar value is needed.
3391 if (State.hasVectorValue(getOperand(0))) {
3392 auto *VecI = cast<Instruction>(State.get(getOperand(0)));
3393 assert(isa<InsertElementInst, InsertValueInst>(VecI) &&
3394 "Packed operands must generate an insertelement or insertvalue");
3395
3396 // If VectorI is a struct, it will be a sequence like:
3397 // %1 = insertvalue %unmodified, %x, 0
3398 // %2 = insertvalue %1, %y, 1
3399 // %VectorI = insertvalue %2, %z, 2
3400 // To get the unmodified vector we need to look through the chain.
3401 if (auto *StructTy = dyn_cast<StructType>(VecI->getType()))
3402 for (unsigned I = 0; I < StructTy->getNumContainedTypes() - 1; I++)
3403 VecI = cast<InsertValueInst>(VecI->getOperand(0));
3404
3405 PHINode *VPhi = State.Builder.CreatePHI(VecI->getType(), 2);
3406 VPhi->addIncoming(VecI->getOperand(0), PredicatingBB); // Unmodified vector.
3407 VPhi->addIncoming(VecI, PredicatedBB); // New vector with inserted element.
3408 if (State.hasVectorValue(this))
3409 State.reset(this, VPhi);
3410 else
3411 State.set(this, VPhi);
3412 // NOTE: Currently we need to update the value of the operand, so the next
3413 // predicated iteration inserts its generated value in the correct vector.
3414 State.reset(getOperand(0), VPhi);
3415 } else {
3416 if (vputils::onlyFirstLaneUsed(this) && !State.Lane->isFirstLane())
3417 return;
3418
3419 Type *PredInstType = State.TypeAnalysis.inferScalarType(getOperand(0));
3420 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
3421 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
3422 PredicatingBB);
3423 Phi->addIncoming(ScalarPredInst, PredicatedBB);
3424 if (State.hasScalarValue(this, *State.Lane))
3425 State.reset(this, Phi, *State.Lane);
3426 else
3427 State.set(this, Phi, *State.Lane);
3428 // NOTE: Currently we need to update the value of the operand, so the next
3429 // predicated iteration inserts its generated value in the correct vector.
3430 State.reset(getOperand(0), Phi, *State.Lane);
3431 }
3432}
3433
3434#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3435void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3436 VPSlotTracker &SlotTracker) const {
3437 O << Indent << "PHI-PREDICATED-INSTRUCTION ";
3438 printAsOperand(O, SlotTracker);
3439 O << " = ";
3440 printOperands(O, SlotTracker);
3441}
3442#endif
3443
3444InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
3445 VPCostContext &Ctx) const {
3446 Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3447 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3448 unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
3449 ->getAddressSpace();
3450 unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
3451 ? Instruction::Load
3452 : Instruction::Store;
3453
3454 if (!Consecutive) {
3455 // TODO: Using the original IR may not be accurate.
3456 // Currently, ARM will use the underlying IR to calculate gather/scatter
3457 // instruction cost.
3458 assert(!Reverse &&
3459 "Inconsecutive memory access should not have the order.");
3460
3461 const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
3462 Type *PtrTy = Ptr->getType();
3463
3464 // If the address value is uniform across all lanes, then the address can be
3465 // calculated with scalar type and broadcast.
3466 if (!vputils::isSingleScalar(getAddr()))
3467 PtrTy = toVectorTy(PtrTy, VF);
3468
3469 return Ctx.TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
3470 Ctx.CostKind) +
3471 Ctx.TTI.getGatherScatterOpCost(Opcode, Ty, Ptr, IsMasked, Alignment,
3472 Ctx.CostKind, &Ingredient);
3473 }
3474
3475 InstructionCost Cost = 0;
3476 if (IsMasked) {
3477 Cost +=
3478 Ctx.TTI.getMaskedMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind);
3479 } else {
3480 TTI::OperandValueInfo OpInfo = Ctx.getOperandInfo(
3481 isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this) ? getOperand(0)
3482 : getOperand(1));
3483 Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
3484 OpInfo, &Ingredient);
3485 }
3486 if (!Reverse)
3487 return Cost;
3488
3489 return Cost += Ctx.TTI.getShuffleCost(
3490 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3491 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3492}
3493
3494void VPWidenLoadRecipe::execute(VPTransformState &State) {
3495 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3496 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3497 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3498 bool CreateGather = !isConsecutive();
3499
3500 auto &Builder = State.Builder;
3501 Value *Mask = nullptr;
3502 if (auto *VPMask = getMask()) {
3503 // Mask reversal is only needed for a real (non-null) mask: a null mask
3504 // means all-ones, and the reverse of an all-ones mask is still all-ones.
3505 Mask = State.get(VPMask);
3506 if (isReverse())
3507 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3508 }
3509
3510 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
3511 Value *NewLI;
3512 if (CreateGather) {
3513 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
3514 "wide.masked.gather");
3515 } else if (Mask) {
3516 NewLI =
3517 Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
3518 PoisonValue::get(DataTy), "wide.masked.load");
3519 } else {
3520 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
3521 }
3522 applyMetadata(*cast<Instruction>(NewLI));
3523 if (Reverse)
3524 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
3525 State.set(this, NewLI);
3526}
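// A reverse masked load (illustrative) expands to the sequence generated
// above:
//   %rev.mask = call <4 x i1> @llvm.vector.reverse.v4i1(<4 x i1> %mask)
//   %wide     = call <4 x i32> @llvm.masked.load.v4i32.p0(
//                   ptr %addr, i32 4, <4 x i1> %rev.mask, <4 x i32> poison)
//   %res      = call <4 x i32> @llvm.vector.reverse.v4i32(<4 x i32> %wide)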
3527
3528#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3529void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
3530 VPSlotTracker &SlotTracker) const {
3531 O << Indent << "WIDEN ";
3532 printAsOperand(O, SlotTracker);
3533 O << " = load ";
3534 printOperands(O, SlotTracker);
3535}
3536#endif
3537
3538/// Use an all-true mask for the reverse rather than the actual mask, as it
3539/// avoids a dependence without affecting the result.
3540static Value *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
3541 Value *EVL, const Twine &Name) {
3542 VectorType *ValTy = cast<VectorType>(Operand->getType());
3543 Value *AllTrueMask =
3544 Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
3545 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
3546 {Operand, AllTrueMask, EVL}, nullptr, Name);
3547}
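// E.g. (illustrative), reversing a <vscale x 4 x i32> under an EVL %evl:
//   %rev = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(
//       <vscale x 4 x i32> %v, <vscale x 4 x i1> splat (i1 true), i32 %evl)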
3548
3549void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
3550 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
3551 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
3552 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3553 bool CreateGather = !isConsecutive();
3554
3555 auto &Builder = State.Builder;
3556 CallInst *NewLI;
3557 Value *EVL = State.get(getEVL(), VPLane(0));
3558 Value *Addr = State.get(getAddr(), !CreateGather);
3559 Value *Mask = nullptr;
3560 if (VPValue *VPMask = getMask()) {
3561 Mask = State.get(VPMask);
3562 if (isReverse())
3563 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3564 } else {
3565 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3566 }
3567
3568 if (CreateGather) {
3569 NewLI =
3570 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
3571 nullptr, "wide.masked.gather");
3572 } else {
3573 NewLI = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
3574 {Addr, Mask, EVL}, nullptr, "vp.op.load");
3575 }
3576 NewLI->addParamAttr(
3577 0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
3578 applyMetadata(*NewLI);
3579 Instruction *Res = NewLI;
3580 if (isReverse())
3581 Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
3582 State.set(this, Res);
3583}
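// E.g. (illustrative), a consecutive, non-reversed EVL load becomes:
//   %vp.op.load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(
//       ptr align 4 %addr, <vscale x 4 x i1> %mask, i32 %evl)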
3584
3585InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
3586 VPCostContext &Ctx) const {
3587 if (!Consecutive || IsMasked)
3588 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3589
3590 // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
3591 // here because the EVL recipes use EVL to replace the tail mask, while the
3592 // legacy model always includes the cost of the mask.
3593 // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
3594 // don't need to compare to the legacy cost model.
3595 Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3596 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3597 unsigned AS = getLoadStoreAddressSpace(&Ingredient);
3598 InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
3599 Instruction::Load, Ty, Alignment, AS, Ctx.CostKind);
3600 if (!Reverse)
3601 return Cost;
3602
3603 return Cost + Ctx.TTI.getShuffleCost(
3604 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3605 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3606}
3607
3608#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3609void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
3610 VPSlotTracker &SlotTracker) const {
3611 O << Indent << "WIDEN ";
3612 printAsOperand(O, SlotTracker);
3613 O << " = vp.load ";
3614 printOperands(O, SlotTracker);
3615}
3616#endif
3617
3618void VPWidenStoreRecipe::execute(VPTransformState &State) {
3619 VPValue *StoredVPValue = getStoredValue();
3620 bool CreateScatter = !isConsecutive();
3621 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3622
3623 auto &Builder = State.Builder;
3624
3625 Value *Mask = nullptr;
3626 if (auto *VPMask = getMask()) {
3627 // Mask reversal is only needed for a real (non-null) mask: a null mask
3628 // means all-ones, and the reverse of an all-ones mask is still all-ones.
3629 Mask = State.get(VPMask);
3630 if (isReverse())
3631 Mask = Builder.CreateVectorReverse(Mask, "reverse");
3632 }
3633
3634 Value *StoredVal = State.get(StoredVPValue);
3635 if (isReverse()) {
3636 // If we store to reverse consecutive memory locations, then we need
3637 // to reverse the order of elements in the stored value.
3638 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
3639 // We don't want to update the value in the map as it might be used in
3640 // another expression. So don't call resetVectorValue(StoredVal).
3641 }
3642 Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
3643 Instruction *NewSI = nullptr;
3644 if (CreateScatter)
3645 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
3646 else if (Mask)
3647 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
3648 else
3649 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
3650 applyMetadata(*NewSI);
3651}
3652
3653#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3654void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
3655 VPSlotTracker &SlotTracker) const {
3656 O << Indent << "WIDEN store ";
3657 printOperands(O, SlotTracker);
3658}
3659#endif
3660
3661void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
3662 VPValue *StoredValue = getStoredValue();
3663 bool CreateScatter = !isConsecutive();
3664 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3665
3666 auto &Builder = State.Builder;
3667
3668 CallInst *NewSI = nullptr;
3669 Value *StoredVal = State.get(StoredValue);
3670 Value *EVL = State.get(getEVL(), VPLane(0));
3671 if (isReverse())
3672 StoredVal = createReverseEVL(Builder, StoredVal, EVL, "vp.reverse");
3673 Value *Mask = nullptr;
3674 if (VPValue *VPMask = getMask()) {
3675 Mask = State.get(VPMask);
3676 if (isReverse())
3677 Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
3678 } else {
3679 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
3680 }
3681 Value *Addr = State.get(getAddr(), !CreateScatter);
3682 if (CreateScatter) {
3683 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3684 Intrinsic::vp_scatter,
3685 {StoredVal, Addr, Mask, EVL});
3686 } else {
3687 NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
3688 Intrinsic::vp_store,
3689 {StoredVal, Addr, Mask, EVL});
3690 }
3691 NewSI->addParamAttr(
3692 1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
3693 applyMetadata(*NewSI);
3694}
3695
3696InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
3697 VPCostContext &Ctx) const {
3698 if (!Consecutive || IsMasked)
3699 return VPWidenMemoryRecipe::computeCost(VF, Ctx);
3700
3701 // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
3702 // here because the EVL recipes use EVL to replace the tail mask, while the
3703 // legacy model always accounts for the cost of the mask.
3704 // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() once we
3705 // no longer need to compare against the legacy cost model.
3706 Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
3707 const Align Alignment = getLoadStoreAlignment(&Ingredient);
3708 unsigned AS = getLoadStoreAddressSpace(&Ingredient);
3709 InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
3710 Instruction::Store, Ty, Alignment, AS, Ctx.CostKind);
3711 if (!Reverse)
3712 return Cost;
3713
3714 return Cost + Ctx.TTI.getShuffleCost(
3715 TargetTransformInfo::SK_Reverse, cast<VectorType>(Ty),
3716 cast<VectorType>(Ty), {}, Ctx.CostKind, 0);
3717}
3718
3719#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3720void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
3721 VPSlotTracker &SlotTracker) const {
3722 O << Indent << "WIDEN vp.store ";
3723 printOperands(O, SlotTracker);
3724}
3725#endif
3726
3727static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
3728 VectorType *DstVTy, const DataLayout &DL) {
3729 // Verify that V is a vector type with same number of elements as DstVTy.
3730 auto VF = DstVTy->getElementCount();
3731 auto *SrcVecTy = cast<VectorType>(V->getType());
3732 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
3733 Type *SrcElemTy = SrcVecTy->getElementType();
3734 Type *DstElemTy = DstVTy->getElementType();
3735 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3736 "Vector elements must have same size");
3737
3738 // Do a direct cast if element types are castable.
3739 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3740 return Builder.CreateBitOrPointerCast(V, DstVTy);
3741 }
3742 // V cannot be directly casted to desired vector type.
3743 // May happen when V is a floating point vector but DstVTy is a vector of
3744 // pointers or vice-versa. Handle this using a two-step bitcast using an
3745 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
3746 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3747 "Only one type should be a pointer type");
3748 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3749 "Only one type should be a floating point type");
3750 Type *IntTy =
3751 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3752 auto *VecIntTy = VectorType::get(IntTy, VF);
3753 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3754 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
3755}
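// For illustration (types assumed): casting <2 x double> to <2 x ptr> with
// 64-bit pointers takes the two-step path above, roughly
//   %i = bitcast <2 x double> %v to <2 x i64>
//   %p = inttoptr <2 x i64> %i to <2 x ptr>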
3756
3757/// Return a vector containing interleaved elements from multiple
3758/// smaller input vectors.
3759static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
3760 const Twine &Name) {
3761 unsigned Factor = Vals.size();
3762 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
3763
3764 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
3765#ifndef NDEBUG
3766 for (Value *Val : Vals)
3767 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
3768#endif
3769
3770 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
3771 // we must use intrinsics to interleave.
3772 if (VecTy->isScalableTy()) {
3773 assert(Factor <= 8 && "Unsupported interleave factor for scalable vectors");
3774 return Builder.CreateVectorInterleave(Vals, Name);
3775 }
3776
3777 // Fixed length. Start by concatenating all vectors into a wide vector.
3778 Value *WideVec = concatenateVectors(Builder, Vals);
3779
3780 // Interleave the elements into the wide vector.
3781 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
3782 return Builder.CreateShuffleVector(
3783 WideVec, createInterleaveMask(NumElts, Factor), Name);
3784}
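// For illustration: interleaving two fixed-width <4 x i32> vectors
// <a0,a1,a2,a3> and <b0,b1,b2,b3> concatenates them into an <8 x i32> wide
// vector and shuffles with mask <0,4,1,5,2,6,3,7>, giving
// <a0,b0,a1,b1,a2,b2,a3,b3>.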
3785
3786// Try to vectorize the interleave group that \p Instr belongs to.
3787//
3788// E.g. Translate following interleaved load group (factor = 3):
3789// for (i = 0; i < N; i+=3) {
3790// R = Pic[i]; // Member of index 0
3791// G = Pic[i+1]; // Member of index 1
3792// B = Pic[i+2]; // Member of index 2
3793// ... // do something to R, G, B
3794// }
3795// To:
3796// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
3797// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
3798// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
3799// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
3800//
3801// Or translate following interleaved store group (factor = 3):
3802// for (i = 0; i < N; i+=3) {
3803// ... do something to R, G, B
3804// Pic[i] = R; // Member of index 0
3805// Pic[i+1] = G; // Member of index 1
3806// Pic[i+2] = B; // Member of index 2
3807// }
3808// To:
3809// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
3810// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
3811// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
3812// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
3813// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
3814void VPInterleaveRecipe::execute(VPTransformState &State) {
3815 assert(!State.Lane && "Interleave group being replicated.");
3816 assert((!needsMaskForGaps() || !State.VF.isScalable()) &&
3817 "Masking gaps for scalable vectors is not yet supported.");
3819 Instruction *Instr = Group->getInsertPos();
3820
3821 // Prepare for the vector type of the interleaved load/store.
3822 Type *ScalarTy = getLoadStoreType(Instr);
3823 unsigned InterleaveFactor = Group->getFactor();
3824 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
3825
3826 VPValue *BlockInMask = getMask();
3827 VPValue *Addr = getAddr();
3828 Value *ResAddr = State.get(Addr, VPLane(0));
3829
3830 auto CreateGroupMask = [&BlockInMask, &State,
3831 &InterleaveFactor](Value *MaskForGaps) -> Value * {
3832 if (State.VF.isScalable()) {
3833 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
3834 assert(InterleaveFactor <= 8 &&
3835 "Unsupported deinterleave factor for scalable vectors");
3836 auto *ResBlockInMask = State.get(BlockInMask);
3837 SmallVector<Value *> Ops(InterleaveFactor, ResBlockInMask);
3838 return interleaveVectors(State.Builder, Ops, "interleaved.mask");
3839 }
3840
3841 if (!BlockInMask)
3842 return MaskForGaps;
3843
3844 Value *ResBlockInMask = State.get(BlockInMask);
3845 Value *ShuffledMask = State.Builder.CreateShuffleVector(
3846 ResBlockInMask,
3847 createReplicatedMask(InterleaveFactor, State.VF.getFixedValue()),
3848 "interleaved.mask");
3849 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
3850 ShuffledMask, MaskForGaps)
3851 : ShuffledMask;
3852 };
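// For illustration: with fixed VF = 4 and factor 3, a block mask
// <m0,m1,m2,m3> is replicated per member to
// <m0,m0,m0,m1,m1,m1,m2,m2,m2,m3,m3,m3>, then AND'ed with the gap mask if
// one is needed.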
3853
3854 const DataLayout &DL = Instr->getDataLayout();
3855 // Vectorize the interleaved load group.
3856 if (isa<LoadInst>(Instr)) {
3857 Value *MaskForGaps = nullptr;
3858 if (needsMaskForGaps()) {
3859 MaskForGaps =
3860 createBitMaskForGaps(State.Builder, State.VF.getFixedValue(), *Group);
3861 assert(MaskForGaps && "Mask for Gaps is required but it is null");
3862 }
3863
3864 Instruction *NewLoad;
3865 if (BlockInMask || MaskForGaps) {
3866 Value *GroupMask = CreateGroupMask(MaskForGaps);
3867 Value *PoisonVec = PoisonValue::get(VecTy);
3868 NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
3869 Group->getAlign(), GroupMask,
3870 PoisonVec, "wide.masked.vec");
3871 } else
3872 NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
3873 Group->getAlign(), "wide.vec");
3874 applyMetadata(*NewLoad);
3875 // TODO: Also manage existing metadata using VPIRMetadata.
3876 Group->addMetadata(NewLoad);
3877
3878 ArrayRef<VPValue *> VPDefs = definedValues();
3879 if (VecTy->isScalableTy()) {
3880 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
3881 // so we must use intrinsics to deinterleave.
3882 assert(InterleaveFactor <= 8 &&
3883 "Unsupported deinterleave factor for scalable vectors");
3884 NewLoad = State.Builder.CreateIntrinsic(
3885 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
3886 NewLoad->getType(), NewLoad,
3887 /*FMFSource=*/nullptr, "strided.vec");
3888 }
3889
3890 auto CreateStridedVector = [&InterleaveFactor, &State,
3891 &NewLoad](unsigned Index) -> Value * {
3892 assert(Index < InterleaveFactor && "Illegal group index");
3893 if (State.VF.isScalable())
3894 return State.Builder.CreateExtractValue(NewLoad, Index);
3895
3896 // For fixed length VF, use shuffle to extract the sub-vectors from the
3897 // wide load.
3898 auto StrideMask =
3899 createStrideMask(Index, InterleaveFactor, State.VF.getFixedValue());
3900 return State.Builder.CreateShuffleVector(NewLoad, StrideMask,
3901 "strided.vec");
3902 };
3903
3904 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
3905 Instruction *Member = Group->getMember(I);
3906
3907 // Skip the gaps in the group.
3908 if (!Member)
3909 continue;
3910
3911 Value *StridedVec = CreateStridedVector(I);
3912
3913 // If this member has a different type, cast the result to that type.
3914 if (Member->getType() != ScalarTy) {
3915 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
3916 StridedVec =
3917 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
3918 }
3919
3920 if (Group->isReverse())
3921 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
3922
3923 State.set(VPDefs[J], StridedVec);
3924 ++J;
3925 }
3926 return;
3927 }
3928
3929 // The sub-vector type for the current instruction.
3930 auto *SubVT = VectorType::get(ScalarTy, State.VF);
3931
3932 // Vectorize the interleaved store group.
3933 Value *MaskForGaps =
3934 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
3935 assert(((MaskForGaps != nullptr) == needsMaskForGaps()) &&
3936 "Mismatch between NeedsMaskForGaps and MaskForGaps");
3937 ArrayRef<VPValue *> StoredValues = getStoredValues();
3938 // Collect the stored vector from each member.
3939 SmallVector<Value *, 4> StoredVecs;
3940 unsigned StoredIdx = 0;
3941 for (unsigned i = 0; i < InterleaveFactor; i++) {
3942 assert((Group->getMember(i) || MaskForGaps) &&
3943 "Fail to get a member from an interleaved store group");
3944 Instruction *Member = Group->getMember(i);
3945
3946 // Skip the gaps in the group.
3947 if (!Member) {
3948 Value *Undef = PoisonValue::get(SubVT);
3949 StoredVecs.push_back(Undef);
3950 continue;
3951 }
3952
3953 Value *StoredVec = State.get(StoredValues[StoredIdx]);
3954 ++StoredIdx;
3955
3956 if (Group->isReverse())
3957 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
3958
3959 // If this member has a different type, cast it to a unified type.
3960
3961 if (StoredVec->getType() != SubVT)
3962 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
3963
3964 StoredVecs.push_back(StoredVec);
3965 }
3966
3967 // Interleave all the smaller vectors into one wider vector.
3968 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
3969 Instruction *NewStoreInstr;
3970 if (BlockInMask || MaskForGaps) {
3971 Value *GroupMask = CreateGroupMask(MaskForGaps);
3972 NewStoreInstr = State.Builder.CreateMaskedStore(
3973 IVec, ResAddr, Group->getAlign(), GroupMask);
3974 } else
3975 NewStoreInstr =
3976 State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
3977
3978 applyMetadata(*NewStoreInstr);
3979 // TODO: Also manage existing metadata using VPIRMetadata.
3980 Group->addMetadata(NewStoreInstr);
3981}
3982
3983#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3984void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
3985 VPSlotTracker &SlotTracker) const {
3986 const InterleaveGroup<Instruction> *IG = getInterleaveGroup();
3987 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
3988 IG->getInsertPos()->printAsOperand(O, false);
3989 O << ", ";
3990 getAddr()->printAsOperand(O, SlotTracker);
3991 VPValue *Mask = getMask();
3992 if (Mask) {
3993 O << ", ";
3994 Mask->printAsOperand(O, SlotTracker);
3995 }
3996
3997 unsigned OpIdx = 0;
3998 for (unsigned i = 0; i < IG->getFactor(); ++i) {
3999 if (!IG->getMember(i))
4000 continue;
4001 if (getNumStoreOperands() > 0) {
4002 O << "\n" << Indent << " store ";
4003 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
4004 O << " to index " << i;
4005 } else {
4006 O << "\n" << Indent << " ";
4007 getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
4008 O << " = load from index " << i;
4009 }
4010 ++OpIdx;
4011 }
4012}
4013#endif
4014
4015void VPInterleaveEVLRecipe::execute(VPTransformState &State) {
4016 assert(!State.Lane && "Interleave group being replicated.");
4017 assert(State.VF.isScalable() &&
4018 "Only support scalable VF for EVL tail-folding.");
4020 "Masking gaps for scalable vectors is not yet supported.");
4022 Instruction *Instr = Group->getInsertPos();
4023
4024 // Prepare for the vector type of the interleaved load/store.
4025 Type *ScalarTy = getLoadStoreType(Instr);
4026 unsigned InterleaveFactor = Group->getFactor();
4027 assert(InterleaveFactor <= 8 &&
4028 "Unsupported deinterleave/interleave factor for scalable vectors");
4029 ElementCount WideVF = State.VF * InterleaveFactor;
4030 auto *VecTy = VectorType::get(ScalarTy, WideVF);
4031
4032 VPValue *Addr = getAddr();
4033 Value *ResAddr = State.get(Addr, VPLane(0));
4034 Value *EVL = State.get(getEVL(), VPLane(0));
4035 Value *InterleaveEVL = State.Builder.CreateMul(
4036 EVL, ConstantInt::get(EVL->getType(), InterleaveFactor), "interleave.evl",
4037 /* NUW= */ true, /* NSW= */ true);
4038 LLVMContext &Ctx = State.Builder.getContext();
4039
4040 Value *GroupMask = nullptr;
4041 if (VPValue *BlockInMask = getMask()) {
4042 SmallVector<Value *> Ops(InterleaveFactor, State.get(BlockInMask));
4043 GroupMask = interleaveVectors(State.Builder, Ops, "interleaved.mask");
4044 } else {
4045 GroupMask =
4046 State.Builder.CreateVectorSplat(WideVF, State.Builder.getTrue());
4047 }
4048
4049 // Vectorize the interleaved load group.
4050 if (isa<LoadInst>(Instr)) {
4051 CallInst *NewLoad = State.Builder.CreateIntrinsic(
4052 VecTy, Intrinsic::vp_load, {ResAddr, GroupMask, InterleaveEVL}, nullptr,
4053 "wide.vp.load");
4054 NewLoad->addParamAttr(0,
4055 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4056
4057 applyMetadata(*NewLoad);
4058 // TODO: Also manage existing metadata using VPIRMetadata.
4059 Group->addMetadata(NewLoad);
4060
4061 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
4062 // so we must use intrinsics to deinterleave.
4063 NewLoad = State.Builder.CreateIntrinsic(
4064 Intrinsic::getDeinterleaveIntrinsicID(InterleaveFactor),
4065 NewLoad->getType(), NewLoad,
4066 /*FMFSource=*/nullptr, "strided.vec");
4067
4068 const DataLayout &DL = Instr->getDataLayout();
4069 for (unsigned I = 0, J = 0; I < InterleaveFactor; ++I) {
4070 Instruction *Member = Group->getMember(I);
4071 // Skip the gaps in the group.
4072 if (!Member)
4073 continue;
4074
4075 Value *StridedVec = State.Builder.CreateExtractValue(NewLoad, I);
4076 // If this member has a different type, cast the result to that type.
4077 if (Member->getType() != ScalarTy) {
4078 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
4079 StridedVec =
4080 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
4081 }
4082
4083 State.set(getVPValue(J), StridedVec);
4084 ++J;
4085 }
4086 return;
4087 } // End for interleaved load.
4088
4089 // The sub-vector type for the current instruction.
4090 auto *SubVT = VectorType::get(ScalarTy, State.VF);
4091 // Vectorize the interleaved store group.
4092 ArrayRef<VPValue *> StoredValues = getStoredValues();
4093 // Collect the stored vector from each member.
4094 SmallVector<Value *, 4> StoredVecs;
4095 const DataLayout &DL = Instr->getDataLayout();
4096 for (unsigned I = 0, StoredIdx = 0; I < InterleaveFactor; I++) {
4097 Instruction *Member = Group->getMember(I);
4098 // Skip the gaps in the group.
4099 if (!Member) {
4100 StoredVecs.push_back(PoisonValue::get(SubVT));
4101 continue;
4102 }
4103
4104 Value *StoredVec = State.get(StoredValues[StoredIdx]);
4105 // If this member has a different type, cast it to a unified type.
4106 if (StoredVec->getType() != SubVT)
4107 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
4108
4109 StoredVecs.push_back(StoredVec);
4110 ++StoredIdx;
4111 }
4112
4113 // Interleave all the smaller vectors into one wider vector.
4114 Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
4115 CallInst *NewStore =
4116 State.Builder.CreateIntrinsic(Type::getVoidTy(Ctx), Intrinsic::vp_store,
4117 {IVec, ResAddr, GroupMask, InterleaveEVL});
4118 NewStore->addParamAttr(1,
4119 Attribute::getWithAlignment(Ctx, Group->getAlign()));
4120
4121 applyMetadata(*NewStore);
4122 // TODO: Also manage existing metadata using VPIRMetadata.
4123 Group->addMetadata(NewStore);
4124}
4125
4126#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4127void VPInterleaveEVLRecipe::print(raw_ostream &O, const Twine &Indent,
4128 VPSlotTracker &SlotTracker) const {
4129 const InterleaveGroup<Instruction> *IG = getInterleaveGroup();
4130 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
4131 IG->getInsertPos()->printAsOperand(O, false);
4132 O << ", ";
4133 getAddr()->printAsOperand(O, SlotTracker);
4134 O << ", ";
4135 getEVL()->printAsOperand(O, SlotTracker);
4136 if (VPValue *Mask = getMask()) {
4137 O << ", ";
4138 Mask->printAsOperand(O, SlotTracker);
4139 }
4140
4141 unsigned OpIdx = 0;
4142 for (unsigned i = 0; i < IG->getFactor(); ++i) {
4143 if (!IG->getMember(i))
4144 continue;
4145 if (getNumStoreOperands() > 0) {
4146 O << "\n" << Indent << " vp.store ";
4147 getStoredValues()[OpIdx]->printAsOperand(O, SlotTracker);
4148 O << " to index " << i;
4149 } else {
4150 O << "\n" << Indent << " ";
4151 getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
4152 O << " = vp.load from index " << i;
4153 }
4154 ++OpIdx;
4155 }
4156}
4157#endif
4158
4159InstructionCost VPInterleaveBase::computeCost(ElementCount VF,
4160 VPCostContext &Ctx) const {
4161 Instruction *InsertPos = getInsertPos();
4162 // Find the VPValue index of the interleave group. We need to skip gaps.
4163 unsigned InsertPosIdx = 0;
4164 for (unsigned Idx = 0; Idx < IG->getFactor(); ++Idx)
4165 if (auto *Member = IG->getMember(Idx)) {
4166 if (Member == InsertPos)
4167 break;
4168 InsertPosIdx++;
4169 }
4170 Type *ValTy = Ctx.Types.inferScalarType(
4171 getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
4172 : getStoredValues()[InsertPosIdx]);
4173 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4174 unsigned AS = getLoadStoreAddressSpace(InsertPos);
4175
4176 unsigned InterleaveFactor = IG->getFactor();
4177 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4178
4179 // Holds the indices of existing members in the interleaved group.
4180 SmallVector<unsigned, 4> Indices;
4181 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4182 if (IG->getMember(IF))
4183 Indices.push_back(IF);
4184
4185 // Calculate the cost of the whole interleaved group.
4186 InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost(
4187 InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices,
4188 IG->getAlign(), AS, Ctx.CostKind, getMask(), NeedsMaskForGaps);
4189
4190 if (!IG->isReverse())
4191 return Cost;
4192
4193 return Cost + IG->getNumMembers() *
4194 Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
4195 VectorTy, VectorTy, {}, Ctx.CostKind,
4196 0);
4197}
4198
4199#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4200void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4201 VPSlotTracker &SlotTracker) const {
4202 O << Indent << "EMIT ";
4203 printAsOperand(O, SlotTracker);
4204 O << " = CANONICAL-INDUCTION ";
4205 printOperands(O, SlotTracker);
4206}
4207#endif
4208
4209bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
4210 return IsScalarAfterVectorization &&
4211 (!IsScalable || vputils::onlyFirstLaneUsed(this));
4212}
4213
4214#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4215void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
4216 VPSlotTracker &SlotTracker) const {
4217 assert((getNumOperands() == 3 || getNumOperands() == 5) &&
4218 "unexpected number of operands");
4219 O << Indent << "EMIT ";
4220 printAsOperand(O, SlotTracker);
4221 O << " = WIDEN-POINTER-INDUCTION ";
4222 getStartValue()->printAsOperand(O, SlotTracker);
4223 O << ", ";
4224 getStepValue()->printAsOperand(O, SlotTracker);
4225 O << ", ";
4226 getOperand(2)->printAsOperand(O, SlotTracker);
4227 if (getNumOperands() == 5) {
4228 O << ", ";
4229 getOperand(3)->printAsOperand(O, SlotTracker);
4230 O << ", ";
4231 getOperand(4)->printAsOperand(O, SlotTracker);
4232 }
4233}
4234
4235void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
4236 VPSlotTracker &SlotTracker) const {
4237 O << Indent << "EMIT ";
4238 printAsOperand(O, SlotTracker);
4239 O << " = EXPAND SCEV " << *Expr;
4240}
4241#endif
4242
4243void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
4244 Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
4245 Type *STy = CanonicalIV->getType();
4246 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
4247 ElementCount VF = State.VF;
4248 Value *VStart = VF.isScalar()
4249 ? CanonicalIV
4250 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
4251 Value *VStep = createStepForVF(Builder, STy, VF, getUnrollPart(*this));
4252 if (VF.isVector()) {
4253 VStep = Builder.CreateVectorSplat(VF, VStep);
4254 VStep =
4255 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
4256 }
4257 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
4258 State.set(this, CanonicalVectorIV);
4259}
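// For illustration: with VF = 4 and unroll part 0, the result is roughly
//   vec.iv = splat(civ) + (splat(0) + <0,1,2,3>) = <civ, civ+1, civ+2, civ+3>
// where civ is the scalar canonical IV on entry to the iteration.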
4260
4261#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4262void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
4263 VPSlotTracker &SlotTracker) const {
4264 O << Indent << "EMIT ";
4265 printAsOperand(O, SlotTracker);
4266 O << " = WIDEN-CANONICAL-INDUCTION ";
4267 printOperands(O, SlotTracker);
4268}
4269#endif
4270
4271void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
4272 auto &Builder = State.Builder;
4273 // Create a vector from the initial value.
4274 auto *VectorInit = getStartValue()->getLiveInIRValue();
4275
4276 Type *VecTy = State.VF.isScalar()
4277 ? VectorInit->getType()
4278 : VectorType::get(VectorInit->getType(), State.VF);
4279
4280 BasicBlock *VectorPH =
4281 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4282 if (State.VF.isVector()) {
4283 auto *IdxTy = Builder.getInt32Ty();
4284 auto *One = ConstantInt::get(IdxTy, 1);
4285 IRBuilder<>::InsertPointGuard Guard(Builder);
4286 Builder.SetInsertPoint(VectorPH->getTerminator());
4287 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
4288 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4289 VectorInit = Builder.CreateInsertElement(
4290 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
4291 }
4292
4293 // Create a phi node for the new recurrence.
4294 PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
4295 Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
4296 Phi->addIncoming(VectorInit, VectorPH);
4297 State.set(this, Phi);
4298}
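// For illustration: with a fixed VF of 4 the preheader value is roughly
//   %vector.recur.init = <poison, poison, poison, %init>
// i.e. the scalar start value sits in the last lane, where the first
// splice in the loop body will pick it up.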
4299
4300InstructionCost
4301VPFirstOrderRecurrencePHIRecipe::computeCost(ElementCount VF,
4302 VPCostContext &Ctx) const {
4303 if (VF.isScalar())
4304 return Ctx.TTI.getCFInstrCost(Instruction::PHI, Ctx.CostKind);
4305
4306 return 0;
4307}
4308
4309#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4310void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
4311 VPSlotTracker &SlotTracker) const {
4312 O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
4313 printAsOperand(O, SlotTracker);
4314 O << " = phi ";
4315 printOperands(O, SlotTracker);
4316}
4317#endif
4318
4319void VPReductionPHIRecipe::execute(VPTransformState &State) {
4320 // Reductions do not have to start at zero. They can start with
4321 // any loop invariant values.
4322 VPValue *StartVPV = getStartValue();
4323
4324 // In order to support recurrences we need to be able to vectorize Phi nodes.
4325 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4326 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4327 // this value when we vectorize all of the instructions that use the PHI.
4328 BasicBlock *VectorPH =
4329 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4330 bool ScalarPHI = State.VF.isScalar() || IsInLoop;
4331 Value *StartV = State.get(StartVPV, ScalarPHI);
4332 Type *VecTy = StartV->getType();
4333
4334 BasicBlock *HeaderBB = State.CFG.PrevBB;
4335 assert(State.CurrentParentLoop->getHeader() == HeaderBB &&
4336 "recipe must be in the vector loop header");
4337 auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
4338 Phi->insertBefore(HeaderBB->getFirstInsertionPt());
4339 State.set(this, Phi, IsInLoop);
4340
4341 Phi->addIncoming(StartV, VectorPH);
4342}
4343
4344#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4345void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4346 VPSlotTracker &SlotTracker) const {
4347 O << Indent << "WIDEN-REDUCTION-PHI ";
4348
4349 printAsOperand(O, SlotTracker);
4350 O << " = phi ";
4351 printOperands(O, SlotTracker);
4352 if (VFScaleFactor != 1)
4353 O << " (VF scaled by 1/" << VFScaleFactor << ")";
4354}
4355#endif
4356
4357void VPWidenPHIRecipe::execute(VPTransformState &State) {
4358 Value *Op0 = State.get(getOperand(0));
4359 Type *VecTy = Op0->getType();
4360 Instruction *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
4361 State.set(this, VecPhi);
4362}
4363
4364#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4365void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4366 VPSlotTracker &SlotTracker) const {
4367 O << Indent << "WIDEN-PHI ";
4368
4369 printAsOperand(O, SlotTracker);
4370 O << " = phi ";
4371 printOperands(O, SlotTracker);
4372}
4373#endif
4374
4375// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
4376// remove VPActiveLaneMaskPHIRecipe.
4377void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
4378 BasicBlock *VectorPH =
4379 State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0));
4380 Value *StartMask = State.get(getOperand(0));
4381 PHINode *Phi =
4382 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
4383 Phi->addIncoming(StartMask, VectorPH);
4384 State.set(this, Phi);
4385}
4386
4387#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4388void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4389 VPSlotTracker &SlotTracker) const {
4390 O << Indent << "ACTIVE-LANE-MASK-PHI ";
4391
4392 printAsOperand(O, SlotTracker);
4393 O << " = phi ";
4394 printOperands(O, SlotTracker);
4395}
4396#endif
4397
4398#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4399void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
4400 VPSlotTracker &SlotTracker) const {
4401 O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
4402
4403 printAsOperand(O, SlotTracker);
4404 O << " = phi ";
4405 printOperands(O, SlotTracker);
4406}
4407#endif
static SDValue Widen(SelectionDAG *CurDAG, SDValue N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition Compiler.h:404
iv users
Definition IVUsers.cpp:48
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file provides a LoopVectorizationPlanner class.
#define I(x, y, z)
Definition MD5.cpp:58
mir Rename Register Operands
static bool isOrdered(const Instruction *I)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file contains the declarations of different VPlan-related auxiliary helpers.
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static InstructionCost getCostForIntrinsics(Intrinsic::ID ID, ArrayRef< const VPValue * > Operands, const VPRecipeWithIRFlags &R, ElementCount VF, VPCostContext &Ctx)
Compute the cost for the intrinsic ID with Operands, produced by R.
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
static Type * getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride, unsigned CurrentPart, IRBuilderBase &Builder)
SmallVector< Value *, 2 > VectorParts
static bool isUsedByLoadStoreAddress(const VPUser *V)
Returns true if V is used as part of the address of another load or store.
static void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPLane &Lane, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
static bool shouldUseAddressAccessSCEV(const VPValue *Ptr)
Returns true if Ptr is a pointer computation for which the legacy cost model computes a SCEV expressi...
static Constant * getSignedIntOrFpConstant(Type *Ty, int64_t C)
A helper function that returns an integer or floating-point constant with value C.
static BranchInst * createCondBranch(Value *Cond, VPBasicBlock *VPBB, VPTransformState &State)
Create a conditional branch using Cond branching to the successors of VPBB.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
This file contains the declarations of the Vectorization Plan base classes:
static const uint32_t IV[8]
Definition blake3_impl.h:83
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
Conditional or Unconditional Branch instruction.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:984
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
static LLVM_ABI StringRef getPredicateName(Predicate P)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:131
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:325
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:313
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:310
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:321
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
void setAllowContract(bool B=true)
Definition FMF.h:90
bool noSignedZeros() const
Definition FMF.h:67
bool noInfs() const
Definition FMF.h:66
void setAllowReciprocal(bool B=true)
Definition FMF.h:87
bool allowReciprocal() const
Definition FMF.h:68
LLVM_ABI void print(raw_ostream &O) const
Print fast-math flags to O.
Definition Operator.cpp:271
void setNoSignedZeros(bool B=true)
Definition FMF.h:84
bool allowReassoc() const
Flag queries.
Definition FMF.h:64
bool approxFunc() const
Definition FMF.h:70
void setNoNaNs(bool B=true)
Definition FMF.h:78
void setAllowReassoc(bool B=true)
Flag setters.
Definition FMF.h:75
bool noNaNs() const
Definition FMF.h:65
void setApproxFunc(bool B=true)
Definition FMF.h:93
void setNoInfs(bool B=true)
Definition FMF.h:81
bool allowContract() const
Definition FMF.h:69
Class to represent function types.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool willReturn() const
Determine if the function will return.
Definition Function.h:661
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition Function.h:594
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2571
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2625
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2559
LLVM_ABI Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2618
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition IRBuilder.h:2637
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2036
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2463
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1805
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2329
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
LLVMContext & getContext() const
Definition IRBuilder.h:203
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1708
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition IRBuilder.h:1725
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2341
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2439
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
static InstructionCost getInvalid(CostType Val=0)
bool isCast() const
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isUnaryOp() const
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
bool isReverse() const
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g.
Align getAlign() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
std::pair< MDNode *, MDNode * > getNoAliasMetadataFor(const Instruction *OrigInst) const
Returns a pair containing the alias_scope and noalias metadata nodes for OrigInst,...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
static bool isSignedRecurrenceKind(RecurKind Kind)
Returns true if recurrece kind is a signed redux kind.
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindLastIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:59
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition SetVector.h:93
This class provides computation of slot numbers for LLVM Assembly writing.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
@ TCC_Free
Expected to fold away in lowering.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:298
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:301
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
value_op_iterator value_op_end()
Definition User.h:313
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
value_op_iterator value_op_begin()
Definition User.h:310
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3781
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition VPlan.h:3834
iterator end()
Definition VPlan.h:3818
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:3847
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition VPlan.h:2432
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account when normalized the first incoming value wi...
Definition VPlan.h:2427
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
VPRegionBlock * getParent()
Definition VPlan.h:173
const VPBlocksTy & getPredecessors() const
Definition VPlan.h:204
VPlan * getPlan()
Definition VPlan.cpp:165
void printAsOperand(raw_ostream &OS, bool PrintType=false) const
Definition VPlan.h:356
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPBranchOnMaskRecipe.
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
VPlan-based builder utility analogous to IRBuilder.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
Definition VPlanValue.h:302
void dump() const
Dump the VPDef to stderr (for debugging).
Definition VPlan.cpp:126
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:424
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition VPlanValue.h:419
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:397
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition VPlanValue.h:409
friend class VPValue
Definition VPlanValue.h:303
unsigned getVPDefID() const
Definition VPlanValue.h:429
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition VPlan.h:3658
VPValue * getStartValue() const
Definition VPlan.h:3657
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void decompose()
Insert the recipes of the expression back into the VPlan, directly before the current recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool mayHaveSideEffects() const
Returns true if this expression contains recipes that may have side effects.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
bool mayReadOrWriteMemory() const
Returns true if this expression contains recipes that may read from or write to memory.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this header phi recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2014
void execute(VPTransformState &State) override
Produce a vectorized histogram operation.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPHistogramRecipe.
VPValue * getMask() const
Return the mask operand if one was provided, or a null pointer if all lanes should be executed uncond...
Definition VPlan.h:1710
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Class to record and manage LLVM IR flags.
Definition VPlan.h:600
FastMathFlagsTy FMFs
Definition VPlan.h:664
bool flagsValidForOpcode(unsigned Opcode) const
Returns true if the set flags are valid for Opcode.
WrapFlagsTy WrapFlags
Definition VPlan.h:658
CmpInst::Predicate CmpPredicate
Definition VPlan.h:657
void printFlags(raw_ostream &O) const
GEPNoWrapFlags GEPFlags
Definition VPlan.h:662
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition VPlan.h:822
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
TruncFlagsTy TruncFlags
Definition VPlan.h:659
CmpInst::Predicate getPredicate() const
Definition VPlan.h:804
ExactFlagsTy ExactFlags
Definition VPlan.h:661
bool hasNoSignedWrap() const
Definition VPlan.h:846
void intersectFlags(const VPIRFlags &Other)
Only keep flags also present in Other.
GEPNoWrapFlags getGEPNoWrapFlags() const
Definition VPlan.h:816
bool hasPredicate() const
Returns true if the recipe has a comparison predicate.
Definition VPlan.h:819
DisjointFlagsTy DisjointFlags
Definition VPlan.h:660
unsigned AllFlags
Definition VPlan.h:665
bool hasNoUnsignedWrap() const
Definition VPlan.h:835
NonNegFlagsTy NonNegFlags
Definition VPlan.h:663
void applyFlags(Instruction &I) const
Apply the IR flags to I.
Definition VPlan.h:767
Instruction & getInstruction() const
Definition VPlan.h:1375
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe,...
void extractLastLaneOfFirstOperand(VPBuilder &Builder)
Update the recipes first operand to the last lane of the operand using Builder.
LLVM_ABI_FOR_TEST InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPIRInstruction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPIRInstruction(Instruction &I)
VPIRInstruction::create() should be used to create VPIRInstructions, as subclasses may need to be cre...
Definition VPlan.h:1350
void intersect(const VPIRMetadata &MD)
Intersect this VPIRMetada object with MD, keeping only metadata nodes that are common to both.
void applyMetadata(Instruction &I) const
Add all metadata to I.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPInstruction.
VPInstruction(unsigned Opcode, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Definition VPlan.h:1100
bool doesGeneratePerAllLanes() const
Returns true if this VPInstruction generates scalar values for all lanes.
@ ExtractLane
Extracts a single lane (first operand) from a set of vector operands.
Definition VPlan.h:1060
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1016
@ WideIVStep
Scale the first operand (vector step) by the second operand (scalar-step).
Definition VPlan.h:1050
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1063
@ FirstOrderRecurrenceSplice
Definition VPlan.h:989
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1054
@ BuildVector
Creates a fixed-width vector containing all operands.
Definition VPlan.h:1013
@ BuildStructVector
Given operands of (the same) struct type, creates a struct of fixed- width vectors each containing a ...
Definition VPlan.h:1010
@ VScale
Returns the value for vscale.
Definition VPlan.h:1065
@ CanonicalIVIncrementForPart
Definition VPlan.h:1003
bool hasResult() const
Definition VPlan.h:1139
bool opcodeMayReadOrWriteFromMemory() const
Returns true if the underlying opcode may read from or write to memory.
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
StringRef getName() const
Returns the symbolic name assigned to the VPInstruction.
Definition VPlan.h:1179
unsigned getOpcode() const
Definition VPlan.h:1119
bool onlyFirstPartUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scal...
void execute(VPTransformState &State) override
Generate the instruction.
bool needsMaskForGaps() const
Return true if the access needs a mask because of the gaps.
Definition VPlan.h:2542
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this recipe.
Instruction * getInsertPos() const
Definition VPlan.h:2546
const InterleaveGroup< Instruction > * getInterleaveGroup() const
Definition VPlan.h:2544
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:2536
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition VPlan.h:2565
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:2530
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:2639
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:2658
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
unsigned getNumStoreOperands() const override
Returns the number of stored operands of this interleave group.
Definition VPlan.h:2609
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
static VPLane getFirstLane()
void execute(VPTransformState &State) override
Generate the reduction in the loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPPartialReductionRecipe.
unsigned getOpcode() const
Get the binary op's opcode.
Definition VPlan.h:2796
virtual const VPRecipeBase * getAsRecipe() const =0
Return a VPRecipeBase* to the current object.
virtual unsigned getNumIncoming() const
Returns the number of incoming values, also number of incoming blocks.
Definition VPlan.h:1265
void removeIncomingValueFor(VPBlockBase *IncomingBlock) const
Removes the incoming value for IncomingBlock, which must be a predecessor.
const VPBasicBlock * getIncomingBlock(unsigned Idx) const
Returns the incoming block with index Idx.
Definition VPlan.h:3925
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1290
VPValue * getIncomingValue(unsigned Idx) const
Returns the incoming VPValue with index Idx.
Definition VPlan.h:1257
void printPhiOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the recipe.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs (from a replicate region) as needed to retain SSA form.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:394
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
bool isPhi() const
Returns true for PHI-like recipes.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legac...
VPBasicBlock * getParent()
Definition VPlan.h:415
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:482
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the ForceTargetInstructionCost flag.
bool isScalarCast() const
Return true if the recipe is a scalar cast.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos lives in, right after MovePos.
VPRecipeBase(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:405
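These list-manipulation members mirror llvm::Instruction's; a minimal sketch (hypothetical helper, in-tree VPlan types assumed) of relocating one recipe and deleting another:
void hoistAndClean(VPRecipeBase &Def, VPRecipeBase &FirstUser,
                   VPRecipeBase *Dead) {
  Def.removeFromParent();       // unlink only; the recipe stays alive
  Def.insertBefore(&FirstUser); // relink ahead of its first user
  if (Dead)
    Dead->eraseFromParent();    // unlink and delete in one step
}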
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition VPlan.h:2841
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition VPlan.h:2738
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of VPReductionRecipe.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition VPlan.h:2742
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition VPlan.h:2744
RecurKind getRecurrenceKind() const
Return the recurrence kind for the in-loop reduction.
Definition VPlan.h:2734
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition VPlan.h:2740
void execute(VPTransformState &State) override
Generate the reduction in the loop.
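A small sketch of how these accessors decompose an in-loop reduction; the helper is hypothetical, and guarding getCondOp behind isConditional is an assumed convention:
void summarize(const VPReductionRecipe &Red) {
  RecurKind Kind = Red.getRecurrenceKind(); // e.g. RecurKind::Add
  VPValue *Chain = Red.getChainOp();        // scalar accumulator
  VPValue *Vec = Red.getVecOp();            // vector to reduce
  VPValue *Cond = Red.isConditional() ? Red.getCondOp() : nullptr;
  (void)Kind; (void)Chain; (void)Vec; (void)Cond;
}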
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition VPlan.h:3969
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR corresponding to its VPBlockBases.
Definition VPlan.h:4037
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
Definition VPlan.h:2856
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool isSingleScalar() const
Definition VPlan.h:2901
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPReplicateRecipe.
unsigned getOpcode() const
Definition VPlan.h:2930
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition VPlan.h:3723
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by its users.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR instructions that define a single result VPValue.
Definition VPlan.h:521
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:586
LLVM_DUMP_METHOD void dump() const
Print this VPSingleDefRecipe to dbgs() (for debugging).
VPSingleDefRecipe(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:523
This class can be used to assign names to VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
Helper to access the operand that contains the unroll part for this recipe after unrolling.
Definition VPlan.h:930
VPValue * getUnrollPartOperand(const VPUser &U) const
Return the VPValue operand containing the unroll part or null if there is no such operand.
unsigned getUnrollPart(const VPUser &U) const
Return the unroll part.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to its operands.
Definition VPlanValue.h:199
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition VPlan.cpp:1446
operand_range operands()
Definition VPlanValue.h:267
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:243
unsigned getNumOperands() const
Definition VPlanValue.h:237
operand_iterator op_begin()
Definition VPlanValue.h:263
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:238
virtual bool onlyFirstLaneUsed(const VPValue *Op) const
Returns true if the VPUser only uses the first lane of operand Op.
Definition VPlanValue.h:282
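A minimal sketch of operand rewriting through the VPUser interface (the helper is hypothetical; only the documented members above are used):
void replaceUsesOfWith(VPUser &U, VPValue *Old, VPValue *New) {
  for (unsigned I = 0, E = U.getNumOperands(); I != E; ++I)
    if (U.getOperand(I) == Old)
      U.setOperand(I, New);
}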
bool isDefinedOutsideLoopRegions() const
Returns true if the VPValue is defined outside any loop.
Definition VPlan.cpp:1400
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. is a live-in.
Definition VPlan.cpp:135
friend class VPExpressionRecipe
Definition VPlanValue.h:53
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition VPlan.cpp:1442
bool hasMoreThanOneUniqueUser() const
Returns true if the value has more than one unique user.
Definition VPlanValue.h:140
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:176
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
VPValue(const unsigned char SC, Value *UV=nullptr, VPDef *Def=nullptr)
Definition VPlan.cpp:98
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1403
user_iterator user_begin()
Definition VPlanValue.h:130
unsigned getNumUsers() const
Definition VPlanValue.h:113
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
Definition VPlanValue.h:171
user_range users()
Definition VPlanValue.h:134
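A sketch of how the use-list side is typically driven (hypothetical helper, built only from the members listed above):
unsigned rewireUsers(VPValue &Old, VPValue &New) {
  unsigned NumRewired = Old.getNumUsers();
  // Updates the operand lists of every user of Old.
  Old.replaceAllUsesWith(&New);
  return NumRewired;
}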
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getSourceElementType() const
Definition VPlan.h:1914
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
operand_range args()
Definition VPlan.h:1667
Function * getCalledScalarFunction() const
Definition VPlan.h:1663
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCallRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1>} for each part.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition VPlan.h:1536
void execute(VPTransformState &State) override
Produce widened copies of the cast.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenCastRecipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
Type * getSourceElementType() const
Definition VPlan.h:1811
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2070
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst, if it is one, or nullptr otherwise.
Definition VPlan.h:2181
Type * getScalarType() const
Returns the scalar type of the induction.
Definition VPlan.h:2190
bool isCanonical() const
Returns true if the induction is canonical, i.e. starting at 0, incremented by UF * VF (= the original IV is incremented by 1) and of the same type as the canonical induction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the VPUser only uses the first lane of operand Op.
Intrinsic::ID getVectorIntrinsicID() const
Return the ID of the intrinsic.
Definition VPlan.h:1601
StringRef getIntrinsicName() const
Return the name of the intrinsic as a string.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Return the scalar return type of the intrinsic.
Definition VPlan.h:1604
void execute(VPTransformState &State) override
Produce a widened version of the vector intrinsic.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this vector intrinsic.
bool IsMasked
Whether the memory access is masked.
Definition VPlan.h:3161
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
Definition VPlan.h:3158
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition VPlan.h:3198
Instruction & Ingredient
Definition VPlan.h:3152
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenMemoryRecipe.
bool Consecutive
Whether the accessed addresses are consecutive.
Definition VPlan.h:3155
VPValue * getMask() const
Return the mask used by this recipe.
Definition VPlan.h:3212
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition VPlan.h:3205
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
Definition VPlan.h:3202
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the recipe.
Definition VPlan.h:1439
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State.VF elements.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getUF() const
Definition VPlan.h:4301
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1037
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
Definition Value.h:838
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
Definition TypeSize.h:253
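A minimal sketch of the fixed-versus-scalable arithmetic these queries encode:
#include "llvm/Support/TypeSize.h"
#include <cassert>

void elementCountMath() {
  llvm::ElementCount VF = llvm::ElementCount::getScalable(8); // <vscale x 8>
  assert(VF.isScalable());
  unsigned MinLanes = VF.getKnownMinValue();     // 8; real lanes = 8 * vscale
  llvm::ElementCount Half = VF.divideCoefficientBy(2); // <vscale x 4>
  // getFixedValue() is only valid for fixed counts, so use a fixed VF here.
  llvm::ElementCount FixedVF = llvm::ElementCount::getFixed(4);
  unsigned Lanes = FixedVF.getFixedValue();      // 4
  (void)MinLanes; (void)Half; (void)Lanes;
}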
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
pointer remove(iterator &IT)
Definition ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI Intrinsic::ID getDeinterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.deinterleaveN intrinsic for factor N.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm.ssa.copy".
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
bool match(Val *V, const Pattern &P)
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
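These matchers compose like their llvm::PatternMatch counterparts; a minimal sketch, assuming the in-tree VPlanPatternMatch.h header:
using namespace llvm::VPlanPatternMatch;

// True if Def is computed as "sub 0, x" for some VPValue x.
bool isNegation(VPValue *Def) {
  return match(Def, m_Sub(m_ZeroInt(), m_VPValue()));
}

// True if Def is any select; all three operands are ignored.
bool isAnySelect(VPValue *Def) {
  return match(Def, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
}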
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or only has its first lane used.
Definition VPlanUtils.h:44
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool onlyScalarValuesUsed(const VPValue *Def)
Returns true if only scalar values of Def are used by all users.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
LLVM_ABI Value * createSimpleReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a reduction of the given vector.
@ Offset
Definition DWP.cpp:477
LLVM_ABI Value * createFindLastIVReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind, Value *Start, Value *Sentinel)
Create a reduction of the given vector Src for a reduction of the kind RecurKind::FindLastIV.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of a load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...) such that A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
Definition STLExtras.h:2452
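For example, a generic sketch (not vectorizer-specific) of the single-range form:
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <vector>

void printNumbered(const std::vector<std::string> &Names) {
  // Each element is paired with its 0-based index.
  for (const auto &[Idx, Name] : llvm::enumerate(Names))
    llvm::outs() << Idx << ": " << Name << "\n";
}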
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case of optionals) value is accepted.
Definition Casting.h:738
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition STLExtras.h:2211
auto cast_or_null(const Y &Val)
Definition Casting.h:720
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of a load or store instruction.
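Together with getLoadStorePointerOperand above and getLoadStoreType listed further below, these accessors let cost and legality code treat loads and stores uniformly; a minimal sketch:
#include "llvm/IR/Instructions.h"

// I must be a LoadInst or StoreInst; the helpers abstract over both.
void describeAccess(llvm::Instruction *I) {
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I);
  llvm::Type *AccessTy = llvm::getLoadStoreType(I);
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  unsigned AddrSpace = llvm::getLoadStoreAddressSpace(I);
  (void)Ptr; (void)AccessTy; (void)Alignment; (void)AddrSpace;
}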
LLVM_ABI Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
LLVM_ABI Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
LLVM_ABI llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
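Worked out on small values (derived by hand from the documented semantics, so treat the exact contents as an assumption); createInterleaveMask is listed further below:
#include "llvm/Analysis/VectorUtils.h"

void maskExamples() {
  // Member 1 of a 3-way interleave group, 4 lanes: {1, 4, 7, 10}.
  llvm::SmallVector<int, 16> Stride = llvm::createStrideMask(1, 3, 4);
  // Replication factor 2 over 4 lanes: {0, 0, 1, 1, 2, 2, 3, 3}.
  llvm::SmallVector<int, 16> Rep = llvm::createReplicatedMask(2, 4);
  // Interleave 2 vectors of 4 lanes each: {0, 4, 1, 5, 2, 6, 3, 7}.
  llvm::SmallVector<int, 16> Ilv = llvm::createInterleaveMask(4, 2);
  (void)Stride; (void)Rep; (void)Ilv;
}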
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1719
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector containing its elements.
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:325
@ Other
Any other memory.
Definition ModRef.h:68
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elements are valid vector element types.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
RecurKind
These are the kinds of recurrences that we support.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ AnyOf
AnyOf reduction with select(cmp(),x,y) where one of (x,y) is loop invariant, and both x and y are of integer type.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector.reduce.* intrinsic used to generate it.
DWARFExpression::Operation Op
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
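Both VF helpers materialize vscale when VF is scalable; a minimal sketch, assuming B is an IRBuilder already positioned at the insertion point (the declaring header is vectorizer-internal):
llvm::Value *emitVFStep(llvm::IRBuilderBase &B, llvm::ElementCount VF) {
  llvm::Type *I64 = B.getInt64Ty();
  // For <vscale x 4>: vscale * 4; for a fixed VF: a plain constant.
  llvm::Value *RuntimeVF = llvm::getRuntimeVF(B, I64, VF);
  (void)RuntimeVF;
  // Step * VF, e.g. the per-part increment of an unrolled induction.
  return llvm::createStepForVF(B, I64, VF, /*Step=*/2);
}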
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
LLVM_ABI Value * createOrderedReduction(IRBuilderBase &B, RecurKind RdxKind, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence kind RdxKind.
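A minimal sketch of the two reduction creators side by side; using RecurKind::FAdd for the ordered case is an assumption (any ordered FP kind follows the same shape):
// Unordered: a single llvm.vector.reduce.add over Src.
llvm::Value *reduceAdd(llvm::IRBuilderBase &B, llvm::Value *Src) {
  return llvm::createSimpleReduction(B, Src, llvm::RecurKind::Add);
}

// Ordered: sequential FP reduction seeded with the Start value.
llvm::Value *reduceFAddOrdered(llvm::IRBuilderBase &B, llvm::Value *Src,
                               llvm::Value *Start) {
  return llvm::createOrderedReduction(B, llvm::RecurKind::FAdd, Src, Start);
}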
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind)
A helper function that returns how much we should divide the cost of a predicated block by.
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
LLVM_ABI Value * createAnyOfReduction(IRBuilderBase &B, Value *Src, Value *InitVal, PHINode *OrigPhi)
Create a reduction of the given vector Src for a reduction of kind RecurKind::AnyOf.
LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx, or on the return type if OpdIdx is -1.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Struct to hold the various analyses needed for cost computations.
void execute(VPTransformState &State) override
Generate the phi nodes.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this first-order recurrence phi recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
An overlay for VPIRInstructions wrapping PHI nodes, enabling convenient use of cast/dyn_cast/isa and execute() implementations.
Definition VPlan.h:1412
PHINode & getIRPhi()
Definition VPlan.h:1420
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
The method which generates the output IR instructions that correspond to this VPRecipe, thereby "executing" the VPlan.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the instruction.
A pure-virtual common base class for recipes defining a single VPValue and using IR flags.
Definition VPlan.h:875
InstructionCost getCostForRecipeWithOpcode(unsigned Opcode, ElementCount VF, VPCostContext &Ctx) const
Compute the cost for this recipe for VF, using Opcode and Ctx.
VPRecipeWithIRFlags(const unsigned char SC, ArrayRef< VPValue * > Operands, DebugLoc DL=DebugLoc::getUnknown())
Definition VPlan.h:876
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the output IR.
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
Value * get(const VPValue *Def, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def if IsScalar is false, otherwise return the generated scalar.
Definition VPlan.cpp:293
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
ElementCount VF
The chosen Vectorization Factor of the loop being vectorized.
void execute(VPTransformState &State) override
Generate the wide load or gather.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenLoadEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3285
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isInvariantCond() const
Definition VPlan.h:1756
VPValue * getCond() const
Definition VPlan.h:1752
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenSelectRecipe.
void execute(VPTransformState &State) override
Produce a widened version of the select instruction.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition VPlan.h:3366
void execute(VPTransformState &State) override
Generate the wide store or scatter.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenStoreEVLRecipe.
VPValue * getEVL() const
Return the EVL operand.
Definition VPlan.h:3369
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition VPlan.h:3330
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.