Skip to content

Commit 46850aa

Browse files
committed
[VPlan] Compute interleave count for VPlan.
Move selectInterleaveCount to LoopVectorizationPlanner and retrieve some information directly from VPlan. Register pressure was already computed for a VPlan, and with this patch we now also check for reductions directly on VPlan, as well as checking how many load and store operations remain in the loop. This should be mostly NFC, but we may compute slightly different interleave counts in some edge cases, e.g. where dead loads have been removed. This shouldn't happen in practice, and the patch doesn't cause changes across a large test corpus on AArch64. Computing the interleave count based on VPlan allows for making better decisions in the presence of VPlan optimizations, for example when operations on interleave groups are narrowed. Note that there are a few test changes for tests that were still checking the legacy cost-model output when it was computed in selectInterleaveCount.
1 parent 0410720 commit 46850aa

File tree

6 files changed

+95
-561
lines changed

6 files changed

+95
-561
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -487,6 +487,9 @@ class LoopVectorizationPlanner {
487487
/// all profitable VFs in ProfitableVFs.
488488
VectorizationFactor computeBestVF();
489489

490+
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF,
491+
InstructionCost LoopCost);
492+
490493
/// Generate the IR code for the vectorized loop captured in VPlan \p BestPlan
491494
/// according to the best selected \p VF and \p UF.
492495
///

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 72 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -955,13 +955,6 @@ class LoopVectorizationCostModel {
955955
/// 64 bit loop indices.
956956
std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
957957

958-
/// \return The desired interleave count.
959-
/// If interleave count has been specified by metadata it will be returned.
960-
/// Otherwise, the interleave count is computed and returned. VF and LoopCost
961-
/// are the selected vectorization factor and the cost of the selected VF.
962-
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF,
963-
InstructionCost LoopCost);
964-
965958
/// Memory access instruction may be vectorized in more than one way.
966959
/// Form of instruction after vectorization depends on cost.
967960
/// This function takes cost-based decisions for Load/Store instructions
@@ -4611,8 +4604,8 @@ void LoopVectorizationCostModel::collectElementTypesForWidening() {
46114604
}
46124605

46134606
unsigned
4614-
LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4615-
InstructionCost LoopCost) {
4607+
LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4608+
InstructionCost LoopCost) {
46164609
// -- The interleave heuristics --
46174610
// We interleave the loop in order to expose ILP and reduce the loop overhead.
46184611
// There are many micro-architectural considerations that we can't predict
@@ -4627,32 +4620,36 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
46274620
// 3. We don't interleave if we think that we will spill registers to memory
46284621
// due to the increased register pressure.
46294622

4630-
if (!isScalarEpilogueAllowed())
4623+
if (!CM.isScalarEpilogueAllowed())
46314624
return 1;
46324625

4633-
// Do not interleave if EVL is preferred and no User IC is specified.
4634-
if (foldTailWithEVL()) {
4626+
if (any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4627+
IsaPred<VPEVLBasedIVPHIRecipe>)) {
46354628
LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
46364629
"Unroll factor forced to be 1.\n");
46374630
return 1;
46384631
}
4639-
46404632
// We used the distance for the interleave count.
46414633
if (!Legal->isSafeForAnyVectorWidth())
46424634
return 1;
46434635

46444636
// We don't attempt to perform interleaving for loops with uncountable early
46454637
// exits because the VPInstruction::AnyOf code cannot currently handle
46464638
// multiple parts.
4647-
if (Legal->hasUncountableEarlyExit())
4639+
if (Plan.hasEarlyExit())
46484640
return 1;
46494641

4650-
const bool HasReductions = !Legal->getReductionVars().empty();
4642+
const bool HasReductions =
4643+
any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4644+
IsaPred<VPReductionPHIRecipe>);
46514645

46524646
// If we did not calculate the cost for VF (because the user selected the VF)
46534647
// then we calculate the cost of VF here.
46544648
if (LoopCost == 0) {
4655-
LoopCost = expectedCost(VF);
4649+
if (VF.isScalar())
4650+
LoopCost = CM.expectedCost(VF);
4651+
else
4652+
LoopCost = cost(Plan, VF);
46564653
assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
46574654

46584655
// Loop body is free and there is no need for interleaving.
@@ -4661,7 +4658,7 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
46614658
}
46624659

46634660
VPRegisterUsage R =
4664-
calculateRegisterUsageForPlan(Plan, {VF}, TTI, ValuesToIgnore)[0];
4661+
calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
46654662
// We divide by these constants so assume that we have at least one
46664663
// instruction that uses at least one register.
46674664
for (auto &Pair : R.MaxLocalUsers) {
@@ -4722,21 +4719,21 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
47224719
MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
47234720
}
47244721

4725-
unsigned EstimatedVF = getEstimatedRuntimeVF(VF, VScaleForTuning);
4722+
unsigned EstimatedVF = getEstimatedRuntimeVF(VF, CM.getVScaleForTuning());
47264723

47274724
// Try to get the exact trip count, or an estimate based on profiling data or
47284725
// ConstantMax from PSE, failing that.
4729-
if (auto BestKnownTC = getSmallBestKnownTC(PSE, TheLoop)) {
4726+
if (auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop)) {
47304727
// At least one iteration must be scalar when this constraint holds. So the
47314728
// maximum available iterations for interleaving is one less.
4732-
unsigned AvailableTC = requiresScalarEpilogue(VF.isVector())
4729+
unsigned AvailableTC = CM.requiresScalarEpilogue(VF.isVector())
47334730
? BestKnownTC->getFixedValue() - 1
47344731
: BestKnownTC->getFixedValue();
47354732

47364733
unsigned InterleaveCountLB = bit_floor(std::max(
47374734
1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
47384735

4739-
if (getSmallConstantTripCount(PSE.getSE(), TheLoop).isNonZero()) {
4736+
if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
47404737
// If the best known trip count is exact, we select between two
47414738
// prospective ICs, where
47424739
//
@@ -4797,7 +4794,7 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
47974794
// vectorized the loop we will have done the runtime check and so interleaving
47984795
// won't require further checks.
47994796
bool ScalarInterleavingRequiresPredication =
4800-
(VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
4797+
(VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
48014798
return Legal->blockNeedsPredication(BB);
48024799
}));
48034800
bool ScalarInterleavingRequiresRuntimePointerCheck =
@@ -4820,8 +4817,39 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
48204817

48214818
// Interleave until store/load ports (estimated by max interleave count) are
48224819
// saturated.
4823-
unsigned NumStores = Legal->getNumStores();
4824-
unsigned NumLoads = Legal->getNumLoads();
4820+
unsigned NumStores = 0;
4821+
unsigned NumLoads = 0;
4822+
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4823+
vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
4824+
for (VPRecipeBase &R : *VPBB) {
4825+
if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
4826+
NumLoads++;
4827+
continue;
4828+
}
4829+
if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
4830+
NumStores++;
4831+
continue;
4832+
}
4833+
4834+
if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4835+
if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4836+
NumStores += StoreOps;
4837+
else
4838+
NumLoads += InterleaveR->getNumDefinedValues();
4839+
continue;
4840+
}
4841+
if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4842+
NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4843+
NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4844+
continue;
4845+
}
4846+
if (isa<VPHistogramRecipe>(&R)) {
4847+
NumLoads++;
4848+
NumStores++;
4849+
continue;
4850+
}
4851+
}
4852+
}
48254853
unsigned StoresIC = IC / (NumStores ? NumStores : 1);
48264854
unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
48274855

@@ -4831,12 +4859,15 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
48314859
// do the final reduction after the loop.
48324860
bool HasSelectCmpReductions =
48334861
HasReductions &&
4834-
any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
4835-
const RecurrenceDescriptor &RdxDesc = Reduction.second;
4836-
RecurKind RK = RdxDesc.getRecurrenceKind();
4837-
return RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) ||
4838-
RecurrenceDescriptor::isFindIVRecurrenceKind(RK);
4839-
});
4862+
any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4863+
[](VPRecipeBase &R) {
4864+
auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4865+
4866+
return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4867+
RedR->getRecurrenceKind()) ||
4868+
RecurrenceDescriptor::isFindIVRecurrenceKind(
4869+
RedR->getRecurrenceKind()));
4870+
});
48404871
if (HasSelectCmpReductions) {
48414872
LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
48424873
return 1;
@@ -4847,12 +4878,14 @@ LoopVectorizationCostModel::selectInterleaveCount(VPlan &Plan, ElementCount VF,
48474878
// we're interleaving is inside another loop. For tree-wise reductions
48484879
// set the limit to 2, and for ordered reductions it's best to disable
48494880
// interleaving entirely.
4850-
if (HasReductions && TheLoop->getLoopDepth() > 1) {
4881+
if (HasReductions && OrigLoop->getLoopDepth() > 1) {
48514882
bool HasOrderedReductions =
4852-
any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
4853-
const RecurrenceDescriptor &RdxDesc = Reduction.second;
4854-
return RdxDesc.isOrdered();
4855-
});
4883+
any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4884+
[](VPRecipeBase &R) {
4885+
auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4886+
4887+
return RedR && RedR->isOrdered();
4888+
});
48564889
if (HasOrderedReductions) {
48574890
LLVM_DEBUG(
48584891
dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
@@ -10071,8 +10104,11 @@ bool LoopVectorizePass::processLoop(Loop *L) {
1007110104

1007210105
GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
1007310106
if (LVP.hasPlanWithVF(VF.Width)) {
10107+
VPCostContext CostCtx(CM.TTI, *CM.TLI, CM.Legal->getWidestInductionType(),
10108+
CM, CM.CostKind);
10109+
1007410110
// Select the interleave count.
10075-
IC = CM.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
10111+
IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
1007610112

1007710113
unsigned SelectedIC = std::max(IC, UserIC);
1007810114
// Optimistically generate runtime checks if they are needed. Drop them if
@@ -10083,8 +10119,6 @@ bool LoopVectorizePass::processLoop(Loop *L) {
1008310119
// Check if it is profitable to vectorize with runtime checks.
1008410120
bool ForceVectorization =
1008510121
Hints.getForce() == LoopVectorizeHints::FK_Enabled;
10086-
VPCostContext CostCtx(CM.TTI, *CM.TLI, CM.Legal->getWidestInductionType(),
10087-
CM, CM.CostKind);
1008810122
if (!ForceVectorization &&
1008910123
!isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
1009010124
LVP.getPlanFor(VF.Width), SEL,

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4213,7 +4213,10 @@ class VPlan {
42134213
/// block with multiple predecessors (one for the exit via the latch and one
42144214
/// via the other early exit).
42154215
bool hasEarlyExit() const {
4216-
return ExitBlocks.size() > 1 ||
4216+
return count_if(ExitBlocks,
4217+
[](VPIRBasicBlock *EB) {
4218+
return EB->getNumPredecessors() != 0;
4219+
}) > 1 ||
42174220
(ExitBlocks.size() == 1 && ExitBlocks[0]->getNumPredecessors() > 1);
42184221
}
42194222

llvm/test/Transforms/LoopVectorize/AArch64/predication_costs.ll

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ target triple = "aarch64--linux-gnu"
1919
; (udiv(2) + extractelement(8) + insertelement(4)) / 2 = 7
2020
;
2121
; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp2, %tmp3
22-
; CHECK: Found an estimated cost of 7 for VF 2 For instruction: %tmp4 = udiv i32 %tmp2, %tmp3
22+
; CHECK: Cost of 7 for VF 2: profitable to scalarize %tmp4 = udiv i32 %tmp2, %tmp3
2323
;
2424
define i32 @predicated_udiv(ptr %a, ptr %b, i1 %c, i64 %n) {
2525
entry:
@@ -60,7 +60,7 @@ for.end:
6060
; (store(4) + extractelement(4)) / 2 = 4
6161
;
6262
; CHECK: Scalarizing and predicating: store i32 %tmp2, ptr %tmp0, align 4
63-
; CHECK: Found an estimated cost of 4 for VF 2 For instruction: store i32 %tmp2, ptr %tmp0, align 4
63+
; CHECK: Cost of 4 for VF 2: profitable to scalarize store i32 %tmp2, ptr %tmp0, align 4
6464
;
6565
define void @predicated_store(ptr %a, i1 %c, i32 %x, i64 %n) {
6666
entry:
@@ -93,8 +93,8 @@ for.end:
9393
; CHECK: Found scalar instruction: %addr = phi ptr [ %a, %entry ], [ %addr.next, %for.inc ]
9494
; CHECK: Found scalar instruction: %addr.next = getelementptr inbounds i32, ptr %addr, i64 1
9595
; CHECK: Scalarizing and predicating: store i32 %tmp2, ptr %addr, align 4
96-
; CHECK: Found an estimated cost of 0 for VF 2 For instruction: %addr = phi ptr [ %a, %entry ], [ %addr.next, %for.inc ]
97-
; CHECK: Found an estimated cost of 4 for VF 2 For instruction: store i32 %tmp2, ptr %addr, align 4
96+
; CHECK: Cost of 0 for VF 2: induction instruction %addr = phi ptr [ %a, %entry ], [ %addr.next, %for.inc ]
97+
; CHECK: Cost of 4 for VF 2: profitable to scalarize store i32 %tmp2, ptr %addr, align 4
9898
;
9999
define void @predicated_store_phi(ptr %a, i1 %c, i32 %x, i64 %n) {
100100
entry:
@@ -135,9 +135,10 @@ for.end:
135135
;
136136
; CHECK: Scalarizing: %tmp3 = add nsw i32 %tmp2, %x
137137
; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp2, %tmp3
138-
; CHECK: Found an estimated cost of 3 for VF 2 For instruction: %tmp3 = add nsw i32 %tmp2, %x
139-
; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp4 = udiv i32 %tmp2, %tmp3
138+
; CHECK: Cost of 3 for VF 2: profitable to scalarize %tmp3 = add nsw i32 %tmp2, %x
139+
; CHECK: Cost of 5 for VF 2: profitable to scalarize %tmp4 = udiv i32 %tmp2, %tmp3
140140
;
141+
141142
define i32 @predicated_udiv_scalarized_operand(ptr %a, i1 %c, i32 %x, i64 %n) {
142143
entry:
143144
br label %for.body
@@ -180,8 +181,8 @@ for.end:
180181
;
181182
; CHECK: Scalarizing: %tmp2 = add nsw i32 %tmp1, %x
182183
; CHECK: Scalarizing and predicating: store i32 %tmp2, ptr %tmp0, align 4
183-
; CHECK: Found an estimated cost of 3 for VF 2 For instruction: %tmp2 = add nsw i32 %tmp1, %x
184-
; CHECK: Found an estimated cost of 2 for VF 2 For instruction: store i32 %tmp2, ptr %tmp0, align 4
184+
; CHECK: Cost of 3 for VF 2: profitable to scalarize %tmp2 = add nsw i32 %tmp1, %x
185+
; CHECK: Cost of 2 for VF 2: profitable to scalarize store i32 %tmp2, ptr %tmp0, align 4
185186
;
186187
define void @predicated_store_scalarized_operand(ptr %a, i1 %c, i32 %x, i64 %n) {
187188
entry:
@@ -232,11 +233,11 @@ for.end:
232233
; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp3, %tmp2
233234
; CHECK: Scalarizing: %tmp5 = sub i32 %tmp4, %x
234235
; CHECK: Scalarizing and predicating: store i32 %tmp5, ptr %tmp0, align 4
235-
; CHECK: Found an estimated cost of 1 for VF 2 For instruction: %tmp2 = add i32 %tmp1, %x
236-
; CHECK: Found an estimated cost of 7 for VF 2 For instruction: %tmp3 = sdiv i32 %tmp1, %tmp2
237-
; CHECK: Found an estimated cost of 7 for VF 2 For instruction: %tmp4 = udiv i32 %tmp3, %tmp2
238-
; CHECK: Found an estimated cost of 3 for VF 2 For instruction: %tmp5 = sub i32 %tmp4, %x
239-
; CHECK: Found an estimated cost of 2 for VF 2 For instruction: store i32 %tmp5, ptr %tmp0, align 4
236+
; CHECK: Cost of 7 for VF 2: profitable to scalarize %tmp4 = udiv i32 %tmp3, %tmp2
237+
; CHECK: Cost of 7 for VF 2: profitable to scalarize %tmp3 = sdiv i32 %tmp1, %tmp2
238+
; CHECK: Cost of 2 for VF 2: profitable to scalarize store i32 %tmp5, ptr %tmp0, align 4
239+
; CHECK: Cost of 3 for VF 2: profitable to scalarize %tmp5 = sub i32 %tmp4, %x
240+
; CHECK: Cost of 1 for VF 2: WIDEN ir<%tmp2> = add ir<%tmp1>, ir<%x>
240241
;
241242
define void @predication_multi_context(ptr %a, i1 %c, i32 %x, i64 %n) {
242243
entry:

0 commit comments

Comments (0)