[VPlan] Compute cost for binary op VPInstruction with underlying values. (llvm#125434)

As exposed by llvm#125094, we are missing cost computation for some binary
VPInstructions that we created from original IR instructions. Their cost
needs to be taken into account.

PR: llvm#125434
fhahn authored and Icohedron committed Feb 11, 2025
1 parent 2fdc7c6 commit 475ad1d
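
In outline, the fix adds a dedicated path for binary opcodes to VPInstruction::computeCost. The following is a condensed sketch of the logic added in VPlanRecipes.cpp (see the full diff below), not the verbatim patch:

// Condensed sketch of the new binary-op path in VPInstruction::computeCost.
if (Instruction::isBinaryOp(getOpcode())) {
  // Only cost VPInstructions that wrap an original IR instruction for now;
  // others stay at cost 0 until the legacy cost model is retired.
  if (!getUnderlyingValue())
    return 0;
  // Cost the op at its result type, widened by VF unless only the first
  // lane of the result is used.
  Type *ResTy = Ctx.Types.inferScalarType(this);
  if (!vputils::onlyFirstLaneUsed(this))
    ResTy = toVectorTy(ResTy, VF);
  return Ctx.TTI.getArithmeticInstrCost(getOpcode(), ResTy, Ctx.CostKind);
}

As the new test below checks, the add emitted for the original disjoint or (EMIT ir<%or> = add ir<%iv.4>, ir<1>) is now reported with a cost of 1 for both VF 2 and VF 4.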
Showing 2 changed files with 95 additions and 1 deletion.
22 changes: 21 additions & 1 deletion llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -720,14 +720,34 @@ Value *VPInstruction::generate(VPTransformState &State) {

InstructionCost VPInstruction::computeCost(ElementCount VF,
                                           VPCostContext &Ctx) const {
  if (Instruction::isBinaryOp(getOpcode())) {
    if (!getUnderlyingValue()) {
      // TODO: Compute cost for VPInstructions without underlying values once
      // the legacy cost model has been retired.
      return 0;
    }

    assert(!doesGeneratePerAllLanes() &&
           "Should only generate a vector value or single scalar, not scalars "
           "for all lanes.");
    Type *ResTy = Ctx.Types.inferScalarType(this);
    if (!vputils::onlyFirstLaneUsed(this))
      ResTy = toVectorTy(ResTy, VF);

    return Ctx.TTI.getArithmeticInstrCost(getOpcode(), ResTy, Ctx.CostKind);
  }

  switch (getOpcode()) {
  case VPInstruction::AnyOf: {
    auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
    return Ctx.TTI.getArithmeticReductionCost(
        Instruction::Or, cast<VectorType>(VecTy), std::nullopt, Ctx.CostKind);
  }
  default:
    // TODO: Fill out other opcodes!
    // TODO: Compute cost for other VPInstructions once the legacy cost model
    // has been retired.
    assert(!getUnderlyingValue() &&
           "unexpected VPInstruction with underlying value");
    return 0;
  }
}
@@ -0,0 +1,74 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --filter "Cost of"
; RUN: opt -S -passes=loop-vectorize -mcpu=skylake-avx512 -mtriple=x86_64-apple-macosx -debug -disable-output -S %s 2>&1 | FileCheck %s

; REQUIRES: asserts

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"

define void @wide_or_replaced_with_add_vpinstruction(ptr %src, ptr noalias %dst) {
; CHECK-LABEL: 'wide_or_replaced_with_add_vpinstruction'
; CHECK: Cost of 1 for VF 2: induction instruction %iv.next = add nuw nsw i64 %iv, 1
; CHECK: Cost of 0 for VF 2: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
; CHECK: Cost of 1 for VF 2: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32
; CHECK: Cost of 0 for VF 2: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK: Cost of 0 for VF 2: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0>
; CHECK: Cost of 0 for VF 2: vp<%4> = SCALAR-STEPS vp<%3>, ir<1>
; CHECK: Cost of 0 for VF 2: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4>
; CHECK: Cost of 0 for VF 2: vp<%5> = vector-pointer ir<%g.src>
; CHECK: Cost of 1 for VF 2: WIDEN ir<%l> = load vp<%5>
; CHECK: Cost of 1 for VF 2: WIDEN ir<%iv.4> = add ir<%iv>, ir<4>
; CHECK: Cost of 1 for VF 2: WIDEN ir<%c> = icmp ule ir<%l>, ir<128>
; CHECK: Cost of 1 for VF 2: EMIT ir<%or> = add ir<%iv.4>, ir<1>
; CHECK: Cost of 0 for VF 2: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or>
; CHECK: Cost of 0 for VF 2: vp<%6> = vector-pointer ir<%g.dst>
; CHECK: Cost of 1 for VF 2: WIDEN store vp<%6>, ir<%iv.4>, ir<%c>
; CHECK: Cost of 0 for VF 2: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1>
; CHECK: Cost of 0 for VF 2: EMIT branch-on-count vp<%index.next>, vp<%2>
; CHECK: Cost of 0 for VF 2: vector loop backedge
; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1
; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32
; CHECK: Cost of 0 for VF 4: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
; CHECK: Cost of 0 for VF 4: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<%0>
; CHECK: Cost of 0 for VF 4: vp<%4> = SCALAR-STEPS vp<%3>, ir<1>
; CHECK: Cost of 0 for VF 4: CLONE ir<%g.src> = getelementptr inbounds ir<%src>, vp<%4>
; CHECK: Cost of 0 for VF 4: vp<%5> = vector-pointer ir<%g.src>
; CHECK: Cost of 1 for VF 4: WIDEN ir<%l> = load vp<%5>
; CHECK: Cost of 1 for VF 4: WIDEN ir<%iv.4> = add ir<%iv>, ir<4>
; CHECK: Cost of 1 for VF 4: WIDEN ir<%c> = icmp ule ir<%l>, ir<128>
; CHECK: Cost of 1 for VF 4: EMIT ir<%or> = add ir<%iv.4>, ir<1>
; CHECK: Cost of 0 for VF 4: CLONE ir<%g.dst> = getelementptr ir<%dst>, ir<%or>
; CHECK: Cost of 0 for VF 4: vp<%6> = vector-pointer ir<%g.dst>
; CHECK: Cost of 1 for VF 4: WIDEN store vp<%6>, ir<%iv.4>, ir<%c>
; CHECK: Cost of 0 for VF 4: EMIT vp<%index.next> = add nuw vp<%3>, vp<%1>
; CHECK: Cost of 0 for VF 4: EMIT branch-on-count vp<%index.next>, vp<%2>
; CHECK: Cost of 0 for VF 4: vector loop backedge
; CHECK: Cost of 1 for VF 4: induction instruction %iv.next = add nuw nsw i64 %iv, 1
; CHECK: Cost of 0 for VF 4: induction instruction %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
; CHECK: Cost of 1 for VF 4: exit condition instruction %exitcond = icmp eq i64 %iv.next, 32
;
entry:
br label %loop.header

loop.header:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%g.src = getelementptr inbounds i64, ptr %src, i64 %iv
%l = load i64, ptr %g.src
%iv.4 = add nuw nsw i64 %iv, 4
%c = icmp ule i64 %l, 128
br i1 %c, label %loop.then, label %loop.latch

loop.then:
%or = or disjoint i64 %iv.4, 1
%g.dst = getelementptr inbounds i64, ptr %dst, i64 %or
store i64 %iv.4, ptr %g.dst, align 4
br label %loop.latch

loop.latch:
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 32
br i1 %exitcond, label %exit, label %loop.header

exit:
ret void
}
