[x86] Add lowering for @llvm.experimental.vector.compress #104904

Merged · 9 commits · Sep 13, 2024
Changes from 2 commits
9 changes: 6 additions & 3 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -11582,11 +11582,12 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
// ... if it is not a splat vector, we need to get the passthru value at
// position = popcount(mask) and re-load it from the stack before it is
// overwritten in the loop below.
+ EVT PopcountVT = ScalarVT.changeTypeToInteger();
SDValue Popcount = DAG.getNode(
ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask);
Popcount = DAG.getNode(ISD::ZERO_EXTEND, DL,
- MaskVT.changeVectorElementType(ScalarVT), Popcount);
- Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, ScalarVT, Popcount);
+ MaskVT.changeVectorElementType(PopcountVT), Popcount);
+ Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount);
SDValue LastElmtPtr =
getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
LastWriteVal = DAG.getLoad(
@@ -11625,8 +11626,10 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,

// Re-write the last ValI if all lanes were selected. Otherwise,
// overwrite the last write with the passthru value.
+ SDNodeFlags Flags{};
+ Flags.setUnpredictable(true);
LastWriteVal =
- DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal);
+ DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags);
Chain = DAG.getStore(
Chain, DL, LastWriteVal, OutPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
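For context on the first hunk: the generic expansion writes the selected elements to a stack temporary and, when a non-splat passthru is given, pre-loads the passthru element at position popcount(mask) before the store loop can overwrite that slot. The fix computes that popcount in an integer type (PopcountVT) rather than reusing ScalarVT, which is not an integer type for floating-point vectors. The second hunk marks the final select as unpredictable, hinting that its condition is poorly predicted so it should not be lowered as a branch. A small IR sketch of the intrinsic's semantics follows; it is illustrative only, not one of this PR's tests, and uses the same unsuffixed intrinsic style as the tests below.

; With mask <0,1,0,1>, popcount(mask) = 2: result lanes 0-1 receive the
; selected elements (5 and 9) and lanes 2-3 keep the passthru values (13, 14).
; The expansion therefore has to remember passthru lane 2 (= popcount(mask))
; before the store loop reuses that stack slot.
define <4 x i32> @compress_semantics_sketch() {
  %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> <i32 3, i32 5, i32 7, i32 9>,
                                                           <4 x i1> <i1 0, i1 1, i1 0, i1 1>,
                                                           <4 x i32> <i32 11, i32 12, i32 13, i32 14>)
  ret <4 x i32> %out ; result: <5, 9, 13, 14> per the LangRef semantics
}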
12 changes: 12 additions & 0 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2321,6 +2321,18 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}

if (Subtarget.hasAVX512()) {
for (MVT VT : {MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64, MVT::v8i32,
MVT::v8f32, MVT::v4i64, MVT::v4f64, MVT::v16i32, MVT::v16f32,
MVT::v8i64, MVT::v8f64})
setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);

if (Subtarget.hasVBMI2())
for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16,
MVT::v64i8, MVT::v32i16})
setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal);
}

if (!Subtarget.useSoftFloat() &&
(Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
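With these hooks, VECTOR_COMPRESS is no longer expanded for the listed 32/64-bit-element vector types on AVX512 targets, and byte/word element types are additionally gated on VBMI2 (vpcompressb/vpcompressw). An illustrative sketch, not taken from this PR's tests; the function name and -mattr string are only assumptions for the example.

; Expected to select vpcompressb for the compress itself when compiled with
; llc -mtriple=x86_64 -mattr=+avx512vbmi2; without VBMI2 the same IR falls
; back to the generic stack-based expansion.
define <64 x i8> @compress_v64i8_sketch(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) {
  %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru)
  ret <64 x i8> %out
}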
6 changes: 6 additions & 0 deletions llvm/lib/Target/X86/X86InstrAVX512.td
@@ -10543,6 +10543,12 @@ multiclass compress_by_vec_width_lowering<X86VectorVTInfo _, string Name> {
def : Pat<(X86compress (_.VT _.RC:$src), _.ImmAllZerosV, _.KRCWM:$mask),
(!cast<Instruction>(Name#_.ZSuffix#rrkz)
_.KRCWM:$mask, _.RC:$src)>;
def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, undef)),
(!cast<Instruction>(Name#_.ZSuffix#rrkz)
_.KRCWM:$mask, _.RC:$src)>;
def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)),
(!cast<Instruction>(Name#_.ZSuffix#rrk)
_.RC:$passthru, _.KRCWM:$mask, _.RC:$src)>;
}

multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
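The two added patterns distinguish on the passthru operand: an undef passthru maps to the zero-masking form (rrkz), while an explicit passthru maps to the merge-masking form (rrk), whose destination keeps the passthru's lanes above popcount(mask). An illustrative IR sketch, again not part of this PR's tests; the names and -mattr string are assumptions.

; With -mattr=+avx512f, the first function should use the zeroing variant
; (zero masking is a valid refinement of the undef lanes), and the second
; should use merge masking so the upper lanes come from %passthru.
define <16 x i32> @compress_undef_passthru_sketch(<16 x i32> %vec, <16 x i1> %mask) {
  %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> undef)
  ret <16 x i32> %out
}

define <16 x i32> @compress_merge_passthru_sketch(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) {
  %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru)
  ret <16 x i32> %out
}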
281 changes: 281 additions & 0 deletions llvm/test/CodeGen/X86/vector-compress-avx2.ll
@@ -0,0 +1,281 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=x86_64 -mattr=avx2 < %s | FileCheck %s

; The main logic for vpcompress is tested in the -avx512.ll version of this file.
; This file only checks the fallback expand path.

define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) {
; CHECK-LABEL: test_compress_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vpextrd $1, %xmm1, %eax
; CHECK-NEXT: vmovd %xmm1, %esi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: subl %eax, %edi
; CHECK-NEXT: vpextrd $2, %xmm1, %edx
; CHECK-NEXT: subl %edx, %edi
; CHECK-NEXT: vpextrd $3, %xmm1, %ecx
; CHECK-NEXT: subl %ecx, %edi
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addq %rsi, %rax
; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: addq %rax, %rdx
; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: vextractps $3, %xmm0, %r8d
; CHECK-NEXT: cmpq $4, %rcx
; CHECK-NEXT: cmovbl -24(%rsp,%rdi,4), %r8d
; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4)
; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rax,4)
; CHECK-NEXT: andl $3, %edx
; CHECK-NEXT: vextractps $3, %xmm0, -24(%rsp,%rdx,4)
; CHECK-NEXT: cmpq $3, %rcx
; CHECK-NEXT: movl $3, %eax
; CHECK-NEXT: cmovbq %rcx, %rax
; CHECK-NEXT: movl %r8d, -24(%rsp,%rax,4)
; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: retq
%out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru)
ret <4 x i32> %out
}

define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) {
; CHECK-LABEL: test_compress_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vpextrd $1, %xmm1, %edx
; CHECK-NEXT: vmovd %xmm1, %esi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: subl %edx, %edi
; CHECK-NEXT: vpextrd $2, %xmm1, %ecx
; CHECK-NEXT: subl %ecx, %edi
; CHECK-NEXT: vpextrd $3, %xmm1, %eax
; CHECK-NEXT: subl %eax, %edi
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4)
; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: addq %rsi, %rdx
; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rdx,4)
; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addq %rcx, %rax
; CHECK-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
; CHECK-NEXT: andl $3, %ecx
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4)
; CHECK-NEXT: cmpq $3, %rax
; CHECK-NEXT: movl $3, %ecx
; CHECK-NEXT: cmovbq %rax, %rcx
; CHECK-NEXT: ja .LBB1_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4)
; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: retq
%out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru)
ret <4 x float> %out
}

define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) {
; CHECK-LABEL: test_compress_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vpextrq $1, %xmm1, %rax
; CHECK-NEXT: vmovq %xmm1, %rcx
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: subl %eax, %edx
; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: addq %rcx, %rax
; CHECK-NEXT: vpextrq $1, %xmm0, %rsi
; CHECK-NEXT: cmpq $2, %rax
; CHECK-NEXT: cmovbq -24(%rsp,%rdx,8), %rsi
; CHECK-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movl %ecx, %ecx
; CHECK-NEXT: vpextrq $1, %xmm0, -24(%rsp,%rcx,8)
; CHECK-NEXT: cmpq $1, %rax
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: cmovbq %rax, %rcx
; CHECK-NEXT: movq %rsi, -24(%rsp,%rcx,8)
; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: retq
%out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru)
ret <2 x i64> %out
}

define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) {
; CHECK-LABEL: test_compress_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vpextrq $1, %xmm1, %rax
; CHECK-NEXT: vmovq %xmm1, %rcx
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: subl %eax, %edx
; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: vmovlpd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: vmovhpd %xmm0, -24(%rsp,%rdx,8)
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addq %rcx, %rax
; CHECK-NEXT: cmpq $2, %rax
; CHECK-NEXT: jb .LBB3_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; CHECK-NEXT: .LBB3_2:
; CHECK-NEXT: cmpq $1, %rax
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: cmovbq %rax, %rcx
; CHECK-NEXT: vmovsd %xmm1, -24(%rsp,%rcx,8)
; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: retq
%out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru)
ret <2 x double> %out
}

define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) {
; CHECK-LABEL: test_compress_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
; CHECK-NEXT: .cfi_offset %rbx, -24
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; CHECK-NEXT: vpslld $31, %ymm1, %ymm1
; CHECK-NEXT: vpsrad $31, %ymm1, %ymm3
; CHECK-NEXT: vmovaps %ymm2, (%rsp)
; CHECK-NEXT: vextracti128 $1, %ymm3, %xmm1
; CHECK-NEXT: vpackssdw %xmm1, %xmm3, %xmm2
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; CHECK-NEXT: vpslld $31, %ymm2, %ymm2
; CHECK-NEXT: vpsrld $31, %ymm2, %ymm2
; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm4
; CHECK-NEXT: vpaddd %xmm4, %xmm2, %xmm2
; CHECK-NEXT: vpextrd $1, %xmm2, %eax
; CHECK-NEXT: vmovd %xmm2, %ecx
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: vpextrd $2, %xmm2, %edx
; CHECK-NEXT: vpextrd $3, %xmm2, %eax
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: andl $7, %eax
; CHECK-NEXT: vpextrd $1, %xmm3, %ecx
; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vmovd %xmm3, %edx
; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: vpextrd $2, %xmm3, %esi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: addq %rcx, %rsi
; CHECK-NEXT: vpextrd $3, %xmm3, %edi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: vmovd %xmm1, %r8d
; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: addq %rdi, %r8
; CHECK-NEXT: vpextrd $1, %xmm1, %r9d
; CHECK-NEXT: andl $1, %r9d
; CHECK-NEXT: addq %r8, %r9
; CHECK-NEXT: vpextrd $2, %xmm1, %r10d
; CHECK-NEXT: andl $1, %r10d
; CHECK-NEXT: addq %r9, %r10
; CHECK-NEXT: vpextrd $3, %xmm1, %r11d
; CHECK-NEXT: andl $1, %r11d
; CHECK-NEXT: addq %r10, %r11
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vextractps $3, %xmm1, %ebx
; CHECK-NEXT: cmpq $8, %r11
; CHECK-NEXT: cmovbl (%rsp,%rax,4), %ebx
; CHECK-NEXT: vmovss %xmm0, (%rsp)
; CHECK-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4)
; CHECK-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4)
; CHECK-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss %xmm1, (%rsp,%rdi,4)
; CHECK-NEXT: andl $7, %r8d
; CHECK-NEXT: vextractps $1, %xmm1, (%rsp,%r8,4)
; CHECK-NEXT: andl $7, %r9d
; CHECK-NEXT: vextractps $2, %xmm1, (%rsp,%r9,4)
; CHECK-NEXT: andl $7, %r10d
; CHECK-NEXT: vextractps $3, %xmm1, (%rsp,%r10,4)
; CHECK-NEXT: cmpq $7, %r11
; CHECK-NEXT: movl $7, %eax
; CHECK-NEXT: cmovbq %r11, %rax
; CHECK-NEXT: movl %eax, %eax
; CHECK-NEXT: movl %ebx, (%rsp,%rax,4)
; CHECK-NEXT: vmovaps (%rsp), %ymm0
; CHECK-NEXT: leaq -8(%rbp), %rsp
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: retq
%out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru)
ret <8 x i32> %out
}

define <4 x i32> @test_compress_all_const() {
; CHECK-LABEL: test_compress_all_const:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0]
; CHECK-NEXT: retq
%out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> <i32 3, i32 5, i32 7, i32 9>,
<4 x i1> <i1 0, i1 1, i1 0, i1 1>,
<4 x i32> undef)
ret <4 x i32> %out
}

define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) {
; CHECK-LABEL: test_compress_const_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
; CHECK-NEXT: retq
%out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> undef)
ret <4 x i32> %out
}

define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) {
; CHECK-LABEL: test_compress_const_mask_passthrough:
; CHECK: # %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3]
; CHECK-NEXT: retq
%out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 undef, i1 0, i1 1>, <4 x i32> %passthru)
ret <4 x i32> %out
}

define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) {
; CHECK-LABEL: test_compress_const_mask_const_passthrough:
; CHECK: # %bb.0:
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; CHECK-NEXT: movl $7, %eax
; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: movl $8, %eax
; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
%out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x i32> <i32 5, i32 6, i32 7, i32 8>)
ret <4 x i32> %out
}