Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PHI] Code auto-generate for Sparse API #40060

Merged
merged 8 commits into from
Mar 3, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@ paddle/fluid/op_use_default_grad_maker_DEV.spec
paddle/fluid/op_use_default_grad_maker_PR.spec
paddle/phi/api/backward/backward_api.h
paddle/phi/api/include/api.h
paddle/phi/api/include/sparse_api.h
paddle/phi/api/lib/api.cc
paddle/phi/api/lib/dygraph_api.*
paddle/phi/api/lib/backward_api.cc
paddle/phi/api/lib/sparse_api.cc
paddle/phi/extension.h
paddle/phi/include/*
paddle/phi/infermeta/generated.*
Expand Down
33 changes: 28 additions & 5 deletions paddle/phi/api/lib/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ set(bw_api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/backward_api.cc)
set(bw_api_header_file_tmp ${bw_api_header_file}.tmp)
set(bw_api_source_file_tmp ${bw_api_source_file}.tmp)

# sparse api file
set(sparse_api_gen_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_api_gen.py)
set(sparse_api_yaml_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_api.yaml)
set(sparse_api_header_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/sparse_api.h)
set(sparse_api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/sparse_api.cc)
# Fix: the tmp paths must derive from the sparse api files, not the dense api
# files (${api_header_file}/${api_source_file}); otherwise the sparse generator
# clobbers api.h.tmp/api.cc.tmp and the copy_if_different steps below would
# install dense-generated content into sparse_api.h/sparse_api.cc.
set(sparse_api_header_file_tmp ${sparse_api_header_file}.tmp)
set(sparse_api_source_file_tmp ${sparse_api_source_file}.tmp)

# wrapped infermeta file
set(wrapped_infermeta_gen_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/wrapped_infermeta_gen.py)
set(api_yaml_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml)
Expand Down Expand Up @@ -73,6 +81,19 @@ add_custom_command(
DEPENDS ${bw_api_yaml_file} ${bw_api_gen_file} ${api_gen_base}
VERBATIM)

# generate sparse api: run the code generator into tmp files, then install them
# with copy_if_different so downstream targets only rebuild on real changes.
add_custom_command(
  OUTPUT ${sparse_api_header_file} ${sparse_api_source_file}
  COMMAND ${PYTHON_EXECUTABLE} ${sparse_api_gen_file}
                 --api_yaml_path ${sparse_api_yaml_file}
                 --api_header_path ${sparse_api_header_file_tmp}
                 --api_source_path ${sparse_api_source_file_tmp}
  COMMAND ${CMAKE_COMMAND} -E copy_if_different ${sparse_api_header_file_tmp} ${sparse_api_header_file}
  COMMAND ${CMAKE_COMMAND} -E copy_if_different ${sparse_api_source_file_tmp} ${sparse_api_source_file}
  # Fix: COMMENT referenced undefined ${sparse_sparse_api_source_file}.
  COMMENT "copy_if_different ${sparse_api_header_file} ${sparse_api_source_file}"
  DEPENDS ${sparse_api_yaml_file} ${sparse_api_gen_file} ${api_gen_base}
  VERBATIM)

# generate wrapped infermeta
add_custom_command(
OUTPUT ${wrapped_infermeta_header_file} ${wrapped_infermeta_source_file}
Expand All @@ -87,12 +108,14 @@ cc_library(op_meta_info SRCS op_meta_info.cc DEPS phi_tensor_raw)
cc_library(wrapped_infermeta SRCS ${wrapped_infermeta_source_file} DEPS phi)

cc_library(kernel_dispatch SRCS kernel_dispatch.cc DEPS phi_tensor_raw phi_context kernel_factory)
cc_library(api_gen_utils SRCS api_gen_utils.cc DEPS phi_tensor_raw selected_rows sparse_csr_tensor sparse_coo_tensor)
cc_library(phi_data_transform SRCS data_transform.cc DEPS phi_tensor_raw transfer_layout_kernel cast_kernel data_device_transform)
cc_library(api_custom_impl SRCS api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch phi_data_transform)
cc_library(api_custom_impl SRCS api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform)
cc_library(sparse_api_custom_impl SRCS sparse_api_custom_impl.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform)

cc_library(sparse_api SRCS sparse_api.cc DEPS phi_tensor_raw phi kernel_dispatch phi_data_transform)
cc_library(phi_function_api SRCS ${api_source_file} DEPS phi_tensor_raw phi kernel_dispatch phi_data_transform api_custom_impl)
cc_library(phi_dygraph_api SRCS ${dygraph_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch phi_data_transform)
cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch backward_infermeta phi_data_transform phi_function_api api_custom_impl)
cc_library(sparse_api SRCS sparse_api.cc DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils sparse_api_custom_impl)
cc_library(phi_function_api SRCS ${api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform api_custom_impl)
cc_library(phi_dygraph_api SRCS ${dygraph_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils phi_data_transform)
cc_library(phi_bw_function_api SRCS ${bw_api_source_file} DEPS phi_tensor_raw phi kernel_dispatch api_gen_utils backward_infermeta phi_data_transform phi_function_api api_custom_impl)

cc_library(phi_tensor SRCS tensor_method.cc DEPS phi_tensor_raw phi_function_api)
2 changes: 1 addition & 1 deletion paddle/phi/api/lib/api_custom_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ limitations under the License. */

#include "paddle/phi/api/lib/api_custom_impl.h"

#include "paddle/phi/api/lib/api_gen_utils.h"
#include "paddle/phi/api/lib/api_registry.h"
#include "paddle/phi/api/lib/api_utils.h"
#include "paddle/phi/api/lib/data_transform.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/utils/storage.h"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,34 +12,26 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/api/lib/api_gen_utils.h"

namespace paddle {
namespace experimental {

/* ------------------ for input ----------------------- */

inline std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
const Tensor& tensor) {
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor) {
return std::dynamic_pointer_cast<phi::DenseTensor>(tensor.impl());
}

inline std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
const paddle::optional<Tensor>& tensor) {
if (tensor) {
return std::dynamic_pointer_cast<phi::DenseTensor>(tensor->impl());
}
return nullptr;
}

inline std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
const std::vector<Tensor>& tensors) {
auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor>>();
pt_tensors->reserve(tensors.size());
Expand All @@ -52,12 +44,11 @@ inline std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
return std::move(pt_tensors);
}

inline std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
const Tensor& tensor) {
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor) {
return std::dynamic_pointer_cast<phi::SelectedRows>(tensor.impl());
}

inline std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
const paddle::optional<Tensor>& tensor) {
if (tensor) {
return std::dynamic_pointer_cast<phi::SelectedRows>(tensor->impl());
Expand All @@ -67,19 +58,19 @@ inline std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(

/* ----------------- for infer_meta --------------------- */

inline phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor) {
phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor) {
return phi::MetaTensor(tensor);
}

inline paddle::optional<phi::MetaTensor> MakeMetaTensor(
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::DenseTensor&>& tensor) {
if (tensor) {
return {phi::MetaTensor(*tensor)};
}
return {paddle::none};
}

inline std::vector<phi::MetaTensor> MakeMetaTensor(
std::vector<phi::MetaTensor> MakeMetaTensor(
const std::vector<phi::DenseTensor>& tensors) {
std::vector<phi::MetaTensor> meta_tensors;
meta_tensors.reserve(tensors.size());
Expand All @@ -89,11 +80,11 @@ inline std::vector<phi::MetaTensor> MakeMetaTensor(
return meta_tensors;
}

inline phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor) {
phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor) {
return phi::MetaTensor(tensor);
}

inline paddle::optional<phi::MetaTensor> MakeMetaTensor(
paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::SelectedRows&>& tensor) {
if (tensor) {
return {phi::MetaTensor(*tensor)};
Expand All @@ -103,7 +94,7 @@ inline paddle::optional<phi::MetaTensor> MakeMetaTensor(

/* ------------------ for output ----------------------- */

inline phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
if (!out->initialized()) {
auto dense_tensor = std::make_shared<phi::DenseTensor>(
phi::make_intrusive<SharedStorage>(phi::TransToPhiPlace(backend)),
Expand All @@ -114,8 +105,9 @@ inline phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) {
return static_cast<phi::DenseTensor*>(out->impl().get());
}

inline std::vector<phi::DenseTensor*> SetKernelOutput(
size_t out_size, Backend backend, std::vector<Tensor>* out) {
std::vector<phi::DenseTensor*> SetKernelOutput(size_t out_size,
Backend backend,
std::vector<Tensor>* out) {
out->reserve(out_size);
std::vector<phi::DenseTensor*> results(out_size);
for (size_t i = 0; i < out_size; ++i) {
Expand All @@ -129,8 +121,7 @@ inline std::vector<phi::DenseTensor*> SetKernelOutput(
return results;
}

inline phi::SelectedRows* SetSelectedRowsKernelOutput(Backend backend,
Tensor* out) {
phi::SelectedRows* SetSelectedRowsKernelOutput(Backend backend, Tensor* out) {
if (!out->initialized()) {
auto select_rows = std::make_shared<phi::SelectedRows>();
out->set_impl(select_rows);
Expand All @@ -139,5 +130,29 @@ inline phi::SelectedRows* SetSelectedRowsKernelOutput(Backend backend,
return static_cast<phi::SelectedRows*>(out->impl().get());
}

// Ensure `out` holds a tensor impl of the requested `type` and return a raw
// pointer to it for the kernel to write into.
//
// If `out` is uninitialized, an empty SparseCooTensor, SparseCsrTensor or
// DenseTensor (chosen by `type`) is created and installed as the impl;
// otherwise the existing impl is returned as-is.
// (Fix: removed GitHub review-page UI text that had been pasted into the
// middle of this function body and broke compilation.)
phi::TensorBase* SetSparseKernelOutput(Tensor* out, TensorType type) {
  if (!out->initialized()) {
    if (type == TensorType::SPARSE_COO) {
      auto sparse_tensor = std::make_shared<phi::SparseCooTensor>(
          phi::DenseTensor(), phi::DenseTensor(), phi::DDim{-1});
      out->set_impl(sparse_tensor);
      return sparse_tensor.get();
    } else if (type == TensorType::SPARSE_CSR) {
      // NOTE(review): per reviewer feedback, SparseCsrTensor's constructor
      // reportedly validates that dims is 2-D or 3-D, so this placeholder
      // DDim{-1} may fail that check — TODO: confirm against the
      // SparseCsrTensor implementation.
      auto sparse_tensor =
          std::make_shared<phi::SparseCsrTensor>(phi::DenseTensor(),
                                                 phi::DenseTensor(),
                                                 phi::DenseTensor(),
                                                 phi::DDim{-1});
      out->set_impl(sparse_tensor);
      return sparse_tensor.get();
    } else {
      auto dense_tensor = std::make_shared<phi::DenseTensor>();
      out->set_impl(dense_tensor);
      return dense_tensor.get();
    }
  }
  return out->impl().get();
}

} // namespace experimental
} // namespace paddle
74 changes: 74 additions & 0 deletions paddle/phi/api/lib/api_gen_utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace paddle {
namespace experimental {

// Kind of tensor impl SetSparseKernelOutput should create for an
// uninitialized output.
enum class TensorType { DENSE_TENSOR, SPARSE_CSR, SPARSE_COO };

/* ------------------ for input ----------------------- */

// Extract the underlying DenseTensor impl from an API-level Tensor.
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor);

// Optional-input overload; yields nullptr when `tensor` holds no value.
std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(
    const paddle::optional<Tensor>& tensor);

// Convert a vector of API-level Tensors into a vector of DenseTensors.
std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
    const std::vector<Tensor>& tensors);

// Extract the underlying SelectedRows impl from an API-level Tensor.
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor);

// Optional-input overload; yields nullptr when `tensor` holds no value.
std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(
    const paddle::optional<Tensor>& tensor);

/* ----------------- for infer_meta --------------------- */

// Wrap a DenseTensor in a MetaTensor for InferMeta calls.
phi::MetaTensor MakeMetaTensor(const phi::DenseTensor& tensor);

// Optional overload; yields paddle::none when `tensor` holds no value.
paddle::optional<phi::MetaTensor> MakeMetaTensor(
    const paddle::optional<const phi::DenseTensor&>& tensor);

// Wrap each element of `tensors` in a MetaTensor.
std::vector<phi::MetaTensor> MakeMetaTensor(
    const std::vector<phi::DenseTensor>& tensors);

// Wrap a SelectedRows in a MetaTensor for InferMeta calls.
phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor);

// Optional overload; yields paddle::none when `tensor` holds no value.
paddle::optional<phi::MetaTensor> MakeMetaTensor(
    const paddle::optional<const phi::SelectedRows&>& tensor);

/* ------------------ for output ----------------------- */

// Ensure `out` holds a DenseTensor impl (allocating one on `backend` if
// uninitialized) and return a raw pointer for the kernel to write into.
phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out);

// Multi-output variant: sizes `out` to `out_size` and returns the raw
// DenseTensor pointer for each element.
std::vector<phi::DenseTensor*> SetKernelOutput(size_t out_size,
                                               Backend backend,
                                               std::vector<Tensor>* out);

// As SetKernelOutput, but the output impl is a SelectedRows.
phi::SelectedRows* SetSelectedRowsKernelOutput(Backend backend, Tensor* out);

// As SetKernelOutput, but creates a sparse (COO/CSR) or dense impl per `type`.
phi::TensorBase* SetSparseKernelOutput(Tensor* out, TensorType type);

} // namespace experimental
} // namespace paddle
Original file line number Diff line number Diff line change
Expand Up @@ -12,39 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/sparse_api_custom_impl.h"

#include <memory>
#include "glog/logging.h"
#include "paddle/phi/api/lib/api_registry.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/infermeta/unary.h"

PD_DECLARE_KERNEL(dense_to_sparse_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(dense_to_sparse_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_coo, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(dense_to_sparse_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_csr, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_coo_to_dense, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sparse_csr_to_dense, GPU, ALL_LAYOUT);
#endif

namespace paddle {
namespace experimental {
namespace sparse {

PADDLE_API Tensor to_sparse_coo(const Tensor& x,
Backend backend,
const int64_t sparse_dim) {
Tensor to_sparse_coo_impl(const Tensor& x,
Backend backend,
const int64_t sparse_dim) {
if (x.layout() == phi::DataLayout::SPARSE_COO) {
return x;
}
Expand Down Expand Up @@ -105,7 +88,7 @@ PADDLE_API Tensor to_sparse_coo(const Tensor& x,
return out;
}

PADDLE_API Tensor to_sparse_csr(const Tensor& x, Backend backend) {
Tensor to_sparse_csr_impl(const Tensor& x, Backend backend) {
if (x.layout() == phi::DataLayout::SPARSE_CSR) {
return x;
}
Expand Down Expand Up @@ -171,7 +154,7 @@ PADDLE_API Tensor to_sparse_csr(const Tensor& x, Backend backend) {
return out;
}

PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) {
Tensor to_dense_impl(const Tensor& x, Backend backend) {
if (x.layout() != phi::DataLayout::SPARSE_CSR &&
x.layout() != phi::DataLayout::SPARSE_COO) {
return x;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@ namespace paddle {
namespace experimental {
namespace sparse {

PADDLE_API Tensor to_sparse_coo(const Tensor& x,
Backend backend,
const int64_t sparse_dim);
Tensor to_dense_impl(const Tensor& x, Backend backend);

PADDLE_API Tensor to_sparse_csr(const Tensor& x, Backend backend);
Tensor to_sparse_coo_impl(const Tensor& x,
Backend backend,
const int64_t sparse_dim);

PADDLE_API Tensor to_dense(const Tensor& x, Backend backend);
Tensor to_sparse_csr_impl(const Tensor& x, Backend backend);

} // namespace sparse
} // namespace experimental
Expand Down
4 changes: 3 additions & 1 deletion paddle/phi/kernels/sparse/cpu/convolution.h
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,9 @@ void ProductRuleBook(const Context& dev_ctx,

f_calc_rulebook(nullptr);
// alloc the rulebook
rulebook->ResizeAndAllocate({3, rulebook_len});
DenseTensorMeta rulebook_meta(
DataType::INT32, {3, rulebook_len}, DataLayout::NCHW);
rulebook->set_meta(rulebook_meta);
dev_ctx.Alloc(rulebook, rulebook->dtype(), rulebook->numel() * sizeof(int));
int* rulebook_ptr = rulebook->data<int>();
f_calc_rulebook(rulebook_ptr);
Expand Down
Loading