[phi] move sigmoid_cross_entropy_with_logits log_loss cumsum auc infershape to phi #40200

Merged (7 commits, Mar 8, 2022)
Changes from 1 commit
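
Every op touched by this commit follows the same mechanical migration: the hand-written InferShape override in the fluid operator class is deleted, and shape inference is instead bound at registration time to a phi InferMeta function through the DECLARE_INFER_SHAPE_FUNCTOR and PD_INFER_META macros. A minimal before/after sketch of the pattern, using a hypothetical my_op for illustration:

// Before: shape inference lives inside the fluid operator class.
class MyOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};
REGISTER_OPERATOR(my_op, ops::MyOp, ops::MyOpMaker);

// After: the override is gone; a generated functor adapts the fluid
// InferShapeContext to the phi InferMeta function named in PD_INFER_META.
DECLARE_INFER_SHAPE_FUNCTOR(my_op, MyOpInferShapeFunctor,
                            PD_INFER_META(phi::MyOpInferMeta));
REGISTER_OPERATOR(my_op, ops::MyOp, ops::MyOpMaker,
                  MyOpInferShapeFunctor);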
20 changes: 7 additions & 13 deletions paddle/fluid/operators/cumsum_op.cc
@@ -12,26 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class CumOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext *ctx) const override {
if (ctx->Attrs().Get<bool>("flatten")) {
ctx->SetOutputDim("Out",
phi::make_ddim({phi::product(ctx->GetInputDim("X"))}));
} else {
ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
}

ctx->ShareLoD("X", /*->*/ "Out");
}
};

class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -87,10 +79,12 @@ class CumsumGradMaker : public framework::SingleGradOpMaker<T> {

namespace ops = paddle::operators;
using CPU = paddle::platform::CPUDeviceContext;

DECLARE_INFER_SHAPE_FUNCTOR(cumsum, CumsumInferShapeFunctor,
PD_INFER_META(phi::CumsumInferMeta));
REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker,
ops::CumsumGradMaker<paddle::framework::OpDesc>,
ops::CumsumGradMaker<paddle::imperative::OpBase>);
ops::CumsumGradMaker<paddle::imperative::OpBase>,
CumsumInferShapeFunctor);

REGISTER_OP_VERSION(cumsum)
.AddCheckpoint(
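The phi counterpart, phi::CumsumInferMeta, is declared in paddle/phi/infermeta/unary.h and is not part of this diff; a sketch of what it presumably contains, carrying over the deleted flatten logic (the parameter list is an assumption based on the cumsum op's attributes):

void CumsumInferMeta(const MetaTensor& x,
                     int axis,
                     bool flatten,
                     bool exclusive,
                     bool reverse,
                     MetaTensor* out) {
  auto x_dims = x.dims();
  if (flatten) {
    // Flattened cumsum treats the input as one long 1-D tensor.
    out->set_dims(phi::make_ddim({phi::product(x_dims)}));
  } else {
    out->set_dims(x_dims);
  }
  out->set_dtype(x.dtype());
  out->share_lod(x);
}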
45 changes: 7 additions & 38 deletions paddle/fluid/operators/log_loss_op.cc
@@ -13,51 +13,17 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/binary.h"

namespace paddle {
namespace operators {

class LogLossOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Predicted"), "Input", "Predicted", "LogLoss");
OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "LogLoss");

auto pred_dims = ctx->GetInputDim("Predicted");
auto label_dims = ctx->GetInputDim("Labels");

if (ctx->IsRuntime() ||
(phi::product(pred_dims) > 0 && phi::product(label_dims) > 0)) {
PADDLE_ENFORCE_EQ(
pred_dims, label_dims,
platform::errors::InvalidArgument(
"The dimensions of Input(Predicted) must be equal to the"
"dimensions of Input(Labels), but received dimensions of "
"Input(Predicted)"
"is [%s], received dimensions of Input(Labels) is [%s].",
pred_dims, label_dims));
}
PADDLE_ENFORCE_EQ(pred_dims.size(), 2,
platform::errors::InvalidArgument(
"The dimensions of Input(Predicted) must be 2,"
"But received dimensions of Input(Predicted)"
"is [%d]",
pred_dims.size()));
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(
pred_dims[1], 1,
platform::errors::InvalidArgument(
"Each row of Input(Predicted) contains a real value, "
"so the 2nd dimension of Input(X) must be 1,"
"But got [%d]",
pred_dims[1]));
}
ctx->SetOutputDim("Loss", {pred_dims[0], 1});
ctx->ShareLoD("Predicted", "Loss");
}
};

template <typename AttrType>
@@ -145,7 +111,10 @@ class LogLossGradMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle

namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(log_loss, LogLossInferShapeFunctor,
PD_INFER_META(phi::LogLossInferMeta));
REGISTER_OPERATOR(log_loss, ops::LogLossOp, ops::LogLossOpMaker<float>,
ops::LogLossGradMaker<paddle::framework::OpDesc>,
ops::LogLossGradMaker<paddle::imperative::OpBase>);
ops::LogLossGradMaker<paddle::imperative::OpBase>,
LogLossInferShapeFunctor);
REGISTER_OPERATOR(log_loss_grad, ops::LogLossGradOp);
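
The phi-side phi::LogLossInferMeta added in binary.cc further down is a near-verbatim port of the deleted InferShape; the translation between the two APIs is mechanical:

// Fluid InferShape idiom              ->  phi InferMeta idiom
// ctx->GetInputDim("Predicted")       ->  input.dims()
// ctx->IsRuntime()                    ->  config.is_runtime
// platform::errors::InvalidArgument   ->  phi::errors::InvalidArgument
// ctx->SetOutputDim("Loss", dims)     ->  out->set_dims(dims)
// ctx->ShareLoD("Predicted", "Loss")  ->  out->share_lod(input)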
72 changes: 7 additions & 65 deletions paddle/fluid/operators/metrics/auc_op.cc
@@ -12,7 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/multiary.h"

namespace paddle {
namespace operators {
@@ -21,70 +24,6 @@ class AucOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Predict"), "Input", "Predict", "Auc");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Auc");
auto predict_dims = ctx->GetInputDim("Predict");
auto label_dims = ctx->GetInputDim("Label");
PADDLE_ENFORCE_GE(
predict_dims.size(), 2,
platform::errors::InvalidArgument(
"The Input(Predict) has not been initialized properly. The "
"shape of Input(Predict) = [%s], the shape size must be "
"greater_equal 2.",
predict_dims));
auto predict_width = predict_dims[1];
PADDLE_ENFORCE_NE(
phi::product(predict_dims), 0,
platform::errors::InvalidArgument(
"The Input(Predict) has not been initialized properly. The "
"shape of Input(Predict) = [%s], the shape can not involes 0.",
predict_dims));
PADDLE_ENFORCE_NE(
phi::product(label_dims), 0,
platform::errors::InvalidArgument(
"The Input(Label) has not been initialized properly. The "
"shape of Input(Label) = [%s], the shape can not involes 0.",
label_dims));
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_LE(predict_width, 2,
platform::errors::InvalidArgument(
"Only support binary classification,"
"prediction dims[1] should be 1 or 2"));
}
auto predict_height = ctx->GetInputDim("Predict")[0];
auto label_height = ctx->GetInputDim("Label")[0];

if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(predict_height, label_height,
platform::errors::InvalidArgument(
"Out and Label should have same height."));
}

int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
int slide_steps = ctx->Attrs().Get<int>("slide_steps");

PADDLE_ENFORCE_GE(
num_pred_buckets, 1,
platform::errors::InvalidArgument("num_thresholds must larger than 1"));
PADDLE_ENFORCE_GE(slide_steps, 0,
platform::errors::InvalidArgument(
"slide_steps must be natural number"));

ctx->SetOutputDim("AUC", {1});

if (slide_steps) {
ctx->SetOutputDim("StatPosOut",
{(1 + slide_steps) * num_pred_buckets + 1});
ctx->SetOutputDim("StatNegOut",
{(1 + slide_steps) * num_pred_buckets + 1});
} else {
ctx->SetOutputDim("StatPosOut", {1, num_pred_buckets});
ctx->SetOutputDim("StatNegOut", {1, num_pred_buckets});
}
}

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
@@ -145,4 +84,7 @@ There are two types of possible curves:
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(auc, ops::AucOp, ops::AucOpMaker);
DECLARE_INFER_SHAPE_FUNCTOR(auc, AucInferShapeFunctor,
PD_INFER_META(phi::AucInferMeta));
REGISTER_OP_WITHOUT_GRADIENT(auc, ops::AucOp, ops::AucOpMaker,
AucInferShapeFunctor);
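
phi::AucInferMeta itself lives in paddle/phi/infermeta/multiary.h (the op has several inputs and outputs) and is not shown on this page; whatever its exact signature, it must reproduce the output shapes computed by the deleted fluid code, roughly:

// Sketch of the output-shape logic carried over from the deleted
// InferShape; the function name and parameter list here are illustrative
// only, not the real phi::AucInferMeta signature.
void AucOutputShapesSketch(int num_thresholds, int slide_steps,
                           phi::MetaTensor* auc,
                           phi::MetaTensor* stat_pos_out,
                           phi::MetaTensor* stat_neg_out) {
  int num_pred_buckets = num_thresholds + 1;
  auc->set_dims(phi::make_ddim({1}));
  if (slide_steps) {
    // Sliding-window statistics: one block per step plus the running total.
    stat_pos_out->set_dims(
        phi::make_ddim({(1 + slide_steps) * num_pred_buckets + 1}));
    stat_neg_out->set_dims(
        phi::make_ddim({(1 + slide_steps) * num_pred_buckets + 1}));
  } else {
    stat_pos_out->set_dims(phi::make_ddim({1, num_pred_buckets}));
    stat_neg_out->set_dims(phi::make_ddim({1, num_pred_buckets}));
  }
}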
50 changes: 9 additions & 41 deletions paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc
@@ -15,7 +15,10 @@ limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/binary.h"

namespace paddle {
namespace operators {
@@ -26,46 +29,6 @@ const int kIgnoreIndex = -100;
class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
"SigmoidCrossEntropyWithLogitsOp");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"SigmoidCrossEntropyWithLogitsOp");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
"SigmoidCrossEntropyWithLogitsOp");

auto x_dims = ctx->GetInputDim("X");
auto labels_dims = ctx->GetInputDim("Label");

int rank = x_dims.size();
PADDLE_ENFORCE_EQ(rank, labels_dims.size(),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same rank."
"But received: the rank of Input(X) is [%d], "
"the rank of Input(Label) is [%d].",
rank, labels_dims.size()));

bool check = true;
if ((!ctx->IsRuntime()) &&
(phi::product(x_dims) <= 0 || phi::product(labels_dims) <= 0)) {
check = false;
}

if (check) {
PADDLE_ENFORCE_EQ(
phi::slice_ddim(x_dims, 0, rank),
phi::slice_ddim(labels_dims, 0, rank),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same shape "
"except the last dimension. But received: the shape of "
"Input(X) is [%s], the shape of Input(Label) is [%s].",
x_dims, labels_dims));
}

ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
}
};

class SigmoidCrossEntropyWithLogitsGradOp
@@ -201,12 +164,17 @@ DECLARE_INPLACE_OP_INFERER(SigmoidCrossEntropyWithLogitsGradInplaceInferer,
} // namespace paddle

namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(
sigmoid_cross_entropy_with_logits,
SigmoidCrossEntropyWithLogitsInferShapeFunctor,
PD_INFER_META(phi::SigmoidCrossEntropyWithLogitsInferMeta));
REGISTER_OPERATOR(
sigmoid_cross_entropy_with_logits, ops::SigmoidCrossEntropyWithLogitsOp,
ops::SigmoidCrossEntropyWithLogitsOpMaker,
ops::SigmoidCrossEntropyWithLogitsGradOpMaker<paddle::framework::OpDesc>,
ops::SigmoidCrossEntropyWithLogitsGradOpMaker<paddle::imperative::OpBase>,
ops::SigmoidCrossEntropyWithLogitsInplaceInferer);
ops::SigmoidCrossEntropyWithLogitsInplaceInferer,
SigmoidCrossEntropyWithLogitsInferShapeFunctor);
REGISTER_OPERATOR(sigmoid_cross_entropy_with_logits_grad,
ops::SigmoidCrossEntropyWithLogitsGradOp,
ops::SigmoidCrossEntropyWithLogitsGradInplaceInferer);
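
With the logic in phi (see the binary.cc hunk below), the shape rule can be exercised without constructing a fluid op at all. A rough sketch, not taken from the PR, assuming phi::MetaTensor can wrap a DenseTensor the way phi unit tests do:

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/meta_tensor.h"
#include "paddle/phi/infermeta/binary.h"

void Demo() {
  phi::DenseTensor x, label, out;
  x.Resize(phi::make_ddim({16, 1}));
  label.Resize(phi::make_ddim({16, 1}));
  phi::MetaTensor x_meta(&x), label_meta(&label), out_meta(&out);
  // normalize and ignore_index are accepted for attribute parity but do not
  // affect the inferred shape; -100 mirrors kIgnoreIndex in the fluid op.
  phi::SigmoidCrossEntropyWithLogitsInferMeta(
      x_meta, label_meta, /*normalize=*/false, /*ignore_index=*/-100,
      &out_meta);
  // out_meta now mirrors x: dims {16, 1}, same dtype, shared LoD.
}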
84 changes: 84 additions & 0 deletions paddle/phi/infermeta/binary.cc
@@ -456,6 +456,90 @@ void BCELossInferMeta(const MetaTensor& input,
out->share_lod(input);
}

void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
const MetaTensor& label,
bool normalize,
int ignore_index,
MetaTensor* out,
MetaConfig config) {
auto x_dims = x.dims();
auto labels_dims = label.dims();

int rank = x_dims.size();
PADDLE_ENFORCE_EQ(rank,
labels_dims.size(),
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same rank."
"But received: the rank of Input(X) is [%d], "
"the rank of Input(Label) is [%d].",
rank,
labels_dims.size()));

bool check = true;
if ((!config.is_runtime) &&
(phi::product(x_dims) <= 0 || phi::product(labels_dims) <= 0)) {
check = false;
}

if (check) {
PADDLE_ENFORCE_EQ(
phi::slice_ddim(x_dims, 0, rank),
phi::slice_ddim(labels_dims, 0, rank),
phi::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same shape "
"except the last dimension. But received: the shape of "
"Input(X) is [%s], the shape of Input(Label) is [%s].",
x_dims,
labels_dims));
}

out->set_dims(x_dims);
out->set_dtype(x.dtype());
out->share_lod(x);
}

void LogLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float epsilon,
MetaTensor* out,
MetaConfig config) {
auto pred_dims = input.dims();
auto label_dims = label.dims();

if (config.is_runtime ||
(phi::product(pred_dims) > 0 && phi::product(label_dims) > 0)) {
PADDLE_ENFORCE_EQ(
pred_dims,
label_dims,
phi::errors::InvalidArgument(
"The dimensions of Input(Predicted) must be equal to the"
"dimensions of Input(Labels), but received dimensions of "
"Input(Predicted)"
"is [%s], received dimensions of Input(Labels) is [%s].",
pred_dims,
label_dims));
}
PADDLE_ENFORCE_EQ(pred_dims.size(),
2,
phi::errors::InvalidArgument(
"The dimensions of Input(Predicted) must be 2,"
"But received dimensions of Input(Predicted)"
"is [%d]",
pred_dims.size()));
if (config.is_runtime) {
PADDLE_ENFORCE_EQ(pred_dims[1],
1,
phi::errors::InvalidArgument(
"Each row of Input(Predicted) contains a real value, "
"so the 2nd dimension of Input(X) must be 1,"
"But got [%d]",
pred_dims[1]));
}
out->set_dims({pred_dims[0], 1});
out->set_dtype(input.dtype());
out->share_lod(input);
}

void DistInferMeta(const MetaTensor& x,
const MetaTensor& y,
float p,
13 changes: 13 additions & 0 deletions paddle/phi/infermeta/binary.h
@@ -85,6 +85,19 @@ void BCELossInferMeta(const MetaTensor& input,
MetaTensor* out,
MetaConfig config = MetaConfig());

void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
const MetaTensor& label,
bool normalize,
int ignore_index,
MetaTensor* out,
MetaConfig config = MetaConfig());

void LogLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float epsilon,
MetaTensor* out,
MetaConfig config = MetaConfig());

void DistInferMeta(const MetaTensor& x,
const MetaTensor& y,
float p,
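
Both declarations default the trailing MetaConfig, so call sites may omit it; passing a config with is_runtime set to false reproduces the compile-time behavior of the old InferShape, where runtime-only checks (such as the pred_dims[1] == 1 check in LogLossInferMeta) are skipped. A sketch, assuming the same MetaTensor setup as in the earlier example and that MetaConfig's is_runtime member defaults to true:

phi::MetaConfig config;
config.is_runtime = false;  // compile-time (static graph) shape inference
phi::LogLossInferMeta(pred_meta, label_meta, /*epsilon=*/1e-4f, &out_meta,
                      config);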