From 8db23c1739e3287c37e41ec577fc70f75894f1e8 Mon Sep 17 00:00:00 2001 From: Xiaoxu Chen Date: Thu, 8 Sep 2022 11:45:11 +0000 Subject: [PATCH] add ge_p abs_p primitive oparators --- .../fluid/operators/prim_ops/CMakeLists.txt | 4 +- paddle/fluid/operators/prim_ops/abs_p_op.cc | 71 +++++++++++ paddle/fluid/operators/prim_ops/ge_p_op.cc | 119 ++++++++++++++++++ paddle/fluid/operators/prim_ops/gt_p_op.cc | 119 ++++++++++++++++++ paddle/fluid/operators/prim_ops/ne_p_op.cc | 119 ++++++++++++++++++ .../fluid/operators/prim_ops/prim_op_test.cc | 8 +- .../auto_parallel/operators/__init__.py | 2 +- ...{dist_reduce_p.py => dist_reduce_sum_p.py} | 15 +-- .../auto_parallel/test_prim_dist_op.py | 2 +- .../autograd/test_jvp_and_transpose.py | 73 ++++++++++- .../unittests/autograd/test_orig2prim.py | 87 ++++++++++++- .../unittests/autograd/test_prim2orig.py | 43 ++++++- .../tests/unittests/autograd/test_primapi.py | 4 + .../tests/unittests/autograd/test_primops.py | 6 +- .../unittests/autograd/test_transform.py | 10 +- python/paddle/incubate/autograd/primops.py | 13 +- python/paddle/incubate/autograd/primrules.py | 45 ++++++- python/paddle/incubate/autograd/primx.py | 19 +-- 18 files changed, 716 insertions(+), 43 deletions(-) create mode 100644 paddle/fluid/operators/prim_ops/abs_p_op.cc create mode 100644 paddle/fluid/operators/prim_ops/ge_p_op.cc create mode 100644 paddle/fluid/operators/prim_ops/gt_p_op.cc create mode 100644 paddle/fluid/operators/prim_ops/ne_p_op.cc rename python/paddle/distributed/auto_parallel/operators/{dist_reduce_p.py => dist_reduce_sum_p.py} (92%) diff --git a/paddle/fluid/operators/prim_ops/CMakeLists.txt b/paddle/fluid/operators/prim_ops/CMakeLists.txt index 4cc4639ef63fb9..9d24cf89af494a 100644 --- a/paddle/fluid/operators/prim_ops/CMakeLists.txt +++ b/paddle/fluid/operators/prim_ops/CMakeLists.txt @@ -31,10 +31,12 @@ set(PRIM_OP_SRCS select_p_op.cc eq_p_op.cc gt_p_op.cc + ge_p_op.cc ne_p_op.cc pow_p_op.cc max_p_op.cc - erf_p_op.cc) + erf_p_op.cc + abs_p_op.cc) cc_test( prim_op_test diff --git a/paddle/fluid/operators/prim_ops/abs_p_op.cc b/paddle/fluid/operators/prim_ops/abs_p_op.cc new file mode 100644 index 00000000000000..8ad9d131689e70 --- /dev/null +++ b/paddle/fluid/operators/prim_ops/abs_p_op.cc @@ -0,0 +1,71 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace operators { +class AbsPrimOp : public framework::OperatorBase { + public: + AbsPrimOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + PADDLE_THROW(platform::errors::Unimplemented( + "Prim operator abs_p should not be excuted directly")); + } +}; + +class AbsPrimOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of abs_p op."); + AddOutput("Y", "(Tensor), The output tensor of abs_p op."); + AddComment(R"DOC(Autograd primitive abs_p operator.)DOC"); + } +}; + +class AbsPrimOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; + framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_var->GetShape()); + } +}; + +class AbsPrimOpVarTypeInference + : public framework::StaticGraphVarTypeInference { + public: + void operator()(framework::InferVarTypeContext *ctx) const override { + auto x_name = Input(ctx, "X")[0]; + auto y_name = Output(ctx, "Y")[0]; + SetType(ctx, y_name, GetType(ctx, x_name)); + SetDataType(ctx, y_name, GetDataType(ctx, x_name)); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(abs_p, + paddle::operators::AbsPrimOp, + paddle::operators::AbsPrimOpMaker, + paddle::operators::AbsPrimOpShapeInference, + paddle::operators::AbsPrimOpVarTypeInference); diff --git a/paddle/fluid/operators/prim_ops/ge_p_op.cc b/paddle/fluid/operators/prim_ops/ge_p_op.cc new file mode 100644 index 00000000000000..33fbd4cd71497f --- /dev/null +++ b/paddle/fluid/operators/prim_ops/ge_p_op.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace operators { +class GePrimOp : public framework::OperatorBase { + public: + GePrimOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + PADDLE_THROW(platform::errors::Unimplemented( + "Prim operator ge_p should not be excuted directly")); + } +}; + +class GePrimOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of ge_p op."); + AddInput("Y", "(Tensor), The input tensor of ge_p op."); + AddOutput("Z", "(Tensor), The output tensor of ge_p op."); + AddComment(R"DOC( +Autograd primitive ge_p operator. +)DOC"); + } +}; + +class GePrimOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; + framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; + framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; + + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); + auto x_shape = x_var->GetShape(); + auto y_shape = y_var->GetShape(); + size_t x_rank = x_shape.size(); + size_t y_rank = y_shape.size(); + PADDLE_ENFORCE_EQ(x_rank, + y_rank, + platform::errors::InvalidArgument( + "The dimensions of two input tensor should be same, " + "but get %d and %d", + x_rank, + y_rank)); + for (size_t i = 0; i < x_rank; ++i) { + PADDLE_ENFORCE_EQ( + x_shape[i], + y_shape[i], + platform::errors::InvalidArgument( + "The shape of two input tensor at dimension %d should be same, " + "but get %d and %d", + i, + x_shape[i], + y_shape[i])); + } + + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + } +}; + +class GePrimOpVarTypeInference : public framework::StaticGraphVarTypeInference { + public: + void operator()(framework::InferVarTypeContext *ctx) const override { + auto x_name = Input(ctx, "X")[0]; + auto y_name = Input(ctx, "Y")[0]; + auto z_name = Output(ctx, "Z")[0]; + auto x_type = GetType(ctx, x_name); + auto y_type = GetType(ctx, y_name); + auto x_dtype = GetDataType(ctx, x_name); + auto y_dtype = GetDataType(ctx, y_name); + PADDLE_ENFORCE_EQ(x_type, + y_type, + platform::errors::InvalidArgument( + "The type of two input tensor should be same, " + "but get %d and %d", + x_type, + y_type)); + PADDLE_ENFORCE_EQ(x_dtype, + y_dtype, + platform::errors::InvalidArgument( + "The datatype of two input tensor should be same, " + "but get %d and %d", + x_dtype, + y_dtype)); + + SetType(ctx, z_name, x_type); + SetDataType(ctx, z_name, framework::proto::VarType::BOOL); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(ge_p, + paddle::operators::GePrimOp, + paddle::operators::GePrimOpMaker, + paddle::operators::GePrimOpShapeInference, + paddle::operators::GePrimOpVarTypeInference); diff --git a/paddle/fluid/operators/prim_ops/gt_p_op.cc b/paddle/fluid/operators/prim_ops/gt_p_op.cc new file mode 100644 index 00000000000000..baacab62d8c3eb --- /dev/null +++ b/paddle/fluid/operators/prim_ops/gt_p_op.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2022 
PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace operators { +class GtPrimOp : public framework::OperatorBase { + public: + GtPrimOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + PADDLE_THROW(platform::errors::Unimplemented( + "Prim operator gt_p should not be excuted directly")); + } +}; + +class GtPrimOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of gt_p op."); + AddInput("Y", "(Tensor), The input tensor of gt_p op."); + AddOutput("Z", "(Tensor), The output tensor of gt_p op."); + AddComment(R"DOC( +Autograd primitive gt_p operator. +)DOC"); + } +}; + +class GtPrimOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; + framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; + framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; + + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); + auto x_shape = x_var->GetShape(); + auto y_shape = y_var->GetShape(); + size_t x_rank = x_shape.size(); + size_t y_rank = y_shape.size(); + PADDLE_ENFORCE_EQ(x_rank, + y_rank, + platform::errors::InvalidArgument( + "The dimensions of two input tensor should be same, " + "but get %d and %d", + x_rank, + y_rank)); + for (size_t i = 0; i < x_rank; ++i) { + PADDLE_ENFORCE_EQ( + x_shape[i], + y_shape[i], + platform::errors::InvalidArgument( + "The shape of two input tensor at dimension %d should be same, " + "but get %d and %d", + i, + x_shape[i], + y_shape[i])); + } + + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + } +}; + +class GtPrimOpVarTypeInference : public framework::StaticGraphVarTypeInference { + public: + void operator()(framework::InferVarTypeContext *ctx) const override { + auto x_name = Input(ctx, "X")[0]; + auto y_name = Input(ctx, "Y")[0]; + auto z_name = Output(ctx, "Z")[0]; + auto x_type = GetType(ctx, x_name); + auto y_type = GetType(ctx, y_name); + auto x_dtype = GetDataType(ctx, x_name); + auto y_dtype = GetDataType(ctx, y_name); + PADDLE_ENFORCE_EQ(x_type, + y_type, + platform::errors::InvalidArgument( + "The type of two input tensor should be same, " + "but get %d and %d", + x_type, + y_type)); + PADDLE_ENFORCE_EQ(x_dtype, + y_dtype, + platform::errors::InvalidArgument( + "The datatype of two input tensor should be same, " + "but get %d and %d", + x_dtype, + y_dtype)); 
+ + SetType(ctx, z_name, x_type); + SetDataType(ctx, z_name, framework::proto::VarType::BOOL); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(gt_p, + paddle::operators::GtPrimOp, + paddle::operators::GtPrimOpMaker, + paddle::operators::GtPrimOpShapeInference, + paddle::operators::GtPrimOpVarTypeInference); diff --git a/paddle/fluid/operators/prim_ops/ne_p_op.cc b/paddle/fluid/operators/prim_ops/ne_p_op.cc new file mode 100644 index 00000000000000..fac503309de1b7 --- /dev/null +++ b/paddle/fluid/operators/prim_ops/ne_p_op.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace operators { +class NePrimOp : public framework::OperatorBase { + public: + NePrimOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + PADDLE_THROW(platform::errors::Unimplemented( + "Prim operator ne_p should not be excuted directly")); + } +}; + +class NePrimOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of ne_p op."); + AddInput("Y", "(Tensor), The input tensor of ne_p op."); + AddOutput("Z", "(Tensor), The output tensor of ne_p op."); + AddComment(R"DOC( +Autograd primitive ne_p operator. 
+)DOC"); + } +}; + +class NePrimOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; + framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; + framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; + + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); + auto x_shape = x_var->GetShape(); + auto y_shape = y_var->GetShape(); + size_t x_rank = x_shape.size(); + size_t y_rank = y_shape.size(); + PADDLE_ENFORCE_EQ(x_rank, + y_rank, + platform::errors::InvalidArgument( + "The dimensions of two input tensor should be same, " + "but get %d and %d", + x_rank, + y_rank)); + for (size_t i = 0; i < x_rank; ++i) { + PADDLE_ENFORCE_EQ( + x_shape[i], + y_shape[i], + platform::errors::InvalidArgument( + "The shape of two input tensor at dimension %d should be same, " + "but get %d and %d", + i, + x_shape[i], + y_shape[i])); + } + + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + } +}; + +class NePrimOpVarTypeInference : public framework::StaticGraphVarTypeInference { + public: + void operator()(framework::InferVarTypeContext *ctx) const override { + auto x_name = Input(ctx, "X")[0]; + auto y_name = Input(ctx, "Y")[0]; + auto z_name = Output(ctx, "Z")[0]; + auto x_type = GetType(ctx, x_name); + auto y_type = GetType(ctx, y_name); + auto x_dtype = GetDataType(ctx, x_name); + auto y_dtype = GetDataType(ctx, y_name); + PADDLE_ENFORCE_EQ(x_type, + y_type, + platform::errors::InvalidArgument( + "The type of two input tensor should be same, " + "but get %d and %d", + x_type, + y_type)); + PADDLE_ENFORCE_EQ(x_dtype, + y_dtype, + platform::errors::InvalidArgument( + "The datatype of two input tensor should be same, " + "but get %d and %d", + x_dtype, + y_dtype)); + + SetType(ctx, z_name, x_type); + SetDataType(ctx, z_name, framework::proto::VarType::BOOL); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OPERATOR(ne_p, + paddle::operators::NePrimOp, + paddle::operators::NePrimOpMaker, + paddle::operators::NePrimOpShapeInference, + paddle::operators::NePrimOpVarTypeInference); diff --git a/paddle/fluid/operators/prim_ops/prim_op_test.cc b/paddle/fluid/operators/prim_ops/prim_op_test.cc index 44872f9060bfe3..153a4575463bc8 100644 --- a/paddle/fluid/operators/prim_ops/prim_op_test.cc +++ b/paddle/fluid/operators/prim_ops/prim_op_test.cc @@ -18,7 +18,7 @@ USE_OP_ITSELF(reshape_p); USE_OP_ITSELF(broadcast_p); -USE_OP_ITSELF(reduce_p); +USE_OP_ITSELF(reduce_sum_p); USE_OP_ITSELF(transpose_p); USE_OP_ITSELF(split_p); USE_OP_ITSELF(concat_p); @@ -130,7 +130,7 @@ TEST(PrimOp, broadcast_p) { ASSERT_EQ(shapes[2], 5L); } -TEST(PrimOp, reduce_p) { +TEST(PrimOp, reduce_sum_p) { ProgramDesc program; auto *block = program.MutableBlock(0); std::vector shape{3, 4, 5}; @@ -141,7 +141,7 @@ TEST(PrimOp, reduce_p) { NewVar(block, x0, shape); AppendOp(block, - "reduce_p", + "reduce_sum_p", {{"X", {x0}}}, {{"Y", {x1}}}, {{"axis", std::vector{0, 2}}, {"keepdim", false}}); @@ -151,7 +151,7 @@ TEST(PrimOp, reduce_p) { ASSERT_EQ(shapes.size(), 1UL); ASSERT_EQ(shapes[0], 4L); AppendOp(block, - "reduce_p", + "reduce_sum_p", {{"X", {x0}}}, {{"Y", {x2}}}, {{"axis", std::vector{0, 2}}, {"keepdim", true}}); diff --git a/python/paddle/distributed/auto_parallel/operators/__init__.py 
b/python/paddle/distributed/auto_parallel/operators/__init__.py index 295e3557df27d3..02b5138be21467 100644 --- a/python/paddle/distributed/auto_parallel/operators/__init__.py +++ b/python/paddle/distributed/auto_parallel/operators/__init__.py @@ -32,4 +32,4 @@ from . import dist_slice from . import dist_fused_feedforward from . import dist_fused_attention -from . import dist_reduce_p +from . import dist_reduce_sum_p diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reduce_p.py b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py similarity index 92% rename from python/paddle/distributed/auto_parallel/operators/dist_reduce_p.py rename to python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py index bdd105ef64c303..6b53b2eed7ad00 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_reduce_p.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py @@ -33,21 +33,21 @@ from ..utils import _get_comm_group, _get_corresponding_rank -class DistributedReducePrimtive(DistributedOperatorImplContainer): +class DistributedReduceSumPrimtive(DistributedOperatorImplContainer): def __init__(self, op_type): - super(DistributedReducePrimtive, self).__init__(op_type) + super(DistributedReduceSumPrimtive, self).__init__(op_type) register_distributed_operator_impl_container( - DistributedReducePrimtive("reduce_p")) + DistributedReduceSumPrimtive("reduce_sum_p")) -# Batch Dimension Reduce Primitive -class DistributedReducePrimtiveImpl0(DistributedOperatorImpl): +# Batch Dimension ReduceSum Primitive +class DistributedReduceSumPrimtiveImpl0(DistributedOperatorImpl): def __init__(self, name): - super(DistributedReducePrimtiveImpl0, self).__init__(name) + super(DistributedReduceSumPrimtiveImpl0, self).__init__(name) self._forward_implemented = True self._backward_implemented = True @@ -149,4 +149,5 @@ def backward(ctx, *args, **kwargs): register_distributed_operator_impl( - "reduce_p", DistributedReducePrimtiveImpl0("batch_dimension_reduce_p")) + "reduce_sum_p", + DistributedReduceSumPrimtiveImpl0("batch_dimension_reduce_sum_p")) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py index 67894f6dd93df9..69f92012c17efb 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_prim_dist_op.py @@ -78,7 +78,7 @@ def init_prog(self): outputs={'Z': self.w_grad}, attrs=self.attrs) - op = self.layer_help.append_op(type="reduce_p", + op = self.layer_help.append_op(type="reduce_sum_p", inputs={'X': self.tmp2}, outputs={'Y': self.batch_reduced}, attrs={"axis": [0]}) diff --git a/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py b/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py index 6cac9cd5dcd38a..76698a7a8b5fd0 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_jvp_and_transpose.py @@ -400,6 +400,39 @@ def init_data(self): ] +class TestAbsPJVPAndTranspose(TestAddPJVPAndTranspose): + + def init_data(self): + # Set prim op + self.op_type = 'abs_p' + X = paddle.static.data(name='X', shape=[5, 6], dtype='int64') + self.prim_input = { + 'X': X, + } + self.prim_output = { + 'Y': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.prim_attrs = {} + + # Set JVP + X_DOT = 
paddle.static.data(name='X_DOT', shape=[5, 6], dtype='int64') + self.jvp_args = (X_DOT, ) + self.jvp_out_shape_map = {0: self.prim_output['Y']} + + self.all_ops = [ + # prim op: + 'abs_p', + # jvp op: + 'select_p', + 'ge_p', + 'fill_constant_p', + 'fill_constant_p', + 'sub_p', + # transpose op: + ] + + class TestLogPJVPAndTranspose(TestAddPJVPAndTranspose): def init_data(self): @@ -503,7 +536,7 @@ def init_data(self): # jvp op: 'broadcast_p', # transpose op: - 'reduce_p', + 'reduce_sum_p', 'reshape_p' ] @@ -650,11 +683,11 @@ def init_data(self): ] -class TestReducePJVPAndTranspose(TestAddPJVPAndTranspose): +class TestReduceSumPJVPAndTranspose(TestAddPJVPAndTranspose): def init_data(self): # Set prim op - self.op_type = 'reduce_p' + self.op_type = 'reduce_sum_p' X = paddle.static.data(name='X', shape=[2, 3, 4, 5], dtype='float64') self.prim_input = {'X': X} self.prim_output = { @@ -682,9 +715,9 @@ def init_data(self): self.all_ops = [ # prim op: - 'reduce_p', + 'reduce_sum_p', # jvp op: - 'reduce_p', + 'reduce_sum_p', # transpose op: 'reshape_p', 'broadcast_p', @@ -1008,6 +1041,36 @@ def init_data(self): ] +class TestGePJVPAndTranspose(TestAddPJVPAndTranspose): + + def init_data(self): + # Set prim op + self.op_type = 'ge_p' + X = paddle.static.data(name='X', shape=[4, 5], dtype='float64') + Y = paddle.static.data(name='Y', shape=[4, 5], dtype='float64') + + self.prim_input = {'X': X, 'Y': Y} + self.prim_output = { + 'Z': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.prim_attrs = {} + + # Set JVP + X_DOT = paddle.static.data(name='X_DOT', shape=[4, 5], dtype='float64') + Y_DOT = paddle.static.data(name='Y_DOT', shape=[4, 5], dtype='float64') + self.jvp_args = (X_DOT, Y_DOT) + self.jvp_out_shape_map = {0: self.prim_output['Z']} + + self.all_ops = [ + # prim op: + 'ge_p', + # jvp op: + 'fill_constant_p', + # transpose op: + ] + + class TestNePJVPAndTranspose(TestAddPJVPAndTranspose): def init_data(self): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py b/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py index 056411f0e0d87b..3cb78b040430a1 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py @@ -228,6 +228,26 @@ def init_data(self): self.out_map = {0: self.output['Out']} +class TestAbsOrig2Prim(TestElementWiseAddOrig2Prim): + + def init_data(self): + self.op_type = 'abs' + X = paddle.static.data(name='X', shape=[3, 4], dtype='float') + + self.input = { + 'X': X, + } + self.output = { + 'Out': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.attrs = {} + + self.orig2prim_args = (X, ) + self.all_ops = ['abs', 'abs_p'] + self.out_map = {0: self.output['Out']} + + class TestLogOrig2Prim(TestElementWiseAddOrig2Prim): def init_data(self): @@ -381,7 +401,9 @@ def init_data(self): } self.orig2prim_args = (X, ) - self.all_ops = ['p_norm', 'reshape_p', 'sqrt_p', 'reduce_p', 'mul_p'] + self.all_ops = [ + 'p_norm', 'reshape_p', 'sqrt_p', 'reduce_sum_p', 'mul_p' + ] self.out_map = {0: self.output['Out']} @@ -404,7 +426,9 @@ def init_data(self): } self.orig2prim_args = (X, ) - self.all_ops = ['p_norm', 'reshape_p', 'sqrt_p', 'reduce_p', 'mul_p'] + self.all_ops = [ + 'p_norm', 'reshape_p', 'sqrt_p', 'reduce_sum_p', 'mul_p' + ] self.out_map = {0: self.output['Out']} @@ -577,6 +601,25 @@ def init_data(self): self.out_map = {0: self.output['Out']} +class TestGeOrig2Prim(TestElementWiseAddOrig2Prim): + + def 
init_data(self): + self.op_type = 'greater_equal' + X = paddle.static.data(name='X', shape=[5, 8], dtype='float') + Y = paddle.static.data(name='Y', shape=[5, 8], dtype='float') + + self.input = {'X': X, 'Y': Y} + self.output = { + 'Out': + self.layer_help.create_variable_for_type_inference(dtype='bool') + } + self.attrs = {} + self.orig2prim_args = (X, Y) + self.all_ops = ['greater_equal', 'ge_p'] + # { prim_op_output_index: orig_op_output_var } + self.out_map = {0: self.output['Out']} + + class TestPowOrig2Prim(TestElementWiseAddOrig2Prim): def init_data(self): @@ -662,5 +705,45 @@ def init_data(self): self.out_map = {0: self.output['Out']} +class TestReduceSumOrig2Prim(TestElementWiseAddOrig2Prim): + + def init_data(self): + self.op_type = 'reduce_sum' + X = paddle.static.data(name='X', shape=[5, 8], dtype='float') + + self.input = {'X': X} + self.output = { + 'Out': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.attrs = {'axis': [0, 1], 'keep_dim': False} + + self.orig2prim_args = (X, ) + self.all_ops = ['reduce_sum', 'reduce_sum_p'] + # { prim_op_output_index: orig_op_output_var } + self.out_map = {0: self.output['Out']} + + +class TestReduceMeanOrig2Prim(TestElementWiseAddOrig2Prim): + + def init_data(self): + self.op_type = 'reduce_mean' + X = paddle.static.data(name='X', shape=[5, 8], dtype='float') + + self.input = {'X': X} + self.output = { + 'Out': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.attrs = {'axis': [0, 1], 'keep_dim': False} + + self.orig2prim_args = (X, ) + self.all_ops = [ + 'reduce_mean', 'reduce_sum_p', 'fill_constant_p', 'div_p' + ] + # { prim_op_output_index: orig_op_output_var } + self.out_map = {0: self.output['Out']} + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py b/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py index e830ccba53d426..c173cc4790dc26 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_prim2orig.py @@ -244,6 +244,26 @@ def init_data(self): self.out_map = {self.output['Y']: 0} +class TestAbsPPrim2Orig(TestAddPPrim2Orig): + + def init_data(self): + self.op_type = 'abs_p' + X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') + + self.input = { + 'X': X, + } + self.output = { + 'Y': + self.layer_help.create_variable_for_type_inference(dtype=X.dtype) + } + self.attrs = {} + + self.prim2orig_args = (X, ) + self.all_ops = ['abs_p', 'abs'] + self.out_map = {self.output['Y']: 0} + + class TestLogPPrim2Orig(TestAddPPrim2Orig): def init_data(self): @@ -375,7 +395,7 @@ def init_data(self): class TestReducePPrim2Orig(TestAddPPrim2Orig): def init_data(self): - self.op_type = 'reduce_p' + self.op_type = 'reduce_sum_p' X = paddle.static.data(name='X', shape=[3, 9, 5], dtype='float64') self.input = {'X': X} @@ -386,7 +406,7 @@ def init_data(self): self.attrs = {'axis': [1], 'keepdim': True} self.prim2orig_args = (X, ) - self.all_ops = ['reduce_p', 'reduce_sum'] + self.all_ops = ['reduce_sum_p', 'reduce_sum'] self.out_map = {self.output['Y']: 0} @@ -593,6 +613,25 @@ def init_data(self): self.out_map = {self.output['Z']: 0} +class TestGePPrim2Orig(TestAddPPrim2Orig): + + def init_data(self): + self.op_type = 'ge_p' + X = paddle.static.data(name='X', shape=[7, 8], dtype='float64') + Y = paddle.static.data(name='Y', shape=[7, 8], dtype='float64') + + self.input = {'X': X, 'Y': Y} + self.output = { + 'Z': + 
self.layer_help.create_variable_for_type_inference(dtype='bool') + } + self.attrs = {} + + self.prim2orig_args = (X, Y) + self.all_ops = ['ge_p', 'greater_equal'] + self.out_map = {self.output['Z']: 0} + + class TestPowPPrim2Orig(TestAddPPrim2Orig): def init_data(self): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_primapi.py b/python/paddle/fluid/tests/unittests/autograd/test_primapi.py index c35ff7fea0f150..d010e69e75950d 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_primapi.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_primapi.py @@ -150,6 +150,8 @@ def without_program_guard(): (np.random.rand(3, 3), np.random.rand(3, 3)), (np.random.rand(3, 3), np.random.rand(3, 3)), 'float64'), ('log', paddle.log, (np.random.rand(3, 4), ), None, 'float32'), + ('abs', paddle.abs, (np.random.uniform(-10, 10, + (10, 10)), ), None, 'float32'), )) # paddle.where, paddle.pow, paddle.maximum has no double grad definition, # can not compute forward grad use double trick @@ -296,6 +298,8 @@ def test_illegal_param(self): ('mean_with_axis_keepdim', lambda x: paddle.mean(x, axis=0, keepdim=True), (np.random.rand(200, 345), ), None, 'float32'), + ('abs', paddle.abs, (np.random.uniform(-10, 10, + (200, 345)), ), None, 'float32'), )) class TestGrad(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_primops.py b/python/paddle/fluid/tests/unittests/autograd/test_primops.py index 5a043fa8f69865..ba6f094e680084 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_primops.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_primops.py @@ -42,6 +42,7 @@ ('cos', primops.cos, randn(2, 3), {}, (2, 3), 'float64'), ('exp', primops.exp, randn(2, 3), {}, (2, 3), 'float64'), ('erf', primops.erf, randn(2, 3), {}, (2, 3), 'float64'), + ('abs', primops.abs, randn(2, 3), {}, (2, 3), 'float64'), ('log', primops.log, randn(2, 3), {}, (2, 3), 'float64'), ('reshape', primops.reshape, randn(2, 3), { 'shape': (3, 2) @@ -58,10 +59,10 @@ ('concat_axis1', primops.concat, ((randn(2, 3), randn(2, 3)), ), { 'axis': 1 }, (2, 6), 'float64'), - ('reduce_axis1', primops.reduce, randn(2, 3), { + ('reduce_axis1', primops.reduce_sum, randn(2, 3), { 'axis': (1, ) }, (2, ), 'float64'), - ('reduce_axis01', primops.reduce, randn(2, 3), { + ('reduce_axis01', primops.reduce_sum, randn(2, 3), { 'axis': (0, 1) }, (1, ), 'float64'), ('split', primops.split, randn(2, 3), { @@ -101,6 +102,7 @@ ('eq', primops.eq, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('ne', primops.ne, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('gt', primops.gt, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), + ('ge', primops.ge, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'bool'), ('pow', primops.pow, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), ('max', primops.max, (randn(2, 3), randn(2, 3)), {}, (2, 3), 'float64'), )) diff --git a/python/paddle/fluid/tests/unittests/autograd/test_transform.py b/python/paddle/fluid/tests/unittests/autograd/test_transform.py index f976ef729cc7a0..6c0aa697550bc3 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_transform.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_transform.py @@ -290,8 +290,8 @@ def init_data(self): 'index_select' ] self.orig2prim_ops = [ - 'broadcast_p', 'add_p', 'reshape_p', 'mul_p', 'reduce_p', 'sqrt_p', - 'broadcast_p', 'sub_p', 'concat_p', 'gather_p' + 'broadcast_p', 'add_p', 'reshape_p', 'mul_p', 'reduce_sum_p', + 'sqrt_p', 'broadcast_p', 'sub_p', 'concat_p', 'gather_p' ] self.linearize_ops = 
self.orig2prim_ops + [ # call fill_const() in linearize() function @@ -306,7 +306,7 @@ def init_data(self): 'mul_p', 'mul_p', 'add_p', - 'reduce_p', + 'reduce_sum_p', 'fill_constant_p', # 'sqrt_p', Will not append sqrt_p op when apply JVP for sqrt_p 'mul_p', 'div_p', @@ -326,7 +326,7 @@ def init_data(self): 'fill_constant_p', 'mul_p', # transposed op - 'reduce_p', + 'reduce_sum_p', 'reshape_p', 'reshape_p', 'mul_p', @@ -334,7 +334,7 @@ def init_data(self): 'reshape_p', 'broadcast_p', 'div_p', - 'reduce_p', + 'reduce_sum_p', 'reshape_p', 'fill_constant_p', 'sub_p', diff --git a/python/paddle/incubate/autograd/primops.py b/python/paddle/incubate/autograd/primops.py index 78684aa0dadafd..dde3fb492cd97c 100644 --- a/python/paddle/incubate/autograd/primops.py +++ b/python/paddle/incubate/autograd/primops.py @@ -137,6 +137,11 @@ def exp(x, out=None): return _simple_unop(LayerHelper('exp_p', **locals())) +@REGISTER_FN('abs_p', 'X', 'Y') +def abs(x, out=None): + return _simple_unop(LayerHelper('abs_p', **locals())) + + @REGISTER_FN('reshape_p', 'X', 'Y') def reshape(x, shape, out=None): return _manipulation_unop(LayerHelper('reshape_p', **locals())) @@ -202,7 +207,6 @@ def reduce_sum(x, axis=None, keepdim=False, out=None): if not isinstance(keepdim, bool): raise TypeError(f'keepdim must be bool, but got {type(keepdim)}') - attrs = {'axis': axis, 'keepdim': keepdim} attrs = {'axis': axis, 'keepdim': keepdim} helper = LayerHelper('reduce_sum_p', **locals()) if out is None: @@ -351,10 +355,15 @@ def eq(x, y, out=None): @REGISTER_FN('gt_p', 'X', 'Y', 'Z') -def ge(x, y, out=None): +def gt(x, y, out=None): return _simple_binop(LayerHelper('gt_p', **locals())) +@REGISTER_FN('ge_p', 'X', 'Y', 'Z') +def ge(x, y, out=None): + return _simple_binop(LayerHelper('ge_p', **locals())) + + @REGISTER_FN('ne_p', 'X', 'Y', 'Z') def ne(x, y, out=None): return _simple_binop(LayerHelper('ne_p', **locals())) diff --git a/python/paddle/incubate/autograd/primrules.py b/python/paddle/incubate/autograd/primrules.py index d37fd6f7c68c44..326f61088171a9 100644 --- a/python/paddle/incubate/autograd/primrules.py +++ b/python/paddle/incubate/autograd/primrules.py @@ -20,9 +20,9 @@ from . 
import primops from .primops import (add, broadcast, concat, cos, div, eq, erf, exp, - fill_const, gather, log, matmul, max, mul, neg, - reduce_sum, reshape, scatter_add, select, set_value, sin, - slice_assign, slice_select, split, sqrt, sub, tanh, + fill_const, gather, ge, gt, log, matmul, max, mul, ne, + neg, reduce_sum, reshape, scatter_add, select, set_value, + sin, slice_assign, slice_select, split, sqrt, sub, tanh, transpose) from .primreg import (REGISTER_JVP, REGISTER_ORIG2PRIM, REGISTER_PRIM2ORIG, REGISTER_TRANSPOSE, lookup_fn, lookup_jvp, @@ -180,6 +180,11 @@ def erf_orig2prim(op, x): return erf(x) +@REGISTER_ORIG2PRIM('abs') +def abs_orig2prim(op, x): + return primops.abs(x) + + @REGISTER_ORIG2PRIM('log') def log_orig2prim(op, x): return log(x) @@ -331,6 +336,13 @@ def gt_orig2prim(op, x, y): return gt(x, y) +@REGISTER_ORIG2PRIM('greater_equal') +def ge_orig2prim(op, x, y): + if x.shape != y.shape: + y = broadcast(y, shape=x.shape) + return ge(x, y) + + @REGISTER_ORIG2PRIM('elementwise_pow') def elementwise_pow_orig2prim(op, x, y): if x.shape != y.shape: @@ -441,6 +453,11 @@ def erf_prim2orig(op, x): return paddle.erf(x) +@REGISTER_PRIM2ORIG('abs_p') +def abs_prim2orig(op, x): + return paddle.abs(x) + + @REGISTER_PRIM2ORIG('log_p') def log_prim2orig(op, x): return paddle.log(x) @@ -542,6 +559,11 @@ def gt_prim2orig(op, x, y): return paddle.greater_than(x, y) +@REGISTER_PRIM2ORIG('ge_p') +def ge_prim2orig(op, x, y): + return paddle.greater_equal(x, y) + + @REGISTER_PRIM2ORIG('ne_p') def ne_prim2orig(op, x, y): return paddle.not_equal(x, y) @@ -662,6 +684,14 @@ def erf_jvp(op, x_dot): mul(x_dot, exp(neg(primops.pow(x, fill_const(2., x.shape, x.dtype)))))) +@REGISTER_JVP('abs_p') +def abs_jvp(op, x_dot): + if x_dot is None: + return None + x, = op_position_inputs(op) + return select(ge(x, fill_const(0., x.shape, x.dtype)), x_dot, neg(x_dot)) + + @REGISTER_JVP('log_p') def log_jvp(op, x_dot): if x_dot is None: @@ -820,6 +850,15 @@ def gt_jvp(op, x_dot, y_dot): return z_dot +@REGISTER_JVP('ge_p') +def ge_jvp(op, x_dot, y_dot): + if x_dot is None and y_dot is None: + return None + x, _ = op_position_inputs(op) + z_dot = fill_const(value=0., shape=x.shape, dtype=x.dtype) + return z_dot + + @REGISTER_JVP('ne_p') def ne_jvp(op, x_dot, y_dot): if x_dot is None and y_dot is None: diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py index 19f87dd9292154..565fcb0b4ed836 100644 --- a/python/paddle/incubate/autograd/primx.py +++ b/python/paddle/incubate/autograd/primx.py @@ -12,18 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from collections import OrderedDict + import paddle -from paddle.fluid import framework as framework -from paddle.fluid.framework import default_main_program -from paddle.fluid.framework import Operator from paddle import compat as cpt -from .primops import fill_const, add -from .primreg import op_position_inputs, op_position_output, lookup_orig2prim, lookup_prim2orig -from .primrules import _orig2prim, _prim2orig, _jvp, _transpose -from .utils import get_input_var_list, get_output_var_list, flatten, flatten_and_remove_none -from collections import OrderedDict +from paddle.fluid import framework as framework +from paddle.fluid.framework import Operator, default_main_program from paddle.incubate.autograd.utils import as_tensors +from .primops import add, fill_const +from .primreg import (lookup_orig2prim, lookup_prim2orig, op_position_inputs, + op_position_output) +from .primrules import _jvp, _orig2prim, _prim2orig, _transpose +from .utils import (flatten, flatten_and_remove_none, get_input_var_list, + get_output_var_list) + def topo_path(xs, ys, block=None): """ Returns the list of ops on the path from `xs` to `ys` in topological
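Note on the new JVP rules (illustrative sketch only, not part of the patch): abs_p propagates the input tangent with its sign flipped wherever x < 0, i.e. select(ge(x, 0), x_dot, neg(x_dot)), while comparison primitives such as ge_p are piecewise constant and therefore get a zero tangent. A minimal NumPy rendering of that behaviour, using hypothetical standalone helpers rather than the Paddle prim ops:

    import numpy as np

    def abs_jvp(x, x_dot):
        # select(ge(x, 0), x_dot, neg(x_dot)): pass the tangent through,
        # negating it on the strictly negative part of the input.
        if x_dot is None:
            return None
        return np.where(x >= 0.0, x_dot, -x_dot)

    def ge_jvp(x, y, x_dot=None, y_dot=None):
        # ge_p is piecewise constant, so its tangent is a zero tensor
        # with the (elementwise-equal) input shape.
        if x_dot is None and y_dot is None:
            return None
        return np.zeros_like(x)

    x = np.array([-2.0, 0.0, 3.0])
    v = np.ones_like(x)
    print(abs_jvp(x, v))       # [-1.  1.  1.]
    print(ge_jvp(x, x, v, v))  # [0. 0. 0.]

As in the C++ shape and var-type inference added above, ge_p requires both operands to have identical shapes and dtypes and always emits a BOOL output in the primal program; the zero tangent only matters for linearization, and no transpose rule is registered for ge_p in this change.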