diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h
index ecf8119ed2a191..1f1ea5c005db7f 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -141,8 +141,22 @@ void HandleLargeDim(const framework::ExecutionContext& context,
 
   // transpose to 2D tensor whose shape is {unreduced, reduced}.
   const int64_t unreduced = output->numel();
-  const int64_t reduced = shuffled_input.numel() / unreduced;
+  const int64_t input_numel = shuffled_input.numel();
+  // assume: 0 / 0 == 0, which allow process 0 dim tensor
+  const int64_t reduced = (unreduced != 0) ? (input_numel / unreduced) : 0;
+
+  PADDLE_ENFORCE_EQ(
+      unreduced * reduced,
+      input_numel,
+      phi::errors::InvalidArgument(
+          "Reducing failed in HandleLargeDim, when try to transpose (%d) "
+          "operands into 2D tensor with shape (%d, %d).",
+          input_numel,
+          unreduced,
+          reduced));
+
   shuffled_input.Resize({unreduced, reduced});
+
   DDim output_dim = output->dims();
   output->Resize({unreduced});
   paddle::operators::ReduceFunctor<DeviceContext, OutT, 2, 1, Functor>(
@@ -163,7 +177,20 @@ void HandleLargeDimGrad(const framework::ExecutionContext& context,
                         Functor functor,
                         const std::vector<int>& dims) {
   const int64_t unreduced = out->numel();
-  const int64_t reduced = x->numel() / unreduced;
+  const int64_t x_numel = x->numel();
+  // assume: 0 / 0 == 0, which allow process 0 dim tensor
+  const int64_t reduced = (unreduced != 0) ? (x_numel / unreduced) : 0;
+
+  PADDLE_ENFORCE_EQ(
+      unreduced * reduced,
+      x_numel,
+      phi::errors::InvalidArgument(
+          "Reducing failed in HandleLargeDimGrad, when try to transpose (%d) "
+          "operands into 2D tensor with shape (%d, %d).",
+          x_numel,
+          unreduced,
+          reduced));
+
   DDim out_dim(out->dims());
   DDim x_dim(x->dims());
   // transpose and reshape X
diff --git a/paddle/phi/kernels/funcs/reduce_function.h b/paddle/phi/kernels/funcs/reduce_function.h
index b48f2eb4cdf2b2..0b9b852a7585d0 100644
--- a/paddle/phi/kernels/funcs/reduce_function.h
+++ b/paddle/phi/kernels/funcs/reduce_function.h
@@ -1228,8 +1228,22 @@ void HandleLargeDim(const DeviceContext& dev_ctx,
 
   // transpose to 2D tensor whose shape is {unreduced, reduced}.
   const int64_t unreduced = output->numel();
-  const int64_t reduced = shuffled_input.numel() / unreduced;
+  const int64_t input_numel = shuffled_input.numel();
+  // assume: 0 / 0 == 0, which allow process 0 dim tensor
+  const int64_t reduced = (unreduced != 0) ? (input_numel / unreduced) : 0;
+
+  PADDLE_ENFORCE_EQ(
+      unreduced * reduced,
+      input_numel,
+      phi::errors::InvalidArgument(
+          "Reducing failed in HandleLargeDim, when try to transpose (%d) "
+          "operands into 2D tensor with shape (%d, %d).",
+          input_numel,
+          unreduced,
+          reduced));
+
   shuffled_input.ResizeAndAllocate({unreduced, reduced});
+
   DDim output_dim = output->dims();
   output->ResizeAndAllocate({unreduced});
   ReduceFunctor<DeviceContext, T, 2, 1, Functor>(
diff --git a/paddle/phi/kernels/funcs/reduce_grad_functions.h b/paddle/phi/kernels/funcs/reduce_grad_functions.h
index 3ab7618adec48b..1b0f34b943d5af 100644
--- a/paddle/phi/kernels/funcs/reduce_grad_functions.h
+++ b/paddle/phi/kernels/funcs/reduce_grad_functions.h
@@ -87,7 +87,20 @@ void HandleLargeDimGrad(const Context& dev_ctx,
                         Functor functor,
                         const std::vector<int>& dims) {
   const int64_t unreduced = out->numel();
-  const int64_t reduced = x->numel() / unreduced;
+  const int64_t x_numel = x->numel();
+  // assume: 0 / 0 == 0, which allow process 0 dim tensor
+  const int64_t reduced = (unreduced != 0) ? (x_numel / unreduced) : 0;
+
+  PADDLE_ENFORCE_EQ(
+      unreduced * reduced,
+      x_numel,
+      phi::errors::InvalidArgument(
+          "Reducing failed in HandleLargeDimGrad, when try to transpose (%d) "
+          "operands into 2D tensor with shape (%d, %d).",
+          x_numel,
+          unreduced,
+          reduced));
+
   DDim out_dim(out->dims());
   DDim x_dim(x->dims());
   // transpose and reshape X
diff --git a/paddle/phi/kernels/reduce_min_kernel.cc b/paddle/phi/kernels/reduce_min_kernel.cc
index 660d3b753e97ea..c4c58c8342e600 100644
--- a/paddle/phi/kernels/reduce_min_kernel.cc
+++ b/paddle/phi/kernels/reduce_min_kernel.cc
@@ -26,6 +26,11 @@ void MinKernel(const Context& dev_ctx,
                bool keep_dim,
                DenseTensor* out) {
   bool reduce_all = recompute_reduce_all(x, dims);
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      errors::InvalidArgument("Zero-size tensor to reduction operation minimum "
+                              "which has no identity."));
   MinRawKernel<T, Context>(dev_ctx, x, dims, keep_dim, reduce_all, out);
 }
 
diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py
index c0e9803b140f01..70d41e11321693 100644
--- a/python/paddle/fluid/tests/unittests/test_min_op.py
+++ b/python/paddle/fluid/tests/unittests/test_min_op.py
@@ -20,6 +20,7 @@
 
 import paddle
 import paddle.fluid.core as core
+from paddle import fluid
 
 
 class ApiMinTest(unittest.TestCase):
@@ -117,5 +118,18 @@ def init_data(self):
         self.keepdim = True
 
 
+class TestMinAPIWithEmptyTensor(unittest.TestCase):
+    def test_empty_tensor(self):
+        with fluid.dygraph.guard():
+            with self.assertRaises(ValueError):
+                data = np.array([], dtype=np.float32)
+                data = np.reshape(data, [0, 0, 0, 0, 0, 0, 0])
+                x = paddle.to_tensor(data, dtype='float64')
+                np_axis = np.array([0], dtype='int64')
+                tensor_axis = paddle.to_tensor(np_axis, dtype='int64')
+
+                out = paddle.min(x, tensor_axis)
+
+
 if __name__ == '__main__':
     unittest.main()