diff --git a/paddle/phi/kernels/impl/lu_kernel_impl.h b/paddle/phi/kernels/impl/lu_kernel_impl.h
index ed3cc0801d9af5..1b65ac87e075b1 100644
--- a/paddle/phi/kernels/impl/lu_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lu_kernel_impl.h
@@ -516,6 +516,14 @@ DenseTensor Transpose2DTo6D(const Context& dev_ctx, const DenseTensor& x) {
   auto x_dim = x.dims();
   auto x_vec = phi::vectorize<int>(x_dim);
   int rank = x_vec.size();
+
+  for (int i = 0; i < x_dim.size(); i++) {
+    PADDLE_ENFORCE_LT(0,
+                      x_dim[i],
+                      errors::InvalidArgument(
+                          "The dims of Input(X) should be greater than 0."));
+  }
+
   std::swap(x_vec[rank - 1], x_vec[rank - 2]);
   std::vector<int> out_shape = x_vec;
   std::vector<int> axis(rank);
diff --git a/python/paddle/fluid/tests/unittests/test_lu_op.py b/python/paddle/fluid/tests/unittests/test_lu_op.py
index 790ebb36f6d7c2..3e083c76b71df5 100644
--- a/python/paddle/fluid/tests/unittests/test_lu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lu_op.py
@@ -303,6 +303,20 @@ def run_lu_static(shape, dtype):
                 run_lu_static(tensor_shape, dtype)
 
 
+class TestLUAPIError(unittest.TestCase):
+    def test_errors(self):
+        with paddle.fluid.dygraph.guard():
+            # The size of input in lu should not be 0.
+            def test_0_size():
+                array = np.array([], dtype=np.float32)
+                x = paddle.to_tensor(
+                    np.reshape(array, [0, 0, 0]), dtype='float32'
+                )
+                paddle.linalg.lu(x, get_infos=True)
+
+            self.assertRaises(ValueError, test_0_size)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
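
For reference, a quick way to exercise the new guard by hand, outside the unit test. This is a minimal sketch; it assumes the PADDLE_ENFORCE_LT failure surfaces in dygraph mode as a ValueError, which is what the test above expects.

    import numpy as np
    import paddle

    # Run eagerly; the kernel-side dimension check added above should reject
    # any input with a 0-size dimension before the transpose is attempted.
    paddle.disable_static()

    x = paddle.to_tensor(np.empty([0, 0, 0], dtype=np.float32))
    try:
        paddle.linalg.lu(x, get_infos=True)
    except ValueError as err:
        print("0-size input rejected as expected:", err)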