Skip to content

Commit

Permalink
[Zero-Dim] Support output 0D for to_tensor. (#52741)
Browse files Browse the repository at this point in the history
* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* test=allcase

* fix doc errors, test=allcase
  • Loading branch information
zhengqiwen1997 authored Apr 24, 2023
1 parent 83c2e68 commit 81fb7df
Show file tree
Hide file tree
Showing 29 changed files with 169 additions and 129 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/imperative.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1321,7 +1321,7 @@ void BindImperative(py::module *m_ptr) {
import paddle
x = paddle.to_tensor(1.0, stop_gradient=False)
x = paddle.to_tensor([1.0], stop_gradient=False)
detach_x = x.detach()
detach_x[:] = 10.0
print(x) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=False,
Expand Down
9 changes: 8 additions & 1 deletion python/paddle/audio/functional/window.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
import math
from typing import List, Tuple, Union

import numpy as np

import paddle
from paddle import Tensor

Expand All @@ -38,7 +40,12 @@ def get(self, name):

@window_function_register.register()
def _cat(x: List[Tensor], data_type: str) -> Tensor:
l = [paddle.to_tensor(_, data_type) for _ in x]
l = []
for t in x:
if np.isscalar(t) and not isinstance(t, str):
l.append(paddle.to_tensor([t], data_type))
else:
l.append(paddle.to_tensor(t, data_type))
return paddle.concat(l)


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ def recv_meta(self, group):

def _send_dims_shape_dtype(self, tensor, group):
# send len(shape)
dims = paddle.to_tensor(len(tensor.shape))
dims = paddle.to_tensor([len(tensor.shape)])
dst_rank = _hcg._get_p2p_next_rank()

paddle.distributed.send(dims, dst=dst_rank, group=group)
Expand All @@ -127,11 +127,11 @@ def _send_dims_shape_dtype(self, tensor, group):
paddle.distributed.send(shape, dst=dst_rank, group=group)

# send dtype
dtype = paddle.to_tensor(paddle_2_number(tensor.dtype))
dtype = paddle.to_tensor([paddle_2_number(tensor.dtype)])
paddle.distributed.send(dtype, dst=dst_rank, group=group)

# send trainable
stop_grad = paddle.to_tensor(int(tensor.stop_gradient))
stop_grad = paddle.to_tensor([int(tensor.stop_gradient)])
paddle.distributed.send(stop_grad, dst=dst_rank, group=group)

def send_meta(self, tensor, group):
Expand All @@ -148,7 +148,7 @@ def send_meta(self, tensor, group):
# send tensor type
paddle.distributed.send(tensor_type, dst=dst_rank, group=group)

nums = paddle.to_tensor(len(tensor))
nums = paddle.to_tensor([len(tensor)])
paddle.distributed.send(nums, dst=dst_rank, group=group)

for d in tensor:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def __init__(self):
self.step = 0

def forward(self, x):
return paddle.to_tensor(0.0, dtype='float32')
return paddle.to_tensor([0.0], dtype='float32')


def fake_sample_reader():
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_cholesky_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def test_dygraph(self):
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([x_data])
out = paddle.cholesky(x, upper=False)


Expand Down
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_deg2rad.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def setUp(self):
def test_dygraph(self):
paddle.disable_static()

x2 = paddle.to_tensor(180)
x2 = paddle.to_tensor([180])
result2 = paddle.deg2rad(x2)
np.testing.assert_allclose(np.pi, result2.numpy(), rtol=1e-05)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ def test_ops_elementwise_mul(self):
def test_tensor_gradient(self):
paddle.__version__ = '2.1.0'

x = paddle.to_tensor(5.0, stop_gradient=False)
x = paddle.to_tensor([5.0], stop_gradient=False)
y = paddle.pow(x, 4.0)
y.backward()

Expand Down
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_einsum_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,7 @@ def test_shape(self):
B = paddle.to_tensor(np.array([2.0, 3.0])).astype(paddle.bfloat16)
B = B.cuda()
C = paddle.einsum('i,i->', A, B)
D = paddle.to_tensor(8.0).astype(paddle.bfloat16)
D = paddle.to_tensor([8.0]).astype(paddle.bfloat16)
self.assertEqual(C.item(), D.item())


Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_inplace.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,11 +259,11 @@ def inplace_api_processing(self, var):

class TestDygraphInplaceReshapeTensor(TestDygraphInplace):
def non_inplace_api_processing(self, var):
shape = paddle.to_tensor(-1)
shape = paddle.to_tensor([-1])
return paddle.reshape(var, shape)

def inplace_api_processing(self, var):
shape = paddle.to_tensor(-1)
shape = paddle.to_tensor([-1])
return paddle.reshape_(var, shape)


Expand Down
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_jit_save_load.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,7 @@ def __init__(self, in_size, out_size):
super().__init__()
self._linear_0 = Linear(in_size, out_size)
self._linear_1 = Linear(in_size, out_size)
self._scale = paddle.to_tensor(9.9)
self._scale = paddle.to_tensor([9.9])

@paddle.jit.to_static
def forward(self, x):
Expand Down Expand Up @@ -1196,7 +1196,7 @@ def __init__(self, in_size, out_size):
self._linear_1_0 = Linear(self.hidden, self.hidden)
self._linear_1_1 = Linear(self.hidden, self.hidden)
self._linear_2 = Linear(self.hidden, out_size)
self._scale = paddle.to_tensor(9.9)
self._scale = paddle.to_tensor([9.9])

@paddle.jit.to_static
def forward(self, x):
Expand Down Expand Up @@ -1319,7 +1319,7 @@ def __init__(self, in_size, out_size, load_path):
self._linear_1_0 = Linear(out_size, in_size)
self._linear_1_1 = Linear(out_size, in_size)
self._linear_2 = Linear(out_size, out_size)
self._scale = paddle.to_tensor(9.9)
self._scale = paddle.to_tensor([9.9])

# Load multiple times
self._load_l1 = paddle.jit.load(load_path)
Expand Down
66 changes: 33 additions & 33 deletions python/paddle/fluid/tests/unittests/test_lbfgs_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,64 +208,64 @@ def error_func():

def test_line_search(self):
def func1(x, alpha, d):
return paddle.to_tensor(x + alpha * d), paddle.to_tensor(0.0)
return paddle.to_tensor(x + alpha * d), paddle.to_tensor([0.0])

def func2(x, alpha, d):
return paddle.to_tensor(x + alpha * d), paddle.to_tensor(1.0)
return paddle.to_tensor(x + alpha * d), paddle.to_tensor([1.0])

def func3(x, alpha, d):
return paddle.to_tensor(x + alpha * d), paddle.to_tensor(-1.0)
return paddle.to_tensor(x + alpha * d), paddle.to_tensor([-1.0])

_strong_wolfe(
func1,
paddle.to_tensor(1.0),
paddle.to_tensor(0.001),
paddle.to_tensor(0.0),
paddle.to_tensor(1.0),
paddle.to_tensor(0.0),
paddle.to_tensor(0.0),
paddle.to_tensor([1.0]),
paddle.to_tensor([0.001]),
paddle.to_tensor([0.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([0.0]),
paddle.to_tensor([0.0]),
max_ls=0,
)

_strong_wolfe(
func2,
paddle.to_tensor(1.0),
paddle.to_tensor(-0.001),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor([1.0]),
paddle.to_tensor([-0.001]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
max_ls=1,
)

_strong_wolfe(
func3,
paddle.to_tensor(1.0),
paddle.to_tensor(-0.001),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor([1.0]),
paddle.to_tensor([-0.001]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
max_ls=1,
)

_cubic_interpolate(
paddle.to_tensor(2.0),
paddle.to_tensor(1.0),
paddle.to_tensor(0.0),
paddle.to_tensor(1.0),
paddle.to_tensor(2.0),
paddle.to_tensor(0.0),
paddle.to_tensor([2.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([0.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([2.0]),
paddle.to_tensor([0.0]),
[0.1, 0.5],
)

_cubic_interpolate(
paddle.to_tensor(2.0),
paddle.to_tensor(0.0),
paddle.to_tensor(-3.0),
paddle.to_tensor(1.0),
paddle.to_tensor(1.0),
paddle.to_tensor(-0.1),
paddle.to_tensor([2.0]),
paddle.to_tensor([0.0]),
paddle.to_tensor([-3.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([1.0]),
paddle.to_tensor([-0.1]),
[0.1, 0.5],
)

Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_rad2deg.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ def setUp(self):
def test_dygraph(self):
paddle.disable_static()

x2 = paddle.to_tensor(np.pi / 2)
x2 = paddle.to_tensor([np.pi / 2])
result2 = paddle.rad2deg(x2)
np.testing.assert_allclose(90, result2.numpy(), rtol=1e-05)

Expand All @@ -91,7 +91,7 @@ def setUp(self):
def test_dygraph(self):
paddle.disable_static()

x2 = paddle.to_tensor(1)
x2 = paddle.to_tensor([1])
result2 = paddle.rad2deg(x2)
np.testing.assert_allclose(180 / np.pi, result2.numpy(), rtol=1e-05)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ class TestStateDictReturn(unittest.TestCase):
def test_missing_keys_and_unexpected_keys(self):
model1 = MyModel2()
tmp_dict = {}
tmp_dict["unexpected_keys"] = paddle.to_tensor(1)
tmp_dict["unexpected_keys"] = paddle.to_tensor([1])
missing_keys, unexpected_keys = model1.set_state_dict(tmp_dict)
self.assertEqual(len(missing_keys), 2)
self.assertEqual(missing_keys[0], "linear.weight")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,7 @@ def test_register_backward_hook(self):
global HOOK_INIT_VALUE
global HOOK_IS_CALLED
for device in self.devices:
x = paddle.to_tensor(5.0, stop_gradient=False)
x = paddle.to_tensor([5.0], stop_gradient=False)
x._register_backward_hook(global_void_hook)
for i in range(5):
y = paddle.pow(x, 4.0)
Expand All @@ -567,14 +567,14 @@ def test_register_backward_hook(self):
HOOK_IS_CALLED = False

def test_register_backward_hook_for_interior_var(self):
x = paddle.to_tensor(5.0, stop_gradient=False)
x = paddle.to_tensor([5.0], stop_gradient=False)
y = paddle.pow(x, 4.0)

with self.assertRaises(ValueError):
y._register_backward_hook(global_void_hook)

def test_register_backward_hook_for_var_without_gradient(self):
x = paddle.to_tensor(5.0)
x = paddle.to_tensor([5.0])
y = paddle.pow(x, 4.0)

with self.assertRaises(ValueError):
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_var_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def check_with_place(place):
)
np.testing.assert_array_equal(x.numpy(), [1.0])
self.assertEqual(x.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(x.shape, [1])
self.assertEqual(x.shape, [])
self.assertEqual(x.stop_gradient, False)
self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR)

Expand Down Expand Up @@ -407,7 +407,7 @@ def test_leaf_tensor(self):

def test_detach(self):
with fluid.dygraph.guard():
x = paddle.to_tensor(1.0, dtype="float64", stop_gradient=False)
x = paddle.to_tensor([1.0], dtype="float64", stop_gradient=False)
detach_x = x.detach()
self.assertTrue(detach_x.stop_gradient, True)

Expand Down
27 changes: 27 additions & 0 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -2385,6 +2385,20 @@ def body(i, x):
self.assertEqual(x.grad.shape, [])
np.testing.assert_allclose(x.grad, np.array(1.0))

def test_to_tensor(self):
out1 = paddle.to_tensor(1)
out2 = paddle.to_tensor(2.5)

out1.retain_grads()
out1.backward()
out2.retain_grads()
out2.backward()

self.assertEqual(out1.shape, [])
self.assertEqual(out1, 1)
self.assertEqual(out2.shape, [])
self.assertEqual(out2, 2.5)

def test_linalg_slogdet(self):
# 2-D input
x = paddle.randn([3, 3])
Expand Down Expand Up @@ -4355,6 +4369,19 @@ def test_broadcast_tensors(self):
self.assertEqual(out1.shape, (2, 3))
self.assertEqual(out2.shape, (2, 3))

@prog_scope()
def test_to_tensor(self):
out1 = paddle.to_tensor(1)
out2 = paddle.to_tensor(2.5)

prog = paddle.static.default_main_program()
res = self.exe.run(prog, fetch_list=[out1, out2])

self.assertEqual(res[0].shape, ())
self.assertEqual(res[0], 1)
self.assertEqual(res[1].shape, ())
self.assertEqual(res[1], 2.5)

@prog_scope()
def test_linalg_slogdet(self):
# 2-D input
Expand Down
Loading

0 comments on commit 81fb7df

Please sign in to comment.