From a81b6236da0cbe09e147ebdd3bc83a5f9ffcdd53 Mon Sep 17 00:00:00 2001
From: pidack
Date: Fri, 3 Mar 2023 05:35:48 +0000
Subject: [PATCH 1/9] elementwise_max fp16 support

---
 .../unittests/test_elementwise_max_op.py      | 31 +++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index f6d1774068738..9a7f08e09d89c 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -57,6 +57,17 @@ def test_check_grad_ingore_y(self):
         )
 
 
+class TestElementwiseFp16Op(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
+        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -67,6 +78,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFp16Op_ZeroDim1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float16")
+        y = np.random.uniform(0.1, 1, []).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -77,6 +98,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFp16Op_ZeroDim2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        y = np.random.uniform(0.1, 1, []).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"

From 6b660ddfc1dceabbf44e2b4539d24212f4053641 Mon Sep 17 00:00:00 2001
From: pidack
Date: Fri, 3 Mar 2023 07:06:32 +0000
Subject: [PATCH 2/9] add bf16 support for elementwise_max

---
 .../unittests/test_elementwise_max_op.py      | 257 +++++++++++++++++-
 1 file changed, 254 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 9a7f08e09d89c..0c9f04e635cd8 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -57,7 +57,7 @@ def test_check_grad_ingore_y(self):
         )
 
 
-class TestElementwiseFp16Op(TestElementwiseOp):
+class TestElementwiseFP16Op(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
@@ -78,7 +78,7 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFp16Op_ZeroDim1(TestElementwiseOp):
+class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
@@ -98,7 +98,7 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFp16Op_ZeroDim2(TestElementwiseOp):
+class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
@@ -118,6 +118,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float16")
+        y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 @unittest.skipIf(
     core.is_compiled_with_cuda()
     and (
@@ -162,6 +172,59 @@ def test_check_grad_ingore_y(self):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 
 
+class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float32")
+        y = np.random.uniform(0.1, 1, []).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseBF16MaxOp_ZeroDim2(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        y = np.random.uniform(0.1, 1, []).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseBF16MaxOp_ZeroDim3(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float32")
+        y = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
+        y = np.array([0.5]).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+        self.__class__.no_need_check_grad = True
+
+
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
@@ -175,6 +238,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
+        y = np.array([0.5]).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -186,6 +259,31 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random((100,)).astype("float16")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
+        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
+class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random((100,)).astype("float32")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
+        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -205,6 +303,44 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_0(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 0}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(100, 1, 1)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 0}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -224,6 +360,44 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_1(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 100, 1)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -242,6 +416,43 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_2(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 1, 100)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -261,6 +472,46 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_3(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(
+                np.maximum(x, y.reshape(1, 50, 2, 1))
+            )
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
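Note on the bf16 tests added in patch 2: inputs go through convert_float_to_uint16 because bfloat16 tensors are carried as uint16 bit patterns on the Python side of these tests. As a rough NumPy sketch of what such a helper computes (the function name below is illustrative, and plain truncation is assumed; Paddle's actual helper may round the low mantissa bits):

    import numpy as np

    def float32_to_bf16_bits(arr):
        # bfloat16 is the high half of an IEEE-754 float32: same sign bit and
        # 8-bit exponent, mantissa cut down to 7 bits. Dropping the low 16
        # bits of each float32 word therefore yields the uint16 payload that
        # the bf16 kernels consume.
        bits = np.asarray(arr, dtype=np.float32).view(np.uint32)
        return (bits >> 16).astype(np.uint16)

Only two to three decimal digits of mantissa survive this conversion, which is why the bf16 variants need looser numeric checks than the fp32 tests.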
From ade5cccae86e2684f1550cb04e0c7c4910f9dd28 Mon Sep 17 00:00:00 2001
From: pidack
Date: Fri, 3 Mar 2023 07:33:00 +0000
Subject: [PATCH 3/9] append broadcast_4 op for fp16 / bf16

---
 .../unittests/test_elementwise_max_op.py      | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 0c9f04e635cd8..d5b5cf3e51201 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -524,5 +524,31 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
+class TestElementwiseMaxBF16Op_broadcast_4(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
 if __name__ == '__main__':
     unittest.main()

From 00c18b506d09774879da43c8e5ecb5f4b1f32b80 Mon Sep 17 00:00:00 2001
From: pidack
Date: Mon, 6 Mar 2023 07:28:49 +0000
Subject: [PATCH 4/9] fix elementwise_max ut bf16 numeric delta

---
 .../unittests/test_elementwise_max_op.py      | 316 +-----------------
 1 file changed, 6 insertions(+), 310 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index d5b5cf3e51201..1e6d85c275902 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -57,17 +57,6 @@ def test_check_grad_ingore_y(self):
         )
 
 
-class TestElementwiseFP16Op(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
-        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -78,16 +67,6 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float16")
-        y = np.random.uniform(0.1, 1, []).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -98,16 +77,6 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
-        y = np.random.uniform(0.1, 1, []).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -118,16 +87,6 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float16")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
 @unittest.skipIf(
     core.is_compiled_with_cuda()
     and (
@@ -161,9 +120,13 @@ def test_check_output(self):
 
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.01, check_eager=False
+            )
         else:
-            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.01, check_eager=True
+            )
 
     def test_check_grad_ingore_x(self):
         self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
@@ -172,59 +135,6 @@ def test_check_grad_ingore_y(self):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 
 
-class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float32")
-        y = np.random.uniform(0.1, 1, []).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
-class TestElementwiseBF16MaxOp_ZeroDim2(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        y = np.random.uniform(0.1, 1, []).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
-class TestElementwiseBF16MaxOp_ZeroDim3(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float32")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
-class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
-        y = np.array([0.5]).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-        self.__class__.no_need_check_grad = True
-
-
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
@@ -238,16 +148,6 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
-        y = np.array([0.5]).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -259,31 +159,6 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random((100,)).astype("float16")
-        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
-class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random((100,)).astype("float32")
-        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -303,44 +178,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_0(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 0}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(100, 1, 1)))
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 0}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -360,44 +197,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_1(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 100, 1)))
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -416,43 +215,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_2(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 1, 100)))
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
-        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -472,46 +234,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_3(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(
-                np.maximum(x, y.reshape(1, 50, 2, 1))
-            )
-        }
-
-
-class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-            np.float16
-        )
-        self.inputs = {'X': x, 'Y': y}
-
-        self.attrs = {'axis': 1}
-        self.outputs = {
-            'Out': np.maximum(
-                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
-            )
-        }
-
-
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -524,31 +246,5 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
-        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-
-
-class TestElementwiseMaxBF16Op_broadcast_4(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
-        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
 if __name__ == '__main__':
     unittest.main()
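Note on patch 4: numeric_grad_delta (here 0.01, retuned to 0.05 in later patches) is the step size of the finite-difference gradient that the OpTest framework compares the analytic gradient against. A minimal sketch of that kind of check, assuming f maps an array to a scalar and using the usual central difference (the helper name is illustrative, not OpTest's internal one):

    import numpy as np

    def numeric_grad(f, x, delta):
        # Central difference: df/dx_i ~= (f(x + d*e_i) - f(x - d*e_i)) / (2*d).
        # With bf16 inputs a very small delta is swallowed by rounding error,
        # which is why these tests widen it instead of using the default.
        grad = np.zeros_like(x, dtype=np.float64)
        it = np.nditer(x, flags=['multi_index'])
        while not it.finished:
            i = it.multi_index
            orig = x[i]
            x[i] = orig + delta
            hi = f(x)
            x[i] = orig - delta
            lo = f(x)
            x[i] = orig
            grad[i] = (hi - lo) / (2.0 * delta)
            it.iternext()
        return grad

    # e.g. checking d/dx sum(max(x, y)) against the indicator (x > y):
    # numeric_grad(lambda a: np.maximum(a, y).sum(), x.copy(), 0.05)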
From 8d2427de03d1c69eaac105c5ef6cb098b00b9c17 Mon Sep 17 00:00:00 2001
From: pidack
Date: Thu, 9 Mar 2023 07:46:21 +0000
Subject: [PATCH 5/9] append fp/bf16 uts

---
 .../unittests/test_elementwise_max_op.py      | 330 +++++++++++++++++-
 1 file changed, 324 insertions(+), 6 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 1e6d85c275902..6abe7d276ccc9 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -48,15 +48,32 @@ def test_check_grad_normal(self):
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")
+            ['Y'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set("X"),
         )
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=0.005,
+            no_grad_set=set('Y'),
         )
 
 
+class TestElementwiseFP16Op(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
+        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -67,6 +84,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float16")
+        y = np.random.uniform(0.1, 1, []).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -77,6 +104,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        y = np.random.uniform(0.1, 1, []).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -87,6 +124,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float16")
+        y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 @unittest.skipIf(
     core.is_compiled_with_cuda()
     and (
@@ -121,18 +168,75 @@ def test_check_output(self):
 
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
             self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.01, check_eager=False
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
             )
         else:
             self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.01, check_eager=True
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
             )
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad(
+            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
+        )
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(
+            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
+        )
+
+
+class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float32")
+        y = np.random.uniform(0.1, 1, []).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseBF16MaxOp_ZeroDim2(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        y = np.random.uniform(0.1, 1, []).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseBF16MaxOp_ZeroDim3(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.1, 1, []).astype("float32")
+        y = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
+class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
+        y = np.array([0.5]).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+        self.__class__.no_need_check_grad = True
 
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
@@ -148,6 +252,16 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
+        y = np.array([0.5]).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -159,6 +273,31 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random((100,)).astype("float16")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
+        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float16")
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
+class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.random((100,)).astype("float32")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
+        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float32")
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -178,6 +317,44 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_0(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 0}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(100, 1, 1)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 0}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -197,6 +374,44 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_1(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 100, 1)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -215,6 +430,43 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_2(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
+        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 1, 100)))
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -234,6 +486,46 @@ def setUp(self):
         }
 
 
+class TestElementwiseMaxBF16Op_broadcast_3(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float32
+        )
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(
+                np.maximum(x, y.reshape(1, 50, 2, 1))
+            )
+        }
+
+
+class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -246,5 +538,31 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
+class TestElementwiseMaxBF16Op_broadcast_4(TestElementwiseBF16Op):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
+        self.attrs = {'axis': 1}
+        self.inputs = {
+            'X': convert_float_to_uint16(x),
+            'Y': convert_float_to_uint16(y),
+        }
+        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+
+
 if __name__ == '__main__':
     unittest.main()

From c6c2829a634f7f02fcb5b12824c59258ddd3337a Mon Sep 17 00:00:00 2001
From: pidack
Date: Thu, 9 Mar 2023 09:16:01 +0000
Subject: [PATCH 6/9] add fp/bf16 uts

---
 .../unittests/test_elementwise_max_op.py      | 124 +-----------------
 1 file changed, 3 insertions(+), 121 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 6abe7d276ccc9..e16c09b40c5f8 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -137,8 +137,9 @@ def setUp(self):
 @unittest.skipIf(
     core.is_compiled_with_cuda()
     and (
-        core.cudnn_version() < 8100
-        or paddle.device.cuda.get_device_capability()[0] < 8
+        core.cudnn_version()
+        < 8100
+        # or paddle.device.cuda.get_device_capability()[0] < 8
     ),
     "run test when gpu is available and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.",
 )
@@ -199,32 +200,6 @@ def setUp(self):
         self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
 
 
-class TestElementwiseBF16MaxOp_ZeroDim2(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        y = np.random.uniform(0.1, 1, []).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
-class TestElementwiseBF16MaxOp_ZeroDim3(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float32")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
 class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -292,25 +267,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_0(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 0}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(100, 1, 1)))
-        }
-
-
 class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -349,25 +305,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_1(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 100, 1)))
-        }
-
-
 class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -406,25 +343,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_2(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (100,)).astype(np.float32)
-        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(np.maximum(x, y.reshape(1, 1, 100)))
-        }
-
-
 class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -463,27 +381,6 @@ def setUp(self):
         }
 
 
-class TestElementwiseMaxBF16Op_broadcast_3(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
-        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
-            np.float32
-        )
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {
-            'Out': convert_float_to_uint16(
-                np.maximum(x, y.reshape(1, 50, 2, 1))
-            )
-        }
-
-
 class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
@@ -538,20 +435,5 @@ def setUp(self):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
-class TestElementwiseMaxBF16Op_broadcast_4(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
-        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
-        self.attrs = {'axis': 1}
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
-
-
 if __name__ == '__main__':
     unittest.main()

From 0d42ba0323f8ba867b2c639cdd657d5091962f37 Mon Sep 17 00:00:00 2001
From: pidack
Date: Mon, 13 Mar 2023 03:14:49 +0000
Subject: [PATCH 7/9] change bf16 uts delta

---
 .../unittests/test_elementwise_max_op.py      | 41 ++++++++++++-------
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index e16c09b40c5f8..3c512c41bc3e7 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -137,9 +137,8 @@ def setUp(self):
 @unittest.skipIf(
     core.is_compiled_with_cuda()
     and (
-        core.cudnn_version()
-        < 8100
-        # or paddle.device.cuda.get_device_capability()[0] < 8
+        core.cudnn_version() < 8100
+        or paddle.device.cuda.get_device_capability()[0] < 8
     ),
     "run test when gpu is available and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.",
 )
@@ -167,23 +166,15 @@ def test_check_output(self):
 
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
-            self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
         else:
-            self.check_grad(
-                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
-            )
+            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
-        )
+        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
-        )
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 
 
 class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
@@ -199,6 +190,26 @@ def setUp(self):
         }
         self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
 
+    def test_check_grad_normal(self):
+        if hasattr(self, 'attrs'):
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
+            )
+        else:
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
+            )
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
+        )
+
 
 class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
     def setUp(self):

From b0c2bf8f5ec1a47f7188d0ffb582cc24c42f7767 Mon Sep 17 00:00:00 2001
From: pidack
Date: Mon, 13 Mar 2023 09:10:23 +0000
Subject: [PATCH 8/9] fix some issues

---
 .../unittests/test_elementwise_max_op.py      | 199 +++++++-----------
 1 file changed, 77 insertions(+), 122 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 3c512c41bc3e7..0249a6a24b682 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -22,16 +22,21 @@
 
 class TestElementwiseOp(OpTest):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
+    def init_data(self):
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            "float64"
+        )
+
+    def setUp(self):
+        self.init_data()
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.inputs = {'X': self.x, 'Y': self.y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
@@ -64,74 +69,48 @@ def test_check_grad_ingore_y(self):
 
 
 class TestElementwiseFP16Op(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
         sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float16
+        )
 
 
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
 
 
 class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float16")
-        y = np.random.uniform(0.1, 1, []).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
 
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
 
 
 class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
-        y = np.random.uniform(0.1, 1, []).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
 
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
 
 
 class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float16")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
 
 
 @unittest.skipIf(
@@ -143,21 +122,28 @@ def setUp(self):
     "run test when gpu is available and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.",
 )
 class TestElementwiseBF16Op(OpTest):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.dtype = np.uint16
+    def init_data(self):
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
         sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float32
+        )
+
+    def setUp(self):
+        self.init_data()
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.dtype = np.uint16
         self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
+            'X': convert_float_to_uint16(self.x),
+            'Y': convert_float_to_uint16(self.y),
         }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(self.x, self.y))
+        }
 
     def test_check_output(self):
         if hasattr(self, 'attrs'):
@@ -181,16 +167,9 @@ def test_check_grad_ingore_y(self):
 
 
 class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.uniform(0.1, 1, []).astype("float32")
-        y = np.random.uniform(0.1, 1, []).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float32")
+        self.y = np.random.uniform(0.1, 1, []).astype("float32")
 
     def test_check_grad_normal(self):
         if hasattr(self, 'attrs'):
@@ -214,16 +193,9 @@ def test_check_grad_ingore_y(self):
 
 
 class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
-        y = np.array([0.5]).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
+        self.y = np.array([0.5]).astype("float32")
         self.__class__.no_need_check_grad = True
 
 
@@ -232,59 +204,42 @@ def setUp(self):
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
 class TestElementwiseMaxOp_scalar(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
-        y = np.array([0.5]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
+        self.y = np.array([0.5]).astype("float64")
 
 
 class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
-        y = np.array([0.5]).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
+        self.y = np.array([0.5]).astype("float16")
 
 
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random((100,)).astype("float64")
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float64")
         sgn = np.random.choice([-1, 1], (100,)).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float64"
+        )
 
 
 class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random((100,)).astype("float16")
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float16")
        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float16")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float16"
+        )
 
 
 class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        x = np.random.random((100,)).astype("float32")
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float32")
         sgn = np.random.choice([-1, 1], (100,)).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float32")
-        self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
-        }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float32"
+        )
 
 
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):

From e45e05f5716d20e71250dfe20b29ed26c2091d12 Mon Sep 17 00:00:00 2001
From: pidack
Date: Tue, 14 Mar 2023 01:49:48 +0000
Subject: [PATCH 9/9] add prim for fp16

---
 .../paddle/fluid/tests/unittests/test_elementwise_max_op.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
index 7ba6b4d02df0c..27f8b70521953 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_max_op.py
@@ -293,6 +293,7 @@ class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
         x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
         y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -332,6 +333,7 @@ class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
         x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
         y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -370,6 +372,7 @@ class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
         x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
         y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
@@ -408,6 +411,7 @@ class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
         x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
         y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
@@ -440,6 +444,7 @@ class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
         x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
         sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
         y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
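A closing note on the recurring data recipe in this series (x plus sgn times uniform(0.1, 1)): max(x, y) has no well-defined derivative at ties, so every variant above keeps |x - y| >= 0.1 elementwise, exactly as the comment in TestElementwiseOp says. A small self-contained check of that property and of the analytic gradient it makes valid (variable names here are illustrative, not from the test file):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.uniform(0.1, 1, [13, 17])
    sgn = rng.choice([-1.0, 1.0], [13, 17])
    y = x + sgn * rng.uniform(0.1, 1, [13, 17])

    assert np.abs(x - y).min() >= 0.1      # no ties anywhere
    dmax_dx = (x > y).astype(np.float64)   # d/dx max(x, y): 1 where x wins, else 0

The same construction is what lets the fp16/bf16 variants use loose numeric tolerances without tripping over the non-differentiable points.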