Skip to content

Commit

Permalink
support uniform & fix some tests
Browse files Browse the repository at this point in the history
  • Loading branch information
zhanghonggeng committed Feb 25, 2025
1 parent 90df978 commit f8c836a
Show file tree
Hide file tree
Showing 12 changed files with 270 additions and 183 deletions.
57 changes: 57 additions & 0 deletions paddle2onnx/mapper/tensor/uniform.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle2onnx/mapper/tensor/uniform.h"

namespace paddle2onnx {
REGISTER_PIR_MAPPER(uniform, UniformMapper)

// Minimum ONNX opset this mapper can target. RandomUniform is emitted by
// Opset7(), so anything from opset 7 upward is accepted.
int32_t UniformMapper::GetMinOpsetVersion(bool verbose) {
  return 7;
}

void UniformMapper::Opset7() {
auto output_info = GetOutput("out");
auto shape_info = GetInput("shape");
auto min_info = GetInput("min");
auto max_info = GetInput("max");

if (min_info[0].Rank() != 0 || max_info[0].Rank() != 0) {
Error() << "[ERROR] min/max must be scalar tensors for op "
"uniform "
<< std::endl;
}
std::vector<float> min_val{0.0f}, max_val{1.0f};
bool is_min_const =
helper_->TryGetTensorValue<float>(min_info[0].name, &min_val);
bool is_max_const =
helper_->TryGetTensorValue<float>(max_info[0].name, &max_val);

std::vector<int64_t> shape_values;
helper_->TryGetTensorValue<int64_t>(shape_info[0].name, &shape_values);

auto onnx_dtype = GetOnnxDtype(dtype_);

auto random_node =
helper_->MakeNode("RandomUniform", {}, {output_info[0].name});

AddAttribute(random_node, "shape", shape_values);
AddAttribute(random_node, "low", min_val[0]);
AddAttribute(random_node, "high", max_val[0]);
AddAttribute(random_node, "dtype", static_cast<int64_t>(onnx_dtype));
if (seed_ != 0) {
AddAttribute(random_node, "seed", static_cast<float>(seed_));
}

std::cout << "dtype_" << dtype_ << std::endl;
}
} // namespace paddle2onnx
39 changes: 39 additions & 0 deletions paddle2onnx/mapper/tensor/uniform.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <string>
#include <vector>

#include "paddle2onnx/mapper/mapper.h"

namespace paddle2onnx {

// Mapper that converts the Paddle `uniform` operator (uniform random
// tensor generation) into its ONNX equivalent (see uniform.cc).
class UniformMapper : public Mapper {
 public:
  // p/helper: PIR parser and ONNX graph builder; i and c are forwarded
  // verbatim to the base Mapper — presumably the op index and an
  // in-control-flow flag, TODO confirm against Mapper's constructor.
  UniformMapper(const PaddlePirParser& p, OnnxHelper* helper, int64_t i, bool c)
      : Mapper(p, helper, i, c) {
    // Cache the op attributes needed at export time.
    GetAttr("dtype", &dtype_);
    GetAttr("seed", &seed_);
  }

  int32_t GetMinOpsetVersion(bool verbose) override;
  void Opset7() override;

 private:
  int64_t dtype_;  // Paddle dtype attribute; mapped to an ONNX dtype on export.
  int64_t seed_;   // RNG seed; only forwarded to ONNX when non-zero.
};

} // namespace paddle2onnx
9 changes: 2 additions & 7 deletions tests/onnxbase.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
import paddle
import paddle2onnx
import paddle.static as static
from paddle2onnx.convert import dygraph2onnx, decompose_program
from paddle2onnx.convert import dygraph2onnx
import shutil
from functools import wraps

Expand Down Expand Up @@ -232,8 +232,6 @@ def __init__(
self.input_spec_shape = input_spec_shape
self.input_dtype = []
self.res_fict = {}
self.dist_prim_all = False
self.auto_upgrade_opset = False

if isfunction(self.func):
# self._func = self.BuildFunc(self.func, **self.kwargs_dict_dygraph["params_group1"])
Expand Down Expand Up @@ -497,10 +495,7 @@ def run(self):
# clip extra
model_file = None
if paddle.get_flags("FLAGS_enable_pir_api")["FLAGS_enable_pir_api"]:
if self.dist_prim_all and self.auto_upgrade_opset:
model_file = decompose_program(original_model_file)
else:
model_file = original_model_file
model_file = original_model_file
else:
model_file = os.path.join(self.name, "cliped_model.pdmodel")
self.clip_extra_program_only(original_model_file, model_file)
Expand Down
11 changes: 0 additions & 11 deletions tests/run.bat
Original file line number Diff line number Diff line change
Expand Up @@ -35,26 +35,16 @@ set ignore=test_auto_scan_multiclass_nms.py
set ignore=!ignore! test_auto_scan_roi_align.py
set ignore=!ignore! test_auto_scan_pool_adaptive_max_ops.py
set ignore=!ignore! test_auto_scan_pad2d.py
set ignore=!ignore! test_auto_scan_roll.py
set ignore=!ignore! test_auto_scan_unfold.py
set ignore=!ignore! test_auto_scan_uniform_random_batch_size_like.py
set ignore=!ignore! test_auto_scan_uniform_random.py
set ignore=!ignore! test_auto_scan_dist.py
set ignore=!ignore! test_auto_scan_distribute_fpn_proposals1.py
set ignore=!ignore! test_auto_scan_distribute_fpn_proposals_v2.py
set ignore=!ignore! test_auto_scan_fill_constant_batch_size_like.py
set ignore=!ignore! test_auto_scan_generate_proposals.py
set ignore=!ignore! test_uniform.py
set ignore=!ignore! test_ceil.py
set ignore=!ignore! test_deform_conv2d.py
set ignore=!ignore! test_floor_divide.py
set ignore=!ignore! test_has_nan.py
set ignore=!ignore! test_median.py
set ignore=!ignore! test_nn_GroupNorm.py
set ignore=!ignore! test_nn_InstanceNorm3D.py
set ignore=!ignore! test_nn_Upsample.py
set ignore=!ignore! test_normalize.py
set ignore=!ignore! test_scatter_nd_add.py
set ignore=!ignore! test_unsqueeze.py
set ignore=!ignore! test_quantize_model.py
set ignore=!ignore! test_quantize_model_minist.py
Expand All @@ -72,7 +62,6 @@ set ignore=!ignore! test_auto_scan_conv2d.py
set ignore=!ignore! test_auto_scan_conv2d_transpose.py
set ignore=!ignore! test_auto_scan_conv3d.py
set ignore=!ignore! test_auto_scan_grid_sampler.py
set ignore=!ignore! test_auto_scan_set_value.py
set ignore=!ignore! test_auto_scan_dequantize_linear.py
set ignore=!ignore! test_auto_scan_gaussian_random.py
set ignore=!ignore! test_auto_scan_partial_ops.py
Expand Down
14 changes: 1 addition & 13 deletions tests/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,30 +29,19 @@ ignore="test_auto_scan_multiclass_nms.py
test_auto_scan_roi_align.py \ # need to be rewrite
test_auto_scan_pool_adaptive_max_ops.py \
test_auto_scan_pad2d.py \
test_auto_scan_roll.py \
test_auto_scan_unfold.py \
test_auto_scan_uniform_random_batch_size_like.py \
test_auto_scan_uniform_random.py \
test_auto_scan_gaussian_random.py \
test_auto_scan_dist.py \
test_auto_scan_distribute_fpn_proposals1.py \
test_auto_scan_distribute_fpn_proposals_v2.py \
test_auto_scan_fill_constant_batch_size_like.py \
test_auto_scan_unary_ops.py \
test_auto_scan_generate_proposals.py \
test_uniform.py \
test_ceil.py \
test_deform_conv2d.py \
test_floor_divide.py \
test_has_nan.py \
test_median.py \
test_nn_GroupNorm.py \
test_nn_InstanceNorm3D.py \
test_nn_Upsample.py \
test_normalize.py \
test_hardtanh.py \
test_nn_GRU.py \
test_scatter_nd_add.py \
test_quantize_model.py \
test_quantize_model_minist.py \
test_auto_scan_partial_ops.py \
Expand All @@ -63,8 +52,7 @@ ignore="test_auto_scan_multiclass_nms.py
test_resnet_fp16.py \
test_empty.py \
test_auto_scan_pool_max_ops.py \
test_auto_scan_fill_constant.py \
test_auto_scan_set_value.py"
test_auto_scan_fill_constant.py"
bug=0

# Install Python Packet
Expand Down
4 changes: 2 additions & 2 deletions tests/test_auto_scan_roll.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
import hypothesis.strategies as st
import unittest
import paddle
from onnxbase import _test_only_pir
from onnxbase import _test_with_pir


class Net(BaseNet):
Expand Down Expand Up @@ -112,7 +112,7 @@ def sample_convert_config(self, draw):

return (config, models)

@_test_only_pir
@_test_with_pir
def test(self):
self.run_and_statis(max_examples=80)

Expand Down
4 changes: 2 additions & 2 deletions tests/test_auto_scan_set_value.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
import hypothesis.strategies as st
import unittest
import copy
from onnxbase import _test_only_pir
from onnxbase import _test_with_pir


# TODO(wangmingkai02): add test for set_value which none_axes_ > 0
Expand Down Expand Up @@ -62,7 +62,7 @@ def sample_convert_config(self, draw):

return (config, models)

@_test_only_pir
@_test_with_pir
def test(self):
self.run_and_statis(max_examples=30)

Expand Down
17 changes: 9 additions & 8 deletions tests/test_nn_GroupNorm.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,7 @@
# limitations under the License.

import paddle
from onnxbase import APIOnnx
from onnxbase import randtool
from onnxbase import APIOnnx, randtool, _test_with_pir


class Net(paddle.nn.Layer):
Expand All @@ -34,6 +33,7 @@ def forward(self, inputs):
return x


@_test_with_pir
def test_GroupNorm_11():
"""
api: paddle.nn.GroupNorm
Expand All @@ -42,14 +42,15 @@ def test_GroupNorm_11():
op = Net()
op.eval()
# net, name, ver_list, delta=1e-6, rtol=1e-5
obj = APIOnnx(op, 'nn_GroupNorm', [11])
obj = APIOnnx(op, "nn_GroupNorm", [11])
obj.set_input_data(
"input_data",
paddle.to_tensor(
randtool("float", -1, 1, [5, 10, 8, 8]).astype('float32')))
paddle.to_tensor(randtool("float", -1, 1, [5, 10, 8, 8]).astype("float32")),
)
obj.run()


@_test_with_pir
def test_GroupNorm_12():
"""
api: paddle.nn.GroupNorm
Expand All @@ -58,9 +59,9 @@ def test_GroupNorm_12():
op = Net()
op.eval()
# net, name, ver_list, delta=1e-6, rtol=1e-5
obj = APIOnnx(op, 'nn_GroupNorm', [12])
obj = APIOnnx(op, "nn_GroupNorm", [12])
obj.set_input_data(
"input_data",
paddle.to_tensor(
randtool("float", -1, 1, [5, 10, 8, 8]).astype('float32')))
paddle.to_tensor(randtool("float", -1, 1, [5, 10, 8, 8]).astype("float32")),
)
obj.run()
13 changes: 7 additions & 6 deletions tests/test_nn_InstanceNorm3D.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,7 @@
# limitations under the License.

import paddle
from onnxbase import APIOnnx
from onnxbase import randtool
from onnxbase import APIOnnx, randtool, _test_with_pir


class Net(paddle.nn.Layer):
Expand All @@ -31,7 +30,8 @@ def __init__(self):
weight_attr=None,
bias_attr=None,
data_format="NCDHW",
name=None)
name=None,
)

def forward(self, inputs):
"""
Expand All @@ -41,6 +41,7 @@ def forward(self, inputs):
return x


@_test_with_pir
def test_InstanceNorm_base():
"""
api: paddle.InstanceNorm
Expand All @@ -49,9 +50,9 @@ def test_InstanceNorm_base():
op = Net()
op.eval()
# net, name, ver_list, delta=1e-6, rtol=1e-5
obj = APIOnnx(op, 'nn_InstanceNorm', [9, 10, 11, 12])
obj = APIOnnx(op, "nn_InstanceNorm", [9, 10, 11, 12])
obj.set_input_data(
"input_data",
paddle.to_tensor(
randtool("float", -1, 1, [2, 2, 2, 2, 3]).astype('float32')))
paddle.to_tensor(randtool("float", -1, 1, [2, 2, 2, 2, 3]).astype("float32")),
)
obj.run()
Loading

0 comments on commit f8c836a

Please sign in to comment.