From f0c4b877119e7326f7b26876390fb3215c48fa40 Mon Sep 17 00:00:00 2001 From: qzylalala Date: Mon, 21 Oct 2024 21:35:15 +0800 Subject: [PATCH 1/5] [OSPP][PIR] support some ops in pir --- paddle2onnx/mapper/activation/activation.cc | 19 +++- paddle2onnx/mapper/activation/activation.h | 29 +++++ paddle2onnx/mapper/activation/sigmoid.cc | 3 +- paddle2onnx/mapper/activation/sigmoid.h | 12 +- paddle2onnx/mapper/activation/swish.cc | 14 ++- paddle2onnx/mapper/activation/swish.h | 11 +- paddle2onnx/mapper/exporter.h | 32 +++--- paddle2onnx/mapper/nn/conv2d_transpose.cc | 2 + paddle2onnx/mapper/nn/conv2d_transpose.h | 29 ++++- paddle2onnx/mapper/nn/group_norm.cc | 1 + paddle2onnx/mapper/nn/group_norm.h | 7 ++ paddle2onnx/mapper/nn/shape.cc | 1 + paddle2onnx/mapper/nn/shape.h | 5 + paddle2onnx/mapper/tensor/assign_value.cc | 2 +- paddle2onnx/mapper/tensor/bmm.cc | 1 + paddle2onnx/mapper/tensor/bmm.h | 5 + paddle2onnx/mapper/tensor/cast.cc | 1 + paddle2onnx/mapper/tensor/cast.h | 6 + paddle2onnx/mapper/tensor/clip.cc | 31 +++++- paddle2onnx/mapper/tensor/clip.h | 5 + paddle2onnx/mapper/tensor/logical_not.cc | 1 + paddle2onnx/mapper/tensor/logical_not.h | 5 + paddle2onnx/mapper/tensor/meshgrid.cc | 1 + paddle2onnx/mapper/tensor/meshgrid.h | 6 + paddle2onnx/mapper/tensor/not_equal.cc | 1 + paddle2onnx/mapper/tensor/not_equal.h | 5 + paddle2onnx/mapper/tensor/pow.cc | 1 + paddle2onnx/mapper/tensor/pow.h | 6 + paddle2onnx/mapper/tensor/stack.cc | 1 + paddle2onnx/mapper/tensor/stack.h | 6 + paddle2onnx/mapper/tensor/tile.cc | 1 + paddle2onnx/mapper/tensor/tile.h | 5 + paddle2onnx/parser/pir_parser.cc | 115 +++++++++++++------- tests/test_abs.py | 29 +++-- tests/test_auto_scan_bmm.py | 18 ++- tests/test_auto_scan_cast.py | 17 ++- tests/test_auto_scan_clip.py | 41 +++---- tests/test_auto_scan_conv2d_transpose.py | 84 +++++++------- tests/test_auto_scan_gelu.py | 9 +- tests/test_auto_scan_group_norm.py | 24 ++-- tests/test_auto_scan_leakyrelu.py | 12 +- tests/test_auto_scan_meshgrid.py | 63 +++++------ tests/test_auto_scan_pow.py | 11 +- tests/test_auto_scan_shape.py | 9 +- tests/test_auto_scan_squeeze2.py | 25 +++-- tests/test_auto_scan_stack.py | 16 ++- tests/test_auto_scan_tile.py | 30 +++-- tests/test_bmm.py | 39 ++++--- tests/test_cast.py | 27 +++-- tests/test_clip.py | 25 +++-- tests/test_cos.py | 25 +++-- tests/test_exp.py | 25 +++-- tests/test_floor.py | 29 +++-- tests/test_gelu.py | 29 +++-- tests/test_leaky_relu.py | 25 +++-- tests/test_logical_and.py | 25 +++-- tests/test_logical_not.py | 22 ++-- tests/test_meshgrid.py | 37 ++++--- tests/test_pow.py | 29 +++-- tests/test_shape.py | 25 +++-- tests/test_sigmoid.py | 9 +- tests/test_sin.py | 8 +- tests/test_stack.py | 33 ++++-- tests/test_swish.py | 29 +++-- tests/test_tile.py | 13 ++- 65 files changed, 771 insertions(+), 441 deletions(-) diff --git a/paddle2onnx/mapper/activation/activation.cc b/paddle2onnx/mapper/activation/activation.cc index fe59c8493..1ebd963d9 100644 --- a/paddle2onnx/mapper/activation/activation.cc +++ b/paddle2onnx/mapper/activation/activation.cc @@ -17,19 +17,26 @@ namespace paddle2onnx { REGISTER_MAPPER(abs, ActivationMapper) +REGISTER_PIR_MAPPER(abs, ActivationMapper) REGISTER_MAPPER(acos, ActivationMapper) REGISTER_MAPPER(asin, ActivationMapper) REGISTER_MAPPER(atan, ActivationMapper) REGISTER_MAPPER(brelu, BReluMapper) REGISTER_MAPPER(ceil, ActivationMapper) REGISTER_MAPPER(cos, ActivationMapper) +REGISTER_PIR_MAPPER(cos, ActivationMapper) REGISTER_MAPPER(elu, EluMapper) REGISTER_MAPPER(erf, ActivationMapper) 
REGISTER_MAPPER(exp, ActivationMapper)
+REGISTER_PIR_MAPPER(exp, ActivationMapper)
 REGISTER_MAPPER(floor, ActivationMapper)
+REGISTER_PIR_MAPPER(floor, ActivationMapper)
 REGISTER_MAPPER(gelu, GeluMapper)
+REGISTER_PIR_MAPPER(gelu, GeluMapper)
 REGISTER_MAPPER(leaky_relu, LeakyReluMapper)
+REGISTER_PIR_MAPPER(leaky_relu, LeakyReluMapper)
 REGISTER_MAPPER(log, ActivationMapper)
+REGISTER_PIR_MAPPER(log, ActivationMapper)
 REGISTER_MAPPER(log10, Log10Mapper)
 REGISTER_MAPPER(log1p, Log1PMapper)
 REGISTER_MAPPER(log2, Log2Mapper)
@@ -45,13 +52,17 @@ REGISTER_MAPPER(rsqrt, RsqrtMapper)
 REGISTER_MAPPER(sel, ActivationMapper)
 REGISTER_MAPPER(selu, SeluMapper)
 REGISTER_MAPPER(silu, SiluMapper)
+REGISTER_PIR_MAPPER(silu, SiluMapper)
 REGISTER_MAPPER(sin, ActivationMapper)
+REGISTER_PIR_MAPPER(sin, ActivationMapper)
 REGISTER_MAPPER(size, SizeMapper)
 REGISTER_MAPPER(softmax, SoftMaxMapper)
+REGISTER_PIR_MAPPER(softmax, SoftMaxMapper)
 REGISTER_MAPPER(softplus, ActivationMapper)
 REGISTER_MAPPER(softshrink, SoftShrinkMapper)
 REGISTER_MAPPER(softsign, ActivationMapper)
 REGISTER_MAPPER(sqrt, ActivationMapper)
+REGISTER_PIR_MAPPER(sqrt, ActivationMapper)
 REGISTER_MAPPER(square, SquareMapper)
 REGISTER_MAPPER(tan, ActivationMapper)
 REGISTER_MAPPER(tanh, ActivationMapper)
@@ -85,7 +96,9 @@ void ActivationMapper::Opset7() {
   auto output_info = GetOutput("Out");
   auto iter = op_mapper_.find(convert_pir_op_name(OpType()));
   Assert(op_mapper_.end() != iter,
-         "Cannot find " + convert_pir_op_name(OpType()) + " in activation op_mapper.");
+         "Cannot find " +
+             convert_pir_op_name(OpType()) +
+             " in activation op_mapper.");
   if (convert_pir_op_name(OpType()) == "erf") {
     auto input = helper_->AutoCast(input_info[0].name, input_info[0].dtype,
                                    P2ODataType::FP32);
@@ -367,7 +380,9 @@ void ThresholdedReluMapper::Opset10() {
 void Log1PMapper::Opset7() {
   auto x_info = GetInput("X");
   auto out_info = GetOutput("Out");
-  auto one = helper_->Constant({}, GetOnnxDtype(x_info[0].dtype), float(1.0));
+  auto one = helper_->Constant({},
+                               GetOnnxDtype(x_info[0].dtype),
+                               static_cast<float>(1.0));
   auto input = helper_->MakeNode("Add", {x_info[0].name, one})->output(0);
   helper_->MakeNode("Log", {input}, {out_info[0].name});
 }
diff --git a/paddle2onnx/mapper/activation/activation.h b/paddle2onnx/mapper/activation/activation.h
index efb9f6380..760771e17 100644
--- a/paddle2onnx/mapper/activation/activation.h
+++ b/paddle2onnx/mapper/activation/activation.h
@@ -112,6 +112,13 @@ class LeakyReluMapper : public Mapper {
     GetAttr("alpha", &alpha_);
   }
 
+  LeakyReluMapper(const PaddlePirParser& p, OnnxHelper* helper,
+                  int64_t op_id)
+      : Mapper(p, helper, op_id) {
+    in_pir_mode = true;
+    GetAttr("alpha", &alpha_);
+  }
+
   void Opset7() override;
 
  private:
@@ -124,6 +131,12 @@ class GeluMapper : public Mapper {
             int64_t op_id)
       : Mapper(p, helper, block_id, op_id) {}
 
+  GeluMapper(const PaddlePirParser& p, OnnxHelper* helper,
+             int64_t op_id)
+      : Mapper(p, helper, op_id) {
+    in_pir_mode = true;
+  }
+
   int32_t GetMinOpsetVersion(bool verbose) override {
     Logger(verbose, 9) << RequireOpset(9) << std::endl;
     return 9;
@@ -144,6 +157,17 @@ class SoftMaxMapper : public Mapper {
     }
   }
 
+  SoftMaxMapper(const PaddlePirParser& p, OnnxHelper* helper,
+                int64_t op_id)
+      : Mapper(p, helper, op_id) {
+    in_pir_mode = true;
+    if (HasAttr("axis")) {
+      GetAttr("axis", &axis_);
+    } else {
+      axis_ = -1;
+    }
+  }
+
   void Opset7() override;
   void Opset13() override;
 
@@ -310,6 +334,11 @@ class SiluMapper : public Mapper {
   SiluMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t
op_id) : Mapper(p, helper, block_id, op_id) {} + SiluMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } void Opset7() override; }; diff --git a/paddle2onnx/mapper/activation/sigmoid.cc b/paddle2onnx/mapper/activation/sigmoid.cc index 18bf2e0dc..f643e9a7c 100644 --- a/paddle2onnx/mapper/activation/sigmoid.cc +++ b/paddle2onnx/mapper/activation/sigmoid.cc @@ -16,10 +16,11 @@ namespace paddle2onnx { REGISTER_MAPPER(sigmoid, SigmoidMapper) +REGISTER_PIR_MAPPER(sigmoid, SigmoidMapper) void SigmoidMapper::Opset7() { auto input_info = GetInput("X"); auto output_info = GetOutput("Out"); helper_->MakeNode("Sigmoid", {input_info[0].name}, {output_info[0].name}); } -} \ No newline at end of file +} diff --git a/paddle2onnx/mapper/activation/sigmoid.h b/paddle2onnx/mapper/activation/sigmoid.h index 96fc14252..c1121b023 100644 --- a/paddle2onnx/mapper/activation/sigmoid.h +++ b/paddle2onnx/mapper/activation/sigmoid.h @@ -13,20 +13,24 @@ // limitations under the License. #pragma once - -#include "paddle2onnx/mapper/mapper.h" - #include #include #include #include +#include "paddle2onnx/mapper/mapper.h" + namespace paddle2onnx { class SigmoidMapper : public Mapper { public: SigmoidMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + SigmoidMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } void Opset7() override; }; -} \ No newline at end of file +} // namespace paddle2onnx diff --git a/paddle2onnx/mapper/activation/swish.cc b/paddle2onnx/mapper/activation/swish.cc index 449c86335..f687e9845 100644 --- a/paddle2onnx/mapper/activation/swish.cc +++ b/paddle2onnx/mapper/activation/swish.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(swish, SwishMapper) +REGISTER_PIR_MAPPER(swish, SwishMapper) void SwishMapper::Opset7() { auto input_info = GetInput("X"); @@ -25,13 +26,18 @@ void SwishMapper::Opset7() { if (HasAttr("beta")) { float temp_beta = 1.0; GetAttr("beta", &temp_beta); - std::string beta_node = helper_->Constant({}, GetOnnxDtype(input_info[0].dtype), temp_beta); - auto beta_x_node = helper_->MakeNode("Mul", {input_info[0].name, beta_node}); + std::string beta_node = helper_->Constant({}, + GetOnnxDtype(input_info[0].dtype), + temp_beta); + auto beta_x_node = helper_->MakeNode("Mul", + {input_info[0].name, beta_node}); sigmod_node = helper_->MakeNode("Sigmoid", {beta_x_node->output(0)}); } else { sigmod_node = helper_->MakeNode("Sigmoid", {input_info[0].name}); } - helper_->MakeNode("Mul", {input_info[0].name, sigmod_node->output(0)}, {output_info[0].name}); + helper_->MakeNode("Mul", + {input_info[0].name, sigmod_node->output(0)}, + {output_info[0].name}); } -} \ No newline at end of file +} // namespace paddle2onnx diff --git a/paddle2onnx/mapper/activation/swish.h b/paddle2onnx/mapper/activation/swish.h index 9282ede7a..14d5f3efb 100644 --- a/paddle2onnx/mapper/activation/swish.h +++ b/paddle2onnx/mapper/activation/swish.h @@ -14,19 +14,24 @@ #pragma once -#include "paddle2onnx/mapper/mapper.h" - #include #include #include #include +#include "paddle2onnx/mapper/mapper.h" + namespace paddle2onnx { class SwishMapper : public Mapper { public: SwishMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + SwishMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, 
op_id) {
+    in_pir_mode = true;
+  }
   void Opset7() override;
 };
-}
\ No newline at end of file
+}  // namespace paddle2onnx
diff --git a/paddle2onnx/mapper/exporter.h b/paddle2onnx/mapper/exporter.h
index a226d0c3b..34f872243 100644
--- a/paddle2onnx/mapper/exporter.h
+++ b/paddle2onnx/mapper/exporter.h
@@ -34,12 +34,13 @@
 #endif
 
 inline std::string convert_pir_op_name(const std::string pir_op_name) {
-  std::unordered_map<std::string, std::string> op_name_mappings = {
-      {"matmul", "matmul_v2"},
-      // {"relu", "relu6"},
-      {"batch_norm_", "batch_norm"},
-      {"flatten", "flatten_contiguous_range"},
-      {"add", "elementwise_add"}};
+  std::unordered_map<std::string, std::string> op_name_mappings = {
+      {"matmul", "matmul_v2"},
+      // {"relu", "relu6"},
+      {"batch_norm_", "batch_norm"},
+      {"assign_value_", "assign_value"},
+      {"flatten", "flatten_contiguous_range"},
+      {"add", "elementwise_add"}};
   std::string op_name = pir_op_name;
   std::string prefix = "pd_op.";
   std::string builtin_prefix = "builtin.";
@@ -47,11 +48,10 @@ inline std::string convert_pir_op_name(const std::string pir_op_name) {
   size_t prefix_pos = op_name.find(prefix);
   if (prefix_pos != std::string::npos) {
     op_name = op_name.substr(prefix_pos + prefix.size());
-  }
-  else {
-    if(op_name.substr(0, builtin_prefix.size()) == builtin_prefix) {
+  } else {
+    if(op_name.substr(0, builtin_prefix.size()) == builtin_prefix) {
       op_name[builtin_prefix.size() - 1] = '_';
-    }
+    }
   }
   auto it = op_name_mappings.find(op_name);
   if (it != op_name_mappings.end()) {
@@ -162,8 +162,10 @@ class ModelExporter {
           &inputs,
       std::vector>
           &outputs,
-      std::vector> &nodes,
-      std::map &quantize_info);
+      std::vector>
+          &nodes,
+      std::map
+          &quantize_info);
   // Update constant node in parameters. When processing a quantized model,
   // the weight dtype may be int8; it should be converted to float32, and this
   // function is used to update the converted params.
@@ -181,7 +183,8 @@ class ModelExporter { ONNX_NAMESPACE::GraphProto ExportBlock( const PaddleParser &parser, int32_t block_id, - std::vector> ¶meters, + std::vector> + ¶meters, std::vector> &inputs, std::vector> @@ -189,7 +192,8 @@ class ModelExporter { ONNX_NAMESPACE::GraphProto ExportBlock( const PaddlePirParser &pir_parser, - std::vector> ¶meters, + std::vector> + ¶meters, std::vector> &inputs, std::vector> diff --git a/paddle2onnx/mapper/nn/conv2d_transpose.cc b/paddle2onnx/mapper/nn/conv2d_transpose.cc index 5daa194ab..41b5bf6a0 100755 --- a/paddle2onnx/mapper/nn/conv2d_transpose.cc +++ b/paddle2onnx/mapper/nn/conv2d_transpose.cc @@ -19,7 +19,9 @@ namespace paddle2onnx { REGISTER_MAPPER(conv2d_transpose, Conv2dTransposeMapper) +REGISTER_PIR_MAPPER(conv2d_transpose, Conv2dTransposeMapper) REGISTER_MAPPER(depthwise_conv2d_transpose, Conv2dTransposeMapper) +REGISTER_PIR_MAPPER(depthwise_conv2d_transpose, Conv2dTransposeMapper) int32_t Conv2dTransposeMapper::GetMinOpsetVersion(bool verbose) { // NHWC is not supported diff --git a/paddle2onnx/mapper/nn/conv2d_transpose.h b/paddle2onnx/mapper/nn/conv2d_transpose.h index 5404a0842..ca8657cb9 100755 --- a/paddle2onnx/mapper/nn/conv2d_transpose.h +++ b/paddle2onnx/mapper/nn/conv2d_transpose.h @@ -32,7 +32,7 @@ class Conv2dTransposeMapper : public Mapper { GetAttr("padding_algorithm", &padding_algorithm_); GetAttr("data_format", &data_format_); - if (HasAttr("output_padding")){ + if (HasAttr("output_padding")) { GetAttr("output_padding", &output_padding_); } GetAttr("output_size", &output_size_); @@ -46,6 +46,33 @@ class Conv2dTransposeMapper : public Mapper { } } + Conv2dTransposeMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + GetAttr("groups", &groups_); + GetAttr("dilations", &dilations_); + GetAttr("strides", &strides_); + GetAttr("paddings", &paddings_); + GetAttr("padding_algorithm", &padding_algorithm_); + GetAttr("data_format", &data_format_); + + if (HasAttr("output_padding")) { + GetAttr("output_padding", &output_padding_); + } + if (HasAttr("output_size")) { + GetAttr("output_size", &output_size_); + } + if (paddings_.size() == 2) { + paddings_.push_back(paddings_[0]); + paddings_.push_back(paddings_[1]); + } else if (paddings_.size() == 4) { + int32_t tmp = paddings_[1]; + paddings_[1] = paddings_[2]; + paddings_[2] = tmp; + } + } + int32_t GetMinOpsetVersion(bool verbose) override; void Opset7() override; diff --git a/paddle2onnx/mapper/nn/group_norm.cc b/paddle2onnx/mapper/nn/group_norm.cc index 24bb35442..79aa2fbdf 100644 --- a/paddle2onnx/mapper/nn/group_norm.cc +++ b/paddle2onnx/mapper/nn/group_norm.cc @@ -20,6 +20,7 @@ namespace paddle2onnx { REGISTER_MAPPER(group_norm, GroupNormMapper) +REGISTER_PIR_MAPPER(group_norm, GroupNormMapper) int32_t GroupNormMapper::GetMinOpsetVersion(bool verbose) { auto input_info = GetInput("X"); diff --git a/paddle2onnx/mapper/nn/group_norm.h b/paddle2onnx/mapper/nn/group_norm.h index be4831b6b..5056f2b24 100755 --- a/paddle2onnx/mapper/nn/group_norm.h +++ b/paddle2onnx/mapper/nn/group_norm.h @@ -28,6 +28,13 @@ class GroupNormMapper : public Mapper { GetAttr("groups", &groups_); GetAttr("epsilon", &epsilon_); } + GroupNormMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + GetAttr("groups", &groups_); + GetAttr("epsilon", &epsilon_); + } int32_t GetMinOpsetVersion(bool verbose) override; void Opset7() override; diff --git 
a/paddle2onnx/mapper/nn/shape.cc b/paddle2onnx/mapper/nn/shape.cc index d82a1d00f..9528c9881 100644 --- a/paddle2onnx/mapper/nn/shape.cc +++ b/paddle2onnx/mapper/nn/shape.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(shape, ShapeMapper) +REGISTER_PIR_MAPPER(shape, ShapeMapper) void ShapeMapper::Opset7() { auto input_info = GetInput("Input"); diff --git a/paddle2onnx/mapper/nn/shape.h b/paddle2onnx/mapper/nn/shape.h index 3db2cca18..20d98a955 100644 --- a/paddle2onnx/mapper/nn/shape.h +++ b/paddle2onnx/mapper/nn/shape.h @@ -25,6 +25,11 @@ class ShapeMapper : public Mapper { ShapeMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + ShapeMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } void Opset7() override; }; diff --git a/paddle2onnx/mapper/tensor/assign_value.cc b/paddle2onnx/mapper/tensor/assign_value.cc index ff199409f..cc736d203 100644 --- a/paddle2onnx/mapper/tensor/assign_value.cc +++ b/paddle2onnx/mapper/tensor/assign_value.cc @@ -20,7 +20,7 @@ namespace paddle2onnx { REGISTER_MAPPER(assign_value, AssignValueMapper) -REGISTER_PIR_MAPPER(assign_value_, AssignValueMapper) +REGISTER_PIR_MAPPER(assign_value, AssignValueMapper) int32_t AssignValueMapper::GetMinOpsetVersion(bool verbose) { int32_t dtype = static_cast(dtype_); diff --git a/paddle2onnx/mapper/tensor/bmm.cc b/paddle2onnx/mapper/tensor/bmm.cc index 7d8c0b76d..ce57af36f 100644 --- a/paddle2onnx/mapper/tensor/bmm.cc +++ b/paddle2onnx/mapper/tensor/bmm.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(bmm, BmmMapper) +REGISTER_PIR_MAPPER(bmm, BmmMapper) void BmmMapper::Opset7() { auto x_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/bmm.h b/paddle2onnx/mapper/tensor/bmm.h index 891261669..e9c70214b 100644 --- a/paddle2onnx/mapper/tensor/bmm.h +++ b/paddle2onnx/mapper/tensor/bmm.h @@ -25,6 +25,11 @@ class BmmMapper : public Mapper { BmmMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + BmmMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } void Opset7() override; }; diff --git a/paddle2onnx/mapper/tensor/cast.cc b/paddle2onnx/mapper/tensor/cast.cc index 8e9c6c647..20688da79 100644 --- a/paddle2onnx/mapper/tensor/cast.cc +++ b/paddle2onnx/mapper/tensor/cast.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(cast, CastMapper) +REGISTER_PIR_MAPPER(cast, CastMapper) void CastMapper::Opset7() { auto input_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/cast.h b/paddle2onnx/mapper/tensor/cast.h index ff7c09b9f..9abd708c1 100644 --- a/paddle2onnx/mapper/tensor/cast.h +++ b/paddle2onnx/mapper/tensor/cast.h @@ -27,6 +27,12 @@ class CastMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("out_dtype", &out_dtype_); } + CastMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + GetAttr("dtype", &out_dtype_); + } void Opset7() override; private: diff --git a/paddle2onnx/mapper/tensor/clip.cc b/paddle2onnx/mapper/tensor/clip.cc index 1ed0e0355..c0ea4790b 100644 --- a/paddle2onnx/mapper/tensor/clip.cc +++ b/paddle2onnx/mapper/tensor/clip.cc @@ -16,8 +16,10 @@ namespace paddle2onnx { REGISTER_MAPPER(clip, ClipMapper) +REGISTER_PIR_MAPPER(clip, ClipMapper) int32_t ClipMapper::GetMinOpsetVersion(bool 
verbose) { + if (in_pir_mode) return 7; bool has_max_tensor_input = HasInput("Max"); bool has_min_tensor_input = HasInput("Min"); if (has_max_tensor_input || has_min_tensor_input) { @@ -32,6 +34,11 @@ void ClipMapper::Opset7() { bool has_max_tensor_input = HasInput("Max"); bool has_min_tensor_input = HasInput("Min"); + if (in_pir_mode) { + bool has_input = helper_->opset_version >= 11; + has_max_tensor_input = has_input; + has_min_tensor_input = has_input; + } if (has_max_tensor_input || has_min_tensor_input) { bool dtype_converted = false; @@ -53,7 +60,11 @@ void ClipMapper::Opset7() { } } else { float max_val; - GetAttr("max", &max_val); + if (in_pir_mode) { + TryGetInputValue("max", &max_val); + } else { + GetAttr("max", &max_val); + } max_name = helper_->Constant({}, GetOnnxDtype(dtype), max_val); } std::string min_name; @@ -65,7 +76,11 @@ void ClipMapper::Opset7() { } } else { float min_val; - GetAttr("min", &min_val); + if (in_pir_mode) { + TryGetInputValue("min", &min_val); + } else { + GetAttr("min", &min_val); + } min_name = helper_->Constant({}, GetOnnxDtype(dtype), min_val); } if (dtype_converted) { @@ -77,10 +92,14 @@ void ClipMapper::Opset7() { {output_info[0].name}); } } else { - float max_val; - GetAttr("max", &max_val); - float min_val; - GetAttr("min", &min_val); + float max_val, min_val; + if (in_pir_mode) { + TryGetInputValue("max", &max_val); + TryGetInputValue("min", &min_val); + } else { + GetAttr("max", &max_val); + GetAttr("min", &min_val); + } helper_->Clip(input_info[0].name, output_info[0].name, min_val, max_val, input_info[0].dtype); } diff --git a/paddle2onnx/mapper/tensor/clip.h b/paddle2onnx/mapper/tensor/clip.h index ebcb97967..32c11b924 100644 --- a/paddle2onnx/mapper/tensor/clip.h +++ b/paddle2onnx/mapper/tensor/clip.h @@ -25,6 +25,11 @@ class ClipMapper : public Mapper { ClipMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + ClipMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } int32_t GetMinOpsetVersion(bool verbose) override; void Opset7() override; }; diff --git a/paddle2onnx/mapper/tensor/logical_not.cc b/paddle2onnx/mapper/tensor/logical_not.cc index c1b79aba0..b11d671fc 100644 --- a/paddle2onnx/mapper/tensor/logical_not.cc +++ b/paddle2onnx/mapper/tensor/logical_not.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(logical_not, LogicalNotMapper) +REGISTER_PIR_MAPPER(logical_not, LogicalNotMapper) void LogicalNotMapper::Opset7() { auto input_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/logical_not.h b/paddle2onnx/mapper/tensor/logical_not.h index 755d3d95d..f8b2b3478 100644 --- a/paddle2onnx/mapper/tensor/logical_not.h +++ b/paddle2onnx/mapper/tensor/logical_not.h @@ -25,6 +25,11 @@ class LogicalNotMapper : public Mapper { LogicalNotMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + LogicalNotMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } void Opset7() override; }; diff --git a/paddle2onnx/mapper/tensor/meshgrid.cc b/paddle2onnx/mapper/tensor/meshgrid.cc index b855d1544..de9ee0c88 100644 --- a/paddle2onnx/mapper/tensor/meshgrid.cc +++ b/paddle2onnx/mapper/tensor/meshgrid.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(meshgrid, MeshgridMapper) +REGISTER_PIR_MAPPER(meshgrid, MeshgridMapper) void 
MeshgridMapper::Opset8() { auto x_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/meshgrid.h b/paddle2onnx/mapper/tensor/meshgrid.h index 5eeabdf72..60ddfebf3 100644 --- a/paddle2onnx/mapper/tensor/meshgrid.h +++ b/paddle2onnx/mapper/tensor/meshgrid.h @@ -27,6 +27,12 @@ class MeshgridMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { MarkAsExperimentalOp(); } + MeshgridMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + MarkAsExperimentalOp(); + } int32_t GetMinOpsetVersion(bool verbose) override { return 8; } void Opset8() override; diff --git a/paddle2onnx/mapper/tensor/not_equal.cc b/paddle2onnx/mapper/tensor/not_equal.cc index fc274bccc..8af3ccab6 100644 --- a/paddle2onnx/mapper/tensor/not_equal.cc +++ b/paddle2onnx/mapper/tensor/not_equal.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(not_equal, NotEqualMapper) +REGISTER_PIR_MAPPER(not_equal, NotEqualMapper) int32_t NotEqualMapper::GetMinOpsetVersion(bool verbose) { auto x_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/not_equal.h b/paddle2onnx/mapper/tensor/not_equal.h index 1c96a72ad..4492383d6 100644 --- a/paddle2onnx/mapper/tensor/not_equal.h +++ b/paddle2onnx/mapper/tensor/not_equal.h @@ -22,6 +22,11 @@ class NotEqualMapper : public Mapper { NotEqualMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + NotEqualMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + } int32_t GetMinOpsetVersion(bool verbose) override; void Opset7() override; }; diff --git a/paddle2onnx/mapper/tensor/pow.cc b/paddle2onnx/mapper/tensor/pow.cc index 429b7e8d7..3549ccd34 100644 --- a/paddle2onnx/mapper/tensor/pow.cc +++ b/paddle2onnx/mapper/tensor/pow.cc @@ -18,6 +18,7 @@ namespace paddle2onnx { REGISTER_MAPPER(pow, PowMapper) +REGISTER_PIR_MAPPER(pow, PowMapper) void PowMapper::Opset7() { auto input_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/pow.h b/paddle2onnx/mapper/tensor/pow.h index 9128ca612..cb7fec311 100644 --- a/paddle2onnx/mapper/tensor/pow.h +++ b/paddle2onnx/mapper/tensor/pow.h @@ -27,6 +27,12 @@ class PowMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("factor", &factor_); } + PowMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + GetAttr("factor", &factor_); + } void Opset7() override; private: diff --git a/paddle2onnx/mapper/tensor/stack.cc b/paddle2onnx/mapper/tensor/stack.cc index 4c6f249db..719890a95 100644 --- a/paddle2onnx/mapper/tensor/stack.cc +++ b/paddle2onnx/mapper/tensor/stack.cc @@ -16,6 +16,7 @@ namespace paddle2onnx { REGISTER_MAPPER(stack, StackMapper) +REGISTER_PIR_MAPPER(stack, StackMapper) void StackMapper::Opset7() { auto x_info = GetInput("X"); diff --git a/paddle2onnx/mapper/tensor/stack.h b/paddle2onnx/mapper/tensor/stack.h index 2297b1817..41bfa0379 100644 --- a/paddle2onnx/mapper/tensor/stack.h +++ b/paddle2onnx/mapper/tensor/stack.h @@ -24,6 +24,12 @@ class StackMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("axis", &axis_); } + StackMapper(const PaddlePirParser& p, OnnxHelper* helper, + int64_t op_id) + : Mapper(p, helper, op_id) { + in_pir_mode = true; + GetAttr("axis", &axis_); + } void Opset7() override; private: diff --git a/paddle2onnx/mapper/tensor/tile.cc b/paddle2onnx/mapper/tensor/tile.cc index 
990775da9..3ca9c134c 100644
--- a/paddle2onnx/mapper/tensor/tile.cc
+++ b/paddle2onnx/mapper/tensor/tile.cc
@@ -16,6 +16,7 @@
 namespace paddle2onnx {
 REGISTER_MAPPER(tile, TileMapper)
+REGISTER_PIR_MAPPER(tile, TileMapper)
 
 void TileMapper::Opset7() {
   auto x_info = GetInput("X");
diff --git a/paddle2onnx/mapper/tensor/tile.h b/paddle2onnx/mapper/tensor/tile.h
index f1904a742..6c3b88f39 100644
--- a/paddle2onnx/mapper/tensor/tile.h
+++ b/paddle2onnx/mapper/tensor/tile.h
@@ -22,6 +22,11 @@ class TileMapper : public Mapper {
   TileMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id,
              int64_t op_id)
       : Mapper(p, helper, block_id, op_id) {}
+  TileMapper(const PaddlePirParser& p, OnnxHelper* helper,
+             int64_t op_id)
+      : Mapper(p, helper, op_id) {
+    in_pir_mode = true;
+  }
 
   void Opset7() override;
 };
diff --git a/paddle2onnx/parser/pir_parser.cc b/paddle2onnx/parser/pir_parser.cc
index 2ba9eac33..8ebc5ffe0 100644
--- a/paddle2onnx/parser/pir_parser.cc
+++ b/paddle2onnx/parser/pir_parser.cc
@@ -93,26 +93,27 @@ phi::DataType TransToPhiDataType(pir::Type dtype) {
 }
 
 namespace paddle2onnx {
-  std::string PaddlePirParser::GenOpInputOutputName(const std::string& name) const
-  {
+  std::string PaddlePirParser::GenOpInputOutputName(
+      const std::string& name) const {
     std::string new_name = "p2o." + name;
     if(_name_counter.find(new_name) != _name_counter.end()) {
       _name_counter[new_name] += 1;
-    }
-    else {
+    } else {
       _name_counter[new_name] = 0;
     }
     new_name += "." + std::to_string(_name_counter[new_name]);
     return new_name;
   }
-  void PaddlePirParser::AddOpOutputName(pir::Operation *op, std::string var_name, int64_t output_idx) const {
+  void PaddlePirParser::AddOpOutputName(pir::Operation *op,
+                                        std::string var_name,
+                                        int64_t output_idx) const {
     if(_op_outputs.count(op) == 0) {
       int num_outputs = op->num_results();
       _op_outputs[op] = std::vector<std::string>(num_outputs, "");
     }
     _op_outputs[op][output_idx] = var_name;
   }
-  
+
   std::string PaddlePirParser::GetOpOutputName(const pir::Value& source) const {
     auto op = source.defining_op();
     auto output_idx = source.dyn_cast<pir::OpResult>().index();
@@ -139,7 +140,9 @@ namespace paddle2onnx {
       std::string var_name = op->attribute("name").AsString();
       auto value = op->operand(0).source();
-      AddOpOutputName(value.defining_op(), var_name, value.dyn_cast<pir::OpResult>().index());
+      AddOpOutputName(value.defining_op(),
+                      var_name,
+                      value.dyn_cast<pir::OpResult>().index());
     }
   }
 }
@@ -163,29 +166,36 @@ namespace paddle2onnx {
     auto& normalizer = paddle::translator::OpNameNormalizer::instance();
     const auto& op_name_mappings = normalizer.GetOpNameMappings();
     const auto& op_arg_name_mappings = normalizer.GetOpArgNameMappings();
-    const auto& op_mutable_attribute_infos = normalizer.GetOpMutableAttributeInfos();
+    const auto& op_mutable_attribute_infos
+        = normalizer.GetOpMutableAttributeInfos();
     for(auto& item : op_arg_name_mappings) {
-      std::string op_name = pir_op_name_prefix + (op_name_mappings.count(item.first) ? op_name_mappings.at(item.first) : item.first );
+      std::string op_name = pir_op_name_prefix
+                            + (op_name_mappings.count(item.first)
+                                ? op_name_mappings.at(item.first)
+                                : item.first );
       std::unordered_map<std::string, std::string> arg_name_mapping;
       for(auto& arg : item.second) {
         arg_name_mapping[arg.second] = arg.first;
       }
       _op_arg_name_mappings[op_name] = arg_name_mapping;
     }
-    
+
     // mutable attribute name mappings
     for(auto& item : op_mutable_attribute_infos) {
-      std::string op_name = pir_op_name_prefix + (op_name_mappings.count(item.first) ?
op_name_mappings.at(item.first) : item.first ); + std::string op_name = pir_op_name_prefix + + (op_name_mappings.count(item.first) + ? op_name_mappings.at(item.first) + : item.first ); for(auto& attr : item.second) { - for(auto& attr_item : attr.second) - { + for(auto& attr_item : attr.second) { _op_arg_name_mappings[op_name][attr_item] = attr.first; } } } } - std::string PaddlePirParser::GetOpArgName(int64_t op_id, std::string name) const { + std::string PaddlePirParser::GetOpArgName(int64_t op_id, + std::string name) const { auto& op = global_blocks_ops[op_id]; pir::IrContext* ctx = pir::IrContext::Instance(); std::string op_name = op->name(); @@ -197,23 +207,31 @@ namespace paddle2onnx { } std::string builtin_prefix = "builtin."; if(op_name.substr(0, builtin_prefix.size()) == builtin_prefix) { - Assert(false, "builtin op " + op_name + " is not supported by GetOpInputOutputName2Idx."); + Assert(false, + "builtin op " + + op_name + + " is not supported by GetOpInputOutputName2Idx."); } if(_op_arg_name_mappings.count(op_name)) { - name = _op_arg_name_mappings.at(op_name).count(name) ? _op_arg_name_mappings.at(op_name).at(name) : name; - } - else { - if(op_name[op_name.size() - 1] == '_') { - std::string temp_op_name = op_name.substr(0, op_name.size() - 1); - if(_op_arg_name_mappings.count(temp_op_name)) { - name = _op_arg_name_mappings.at(temp_op_name).count(name) ? _op_arg_name_mappings.at(temp_op_name).at(name) : name; + name = _op_arg_name_mappings.at(op_name).count(name) + ? _op_arg_name_mappings.at(op_name).at(name) + : name; + } else { + if(op_name[op_name.size() - 1] == '_') { + std::string temp_op_name = op_name.substr(0, op_name.size() - 1); + if(_op_arg_name_mappings.count(temp_op_name)) { + name = _op_arg_name_mappings.at(temp_op_name).count(name) + ? _op_arg_name_mappings.at(temp_op_name).at(name) + : name; + } } - } } return name; } - int32_t PaddlePirParser::GetOpInputOutputName2Idx(int64_t op_id, std::string name, bool is_input) const { + int32_t PaddlePirParser::GetOpInputOutputName2Idx(int64_t op_id, + std::string name, + bool is_input) const { auto& op = global_blocks_ops[op_id]; pir::IrContext* ctx = pir::IrContext::Instance(); std::string op_name = op->name(); @@ -225,11 +243,15 @@ namespace paddle2onnx { } pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); paddle::dialect::OpYamlInfoParser yaml_parser( - op_info.GetInterfaceImpl()->get_op_info_(op_name), + op_info + .GetInterfaceImpl() + ->get_op_info_(op_name), // paddle::dialect::IsLegacyOp(op_name)); false); name = GetOpArgName(op_id, name); - bool exist = is_input ? yaml_parser.InputName2Id().count(name) : yaml_parser.OutputName2Id().count(name); + bool exist = is_input + ? yaml_parser.InputName2Id().count(name) + : yaml_parser.OutputName2Id().count(name); if (!exist) { P2OLogger() << "Cannot find input/output name '" << name << "' in op yaml info of " << op_name << std::endl; @@ -241,7 +263,9 @@ namespace paddle2onnx { // common::errors::InvalidArgument( // "Cannot find input/output name '%s' in op yaml info of %s.", // name, op_name)); - return is_input ? yaml_parser.InputName2Id().at(name) : yaml_parser.OutputName2Id().at(name); + return is_input + ? 
yaml_parser.InputName2Id().at(name)
+                    : yaml_parser.OutputName2Id().at(name);
  }
 
bool PaddlePirParser::LoadProgram(const std::string& model) {
@@ -256,7 +280,7 @@ bool PaddlePirParser::LoadProgram(const std::string& model) {
  }
  std::ostringstream print_stream;
  pir_program_.get()->Print(print_stream);
-  P2OLogger() << "PIR Program: \n"
+  P2OLogger() << "PIR Program: \n"
              << print_stream.str() << std::endl;
  return true;
}
@@ -416,7 +440,8 @@ void PaddlePirParser::GetGlobalBlocksOps() {
    }
  }
}

-TensorInfo PaddlePirParser::GetTensorInfo(const std::string& name, const pir::Type& value_type) const {
+TensorInfo PaddlePirParser::GetTensorInfo(const std::string& name,
+                                          const pir::Type& value_type) const {
  if (value_type.isa<paddle::dialect::DenseTensorType>()) {
    TensorInfo info;
    // get info.name
@@ -440,7 +465,8 @@ TensorInfo PaddlePirParser::GetTensorInfo(const std::string& name, const pir::Ty
  }
}

-std::vector<TensorInfo> PaddlePirParser::GetTensorInfo(const pir::Value& value) const {
+std::vector<TensorInfo> PaddlePirParser::GetTensorInfo(
+    const pir::Value& value) const {
  std::vector<TensorInfo> results;
  if (value.type().isa<pir::VectorType>()) {
    auto vec_type = value.type().cast<pir::VectorType>();
@@ -477,9 +503,9 @@ void PaddlePirParser::GetGlobalBlockInputOutputInfo() {
  }
}

-bool PaddlePirParser::IsAttrVar(const pir::Operation *op, 
+bool PaddlePirParser::IsAttrVar(const pir::Operation *op,
                                const int64_t &attr_id) const {
-  // TODO: For Resnet50, this interface always return false.
+  // TODO(qzylalala): For Resnet50, this interface always returns false.
  return false;
}

@@ -493,7 +519,7 @@ bool PaddlePirParser::OpIsAttrVar(int64_t op_id,
      is_attr_var = true;
      break;
    }
-    i ++;
+    i++;
  }

  return is_attr_var;
@@ -527,7 +553,16 @@ void PaddlePirParser::GetOpAttr(const pir::Operation* op,
      *res = pair.second.dyn_cast<::pir::Int32Attribute>().data();
    } else if (pair.second.isa<::pir::Int64Attribute>()) {
      *res = pair.second.dyn_cast<::pir::Int64Attribute>().data();
-    }
+    } else if (pair.second.isa<paddle::dialect::DataTypeAttribute>()) {
+      // a_dtype
+      auto type = op->result(0).type().cast<paddle::dialect::DenseTensorType>().dtype();
+      auto data_type = TransToPhiDataType(type);
+      auto it = pir_dtype_to_onnx_dtype.find(data_type);
+      if (it == pir_dtype_to_onnx_dtype.end()) {
+        std::cerr << "data_type not found" << std::endl;
+      }
+      *res = it->second;
+    }
    break;
  }
}
@@ -629,9 +664,9 @@ void PaddlePirParser::GetOpAttr(const pir::Operation* op,
    auto array_list = pair.second.dyn_cast<::pir::ArrayAttribute>().AsVector();
    if (array_list.size() > 0) {
-      // TODO: Need double check.
+      // TODO(qzylalala): Need double check.
PADDLE_ENFORCE_EQ( - array_list[0].isa<::pir::Int64Attribute>() + array_list[0].isa<::pir::Int64Attribute>() || array_list[0].isa<::pir::Int32Attribute>(), true, ::common::errors::Unimplemented( @@ -769,13 +804,17 @@ std::vector PaddlePirParser::GetOpOutput( } */ - bool PaddlePirParser::IsConstantTensor(int64_t op_id, int64_t input_idx) const { + bool PaddlePirParser::IsConstantTensor(int64_t op_id, + int64_t input_idx) const { PADDLE_ENFORCE_GT( input_idx, -1, common::errors::InvalidArgument( "input_idx should be greater than -1 in IsConstantTensor.")); // todo(wangmingkai02): need to check - return global_blocks_ops[op_id]->operand(input_idx).source().defining_op()->num_operands() == 0; + return global_blocks_ops[op_id]->operand(input_idx) + .source() + .defining_op() + ->num_operands() == 0; } } // namespace paddle2onnx diff --git a/tests/test_abs.py b/tests/test_abs.py index 4fdbc64b2..9b5950017 100644 --- a/tests/test_abs.py +++ b/tests/test_abs.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_abs_9(): """ api: paddle.abs @@ -41,14 +43,15 @@ def test_abs_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'abs', [9]) + obj = APIOnnx(op, "abs", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_abs_10(): """ api: paddle.abs @@ -57,14 +60,15 @@ def test_abs_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'abs', [10]) + obj = APIOnnx(op, "abs", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_abs_11(): """ api: paddle.abs @@ -73,14 +77,15 @@ def test_abs_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'abs', [11]) + obj = APIOnnx(op, "abs", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_abs_12(): """ api: paddle.abs @@ -89,9 +94,9 @@ def test_abs_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'abs', [12]) + obj = APIOnnx(op, "abs", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_auto_scan_bmm.py b/tests/test_auto_scan_bmm.py index 72d442ca1..60a77a088 100755 --- a/tests/test_auto_scan_bmm.py +++ b/tests/test_auto_scan_bmm.py @@ -13,11 +13,10 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet
-from hypothesis import reproduce_failure
 import hypothesis.strategies as st
-import numpy as np
 import unittest
 import paddle
+from onnxbase import _test_with_pir
 
 
 class Net(BaseNet):
@@ -36,19 +35,17 @@ def forward(self, inputs1, inputs2):
 class TestBmmConvert(OPConvertAutoScanTest):
     """
     api: paddle.bmm
-    OPset version: 7, 9, 15
+    Opset version: 7, 9, 15
     """
 
     def sample_convert_config(self, draw):
         input1_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=10, max_value=20), min_size=3, max_size=3))
+            st.lists(st.integers(min_value=10, max_value=20), min_size=3, max_size=3)
+        )
 
         input2_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=10, max_value=20), min_size=3, max_size=3))
+            st.lists(st.integers(min_value=10, max_value=20), min_size=3, max_size=3)
+        )
 
         input2_shape[0] = input1_shape[0]
         input2_shape[1] = input1_shape[2]
@@ -60,13 +57,14 @@ def sample_convert_config(self, draw):
             "test_data_shapes": [input1_shape, input2_shape],
             "test_data_types": [[dtype], [dtype]],
             "opset_version": [15],
-            "input_spec_shape": []
+            "input_spec_shape": [],
         }
 
         models = Net(config)
 
         return (config, models)
 
+    @_test_with_pir
     def test(self):
         self.run_and_statis(max_examples=30)
 
diff --git a/tests/test_auto_scan_cast.py b/tests/test_auto_scan_cast.py
index a1169229c..efa490878 100755
--- a/tests/test_auto_scan_cast.py
+++ b/tests/test_auto_scan_cast.py
@@ -13,11 +13,10 @@
 # limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st from onnxbase import randtool -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net0(BaseNet): @@ -94,11 +93,8 @@ class TestClipConvert0(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) - - input_spec = [-1] * len(input_shape) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -120,6 +116,7 @@ def sample_convert_config(self, draw): return (config0, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -132,11 +129,8 @@ class TestClipConvert1(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) - - input_spec = [-1] * len(input_shape) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -159,6 +153,7 @@ def generator_max(): return (config1, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -171,9 +166,8 @@ class TestClipConvert2(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -192,6 +186,7 @@ def sample_convert_config(self, draw): return (config2, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -204,11 +199,8 @@ class TestClipConvert3(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) - - input_spec = [-1] * len(input_shape) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -232,6 +224,7 @@ def generator_max(): return (config3, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -244,11 +237,8 @@ class TestClipConvert4(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) - - input_spec = [-1] * len(input_shape) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -265,6 +255,7 @@ def sample_convert_config(self, draw): return (config0, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_conv2d_transpose.py b/tests/test_auto_scan_conv2d_transpose.py index 3f2c4678c..e1b87ee1c 100755 --- a/tests/test_auto_scan_conv2d_transpose.py +++ b/tests/test_auto_scan_conv2d_transpose.py @@ -13,11 +13,11 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -41,7 +41,8 @@ def forward(self, inputs, weight): dilation=self.config["dilation"], groups=self.config["groups"], output_size=output_size, - data_format=self.config["data_format"]) + data_format=self.config["data_format"], + ) return x @@ -55,14 +56,12 @@ class TestConv2dTransposeConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=4, max_size=4)) + st.lists(st.integers(min_value=20, max_value=30), min_size=4, max_size=4) + ) kernel_size = draw( - st.lists( - st.integers( - min_value=1, max_value=7), min_size=4, max_size=4)) + st.lists(st.integers(min_value=1, max_value=7), min_size=4, max_size=4) + ) data_format = "NCHW" @@ -78,9 +77,8 @@ def sample_convert_config(self, draw): kernel_size[0] = groups strides = draw( - st.lists( - st.integers( - min_value=1, max_value=5), min_size=1, max_size=2)) + st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=2) + ) if len(strides) == 1: strides = strides[0] if strides > kernel_size[2]: @@ -114,20 +112,26 @@ def sample_convert_config(self, draw): np.array( draw( st.lists( - st.integers( - min_value=1, max_value=5), + st.integers(min_value=1, max_value=5), min_size=2, - max_size=2))), - axis=0).tolist() + max_size=2, + ) + ) + ), + axis=0, + ).tolist() padding2 = np.expand_dims( np.array( draw( st.lists( - st.integers( - min_value=1, max_value=5), + st.integers(min_value=1, max_value=5), min_size=2, - max_size=2))), - axis=0).tolist() + max_size=2, + ) + ) + ), + axis=0, + ).tolist() if data_format == "NCHW": padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 else: @@ -140,10 +144,9 @@ def sample_convert_config(self, draw): if draw(st.booleans()): padding = draw( st.lists( - st.integers( - min_value=1, max_value=5), - min_size=2, - max_size=2)) + st.integers(min_value=1, max_value=5), min_size=2, max_size=2 + ) + ) padding_1_1 = padding[0] padding_1_2 = padding[0] padding_2_1 = padding[1] @@ -151,19 +154,17 @@ def sample_convert_config(self, draw): else: padding = draw( st.lists( - st.integers( - min_value=1, max_value=5), - min_size=4, - max_size=4)) + st.integers(min_value=1, max_value=5), min_size=4, max_size=4 + ) + ) padding_1_1 = padding[0] padding_1_2 = padding[1] padding_2_1 = padding[2] padding_2_2 = padding[3] dilations = draw( - st.lists( - st.integers( - min_value=1, max_value=3), min_size=1, max_size=2)) + st.lists(st.integers(min_value=1, max_value=3), min_size=1, max_size=2) + ) if len(dilations) == 1: dilations = dilations[0] dilations_1 = dilations @@ -177,13 +178,19 @@ def sample_convert_config(self, draw): output_size = None if draw(st.booleans()): output_size_1 = ( - input_shape[2] - 1 - ) * stride_1 - padding_1_1 - padding_1_2 + dilations_1 * ( - kernel_size[2] - 1) + 1 + (input_shape[2] - 1) * stride_1 + - padding_1_1 + - padding_1_2 + + dilations_1 * (kernel_size[2] - 1) + + 1 + ) output_size_2 = ( - input_shape[3] - 1 - ) * stride_2 - padding_2_1 - padding_2_2 + dilations_2 * ( - kernel_size[3] - 1) + 1 + (input_shape[3] - 1) * stride_2 + - padding_2_1 + - padding_2_2 + + dilations_2 * (kernel_size[3] - 1) + + 1 + ) if output_size_1 == output_size_2: output_size = output_size_1 else: @@ -194,7 +201,7 @@ def sample_convert_config(self, draw): config = { "op_names": 
["conv2d_transpose"], "test_data_shapes": [input_shape, kernel_size], - "test_data_types": [['float32'], ['float32']], + "test_data_types": [["float32"], ["float32"]], "opset_version": [7, 9, 15], "input_spec_shape": [[-1, input_shape[1], -1, -1], kernel_size], "data_format": data_format, @@ -207,13 +214,14 @@ def sample_convert_config(self, draw): "delta": 1e-4, "rtol": 1e-4, "output_size": output_size, - "tensor_attr": tensor_attr + "tensor_attr": tensor_attr, } models = Net(config) return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_gelu.py b/tests/test_auto_scan_gelu.py index 16463c37d..a2bd11e8a 100644 --- a/tests/test_auto_scan_gelu.py +++ b/tests/test_auto_scan_gelu.py @@ -13,11 +13,10 @@ # limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -41,9 +40,8 @@ class TestGeluConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=5, max_value=20), min_size=0, max_size=4)) + st.lists(st.integers(min_value=5, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32", "float64"])) @@ -59,6 +57,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_group_norm.py b/tests/test_auto_scan_group_norm.py index 0217f4652..7501431ff 100755 --- a/tests/test_auto_scan_group_norm.py +++ b/tests/test_auto_scan_group_norm.py @@ -13,11 +13,10 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -27,17 +26,18 @@ class Net(BaseNet): def __init__(self, config=None): super(Net, self).__init__(config) - groups = self.config['groups'] - epsilon = self.config['epsilon'] - num_channels = self.config['num_channels'] - data_format = self.config['data_format'] + groups = self.config["groups"] + epsilon = self.config["epsilon"] + num_channels = self.config["num_channels"] + data_format = self.config["data_format"] self.group_norm = paddle.nn.GroupNorm( num_groups=groups, num_channels=num_channels, epsilon=epsilon, - weight_attr=None if self.config['has_weight_attr'] else False, - bias_attr=None if self.config['has_bias_attr'] else False, - data_format=data_format) + weight_attr=None if self.config["has_weight_attr"] else False, + bias_attr=None if self.config["has_bias_attr"] else False, + data_format=data_format, + ) def forward(self, inputs): """ @@ -55,9 +55,8 @@ class TestGroupNormConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=4, max_value=10), min_size=4, max_size=4)) + st.lists(st.integers(min_value=4, max_value=10), min_size=4, max_size=4) + ) dtype = draw(st.sampled_from(["float32"])) data_format = draw(st.sampled_from(["NCHW"])) @@ -84,6 +83,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_leakyrelu.py b/tests/test_auto_scan_leakyrelu.py index 0ffe2fbda..6375debd0 100644 --- a/tests/test_auto_scan_leakyrelu.py +++ b/tests/test_auto_scan_leakyrelu.py @@ -13,11 +13,10 @@ # limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -30,7 +29,8 @@ def forward(self, inputs): forward """ x = paddle.nn.functional.leaky_relu( - inputs, negative_slope=self.config["negative_slope"]) + inputs, negative_slope=self.config["negative_slope"] + ) return x @@ -42,9 +42,8 @@ class TestLeakyreluConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["float32"])) negative_slope = draw(st.floats(min_value=0, max_value=1)) @@ -62,6 +61,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_meshgrid.py b/tests/test_auto_scan_meshgrid.py index 3c891dcc5..4b24dab1a 100644 --- a/tests/test_auto_scan_meshgrid.py +++ b/tests/test_auto_scan_meshgrid.py @@ -13,11 +13,10 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -41,14 +40,12 @@ class TestMeshgridConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape1 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape2 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) @@ -64,6 +61,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -89,19 +87,16 @@ class TestMeshgridConvert1(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape1 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape2 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape3 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) @@ -117,6 +112,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -142,37 +138,35 @@ class TestMeshgridConvert2(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape1 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape2 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape3 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape4 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) input_shape5 = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=1, max_size=1)) + st.lists(st.integers(min_value=4, max_value=8), min_size=1, max_size=1) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) config = { "op_names": ["meshgrid"], "test_data_shapes": [ - input_shape1, input_shape2, input_shape3, input_shape4, - input_shape5 + input_shape1, + input_shape2, + input_shape3, + input_shape4, + input_shape5, ], "test_data_types": [[dtype], [dtype], [dtype], [dtype], [dtype]], "opset_version": [8, 9, 15], @@ -183,6 +177,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_pow.py b/tests/test_auto_scan_pow.py index 26c5e7d67..db774464a 100644 --- a/tests/test_auto_scan_pow.py +++ b/tests/test_auto_scan_pow.py @@ -13,11 +13,10 @@ # 
limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -41,9 +40,8 @@ class TestPowConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=10, max_value=20), min_size=0, max_size=4)) + st.lists(st.integers(min_value=10, max_value=20), min_size=0, max_size=4) + ) dtype = draw(st.sampled_from(["int32", "int64", "float32", "float64"])) y = draw(st.integers(min_value=1, max_value=5)) @@ -54,13 +52,14 @@ def sample_convert_config(self, draw): "test_data_types": [[dtype]], "opset_version": [7, 9, 15], "input_spec_shape": [], - "y": y + "y": y, } models = Net(config) return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_shape.py b/tests/test_auto_scan_shape.py index 36e1eac7b..62b513938 100755 --- a/tests/test_auto_scan_shape.py +++ b/tests/test_auto_scan_shape.py @@ -13,11 +13,10 @@ # limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -41,9 +40,8 @@ class TestShapeConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=0, max_size=5)) + st.lists(st.integers(min_value=4, max_value=8), min_size=0, max_size=5) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) @@ -59,6 +57,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_squeeze2.py b/tests/test_auto_scan_squeeze2.py index bc087d474..c8027bdfb 100755 --- a/tests/test_auto_scan_squeeze2.py +++ b/tests/test_auto_scan_squeeze2.py @@ -13,11 +13,10 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -29,13 +28,13 @@ def forward(self, inputs): """ forward """ - if self.config["tensor_attr"] and self.config['axis'] is not None: - if isinstance(self.config['axis'], list): - axis = [paddle.to_tensor(i) for i in self.config['axis']] + if self.config["tensor_attr"] and self.config["axis"] is not None: + if isinstance(self.config["axis"], list): + axis = [paddle.to_tensor(i) for i in self.config["axis"]] else: - axis = paddle.to_tensor(self.config['axis']) + axis = paddle.to_tensor(self.config["axis"]) else: - axis = self.config['axis'] + axis = self.config["axis"] x = paddle.squeeze(inputs, axis=axis) return x @@ -48,13 +47,14 @@ class TestSqueezeConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=4, max_value=10), min_size=3, max_size=5)) + st.lists(st.integers(min_value=4, max_value=10), min_size=3, max_size=5) + ) dtype = draw(st.sampled_from(["bool", "float32", "float64", "int32", "int64"])) - axis = draw(st.integers(min_value=-len(input_shape), max_value=len(input_shape) - 1)) + axis = draw( + st.integers(min_value=-len(input_shape), max_value=len(input_shape) - 1) + ) if axis == 0: axis = [0, -1] else: @@ -76,13 +76,14 @@ def sample_convert_config(self, draw): "opset_version": [7, 9, 15], "input_spec_shape": input_spec_shape, "axis": axis, - "tensor_attr": tensor_attr + "tensor_attr": tensor_attr, } models = Net(config) return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_stack.py b/tests/test_auto_scan_stack.py index a223d03e9..359b9e9bb 100755 --- a/tests/test_auto_scan_stack.py +++ b/tests/test_auto_scan_stack.py @@ -13,11 +13,10 @@ # limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st -import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -29,7 +28,7 @@ def forward(self, inputs1, inputs2): """ forward """ - x = paddle.stack([inputs1, inputs2], axis=self.config['axis']) + x = paddle.stack([inputs1, inputs2], axis=self.config["axis"]) return x @@ -41,16 +40,14 @@ class TestStackConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=4, max_value=8), min_size=0, max_size=5)) + st.lists(st.integers(min_value=4, max_value=8), min_size=0, max_size=5) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) if len(input_shape) > 0: axis = draw( - st.integers( - min_value=-len(input_shape), max_value=len(input_shape) - - 1)) + st.integers(min_value=-len(input_shape), max_value=len(input_shape) - 1) + ) else: axis = 0 @@ -67,6 +64,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_auto_scan_tile.py b/tests/test_auto_scan_tile.py index 0b541442e..3cca92bb5 100755 --- a/tests/test_auto_scan_tile.py +++ b/tests/test_auto_scan_tile.py @@ -13,11 +13,11 @@ # limitations under the License. 
from auto_scan_test import OPConvertAutoScanTest, BaseNet -from hypothesis import reproduce_failure import hypothesis.strategies as st import numpy as np import unittest import paddle +from onnxbase import _test_with_pir class Net(BaseNet): @@ -29,13 +29,14 @@ def forward(self, inputs): """ forward """ - repeat_times = self.config['repeat_times'] - if self.config['repeat_times_dtype'] == "list": + repeat_times = self.config["repeat_times"] + if self.config["repeat_times_dtype"] == "list": repeat_times = repeat_times - elif self.config['repeat_times_dtype'] == "Tensor": + elif self.config["repeat_times_dtype"] == "Tensor": repeat_times = paddle.to_tensor( - np.array(repeat_times).astype(self.config['shape_dtype'])) - elif self.config['repeat_times_dtype'] == "int": + np.array(repeat_times).astype(self.config["shape_dtype"]) + ) + elif self.config["repeat_times_dtype"] == "int": repeat_times = [repeat_times[0]] x = paddle.tile(inputs, repeat_times=repeat_times) return x @@ -49,9 +50,8 @@ class TestTileConvert(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=2, max_value=5), min_size=0, max_size=5)) + st.lists(st.integers(min_value=2, max_value=5), min_size=0, max_size=5) + ) dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) # when repeat_times_dtype is tensor has a bug @@ -77,6 +77,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) @@ -90,10 +91,7 @@ def forward(self, inputs): """ forward """ - repeat_times = [ - 4, paddle.to_tensor( - 3, dtype=self.config['shape_dtype']), 2, 1 - ] + repeat_times = [4, paddle.to_tensor(3, dtype=self.config["shape_dtype"]), 2, 1] # repeat_times = [4, 3, 2, 1] # repeat_times = paddle.to_tensor( # np.array([4, 3, 2, 1]).astype('int32')) @@ -111,9 +109,8 @@ class TestTileConvert1(OPConvertAutoScanTest): def sample_convert_config(self, draw): input_shape = draw( - st.lists( - st.integers( - min_value=2, max_value=5), min_size=0, max_size=5)) + st.lists(st.integers(min_value=2, max_value=5), min_size=0, max_size=5) + ) input_shape = [4, 3, 2, 1] dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) shape_dtype = draw(st.sampled_from(["int32", "int64"])) @@ -140,6 +137,7 @@ def sample_convert_config(self, draw): return (config, models) + @_test_with_pir def test(self): self.run_and_statis(max_examples=30) diff --git a/tests/test_bmm.py b/tests/test_bmm.py index 4a626272c..4020a02dc 100644 --- a/tests/test_bmm.py +++ b/tests/test_bmm.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -30,9 +31,10 @@ def forward(self, inputs, inputs_): forward """ x = paddle.bmm(inputs, inputs_) - return x.astype('float32') + return x.astype("float32") +@_test_with_pir def test_bmm_9(): """ api: paddle.bmm @@ -41,15 +43,16 @@ def test_bmm_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'bmm', [9]) + obj = APIOnnx(op, "bmm", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_bmm_10(): """ api: paddle.bmm @@ -58,15 
+61,16 @@ def test_bmm_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'bmm', [10]) + obj = APIOnnx(op, "bmm", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_bmm_11(): """ api: paddle.bmm @@ -75,15 +79,16 @@ def test_bmm_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'bmm', [11]) + obj = APIOnnx(op, "bmm", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_bmm_12(): """ api: paddle.bmm @@ -92,10 +97,10 @@ def test_bmm_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'bmm', [12]) + obj = APIOnnx(op, "bmm", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_cast.py b/tests/test_cast.py index f28e8100a..1e08486d0 100755 --- a/tests/test_cast.py +++ b/tests/test_cast.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -29,10 +30,11 @@ def forward(self, inputs): """ forward """ - x = paddle.cast(inputs, 'float64') + x = paddle.cast(inputs, "float64") return x +@_test_with_pir def test_cast_9(): """ api: paddle.cast @@ -41,13 +43,15 @@ def test_cast_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cast', [9]) + obj = APIOnnx(op, "cast", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cast_10(): """ api: paddle.cast @@ -56,13 +60,15 @@ def test_cast_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cast', [10]) + obj = APIOnnx(op, "cast", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cast_11(): """ api: paddle.cast @@ -71,13 +77,15 @@ def test_cast_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cast', [11]) + obj = APIOnnx(op, "cast", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cast_12(): """ api: paddle.cast @@ -86,8 +94,9 @@ def test_cast_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = 
APIOnnx(op, 'cast', [12]) + obj = APIOnnx(op, "cast", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_clip.py b/tests/test_clip.py index 1d00dd309..d0ec2eab0 100644 --- a/tests/test_clip.py +++ b/tests/test_clip.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_clip_9(): """ api: paddle.clip @@ -41,13 +43,15 @@ def test_clip_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'clip', [9]) + obj = APIOnnx(op, "clip", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_clip_10(): """ api: paddle.clip @@ -56,13 +60,15 @@ def test_clip_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'clip', [10]) + obj = APIOnnx(op, "clip", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_clip_11(): """ api: paddle.clip @@ -71,13 +77,15 @@ def test_clip_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'clip', [11]) + obj = APIOnnx(op, "clip", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_clip_12(): """ api: paddle.clip @@ -86,8 +94,9 @@ def test_clip_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'clip', [12]) + obj = APIOnnx(op, "clip", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_cos.py b/tests/test_cos.py index 911e6d78d..aeb0bb607 100644 --- a/tests/test_cos.py +++ b/tests/test_cos.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_cos_9(): """ api: paddle.cos @@ -41,13 +43,15 @@ def test_cos_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cos', [9]) + obj = APIOnnx(op, "cos", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cos_10(): """ api: paddle.cos @@ -56,13 +60,15 @@ def test_cos_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cos', [10]) + obj = APIOnnx(op, "cos", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cos_11(): """ api: paddle.cos @@ -71,13 
+77,15 @@ def test_cos_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cos', [11]) + obj = APIOnnx(op, "cos", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_cos_12(): """ api: paddle.cos @@ -86,8 +94,9 @@ def test_cos_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'cos', [12]) + obj = APIOnnx(op, "cos", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_exp.py b/tests/test_exp.py index 226b779fe..25f7b9793 100644 --- a/tests/test_exp.py +++ b/tests/test_exp.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_exp_9(): """ api: paddle.exp @@ -41,13 +43,15 @@ def test_exp_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'exp', [9]) + obj = APIOnnx(op, "exp", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_exp_10(): """ api: paddle.exp @@ -56,13 +60,15 @@ def test_exp_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'exp', [10]) + obj = APIOnnx(op, "exp", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_exp_11(): """ api: paddle.exp @@ -71,13 +77,15 @@ def test_exp_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'exp', [11]) + obj = APIOnnx(op, "exp", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_exp_12(): """ api: paddle.exp @@ -86,8 +94,9 @@ def test_exp_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'exp', [12]) + obj = APIOnnx(op, "exp", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_floor.py b/tests/test_floor.py index afa107d06..d11f2f925 100644 --- a/tests/test_floor.py +++ b/tests/test_floor.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_floor_9(): """ api: paddle.floor @@ -41,14 +43,15 @@ def test_floor_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'floor', [9]) + obj = APIOnnx(op, "floor", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 
3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_floor_10(): """ api: paddle.floor @@ -57,14 +60,15 @@ def test_floor_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'floor', [10]) + obj = APIOnnx(op, "floor", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_floor_11(): """ api: paddle.floor @@ -73,14 +77,15 @@ def test_floor_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'floor', [11]) + obj = APIOnnx(op, "floor", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_floor_12(): """ api: paddle.floor @@ -89,9 +94,9 @@ def test_floor_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'floor', [12]) + obj = APIOnnx(op, "floor", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_gelu.py b/tests/test_gelu.py index ca4b3914b..b7d37be39 100644 --- a/tests/test_gelu.py +++ b/tests/test_gelu.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_gelu_9(): """ api: paddle.gelu @@ -41,14 +43,15 @@ def test_gelu_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'gelu', [9]) + obj = APIOnnx(op, "gelu", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_gelu_10(): """ api: paddle.gelu @@ -57,14 +60,15 @@ def test_gelu_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'gelu', [10]) + obj = APIOnnx(op, "gelu", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_gelu_11(): """ api: paddle.gelu @@ -73,14 +77,15 @@ def test_gelu_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'gelu', [11]) + obj = APIOnnx(op, "gelu", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_gelu_12(): """ api: paddle.gelu @@ -89,9 +94,9 @@ def test_gelu_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'gelu', [12]) + obj = APIOnnx(op, "gelu", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_leaky_relu.py b/tests/test_leaky_relu.py index 3ab6ae5fd..cc88b7a2a 100644 --- a/tests/test_leaky_relu.py 
+++ b/tests/test_leaky_relu.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_leaky_relu_9(): """ api: paddle.leaky_relu @@ -41,13 +43,15 @@ def test_leaky_relu_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'leaky_relu', [9]) + obj = APIOnnx(op, "leaky_relu", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_leaky_relu_10(): """ api: paddle.leaky_relu @@ -56,13 +60,15 @@ def test_leaky_relu_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'leaky_relu', [10]) + obj = APIOnnx(op, "leaky_relu", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_leaky_relu_11(): """ api: paddle.leaky_relu @@ -71,13 +77,15 @@ def test_leaky_relu_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'leaky_relu', [11]) + obj = APIOnnx(op, "leaky_relu", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_leaky_relu_12(): """ api: paddle.leaky_relu @@ -86,8 +94,9 @@ def test_leaky_relu_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'leaky_relu', [12]) + obj = APIOnnx(op, "leaky_relu", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_logical_and.py b/tests/test_logical_and.py index 3357e8026..fb3abccc9 100644 --- a/tests/test_logical_and.py +++ b/tests/test_logical_and.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs, inputs_): return x +@_test_with_pir def test_logical_and_10(): """ api: paddle.logical_and @@ -41,14 +43,16 @@ def test_logical_and_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_and', [10]) + obj = APIOnnx(op, "logical_and", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('bool'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("bool")), + ) obj.run() +@_test_with_pir def test_logical_and_11(): """ api: paddle.logical_and @@ -57,14 +61,16 @@ def test_logical_and_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_and', [11]) + obj = APIOnnx(op, "logical_and", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('bool'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")), + 
paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("bool")), + ) obj.run() +@_test_with_pir def test_logical_and_12(): """ api: paddle.logical_and @@ -73,9 +79,10 @@ def test_logical_and_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_and', [12]) + obj = APIOnnx(op, "logical_and", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('bool'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("bool")), + ) obj.run() diff --git a/tests/test_logical_not.py b/tests/test_logical_not.py index 522633ddc..86be25f0e 100644 --- a/tests/test_logical_not.py +++ b/tests/test_logical_not.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_logical_not_10(): """ api: paddle.logical_not @@ -41,13 +43,14 @@ def test_logical_not_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_not', [10]) + obj = APIOnnx(op, "logical_not", [10]) obj.set_input_data( - "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool'))) + "input_data", paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")) + ) obj.run() +@_test_with_pir def test_logical_not_11(): """ api: paddle.logical_not @@ -56,13 +59,14 @@ def test_logical_not_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_not', [11]) + obj = APIOnnx(op, "logical_not", [11]) obj.set_input_data( - "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool'))) + "input_data", paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")) + ) obj.run() +@_test_with_pir def test_logical_not_12(): """ api: paddle.logical_not @@ -71,8 +75,8 @@ def test_logical_not_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'logical_not', [12]) + obj = APIOnnx(op, "logical_not", [12]) obj.set_input_data( - "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('bool'))) + "input_data", paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("bool")) + ) obj.run() diff --git a/tests/test_meshgrid.py b/tests/test_meshgrid.py index 39cf3fd95..b043850a9 100644 --- a/tests/test_meshgrid.py +++ b/tests/test_meshgrid.py @@ -14,7 +14,7 @@ import paddle from onnxbase import APIOnnx -from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +33,7 @@ def forward(self, inputs, _inputs): return x + y +@_test_with_pir def test_meshgrid_base(): """ api: paddle.meshgrid @@ -41,13 +42,16 @@ def test_meshgrid_base(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'meshgrid', [11, 12]) - obj.set_input_data("input_data", - paddle.to_tensor([1, 2, 3]).astype('float32'), - paddle.to_tensor([4, 5, 6]).astype('float32')) + obj = APIOnnx(op, "meshgrid", [11, 12]) + obj.set_input_data( + "input_data", + paddle.to_tensor([1, 2, 3]).astype("float32"), + paddle.to_tensor([4, 5, 6]).astype("float32"), + ) obj.run() +@_test_with_pir def test_meshgrid_unlikeSize(): """ api: paddle.meshgrid @@ -56,10 +60,12 @@ def test_meshgrid_unlikeSize(): op = Net() op.eval() # net, name, ver_list, 
delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'meshgrid', [11, 12]) - obj.set_input_data("input_data", - paddle.to_tensor([1, 2, 3]).astype('float32'), - paddle.to_tensor([5, 6]).astype('float32')) + obj = APIOnnx(op, "meshgrid", [11, 12]) + obj.set_input_data( + "input_data", + paddle.to_tensor([1, 2, 3]).astype("float32"), + paddle.to_tensor([5, 6]).astype("float32"), + ) obj.run() @@ -79,6 +85,7 @@ def forward(self, inputs, _inputs, _input): return x + y + z +@_test_with_pir def test_meshgrid_3(): """ api: paddle.meshgrid @@ -87,9 +94,11 @@ def test_meshgrid_3(): op = Net_3() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'meshgrid', [11, 12]) - obj.set_input_data("input_data", - paddle.to_tensor([1, 2, 3]).astype('float32'), - paddle.to_tensor([5, 6]).astype('float32'), - paddle.to_tensor([1, 2, 3, 4]).astype('float32')) + obj = APIOnnx(op, "meshgrid", [11, 12]) + obj.set_input_data( + "input_data", + paddle.to_tensor([1, 2, 3]).astype("float32"), + paddle.to_tensor([5, 6]).astype("float32"), + paddle.to_tensor([1, 2, 3, 4]).astype("float32"), + ) obj.run() diff --git a/tests/test_pow.py b/tests/test_pow.py index 1283fe2d0..ce09250b1 100644 --- a/tests/test_pow.py +++ b/tests/test_pow.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_pow_9(): """ api: paddle.pow @@ -41,14 +43,15 @@ def test_pow_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'pow', [9]) + obj = APIOnnx(op, "pow", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_pow_10(): """ api: paddle.pow @@ -57,14 +60,15 @@ def test_pow_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'pow', [10]) + obj = APIOnnx(op, "pow", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_pow_11(): """ api: paddle.pow @@ -73,14 +77,15 @@ def test_pow_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'pow', [11]) + obj = APIOnnx(op, "pow", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_pow_12(): """ api: paddle.pow @@ -89,9 +94,9 @@ def test_pow_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'pow', [12]) + obj = APIOnnx(op, "pow", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_shape.py b/tests/test_shape.py index 4bc340efe..5111db273 100644 --- a/tests/test_shape.py +++ b/tests/test_shape.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_shape_9(): """ api: 
paddle.shape @@ -41,13 +43,15 @@ def test_shape_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'shape', [9]) + obj = APIOnnx(op, "shape", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_shape_10(): """ api: paddle.shape @@ -56,13 +60,15 @@ def test_shape_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'shape', [10]) + obj = APIOnnx(op, "shape", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_shape_11(): """ api: paddle.shape @@ -71,13 +77,15 @@ def test_shape_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'shape', [11]) + obj = APIOnnx(op, "shape", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_shape_12(): """ api: paddle.shape @@ -86,8 +94,9 @@ def test_shape_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'shape', [12]) + obj = APIOnnx(op, "shape", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_sigmoid.py b/tests/test_sigmoid.py index c0c3f6582..62bebc98c 100644 --- a/tests/test_sigmoid.py +++ b/tests/test_sigmoid.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_sigmoid_7(): """ api: paddle.nn.functional.sigmoid @@ -41,8 +43,9 @@ def test_sigmoid_7(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'sigmoid', [7]) + obj = APIOnnx(op, "sigmoid", [7]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) - obj.run() \ No newline at end of file + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) + obj.run() diff --git a/tests/test_sin.py b/tests/test_sin.py index 243e64fdb..3a3683262 100644 --- a/tests/test_sin.py +++ b/tests/test_sin.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_sin_9(): """ api: paddle.sin @@ -41,11 +43,13 @@ def test_sin_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'sin', [7]) + obj = APIOnnx(op, "sin", [7]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() + if __name__ == "__main__": test_sin_9() diff --git a/tests/test_stack.py b/tests/test_stack.py index 2a3eca92a..7acf78d35 100644 --- a/tests/test_stack.py +++ b/tests/test_stack.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import 
randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs, inputs_): return x +@_test_with_pir def test_stack_9(): """ api: paddle.stack @@ -41,14 +43,16 @@ def test_stack_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'stack', [9]) + obj = APIOnnx(op, "stack", [9]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_stack_10(): """ api: paddle.stack @@ -57,14 +61,16 @@ def test_stack_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'stack', [10]) + obj = APIOnnx(op, "stack", [10]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_stack_11(): """ api: paddle.stack @@ -73,14 +79,16 @@ def test_stack_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'stack', [11]) + obj = APIOnnx(op, "stack", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_stack_12(): """ api: paddle.stack @@ -89,9 +97,10 @@ def test_stack_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'stack', [12]) + obj = APIOnnx(op, "stack", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')), - paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype("float32")), + ) obj.run() diff --git a/tests/test_swish.py b/tests/test_swish.py index 3e9477c71..8411e942b 100644 --- a/tests/test_swish.py +++ b/tests/test_swish.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_swish_9(): """ api: paddle.swish @@ -41,14 +43,15 @@ def test_swish_9(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'swish', [9]) + obj = APIOnnx(op, "swish", [9]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_swish_10(): """ api: paddle.swish @@ -57,14 +60,15 @@ def test_swish_10(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'swish', [10]) + obj = APIOnnx(op, "swish", [10]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 
3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_swish_11(): """ api: paddle.swish @@ -73,14 +77,15 @@ def test_swish_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'swish', [11]) + obj = APIOnnx(op, "swish", [11]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() +@_test_with_pir def test_swish_12(): """ api: paddle.swish @@ -89,9 +94,9 @@ def test_swish_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'swish', [12]) + obj = APIOnnx(op, "swish", [12]) obj.set_input_data( "input_data", - paddle.to_tensor( - randtool("float", -1, 1, [3, 3, 3]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 3, 3]).astype("float32")), + ) obj.run() diff --git a/tests/test_tile.py b/tests/test_tile.py index a506323a9..ffc2b1090 100644 --- a/tests/test_tile.py +++ b/tests/test_tile.py @@ -15,6 +15,7 @@ import paddle from onnxbase import APIOnnx from onnxbase import randtool +from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,6 +34,7 @@ def forward(self, inputs): return x +@_test_with_pir def test_tile_11(): """ api: paddle.tile @@ -41,13 +43,15 @@ def test_tile_11(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'tile', [11]) + obj = APIOnnx(op, "tile", [11]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() +@_test_with_pir def test_tile_12(): """ api: paddle.tile @@ -56,8 +60,9 @@ def test_tile_12(): op = Net() op.eval() # net, name, ver_list, delta=1e-6, rtol=1e-5 - obj = APIOnnx(op, 'tile', [12]) + obj = APIOnnx(op, "tile", [12]) obj.set_input_data( "input_data", - paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32'))) + paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype("float32")), + ) obj.run() From 5202fb1c71cbeb2d00b67c8b72b4a4fdeeab880c Mon Sep 17 00:00:00 2001 From: qzylalala Date: Mon, 21 Oct 2024 22:18:18 +0800 Subject: [PATCH 2/5] [OSPP][PIR] adjust to new REGISTER_PIR_MAPPER --- paddle2onnx/mapper/activation/sigmoid.h | 8 +++++--- paddle2onnx/mapper/activation/swish.h | 8 +++++--- paddle2onnx/mapper/nn/conv2d_transpose.h | 8 +++++--- paddle2onnx/mapper/nn/group_norm.h | 8 +++++--- paddle2onnx/mapper/nn/shape.h | 8 +++++--- paddle2onnx/mapper/tensor/bmm.h | 8 +++++--- paddle2onnx/mapper/tensor/cast.h | 8 +++++--- paddle2onnx/mapper/tensor/clip.h | 8 +++++--- paddle2onnx/mapper/tensor/logical_not.h | 8 +++++--- paddle2onnx/mapper/tensor/meshgrid.h | 8 +++++--- paddle2onnx/mapper/tensor/not_equal.h | 8 +++++--- paddle2onnx/mapper/tensor/pow.h | 8 +++++--- paddle2onnx/mapper/tensor/stack.h | 8 +++++--- paddle2onnx/mapper/tensor/tile.h | 8 +++++--- 14 files changed, 70 insertions(+), 42 deletions(-) diff --git a/paddle2onnx/mapper/activation/sigmoid.h b/paddle2onnx/mapper/activation/sigmoid.h index c1121b023..2d5b8a7be 100644 --- a/paddle2onnx/mapper/activation/sigmoid.h +++ b/paddle2onnx/mapper/activation/sigmoid.h @@ -26,9 +26,11 @@ class SigmoidMapper : public Mapper { SigmoidMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - SigmoidMapper(const 
PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + SigmoidMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } void Opset7() override; diff --git a/paddle2onnx/mapper/activation/swish.h b/paddle2onnx/mapper/activation/swish.h index 14d5f3efb..4332d9784 100644 --- a/paddle2onnx/mapper/activation/swish.h +++ b/paddle2onnx/mapper/activation/swish.h @@ -27,9 +27,11 @@ class SwishMapper : public Mapper { SwishMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - SwishMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + SwishMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } void Opset7() override; diff --git a/paddle2onnx/mapper/nn/conv2d_transpose.h b/paddle2onnx/mapper/nn/conv2d_transpose.h index ca8657cb9..e787126bf 100755 --- a/paddle2onnx/mapper/nn/conv2d_transpose.h +++ b/paddle2onnx/mapper/nn/conv2d_transpose.h @@ -46,9 +46,11 @@ class Conv2dTransposeMapper : public Mapper { } } - Conv2dTransposeMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + Conv2dTransposeMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; GetAttr("groups", &groups_); GetAttr("dilations", &dilations_); diff --git a/paddle2onnx/mapper/nn/group_norm.h b/paddle2onnx/mapper/nn/group_norm.h index 5056f2b24..7a4bdb494 100755 --- a/paddle2onnx/mapper/nn/group_norm.h +++ b/paddle2onnx/mapper/nn/group_norm.h @@ -28,9 +28,11 @@ class GroupNormMapper : public Mapper { GetAttr("groups", &groups_); GetAttr("epsilon", &epsilon_); } - GroupNormMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + GroupNormMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; GetAttr("groups", &groups_); GetAttr("epsilon", &epsilon_); diff --git a/paddle2onnx/mapper/nn/shape.h b/paddle2onnx/mapper/nn/shape.h index 20d98a955..4c2c4da1d 100644 --- a/paddle2onnx/mapper/nn/shape.h +++ b/paddle2onnx/mapper/nn/shape.h @@ -25,9 +25,11 @@ class ShapeMapper : public Mapper { ShapeMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - ShapeMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + ShapeMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } diff --git a/paddle2onnx/mapper/tensor/bmm.h b/paddle2onnx/mapper/tensor/bmm.h index e9c70214b..1288e7101 100644 --- a/paddle2onnx/mapper/tensor/bmm.h +++ b/paddle2onnx/mapper/tensor/bmm.h @@ -25,9 +25,11 @@ class BmmMapper : public Mapper { BmmMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - BmmMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + BmmMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } void Opset7() override; diff --git a/paddle2onnx/mapper/tensor/cast.h b/paddle2onnx/mapper/tensor/cast.h index 
9abd708c1..891c17207 100644 --- a/paddle2onnx/mapper/tensor/cast.h +++ b/paddle2onnx/mapper/tensor/cast.h @@ -27,9 +27,11 @@ class CastMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("out_dtype", &out_dtype_); } - CastMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + CastMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; GetAttr("dtype", &out_dtype_); } diff --git a/paddle2onnx/mapper/tensor/clip.h b/paddle2onnx/mapper/tensor/clip.h index 32c11b924..fd7ea1f43 100644 --- a/paddle2onnx/mapper/tensor/clip.h +++ b/paddle2onnx/mapper/tensor/clip.h @@ -25,9 +25,11 @@ class ClipMapper : public Mapper { ClipMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - ClipMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + ClipMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } int32_t GetMinOpsetVersion(bool verbose) override; diff --git a/paddle2onnx/mapper/tensor/logical_not.h b/paddle2onnx/mapper/tensor/logical_not.h index f8b2b3478..12eb412ed 100644 --- a/paddle2onnx/mapper/tensor/logical_not.h +++ b/paddle2onnx/mapper/tensor/logical_not.h @@ -25,9 +25,11 @@ class LogicalNotMapper : public Mapper { LogicalNotMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - LogicalNotMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + LogicalNotMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } void Opset7() override; diff --git a/paddle2onnx/mapper/tensor/meshgrid.h b/paddle2onnx/mapper/tensor/meshgrid.h index 60ddfebf3..1360e8ff2 100644 --- a/paddle2onnx/mapper/tensor/meshgrid.h +++ b/paddle2onnx/mapper/tensor/meshgrid.h @@ -27,9 +27,11 @@ class MeshgridMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { MarkAsExperimentalOp(); } - MeshgridMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + MeshgridMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; MarkAsExperimentalOp(); } diff --git a/paddle2onnx/mapper/tensor/not_equal.h b/paddle2onnx/mapper/tensor/not_equal.h index 4492383d6..6ff205cf6 100644 --- a/paddle2onnx/mapper/tensor/not_equal.h +++ b/paddle2onnx/mapper/tensor/not_equal.h @@ -22,9 +22,11 @@ class NotEqualMapper : public Mapper { NotEqualMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - NotEqualMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + NotEqualMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } int32_t GetMinOpsetVersion(bool verbose) override; diff --git a/paddle2onnx/mapper/tensor/pow.h b/paddle2onnx/mapper/tensor/pow.h index cb7fec311..ac1e9ac55 100644 --- a/paddle2onnx/mapper/tensor/pow.h +++ b/paddle2onnx/mapper/tensor/pow.h @@ -27,9 +27,11 @@ class PowMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("factor", &factor_); } - PowMapper(const 
PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + PowMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; GetAttr("factor", &factor_); } diff --git a/paddle2onnx/mapper/tensor/stack.h b/paddle2onnx/mapper/tensor/stack.h index 41bfa0379..12df7efe1 100644 --- a/paddle2onnx/mapper/tensor/stack.h +++ b/paddle2onnx/mapper/tensor/stack.h @@ -24,9 +24,11 @@ class StackMapper : public Mapper { : Mapper(p, helper, block_id, op_id) { GetAttr("axis", &axis_); } - StackMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + StackMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; GetAttr("axis", &axis_); } diff --git a/paddle2onnx/mapper/tensor/tile.h b/paddle2onnx/mapper/tensor/tile.h index 6c3b88f39..c63e9e7b3 100644 --- a/paddle2onnx/mapper/tensor/tile.h +++ b/paddle2onnx/mapper/tensor/tile.h @@ -22,9 +22,11 @@ class TileMapper : public Mapper { TileMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - TileMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + TileMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t op_id + bool c) + : Mapper(p, helper, op_id, c) { in_pir_mode = true; } void Opset7() override; From 516b9738e790e4069c44cf47122f10ae74a89dc2 Mon Sep 17 00:00:00 2001 From: qzylalala Date: Mon, 21 Oct 2024 22:33:46 +0800 Subject: [PATCH 3/5] [OSPP][PIR] fix --- paddle2onnx/mapper/activation/sigmoid.h | 2 +- paddle2onnx/mapper/activation/swish.h | 2 +- paddle2onnx/mapper/nn/conv2d_transpose.h | 2 +- paddle2onnx/mapper/nn/group_norm.h | 2 +- paddle2onnx/mapper/nn/shape.h | 2 +- paddle2onnx/mapper/tensor/bmm.h | 2 +- paddle2onnx/mapper/tensor/cast.h | 2 +- paddle2onnx/mapper/tensor/clip.h | 2 +- paddle2onnx/mapper/tensor/logical_not.h | 2 +- paddle2onnx/mapper/tensor/meshgrid.h | 2 +- paddle2onnx/mapper/tensor/not_equal.h | 2 +- paddle2onnx/mapper/tensor/pow.h | 2 +- paddle2onnx/mapper/tensor/stack.h | 2 +- paddle2onnx/mapper/tensor/tile.h | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/paddle2onnx/mapper/activation/sigmoid.h b/paddle2onnx/mapper/activation/sigmoid.h index 2d5b8a7be..86b8b32f8 100644 --- a/paddle2onnx/mapper/activation/sigmoid.h +++ b/paddle2onnx/mapper/activation/sigmoid.h @@ -28,7 +28,7 @@ class SigmoidMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} SigmoidMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/activation/swish.h b/paddle2onnx/mapper/activation/swish.h index 4332d9784..f581447bd 100644 --- a/paddle2onnx/mapper/activation/swish.h +++ b/paddle2onnx/mapper/activation/swish.h @@ -29,7 +29,7 @@ class SwishMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} SwishMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/nn/conv2d_transpose.h b/paddle2onnx/mapper/nn/conv2d_transpose.h index e787126bf..3b4960706 100755 --- a/paddle2onnx/mapper/nn/conv2d_transpose.h +++ b/paddle2onnx/mapper/nn/conv2d_transpose.h @@ -48,7 +48,7 @@ class Conv2dTransposeMapper : public Mapper { 
Conv2dTransposeMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/nn/group_norm.h b/paddle2onnx/mapper/nn/group_norm.h index 7a4bdb494..b3390c63c 100755 --- a/paddle2onnx/mapper/nn/group_norm.h +++ b/paddle2onnx/mapper/nn/group_norm.h @@ -30,7 +30,7 @@ class GroupNormMapper : public Mapper { } GroupNormMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/nn/shape.h b/paddle2onnx/mapper/nn/shape.h index 4c2c4da1d..c477335f1 100644 --- a/paddle2onnx/mapper/nn/shape.h +++ b/paddle2onnx/mapper/nn/shape.h @@ -27,7 +27,7 @@ class ShapeMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} ShapeMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/bmm.h b/paddle2onnx/mapper/tensor/bmm.h index 1288e7101..c4ebf6109 100644 --- a/paddle2onnx/mapper/tensor/bmm.h +++ b/paddle2onnx/mapper/tensor/bmm.h @@ -27,7 +27,7 @@ class BmmMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} BmmMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/cast.h b/paddle2onnx/mapper/tensor/cast.h index 891c17207..4ef2d3de5 100644 --- a/paddle2onnx/mapper/tensor/cast.h +++ b/paddle2onnx/mapper/tensor/cast.h @@ -29,7 +29,7 @@ class CastMapper : public Mapper { } CastMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/clip.h b/paddle2onnx/mapper/tensor/clip.h index fd7ea1f43..736dd3dd2 100644 --- a/paddle2onnx/mapper/tensor/clip.h +++ b/paddle2onnx/mapper/tensor/clip.h @@ -27,7 +27,7 @@ class ClipMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} ClipMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/logical_not.h b/paddle2onnx/mapper/tensor/logical_not.h index 12eb412ed..e3be65117 100644 --- a/paddle2onnx/mapper/tensor/logical_not.h +++ b/paddle2onnx/mapper/tensor/logical_not.h @@ -27,7 +27,7 @@ class LogicalNotMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} LogicalNotMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/meshgrid.h b/paddle2onnx/mapper/tensor/meshgrid.h index 1360e8ff2..d3ac79452 100644 --- a/paddle2onnx/mapper/tensor/meshgrid.h +++ b/paddle2onnx/mapper/tensor/meshgrid.h @@ -29,7 +29,7 @@ class MeshgridMapper : public Mapper { } MeshgridMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/not_equal.h b/paddle2onnx/mapper/tensor/not_equal.h index 6ff205cf6..e6a3effb4 100644 --- a/paddle2onnx/mapper/tensor/not_equal.h +++ b/paddle2onnx/mapper/tensor/not_equal.h @@ -24,7 +24,7 @@ class NotEqualMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} NotEqualMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + 
int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/pow.h b/paddle2onnx/mapper/tensor/pow.h index ac1e9ac55..52aceedf7 100644 --- a/paddle2onnx/mapper/tensor/pow.h +++ b/paddle2onnx/mapper/tensor/pow.h @@ -29,7 +29,7 @@ class PowMapper : public Mapper { } PowMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/stack.h b/paddle2onnx/mapper/tensor/stack.h index 12df7efe1..82fad21b6 100644 --- a/paddle2onnx/mapper/tensor/stack.h +++ b/paddle2onnx/mapper/tensor/stack.h @@ -26,7 +26,7 @@ class StackMapper : public Mapper { } StackMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; diff --git a/paddle2onnx/mapper/tensor/tile.h b/paddle2onnx/mapper/tensor/tile.h index c63e9e7b3..f1ceb88a6 100644 --- a/paddle2onnx/mapper/tensor/tile.h +++ b/paddle2onnx/mapper/tensor/tile.h @@ -24,7 +24,7 @@ class TileMapper : public Mapper { : Mapper(p, helper, block_id, op_id) {} TileMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id + int64_t op_id, bool c) : Mapper(p, helper, op_id, c) { in_pir_mode = true; From a13023fa06c5927caf0f0bfb5015b96629b17540 Mon Sep 17 00:00:00 2001 From: qzylalala Date: Mon, 21 Oct 2024 22:48:56 +0800 Subject: [PATCH 4/5] [OSPP][PIR] fix --- paddle2onnx/mapper/activation/activation.h | 32 ++++++++++++++-------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/paddle2onnx/mapper/activation/activation.h b/paddle2onnx/mapper/activation/activation.h index 4d90e368a..1c1f51df5 100644 --- a/paddle2onnx/mapper/activation/activation.h +++ b/paddle2onnx/mapper/activation/activation.h @@ -122,9 +122,11 @@ class LeakyReluMapper : public Mapper { GetAttr("alpha", &alpha_); } - LeakyReluMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + LeakyReluMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t i, + bool c) + : Mapper(p, helper, i, c) { in_pir_mode = true; GetAttr("alpha", &alpha_); } @@ -143,9 +145,11 @@ class GeluMapper : public Mapper { int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - GeluMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + GeluMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t i, + bool c) + : Mapper(p, helper, i, c) { in_pir_mode = true; } @@ -171,9 +175,11 @@ class SoftMaxMapper : public Mapper { } } - SoftMaxMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + SoftMaxMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t i, + bool c) + : Mapper(p, helper, i, c) { in_pir_mode = true; if (HasAttr("axis")) { GetAttr("axis", &axis_); @@ -378,9 +384,11 @@ class SiluMapper : public Mapper { int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} - SiluMapper(const PaddlePirParser& p, OnnxHelper* helper, - int64_t op_id) - : Mapper(p, helper, op_id) { + SiluMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t i, + bool c) + : Mapper(p, helper, i, c) { in_pir_mode = true; } void Opset7() override; From 2d1d7c32d5917351bcb4706f1705f5f4e63f9da8 Mon Sep 17 00:00:00 2001 From: qzylalala Date: Mon, 21 Oct 2024 23:16:18 +0800 Subject: [PATCH 5/5] [OSPP][PIR] support logical op --- paddle2onnx/mapper/tensor/logical_op.cc | 10 +++++++++- 
paddle2onnx/mapper/tensor/logical_op.h | 7 +++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/paddle2onnx/mapper/tensor/logical_op.cc b/paddle2onnx/mapper/tensor/logical_op.cc index 5f0646228..81accceb5 100644 --- a/paddle2onnx/mapper/tensor/logical_op.cc +++ b/paddle2onnx/mapper/tensor/logical_op.cc @@ -12,12 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "paddle2onnx/mapper/exporter.h" #include "paddle2onnx/mapper/tensor/logical_op.h" namespace paddle2onnx { REGISTER_MAPPER(logical_and, LogicalOpMapper) REGISTER_MAPPER(logical_or, LogicalOpMapper) REGISTER_MAPPER(logical_xor, LogicalOpMapper) +REGISTER_PIR_MAPPER(logical_and, LogicalOpMapper) +REGISTER_PIR_MAPPER(logical_or, LogicalOpMapper) +REGISTER_PIR_MAPPER(logical_xor, LogicalOpMapper) void LogicalOpMapper::Opset7() { auto x_info = GetInput("X"); @@ -29,7 +33,11 @@ void LogicalOpMapper::Opset7() { op_mapper["logical_or"] = "Or"; op_mapper["logical_xor"] = "Xor"; - helper_->MakeNode(op_mapper[OpType()], {x_info[0].name, y_info[0].name}, + auto node_name = OpType(); + if (in_pir_mode) { + node_name = convert_pir_op_name(node_name); + } + helper_->MakeNode(op_mapper[node_name], {x_info[0].name, y_info[0].name}, {out_info[0].name}); } diff --git a/paddle2onnx/mapper/tensor/logical_op.h b/paddle2onnx/mapper/tensor/logical_op.h index 00401b881..3cbe2702c 100644 --- a/paddle2onnx/mapper/tensor/logical_op.h +++ b/paddle2onnx/mapper/tensor/logical_op.h @@ -25,6 +25,13 @@ class LogicalOpMapper : public Mapper { LogicalOpMapper(const PaddleParser& p, OnnxHelper* helper, int64_t block_id, int64_t op_id) : Mapper(p, helper, block_id, op_id) {} + LogicalOpMapper(const PaddlePirParser& p, + OnnxHelper* helper, + int64_t i, + bool c) + : Mapper(p, helper, i, c) { + in_pir_mode = true; + } void Opset7() override; };
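
---

The recurring change across PATCH 1/5 through PATCH 4/5 is a second, PIR-specific constructor added to each converted mapper, plus a matching `REGISTER_PIR_MAPPER` registration. Below is a minimal sketch of that final pattern, reduced from the diffs above. `Mapper`, `PaddleParser`, `PaddlePirParser`, `OnnxHelper`, and the two registration macros are the real paddle2onnx names used in these patches; the class itself is illustrative, and the meaning of the trailing `bool` (named `c` in the series) is never spelled out in these hunks, so it is simply forwarded to the base class exactly as the patches do.

```cpp
#include "paddle2onnx/mapper/mapper.h"  // assumed include, mirroring the real mapper headers

namespace paddle2onnx {

class ExampleMapper : public Mapper {
 public:
  // Legacy program-desc path: an op is addressed by (block_id, op_id).
  ExampleMapper(const PaddleParser& p, OnnxHelper* helper,
                int64_t block_id, int64_t op_id)
      : Mapper(p, helper, block_id, op_id) {}

  // PIR path in its final form (after PATCH 3/5): a single op index plus the
  // extra bool, both forwarded to the Mapper base. Every PIR constructor in
  // the series also flips in_pir_mode so shared OpsetN() bodies can tell
  // which IR they were built from.
  ExampleMapper(const PaddlePirParser& p, OnnxHelper* helper,
                int64_t op_id, bool c)
      : Mapper(p, helper, op_id, c) {
    in_pir_mode = true;
  }

  void Opset7() override;
};

// Both registries point at the same mapper class; only the constructor
// that gets invoked differs between the two parsers.
REGISTER_MAPPER(example, ExampleMapper)
REGISTER_PIR_MAPPER(example, ExampleMapper)

}  // namespace paddle2onnx
```

The PIR constructor drops the `block_id` that the legacy path needs, which suggests PIR ops are addressed by a single program-wide index, and attribute reads such as `GetAttr("alpha", &alpha_)` stay identical in both constructors so each `OpsetN()` body remains IR-agnostic.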
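PATCH 5/5 makes `LogicalOpMapper` serve both registries by normalizing the op name before the `op_mapper` table lookup. The diffs only show call sites of `convert_pir_op_name()`; the sketch below is an assumption about its intent (stripping a PIR dialect prefix such as `pd_op.` so one table keyed by legacy names serves both parsers), not the actual implementation behind `paddle2onnx/mapper/exporter.h`.

```cpp
#include <string>

// Hypothetical reduction of convert_pir_op_name(). Assumption: PIR op names
// carry a dialect prefix (e.g. "pd_op.logical_and") while the legacy lookup
// tables are keyed by the bare name ("logical_and").
std::string convert_pir_op_name(const std::string& op_name) {
  const std::string prefix = "pd_op.";  // assumed PIR dialect prefix
  if (op_name.rfind(prefix, 0) == 0) {  // starts-with check
    return op_name.substr(prefix.size());  // "pd_op.logical_and" -> "logical_and"
  }
  return op_name;  // legacy names pass through unchanged
}
```

Under that assumption, the `in_pir_mode` branch added to `LogicalOpMapper::Opset7()` resolves the PIR spellings of `logical_and`, `logical_or`, and `logical_xor` to the same `And`/`Or`/`Xor` ONNX nodes as the legacy path, which is why the three `REGISTER_PIR_MAPPER` lines in the patch can reuse the existing mapper unchanged.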