diff --git a/.gitignore b/.gitignore
index 8727b7919..8286d89d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@ protobuf-*
 *.pdmodel
 *.pdiparams
 *.pdiparams.info
+*inference.yml
 *.onnx
 *.temptxt
 tests/__pycache_*
diff --git a/README.md b/README.md
index 046d51b46..81d6670cd 100644
--- a/README.md
+++ b/README.md
@@ -41,9 +41,9 @@ Paddle2ONNX 在导出模型时,需要传入部署模型格式,包括两个
 你可以通过使用命令行并通过以下命令将Paddle模型转换为ONNX模型
 
 ```bash
-paddle2onnx --model_dir saved_inference_model \
-            --model_filename model.pdmodel \
-            --params_filename model.pdiparams \
+paddle2onnx --model_dir model_dir \
+            --model_filename inference.pdmodel \
+            --params_filename inference.pdiparams \
             --save_file model.onnx
 ```
 
@@ -72,9 +72,9 @@ paddle2onnx --model_dir saved_inference_model \
 
 ## 4.5 优化ONNX
 
-如你对导出的 ONNX 模型有优化的需求,推荐使用 `onnx-simplifier`,也可使用如下命令对模型进行优化
+如你对导出的 ONNX 模型有优化的需求,推荐使用 `onnxslim` 对模型进行优化:
 
-```
+```bash
 pip install onnxslim
 onnxslim model.onnx slim.onnx
 ```
diff --git a/VERSION_NUMBER b/VERSION_NUMBER
index 732aa7d7d..bb7a48bb9 100644
--- a/VERSION_NUMBER
+++ b/VERSION_NUMBER
@@ -1 +1 @@
-1.2.10
\ No newline at end of file
+1.2.11
\ No newline at end of file
diff --git a/paddle2onnx/mapper/tensor/assign_value.cc b/paddle2onnx/mapper/tensor/assign_value.cc
index 49646cf0f..48bdab68c 100644
--- a/paddle2onnx/mapper/tensor/assign_value.cc
+++ b/paddle2onnx/mapper/tensor/assign_value.cc
@@ -21,28 +21,21 @@ namespace paddle2onnx {
 REGISTER_MAPPER(assign_value, AssignValueMapper)
 
-int32_t AssignValueMapper::GetMinOpsetVersion(bool verbose) {
-  int32_t dtype = static_cast<int32_t>(dtype_);
-  if (dtype != P2ODataType::INT32 && dtype != P2ODataType::INT64 &&
-      dtype != P2ODataType::FP32) {
-    Error() << "Only supports int32/int64/float32." << std::endl;
-    return -1;
-  }
-  return 7;
-}
-
 void AssignValueMapper::Opset7() {
   auto output_info = GetOutput("Out");
   int32_t dtype = static_cast<int32_t>(dtype_);
   if (dtype == P2ODataType::INT32) {
     helper_->Assign(output_info[0].name, GetOnnxDtype(output_info[0].dtype),
                     shape_, int64_values_);
+  } else if (dtype == P2ODataType::INT64) {
+    helper_->Assign(output_info[0].name, GetOnnxDtype(output_info[0].dtype),
+                    shape_, int64_values_);
   } else if (dtype == P2ODataType::FP32) {
     helper_->Assign(output_info[0].name, GetOnnxDtype(output_info[0].dtype),
                     shape_, fp32_values_);
-  } else if (dtype == P2ODataType::INT64) {
+  } else if (dtype == P2ODataType::FP64) {
     helper_->Assign(output_info[0].name, GetOnnxDtype(output_info[0].dtype),
-                    shape_, int64_values_);
+                    shape_, double_values_);
   }
 }
diff --git a/paddle2onnx/mapper/tensor/assign_value.h b/paddle2onnx/mapper/tensor/assign_value.h
index 64cf85384..a8bb481ed 100644
--- a/paddle2onnx/mapper/tensor/assign_value.h
+++ b/paddle2onnx/mapper/tensor/assign_value.h
@@ -13,11 +13,12 @@
 // limitations under the License.
 
 #pragma once
+#include <functional>
 #include <string>
+#include <unordered_map>
 #include <vector>
+
 #include "paddle2onnx/mapper/mapper.h"
-#include <functional>
-#include <unordered_map>
 
 namespace paddle2onnx {
 
@@ -30,42 +31,48 @@ class AssignValueMapper : public Mapper {
     GetAttr("shape", &shape_);
     GetAttrValues();
   }
-  int32_t GetMinOpsetVersion(bool verbose) override;
+
   void Opset7() override;
 
  private:
-  void GetAttrValues(){
+  void GetAttrValues() {
     int32_t dtype = static_cast<int32_t>(dtype_);
-    const std::string attr_name = HasAttr("values") ? "values" : GetAttrNameByDtype(dtype);
+    const std::string attr_name =
+        HasAttr("values") ? "values" : GetAttrNameByDtype(dtype);
     std::unordered_map<int32_t, std::function<void()>> type_handlers = {
-      {P2ODataType::INT32, [&](){
-        if (attr_name == "values") GetScalars(attr_name, &int64_values_);
-        else if (attr_name == "int32_values") GetAttr(attr_name, &int64_values_);
-      }},
-      {P2ODataType::INT64, [&](){
-        if (attr_name == "values") GetScalars(attr_name, &int64_values_);
-        else if (attr_name == "int64_values") GetAttr(attr_name, &int64_values_);
-      }},
-      {P2ODataType::FP32, [&](){
-        if (attr_name == "values") GetScalars(attr_name, &fp32_values_);
-        else if (attr_name == "fp32_values") GetAttr(attr_name, &fp32_values_);
-      }},
-      {P2ODataType::FP64, [&](){
-        if (attr_name == "values") GetScalars(attr_name, &double_values_);
-        else if (attr_name == "fp32_values") GetAttr(attr_name, &double_values_);
-      }},
-      {P2ODataType::BOOL, [&](){
-        if (attr_name == "values") GetScalars(attr_name, &bool_values_);
-        else if (attr_name == "bool_values") GetAttr(attr_name, &bool_values_);
-      }},
+        {P2ODataType::INT32,
+         [&]() {
+           if (attr_name == "values")
+             GetScalars(attr_name, &int64_values_);
+           else if (attr_name == GetAttrNameByDtype(dtype_))
+             GetAttr(attr_name, &int64_values_);
+         }},
+        {P2ODataType::INT64,
+         [&]() {
+           if (attr_name == "values")
+             GetScalars(attr_name, &int64_values_);
+           else if (attr_name == GetAttrNameByDtype(dtype_))
+             GetAttr(attr_name, &int64_values_);
+         }},
+        {P2ODataType::FP32,
+         [&]() {
+           if (attr_name == "values")
+             GetScalars(attr_name, &fp32_values_);
+           else if (attr_name == GetAttrNameByDtype(dtype_))
+             GetAttr(attr_name, &fp32_values_);
+         }},
+        {P2ODataType::FP64,
+         [&]() {
+           if (attr_name == "values")
+             GetScalars(attr_name, &double_values_);
+           else if (attr_name == GetAttrNameByDtype(dtype_))
+             GetAttr(attr_name, &double_values_);
+         }},
     };
     auto handler = type_handlers.find(dtype);
-    if (handler != type_handlers.end()) {
-      handler->second();
-    } else {
-      Error() << "Unsupported dtype value" << std::endl;
-    }
+    Assert(handler != type_handlers.end(), "Unsupported dtype value");
+    handler->second();
   }
 
   std::string GetAttrNameByDtype(int32_t dtype) {
@@ -73,22 +80,18 @@ class AssignValueMapper : public Mapper {
       return "int32_values";
     } else if (dtype == P2ODataType::INT64) {
       return "int64_values";
-    }else if (dtype == P2ODataType::FP32) {
+    } else if (dtype == P2ODataType::FP32) {
       return "fp32_values";
     } else if (dtype == P2ODataType::FP64) {
       return "double_values";
-    } else if (dtype == P2ODataType::BOOL) {
-      return "bool_values";
     }
-    Error() << "Unsupported dtype value" << std::endl;
-
+    Assert(false, "Only supports int32/int64/fp32/fp64.");
+    return "";
   }
-  std::vector<float> fp32_values_;
   std::vector<int64_t> int64_values_;
-  std::vector<bool> bool_values_;
+  std::vector<float> fp32_values_;
   std::vector<double> double_values_;
-  std::vector<int32_t> int32_values_;
   std::vector<int64_t> shape_;
   int64_t dtype_;
 };
diff --git a/paddle2onnx/mapper/tensor/unsqueeze2.cc b/paddle2onnx/mapper/tensor/unsqueeze2.cc
index b6659b1cc..0bddc6673 100644
--- a/paddle2onnx/mapper/tensor/unsqueeze2.cc
+++ b/paddle2onnx/mapper/tensor/unsqueeze2.cc
@@ -18,24 +18,21 @@ namespace paddle2onnx {
 REGISTER_MAPPER(unsqueeze2, Unsqueeze2Mapper)
 
 int32_t Unsqueeze2Mapper::GetMinOpsetVersion(bool verbose) {
-  if (axes_.size() == 0) {
-    if (HasInput("AxesTensorList")) {
-      Logger(verbose, 13) << "While AxisTensorList as input, "
-                          << RequireOpset(13) << std::endl;
-      return 13;
-    } else if (HasInput("AxesTensor")) {
-      auto info = GetInput("AxesTensor");
-      if (!IsConstantInput("AxesTensor")) {
-        Logger(verbose, 13)
-            << "While AxesTensor as input, and it's not a constant tensor, "
-            << RequireOpset(13) << std::endl;
-        return 13;
-      } else {
-        return 7;
-      }
+  int32_t opset = 7;
+  if (!axes_.empty()) {
+    return opset;
+  }
+
+  opset = 13;
+  if (HasInput("AxesTensorList")) {
+    opset = 13;
+  } else if (HasInput("AxesTensor")) {
+    auto info = GetInput("AxesTensor");
+    if (!IsConstantInput("AxesTensor")) {
+      opset = 13;
+    }
   }
-  return 7;
+  return opset;
 }
 
 void Unsqueeze2Mapper::Opset7() {
@@ -44,9 +41,17 @@ void Unsqueeze2Mapper::Opset7() {
 
   std::vector<int64_t> axes;
   if (axes_.empty()) {
-    Assert(TryGetInputValue("AxesTensor", &axes),
-           "While unsqueeze2 has input AxesTensor, it cannot be exported by "
-           "Paddle2ONNX");
+    if (HasInput("AxesTensor")) {
+      Assert(TryGetInputValue("AxesTensor", &axes),
+             "While unsqueeze2 has input AxesTensor, it cannot be exported by "
+             "Paddle2ONNX");
+    } else {
+      Warn() << "AxesTensor not found, using Identity instead of Unsqueeze."
+             << std::endl;
+      helper_->MakeNode("Identity", {input_info[0].name},
+                        {output_info[0].name});
+      return;
+    }
   } else {
     axes.assign(axes_.begin(), axes_.end());
   }
@@ -64,10 +69,19 @@ void Unsqueeze2Mapper::Opset13() {
 
   std::vector<int64_t> axes;
   if (axes_.empty()) {
-    TryGetInputValue("AxesTensor", &axes);
+    if (HasInput("AxesTensor")) {
+      TryGetInputValue("AxesTensor", &axes);
+    } else {
+      Warn() << "AxesTensor not found, using Identity instead of Unsqueeze."
+             << std::endl;
+      helper_->MakeNode("Identity", {input_info[0].name},
+                        {output_info[0].name});
+      return;
+    }
   } else {
     axes.assign(axes_.begin(), axes_.end());
   }
+
   for (size_t i = 0; i < axes.size(); ++i) {
     if (axes[i] < 0) {
       axes[i] = axes[i] + input_info[0].Rank() + i + 1;
@@ -81,10 +95,17 @@ void Unsqueeze2Mapper::Opset13() {
     if (HasInput("AxesTensorList")) {
       auto info = GetInput("AxesTensorList");
       axes_node = helper_->ConcatIndices(info);
-    } else {
+    } else if (HasInput("AxesTensor")) {
       auto info = GetInput("AxesTensor");
       axes_node =
           helper_->AutoCast(info[0].name, info[0].dtype, P2ODataType::INT64);
+    } else {
+      Warn() << "AxesTensorList or AxesTensor not found, using Identity "
+                "instead of Unsqueeze."
+             << std::endl;
+      helper_->MakeNode("Identity", {input_info[0].name},
+                        {output_info[0].name});
+      return;
     }
     helper_->MakeNode("Unsqueeze", {input_info[0].name, axes_node},
                       {output_info[0].name});
diff --git a/tests/run.sh b/tests/run.sh
index 0b2c7f57c..324441381 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -57,7 +57,6 @@ ignore="test_auto_scan_multiclass_nms.py
         test_nn_Upsample.py \
         test_normalize.py \
         test_scatter_nd_add.py \
-        test_unsqueeze.py \
         test_quantize_model.py \
         test_quantize_model_minist.py \
         test_quantize_model_speedup.py \
diff --git a/tests/test_assign.py b/tests/test_assign.py
index 8a21ecec6..bdce51f51 100644
--- a/tests/test_assign.py
+++ b/tests/test_assign.py
@@ -33,25 +33,25 @@ def forward(self, inputs):
         return x
 
 
-def test_assign_9():
+def test_assign_int32():
     """
     api: paddle.assign
-    op version: 9
+    op version: 7
     """
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'assign', [9])
+    obj = APIOnnx(op, 'assign', [7])
     obj.set_input_data(
         "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
+        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('int32')))
     obj.run()
 
 
-def test_assign_10():
+def test_assign_int64():
     """
     api: paddle.assign
-    op version: 10
+    op version: 7
     """
     op = Net()
     op.eval()
@@ -59,14 +59,14 @@ def test_assign_10():
     obj = APIOnnx(op, 'assign', [10])
     obj.set_input_data(
         "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
+        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('int64')))
     obj.run()
 
 
-def test_assign_11():
+def test_assign_fp32():
     """
     api: paddle.assign
-    op version: 11
+    op version: 7
     """
     op = Net()
     op.eval()
@@ -78,7 +78,7 @@ def test_assign_11():
     obj.run()
 
 
-def test_assign_12():
+def test_assign_fp64():
     """
     api: paddle.assign
     op version: 12
@@ -89,5 +89,12 @@ def test_assign_12():
     obj = APIOnnx(op, 'assign', [12])
     obj.set_input_data(
         "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
+        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float64')))
     obj.run()
+
+
+if __name__ == "__main__":
+    test_assign_int32()
+    test_assign_int64()
+    test_assign_fp32()
+    test_assign_fp64()
\ No newline at end of file
diff --git a/tests/test_einsum.py b/tests/test_einsum.py
index 366987a8a..cfadaa690 100644
--- a/tests/test_einsum.py
+++ b/tests/test_einsum.py
@@ -40,7 +40,7 @@ def forward(self, input):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_sum', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     obj.set_input_data("input_data", paddle.rand([4]))
     obj.run()
 
@@ -69,7 +69,7 @@ def forward(self, x, y):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([4])
     obj.set_input_data("input_data", input_x, input_x)
     obj.run()
@@ -99,7 +99,7 @@ def forward(self, x, y):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([4])
     input_y = paddle.rand([5])
     obj.set_input_data("input_data", input_x, input_y)
@@ -130,7 +130,7 @@ def forward(self, x):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([2, 3, 2])
     obj.set_input_data("input_data", input_x)
     obj.run()
@@ -160,7 +160,7 @@ def forward(self, x, y):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([2, 3, 2])
     input_y = paddle.rand([2, 2, 3])
     obj.set_input_data("input_data", input_x, input_y)
@@ -191,7 +191,7 @@ def forward(self, x):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([2, 3, 2])
     obj.set_input_data("input_data", input_x)
     obj.run()
@@ -221,7 +221,7 @@ def forward(self, x, y):
     op = Net()
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'einsum_dot', [12])
+    obj = APIOnnx(op, 'einsum', [13])
     input_x = paddle.rand([2, 3, 2])
     input_y = paddle.rand([2, 2, 3])
     obj.set_input_data("input_data", input_x, input_y)
@@ -230,4 +230,8 @@ def forward(self, x, y):
 
 if __name__ == "__main__":
     test_einsum_sum()
-    test_einsum_dot()
\ No newline at end of file
+    test_einsum_dot()
+    test_einsum_outer()
+    test_einsum_batch_matrix_multiplication()
+    test_einsum_ellipsis_transpose()
+    test_einsum_ellipsis_batch_matrix_multiplication()
\ No newline at end of file
diff --git a/tests/test_unsqueeze.py b/tests/test_unsqueeze.py
index 3ca483afe..efe420b4e 100755
--- a/tests/test_unsqueeze.py
+++ b/tests/test_unsqueeze.py
@@ -34,7 +34,7 @@ def forward(self, inputs):
         return x
 
 
-def test_unsqueeze_9():
+def test_unsqueeze_7():
     """
     api: paddle.unsqueeze
     op version: 9
@@ -49,7 +49,7 @@ def test_unsqueeze_9():
     obj.run()
 
 
-def test_unsqueeze_10():
+def test_unsqueeze_13():
     """
     api: paddle.unsqueeze
     op version: 10
@@ -64,52 +64,7 @@ def test_unsqueeze_10():
     obj.run()
 
 
-def test_unsqueeze_11():
-    """
-    api: paddle.unsqueeze
-    op version: 11
-    """
-    op = Net()
-    op.eval()
-    # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [11])
-    obj.set_input_data(
-        "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
-    obj.run()
-
-
-def test_unsqueeze_12():
-    """
-    api: paddle.unsqueeze
-    op version: 12
-    """
-    op = Net()
-    op.eval()
-    # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [12])
-    obj.set_input_data(
-        "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
-    obj.run()
-
-
-def test_unsqueeze_axis_13():
-    """
-    api: paddle.unsqueeze
-    op version: 13
-    """
-    op = Net(axis=paddle.to_tensor(1))
-    op.eval()
-    # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [13])
-    obj.set_input_data(
-        "input_data",
-        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
-    obj.run()
-
-
-def test_unsqueeze_13_two_tensor_axis():
+def test_unsqueeze_two_tensor_axis():
     """
     api: paddle.unsqueeze
     op version: 13
@@ -124,7 +79,7 @@ def test_unsqueeze_13_two_tensor_axis():
     obj.run()
 
 
-def test_unsqueeze_9_two_axis():
+def test_unsqueeze_two_axis():
     """
     api: paddle.unsqueeze
     op version: 9
@@ -132,14 +87,14 @@ def test_unsqueeze_9_two_axis():
     op = Net(axis=[0, -1])
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [9, 10, 11, 12, 13])
+    obj = APIOnnx(op, 'unsqueeze', [7, 13])
     obj.set_input_data(
         "input_data",
         paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
     obj.run()
 
 
-def test_unsqueeze_9_multil_axis():
+def test_unsqueeze_multil_axis():
     """
     api: paddle.unsqueeze
     op version: 9
@@ -147,14 +102,14 @@ def test_unsqueeze_9_multil_axis():
     op = Net(axis=[1, 2, 3, 4])
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [9, 10, 11, 12, 13])
+    obj = APIOnnx(op, 'unsqueeze', [7, 13])
     obj.set_input_data(
         "input_data",
         paddle.to_tensor(randtool("float", -1, 1, [5, 10]).astype('float32')))
     obj.run()
 
 
-def test_unsqueeze_9_multil_negative_axis():
+def test_unsqueeze_multil_negative_axis():
     """
     api: paddle.unsqueeze
     op version: 9
@@ -162,20 +117,17 @@ def test_unsqueeze_9_multil_negative_axis():
     op = Net(axis=[1, 2, 3, -1])
     op.eval()
     # net, name, ver_list, delta=1e-6, rtol=1e-5
-    obj = APIOnnx(op, 'unsqueeze', [9, 10, 11, 12, 13])
+    obj = APIOnnx(op, 'unsqueeze', [7, 13])
     obj.set_input_data(
         "input_data",
         paddle.to_tensor(randtool("float", -1, 1, [5, 10]).astype('float32')))
     obj.run()
 
 
-# if __name__ == '__main__':
-#     test_unsqueeze_9()
-#     test_unsqueeze_10()
-#     test_unsqueeze_11()
-#     test_unsqueeze_12()
-#     test_unsqueeze_axis_12()
-#     test_unsqueeze_9_two_tensor_axis()
-#     test_unsqueeze_9_two_axis()
-#     test_unsqueeze_9_multil_axis()
-#     test_unsqueeze_9_multil_negative_axis()
+if __name__ == '__main__':
+    test_unsqueeze_7()
+    test_unsqueeze_13()
+    test_unsqueeze_two_tensor_axis()
+    test_unsqueeze_two_axis()
+    test_unsqueeze_multil_axis()
+    test_unsqueeze_multil_negative_axis()