diff --git a/runtime/onert/backend/ruy/ops/OperationUtils.h b/runtime/onert/backend/ruy/ops/OperationUtils.h index abee496f57a..5f00a240871 100644 --- a/runtime/onert/backend/ruy/ops/OperationUtils.h +++ b/runtime/onert/backend/ruy/ops/OperationUtils.h @@ -45,7 +45,6 @@ inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor) return nnfw::ruy::Shape(); const ir::Shape &shape = tensor->get_info().shape(); - auto rank = shape.rank(); - nnfw::ruy::Shape ret(rank); + nnfw::ruy::Shape ret(shape.rank()); auto data = ret.DimsData(); diff --git a/runtime/onert/backend/trix/Convert.cc b/runtime/onert/backend/trix/Convert.cc index fe003e7ead5..684dc80dd53 100644 --- a/runtime/onert/backend/trix/Convert.cc +++ b/runtime/onert/backend/trix/Convert.cc @@ -23,19 +23,6 @@ namespace backend namespace trix { -data_layout convertDataLayout(const ir::Layout layout) -{ - switch (layout) - { - case ir::Layout::NCHW: - return DATA_LAYOUT_NCHW; - case ir::Layout::NHWC: - return DATA_LAYOUT_NHWC; - default: - throw std::runtime_error("Unknown Layout"); - } -} - data_type convertDataType(const ir::DataType type) { switch (type) diff --git a/runtime/onert/backend/trix/Convert.h b/runtime/onert/backend/trix/Convert.h index 662ed44b6b7..6b1edd5162f 100644 --- a/runtime/onert/backend/trix/Convert.h +++ b/runtime/onert/backend/trix/Convert.h @@ -19,7 +19,6 @@ #include #include -#include #include #include @@ -31,14 +30,6 @@ namespace backend namespace trix { -/** - * @brief Convert type of layout from onert type to npu type - * - * @param layout Layout type in onert - * @return data_layout Layout type in npu - */ -data_layout convertDataLayout(const ir::Layout layout); - /** * @brief Convert type of data from onert type to npu type * @@ -61,7 +52,7 @@ void setDataInfo(const std::vector &tensors, tensors_data_info *info) for (uint32_t idx = 0; idx < info->num_info; ++idx) { - info->info[idx].layout = convertDataLayout(tensors[idx]->layout()); + info->info[idx].layout = DATA_LAYOUT_NHWC; info->info[idx].type = 
convertDataType(tensors[idx]->data_type()); } } diff --git a/runtime/onert/core/include/ir/Shape.h b/runtime/onert/core/include/ir/Shape.h index 744a6cb7c0c..5bccb5e15ba 100644 --- a/runtime/onert/core/include/ir/Shape.h +++ b/runtime/onert/core/include/ir/Shape.h @@ -137,7 +137,13 @@ struct Shape inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); } inline bool operator!=(const Shape &lhs, const Shape &rhs) { return lhs.dims() != rhs.dims(); } -Shape permuteShape(const Shape &shape, Layout frontend_layout, Layout backend_layout); +/** + * @brief Converts shape when its rank is 4 + * + * @return Return a shape based on permutation type. + * If rank is not 4, input shape is returned without conversion. + */ +ir::Shape convertShape(const Shape &shape, const PermuteType &type); /** * @brief Find out if tha rank in this shape is "maybe" unspecified. diff --git a/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc b/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc index 2e1b52600b1..560aa21470e 100644 --- a/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc +++ b/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc @@ -16,8 +16,6 @@ #include "PermuteLayer.h" -#include "../../../exec/ShapeConverter.h" - #include // from @ruy namespace onert @@ -204,17 +202,13 @@ void PermuteLayer::run() auto dst_tensor = _dst_tensors.at(i); auto src_tensor = _src_tensors.at(i); auto permute_type = _permute_types.at(i); - auto src_layout = - permute_type == ir::PermuteType::NHWC_TO_NCHW ? ir::Layout::NHWC : ir::Layout::NCHW; - auto dst_layout = - permute_type == ir::PermuteType::NCHW_TO_NHWC ? 
ir::Layout::NHWC : ir::Layout::NCHW; if (src_tensor->is_dynamic() || dst_tensor->is_dynamic()) { // getting output shape auto src_shape = src_tensor->getShape(); // set output shape and output buffer - ir::Shape new_shape = exec::convertShape(src_shape, src_layout, dst_layout); + ir::Shape new_shape = ir::convertShape(src_shape, permute_type); try { @@ -231,8 +225,7 @@ void PermuteLayer::run() throw; } } - assert(exec::convertShape(src_tensor->getShape(), src_layout, dst_layout) == - dst_tensor->getShape()); + assert(ir::convertShape(src_tensor->getShape(), permute_type) == dst_tensor->getShape()); } assert(_src_tensors.size() == _dst_tensors.size()); assert(_src_tensors.size() == _src_tensors_offsets.size()); diff --git a/runtime/onert/core/src/exec/ExecutorBase.cc b/runtime/onert/core/src/exec/ExecutorBase.cc index 2526e4e6e4f..14149fd10b5 100644 --- a/runtime/onert/core/src/exec/ExecutorBase.cc +++ b/runtime/onert/core/src/exec/ExecutorBase.cc @@ -16,8 +16,6 @@ #include "ExecutorBase.h" -#include "ShapeConverter.h" - #include "util/ConfigSource.h" #include diff --git a/runtime/onert/core/src/exec/IPermuteFunction.test.cc b/runtime/onert/core/src/exec/IPermuteFunction.test.cc index 86c6d730d99..586e2305708 100644 --- a/runtime/onert/core/src/exec/IPermuteFunction.test.cc +++ b/runtime/onert/core/src/exec/IPermuteFunction.test.cc @@ -97,7 +97,8 @@ class MockUpTensor : public ITensor class MockUpLayer : public IPermuteFunction { public: - MockUpLayer(const std::vector &inputs, const std::vector &outputs) + MockUpLayer(const std::vector> &inputs, + const std::vector> &outputs) { const uint32_t input_size = inputs.size(); assert(outputs.size() == input_size); @@ -107,8 +108,8 @@ class MockUpLayer : public IPermuteFunction for (uint32_t i = 0; i < input_size; i++) { - _src_tensors[i] = inputs[i]; - _dst_tensors[i] = outputs[i]; + _src_tensors[i] = inputs[i].get(); + _dst_tensors[i] = outputs[i].get(); if (inputs[i]->layout() == outputs[i]->layout()) _permute_types[i] 
= ir::PermuteType::COPY; else if (inputs[i]->layout() == ir::Layout::NHWC) @@ -146,11 +147,7 @@ TEST(IPermuteFunction, float_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -189,11 +186,7 @@ TEST(IPermuteFunction, float_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -235,11 +228,7 @@ TEST(IPermuteFunction, float_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -284,11 +273,7 @@ TEST(IPermuteFunction, float_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -353,11 +338,7 @@ TEST(IPermuteFunction, float_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), 
inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -425,10 +406,7 @@ TEST(IPermuteFunction, float_to_qasymm8) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -479,10 +457,7 @@ TEST(IPermuteFunction, float_to_qsymm8) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -533,10 +508,7 @@ TEST(IPermuteFunction, float_to_qsymm16) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -596,10 +568,7 @@ TEST(IPermuteFunction, qasymm8_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -659,10 
+628,7 @@ TEST(IPermuteFunction, qsymm8_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -722,10 +688,7 @@ TEST(IPermuteFunction, qsymm16_to_float) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -796,11 +759,7 @@ TEST(IPermuteFunction, float_qasymm8_layout) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) @@ -895,11 +854,7 @@ TEST(IPermuteFunction, float_qasymm8_layout) outputs[i]->setBuffer(output_buffers[i].get()); } - auto mockup_layer = std::make_unique( - std::vector{inputs[0].get(), inputs[1].get(), inputs[2].get(), - inputs[3].get()}, - std::vector{outputs[0].get(), outputs[1].get(), outputs[2].get(), - outputs[3].get()}); + auto mockup_layer = std::make_unique(inputs, outputs); mockup_layer->run(); for (size_t i = 0; i < 4; ++i) diff --git a/runtime/onert/core/src/exec/ShapeConverter.cc b/runtime/onert/core/src/exec/ShapeConverter.cc deleted file mode 100644 index 707aef29b30..00000000000 --- a/runtime/onert/core/src/exec/ShapeConverter.cc +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 
(c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ShapeConverter.h" - -namespace onert -{ -namespace exec -{ - -ir::Shape convertShape(const ir::Shape &shape, ir::Layout src_layout, ir::Layout dst_layout) -{ - if (shape.rank() != 4) - return shape; - - if (src_layout == dst_layout) - return shape; - - if (src_layout == ir::Layout::NCHW && dst_layout == ir::Layout::NHWC) - { - const ir::Shape &src_NCHW = shape; - ir::Shape dst_NHWC(4); - dst_NHWC.dim(0) = src_NCHW.dim(0); // N - dst_NHWC.dim(1) = src_NCHW.dim(2); // H - dst_NHWC.dim(2) = src_NCHW.dim(3); // W - dst_NHWC.dim(3) = src_NCHW.dim(1); // C - - return dst_NHWC; - } - - if (src_layout == ir::Layout::NHWC && dst_layout == ir::Layout::NCHW) - { - const ir::Shape &src_NHWC = shape; - ir::Shape dst_NCHW(4); - dst_NCHW.dim(0) = src_NHWC.dim(0); // N - dst_NCHW.dim(1) = src_NHWC.dim(3); // C - dst_NCHW.dim(2) = src_NHWC.dim(1); // H - dst_NCHW.dim(3) = src_NHWC.dim(2); // W - - return dst_NCHW; - } - - throw std::runtime_error("Should not reach here"); -} - -} // namespace exec -} // namespace onert diff --git a/runtime/onert/core/src/exec/ShapeConverter.h b/runtime/onert/core/src/exec/ShapeConverter.h deleted file mode 100644 index 7dc7e7536f1..00000000000 --- a/runtime/onert/core/src/exec/ShapeConverter.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_EXEC_SHAPE_CONVERTER_H__ -#define __ONERT_EXEC_SHAPE_CONVERTER_H__ - -#include -#include - -namespace onert -{ -namespace exec -{ - -/** - * @brief Converts shape when its rank is 4 - * - * @return ir::Shape Return a shape based on dst_layout. If rank is not 4, input shape is - * returned without conversion. - */ -ir::Shape convertShape(const ir::Shape &shape, ir::Layout src_layout, ir::Layout dst_layout); - -} // namespace exec -} // namespace onert - -#endif // __ONERT_EXEC_SHAPE_CONVERTER_H__ diff --git a/runtime/onert/core/src/ir/Shape.cc b/runtime/onert/core/src/ir/Shape.cc index 1961aea5da1..37783f4d037 100644 --- a/runtime/onert/core/src/ir/Shape.cc +++ b/runtime/onert/core/src/ir/Shape.cc @@ -66,29 +66,33 @@ uint64_t Shape::num_elements() const std::multiplies()); } -Shape permuteShape(const Shape &shape, Layout from, Layout to) +Shape convertShape(const Shape &shape, const PermuteType &type) { assert(shape.rank() <= Shape::kMaxRank); - Shape ret{shape}; - if (from == to) - return ret; + + if (type == ir::PermuteType::COPY) + return shape; + if (shape.rank() < 4) - return ret; + return shape; + // Permutation changing layout beyond 4-D is not supported yet assert(shape.rank() <= 4); - if (from == Layout::NHWC && to == Layout::NCHW) + + Shape ret{shape}; + if (type == ir::PermuteType::NHWC_TO_NCHW) { ret.dim(1) = shape.dim(3); ret.dim(2) = shape.dim(1); 
ret.dim(3) = shape.dim(2); + + return ret; } - else if (from == Layout::NCHW && to == Layout::NHWC) - { - ret.dim(1) = shape.dim(2); - ret.dim(2) = shape.dim(3); - ret.dim(3) = shape.dim(1); - } - // Other cases(either `from` or `to` is UNKNOWN), just return the original shape + + assert(type == ir::PermuteType::NCHW_TO_NHWC); + ret.dim(1) = shape.dim(2); + ret.dim(2) = shape.dim(3); + ret.dim(3) = shape.dim(1); return ret; }