Move convertShape, simplify IPermuteFunction.test.cc, fix trix backend build failure

hseok-oh committed Aug 28, 2024
1 parent 4b176d2 commit 79e48f1
Showing 10 changed files with 44 additions and 210 deletions.
1 change: 0 additions & 1 deletion runtime/onert/backend/ruy/ops/OperationUtils.h
@@ -45,7 +45,6 @@ inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
return nnfw::ruy::Shape();

const ir::Shape &shape = tensor->get_info().shape();

auto rank = shape.rank();
nnfw::ruy::Shape ret(rank);
auto data = ret.DimsData();
13 changes: 0 additions & 13 deletions runtime/onert/backend/trix/Convert.cc
@@ -23,19 +23,6 @@ namespace backend
namespace trix
{

data_layout convertDataLayout(const ir::Layout layout)
{
switch (layout)
{
case ir::Layout::NCHW:
return DATA_LAYOUT_NCHW;
case ir::Layout::NHWC:
return DATA_LAYOUT_NHWC;
default:
throw std::runtime_error("Unknown Layout");
}
}

data_type convertDataType(const ir::DataType type)
{
switch (type)
11 changes: 1 addition & 10 deletions runtime/onert/backend/trix/Convert.h
@@ -19,7 +19,6 @@

#include <backend/IPortableTensor.h>
#include <ir/DataType.h>
#include <ir/Layout.h>

#include <libnpuhost.h>
#include <type_traits>
@@ -31,14 +30,6 @@ namespace backend
namespace trix
{

/**
* @brief Convert type of layout from onert type to npu type
*
* @param layout Layout type in onert
* @return data_layout Layout type in npu
*/
data_layout convertDataLayout(const ir::Layout layout);

/**
* @brief Convert type of data from onert type to npu type
*
@@ -61,7 +52,7 @@ void setDataInfo(const std::vector<T *> &tensors, tensors_data_info *info)

for (uint32_t idx = 0; idx < info->num_info; ++idx)
{
info->info[idx].layout = convertDataLayout(tensors[idx]->layout());
info->info[idx].layout = DATA_LAYOUT_NHWC;
info->info[idx].type = convertDataType(tensors[idx]->data_type());
}
}
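With convertDataLayout() removed, the trix backend no longer asks each tensor for its layout; every entry handed to the NPU is stamped DATA_LAYOUT_NHWC. A minimal call-site sketch under that assumption — input_tensors is hypothetical, and the part of setDataInfo that fills info->num_info is hidden by the truncated hunk:

// Hypothetical call site; assumes setDataInfo() sets info->num_info from
// tensors.size() in the portion of the function not shown above.
std::vector<IPortableTensor *> input_tensors; // populated by the backend (assumption)
tensors_data_info in_info;
setDataInfo(input_tensors, &in_info);
// Every entry now reports DATA_LAYOUT_NHWC; only info[idx].type still
// depends on the tensor, via convertDataType().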
8 changes: 7 additions & 1 deletion runtime/onert/core/include/ir/Shape.h
@@ -137,7 +137,13 @@ struct Shape
inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); }
inline bool operator!=(const Shape &lhs, const Shape &rhs) { return lhs.dims() != rhs.dims(); }

Shape permuteShape(const Shape &shape, Layout frontend_layout, Layout backend_layout);
/**
* @brief Converts shape when its rank is 4
*
* @return Return a shape based on permutation type.
* If rank is not 4, input shape is returned without conversion.
*/
ir::Shape convertShape(const Shape &shape, const PermuteType &type);

/**
* @brief Find out if the rank in this shape is "maybe" unspecified.
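This declaration replaces exec::convertShape from the deleted ShapeConverter.cc (see below). A minimal sketch of the behavior the comment describes, assuming the same rank-4 NHWC/NCHW permutation the old exec helper performed; the actual implementation in ir/ may differ:

ir::Shape convertShape(const Shape &shape, const PermuteType &type)
{
  // Non-4D shapes pass through unchanged, per the @brief above.
  if (shape.rank() != 4 || type == PermuteType::COPY)
    return shape;

  if (type == PermuteType::NHWC_TO_NCHW) // N,H,W,C -> N,C,H,W
    return ir::Shape{shape.dim(0), shape.dim(3), shape.dim(1), shape.dim(2)};

  // Remaining case, NCHW_TO_NHWC: N,C,H,W -> N,H,W,C
  return ir::Shape{shape.dim(0), shape.dim(2), shape.dim(3), shape.dim(1)};
}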
11 changes: 2 additions & 9 deletions runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc
@@ -16,8 +16,6 @@

#include "PermuteLayer.h"

#include "../../../exec/ShapeConverter.h"

#include <ruy/context.h> // from @ruy

namespace onert
@@ -204,17 +202,13 @@ void PermuteLayer::run()
auto dst_tensor = _dst_tensors.at(i);
auto src_tensor = _src_tensors.at(i);
auto permute_type = _permute_types.at(i);
auto src_layout =
permute_type == ir::PermuteType::NHWC_TO_NCHW ? ir::Layout::NHWC : ir::Layout::NCHW;
auto dst_layout =
permute_type == ir::PermuteType::NCHW_TO_NHWC ? ir::Layout::NHWC : ir::Layout::NCHW;
if (src_tensor->is_dynamic() || dst_tensor->is_dynamic())
{
// getting output shape
auto src_shape = src_tensor->getShape();

// set output shape and output buffer
ir::Shape new_shape = exec::convertShape(src_shape, src_layout, dst_layout);
ir::Shape new_shape = ir::convertShape(src_shape, permute_type);

try
{
@@ -231,8 +225,7 @@
throw;
}
}
assert(exec::convertShape(src_tensor->getShape(), src_layout, dst_layout) ==
dst_tensor->getShape());
assert(ir::convertShape(src_tensor->getShape(), permute_type) == dst_tensor->getShape());
}
assert(_src_tensors.size() == _dst_tensors.size());
assert(_src_tensors.size() == _src_tensors_offsets.size());
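run() now keys the shape conversion off the stored ir::PermuteType instead of reconstructing a source/destination layout pair. Judging from the three values this commit references (COPY in the tests, NHWC_TO_NCHW and NCHW_TO_NHWC here), the enum looks roughly like this sketch; the real definition lives under ir/ and is not part of this diff:

enum class PermuteType
{
  NHWC_TO_NCHW, // permute a rank-4 shape/buffer from NHWC to NCHW
  NCHW_TO_NHWC, // the inverse permutation
  COPY,         // layouts match: plain copy, no permutation
};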
2 changes: 0 additions & 2 deletions runtime/onert/core/src/exec/ExecutorBase.cc
@@ -16,8 +16,6 @@

#include "ExecutorBase.h"

#include "ShapeConverter.h"

#include "util/ConfigSource.h"
#include <misc/polymorphic_downcast.h>

79 changes: 17 additions & 62 deletions runtime/onert/core/src/exec/IPermuteFunction.test.cc
@@ -97,7 +97,8 @@ class MockUpTensor : public ITensor
class MockUpLayer : public IPermuteFunction
{
public:
MockUpLayer(const std::vector<MockUpTensor *> &inputs, const std::vector<MockUpTensor *> &outputs)
MockUpLayer(const std::vector<std::unique_ptr<MockUpTensor>> &inputs,
const std::vector<std::unique_ptr<MockUpTensor>> &outputs)
{
const uint32_t input_size = inputs.size();
assert(outputs.size() == input_size);
@@ -107,8 +108,8 @@ class MockUpLayer : public IPermuteFunction

for (uint32_t i = 0; i < input_size; i++)
{
_src_tensors[i] = inputs[i];
_dst_tensors[i] = outputs[i];
_src_tensors[i] = inputs[i].get();
_dst_tensors[i] = outputs[i].get();
if (inputs[i]->layout() == outputs[i]->layout())
_permute_types[i] = ir::PermuteType::COPY;
else if (inputs[i]->layout() == ir::Layout::NHWC)
@@ -146,11 +147,7 @@ TEST(IPermuteFunction, float_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -189,11 +186,7 @@ TEST(IPermuteFunction, float_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -235,11 +228,7 @@ TEST(IPermuteFunction, float_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -284,11 +273,7 @@ TEST(IPermuteFunction, float_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -353,11 +338,7 @@ TEST(IPermuteFunction, float_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -425,10 +406,7 @@ TEST(IPermuteFunction, float_to_qasymm8)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -479,10 +457,7 @@ TEST(IPermuteFunction, float_to_qsymm8)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -533,10 +508,7 @@ TEST(IPermuteFunction, float_to_qsymm16)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -596,10 +568,7 @@ TEST(IPermuteFunction, qasymm8_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -659,10 +628,7 @@ TEST(IPermuteFunction, qsymm8_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -722,10 +688,7 @@ TEST(IPermuteFunction, qsymm16_to_float)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(), inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -796,11 +759,7 @@ TEST(IPermuteFunction, float_qasymm8_layout)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
@@ -895,11 +854,7 @@ TEST(IPermuteFunction, float_qasymm8_layout)
outputs[i]->setBuffer(output_buffers[i].get());
}

auto mockup_layer = std::make_unique<MockUpLayer>(
std::vector<MockUpTensor *>{inputs[0].get(), inputs[1].get(), inputs[2].get(),
inputs[3].get()},
std::vector<MockUpTensor *>{outputs[0].get(), outputs[1].get(), outputs[2].get(),
outputs[3].get()});
auto mockup_layer = std::make_unique<MockUpLayer>(inputs, outputs);
mockup_layer->run();

for (size_t i = 0; i < 4; ++i)
60 changes: 0 additions & 60 deletions runtime/onert/core/src/exec/ShapeConverter.cc

This file was deleted.
