From 14ca2ed21a50939fb5fefc75de8265f316666d65 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Thu, 29 Aug 2024 16:13:54 +0900
Subject: [PATCH] [onert] Remove layout in ITensor

This commit removes layout access from ITensor and derived classes.
Tensor classes for external buffers (IOTensor, UserTensor, EdgeTensor)
and unit-test mocks retain their layout information.

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
---
 runtime/onert/backend/acl_common/IACLTensor.cc                 | 2 --
 runtime/onert/backend/acl_common/IACLTensor.h                  | 1 -
 runtime/onert/backend/cpu/KernelGenerator.cc                   | 6 ------
 runtime/onert/backend/cpu/ops/OperationUtils.cc                | 1 -
 runtime/onert/backend/cpu/ops/OperationUtils.h                 | 3 ---
 runtime/onert/backend/ruy/KernelGenerator.cc                   | 6 ------
 runtime/onert/backend/ruy/ops/OperationUtils.h                 | 3 ---
 runtime/onert/backend/train/KernelGenerator.cc                 | 5 -----
 runtime/onert/backend/train/ops/OperationUtils.cc              | 3 ---
 runtime/onert/backend/train/ops/PoolLayer.cc                   | 4 ----
 runtime/onert/backend/train/optimizer/Optimizers.test.cc       | 4 ----
 runtime/onert/backend/trix/Convert.h                           | 2 +-
 runtime/onert/backend/xnnpack/KernelGenerator.cc               | 6 ------
 runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc          | 3 ---
 runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc | 3 ---
 runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc       | 3 ---
 runtime/onert/core/include/backend/ITensor.h                   | 1 -
 runtime/onert/core/include/backend/basic/Tensor.h              | 6 ++----
 .../core/include/backend/basic/train/TrainableTensor.h         | 1 -
 runtime/onert/core/src/backend/builtin/IOTensor.cc             | 1 -
 runtime/onert/core/src/backend/builtin/IOTensor.h              | 2 +-
 runtime/onert/core/src/backend/builtin/UserTensor.h            | 2 +-
 .../onert/core/src/backend/builtin/kernel/PermuteLayer.cc      | 4 ++--
 runtime/onert/core/src/exec/EdgeTensor.h                       | 2 +-
 runtime/onert/core/src/exec/IPermuteFunction.cc                | 3 ---
 runtime/onert/core/src/exec/IPermuteFunction.h                 | 4 ++--
 runtime/onert/core/src/exec/IPermuteFunction.test.cc           | 2 +-
 runtime/onert/core/src/exec/feature/MockTensor.test.h          | 2 +-
 runtime/onert/core/src/exec/feature/nchw/Reader.h              | 2 --
 runtime/onert/core/src/exec/feature/nhwc/Reader.h              | 2 --
 30 files changed, 12 insertions(+), 77 deletions(-)
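The gist of the change, as a minimal self-contained C++ sketch (simplified signatures; UserTensorSketch is an illustrative stand-in for IOTensor/UserTensor/EdgeTensor, not a class in this patch):

```cpp
#include <cstdint>

namespace ir
{
enum class Layout
{
  NHWC,
  NCHW
};
} // namespace ir

// After this patch the base interface no longer declares a virtual layout();
// buffer(), data_type(), calcOffset(), etc. remain.
class ITensor
{
public:
  virtual ~ITensor() = default;
  virtual uint8_t *buffer() const = 0;
};

// Tensors that wrap user-facing buffers still expose the layout of the
// external buffer, but as an ordinary, non-virtual member function.
class UserTensorSketch : public ITensor
{
public:
  explicit UserTensorSketch(ir::Layout layout) : _layout(layout) {}
  uint8_t *buffer() const override { return _buffer; }
  ir::Layout layout() const { return _layout; } // non-virtual now

private:
  uint8_t *_buffer = nullptr;
  ir::Layout _layout;
};
```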
diff --git a/runtime/onert/backend/acl_common/IACLTensor.cc b/runtime/onert/backend/acl_common/IACLTensor.cc
index 9920750fc24..50fef459daf 100644
--- a/runtime/onert/backend/acl_common/IACLTensor.cc
+++ b/runtime/onert/backend/acl_common/IACLTensor.cc
@@ -41,8 +41,6 @@ size_t IACLTensor::calcOffset(const ir::Coordinates &coords) const
   return info()->offset_element_in_bytes(acl_coords);
 }
 
-ir::Layout IACLTensor::layout() const { return acl_common::asRuntimeLayout(info()->data_layout()); }
-
 ir::DataType IACLTensor::data_type() const
 {
   return acl_common::asRuntimeDataType(info()->data_type());
diff --git a/runtime/onert/backend/acl_common/IACLTensor.h b/runtime/onert/backend/acl_common/IACLTensor.h
index 7ea6327a7c6..e43a4cbdac8 100644
--- a/runtime/onert/backend/acl_common/IACLTensor.h
+++ b/runtime/onert/backend/acl_common/IACLTensor.h
@@ -49,7 +49,6 @@ class IACLTensor : public ITensor
   uint8_t *buffer() const final { return handle()->buffer(); }
   size_t total_size() const final { return info()->total_size(); }
   size_t calcOffset(const ir::Coordinates &coords) const final;
-  ir::Layout layout() const final;
   ir::DataType data_type() const final;
   float data_scale() const override;
   int32_t data_zero_point() const override;
diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc
index 598ec422688..75c78d886cd 100644
--- a/runtime/onert/backend/cpu/KernelGenerator.cc
+++ b/runtime/onert/backend/cpu/KernelGenerator.cc
@@ -260,12 +260,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex
   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.cc b/runtime/onert/backend/cpu/ops/OperationUtils.cc
index 686865af281..5e27f175e5f 100644
--- a/runtime/onert/backend/cpu/ops/OperationUtils.cc
+++ b/runtime/onert/backend/cpu/ops/OperationUtils.cc
@@ -286,7 +286,6 @@ std::vector<int32_t> getReducerAxes(const IPortableTensor *axes)
   std::vector<int32_t> ret;
 
   auto axes_vals = (axes->getShape().rank() == 0) ? 1 : axes->getShape().dim(0);
-  assert(axes->layout() == ir::Layout::NHWC);
   assert(static_cast<size_t>(axes_vals) == axes->getShape().num_elements());
   switch (axes->data_type())
   {
diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.h b/runtime/onert/backend/cpu/ops/OperationUtils.h
index 39ef0cf6c54..544d07b8785 100644
--- a/runtime/onert/backend/cpu/ops/OperationUtils.h
+++ b/runtime/onert/backend/cpu/ops/OperationUtils.h
@@ -97,9 +97,6 @@ inline nnfw::cker::Shape getShape(const IPortableTensor *tensor)
     return nnfw::cker::Shape();
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
diff --git a/runtime/onert/backend/ruy/KernelGenerator.cc b/runtime/onert/backend/ruy/KernelGenerator.cc
index 8e2b12d4024..ef80452d5b4 100644
--- a/runtime/onert/backend/ruy/KernelGenerator.cc
+++ b/runtime/onert/backend/ruy/KernelGenerator.cc
@@ -57,12 +57,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex
   for (const auto &ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
diff --git a/runtime/onert/backend/ruy/ops/OperationUtils.h b/runtime/onert/backend/ruy/ops/OperationUtils.h
index 716400c1f14..5f00a240871 100644
--- a/runtime/onert/backend/ruy/ops/OperationUtils.h
+++ b/runtime/onert/backend/ruy/ops/OperationUtils.h
@@ -45,9 +45,6 @@ inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
     return nnfw::ruy::Shape();
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::ruy::Shape ret(rank);
   auto data = ret.DimsData();
diff --git a/runtime/onert/backend/train/KernelGenerator.cc b/runtime/onert/backend/train/KernelGenerator.cc
index f3df39735e0..aaaa50f1b72 100644
--- a/runtime/onert/backend/train/KernelGenerator.cc
+++ b/runtime/onert/backend/train/KernelGenerator.cc
@@ -133,11 +133,6 @@ std::unique_ptr<exec::train::TrainableFnSequence> KernelGenerator::generate(ir::OperationIndex
   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
     auto tensor = _tensor_reg->getNonConstTensor(ind);
     if (tensor)
     {
diff --git a/runtime/onert/backend/train/ops/OperationUtils.cc b/runtime/onert/backend/train/ops/OperationUtils.cc
index 9736d5ba5d6..94a2f52491a 100644
--- a/runtime/onert/backend/train/ops/OperationUtils.cc
+++ b/runtime/onert/backend/train/ops/OperationUtils.cc
@@ -37,9 +37,6 @@ nnfw::cker::Shape getShape(const IPortableTensor *tensor)
   assert(!tensor->is_dynamic() && "Dynamic tensor is not supported yet");
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
diff --git a/runtime/onert/backend/train/ops/PoolLayer.cc b/runtime/onert/backend/train/ops/PoolLayer.cc
index f77d58e6517..e98a33050ba 100644
--- a/runtime/onert/backend/train/ops/PoolLayer.cc
+++ b/runtime/onert/backend/train/ops/PoolLayer.cc
@@ -77,8 +77,6 @@ class MaxPool2D final : public TrainingKernelRegistry
 public:
   void forward(const IPortableTensor *in, IPortableTensor *out)
   {
-    assert(in->layout() == ir::Layout::NHWC);
-
     auto out_shape = getShape(out);
     auto out_data = getBuffer<float>(out);
     auto arg_max_index = _arg_max_index.get();
@@ -90,8 +88,6 @@ class MaxPool2D final : public TrainingKernelRegistry
 
   void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
   {
-    assert(back_prop_out->layout() == ir::Layout::NHWC);
-
     // activation backward
     try
     {
diff --git a/runtime/onert/backend/train/optimizer/Optimizers.test.cc b/runtime/onert/backend/train/optimizer/Optimizers.test.cc
index f24138c0481..2876fe430cc 100644
--- a/runtime/onert/backend/train/optimizer/Optimizers.test.cc
+++ b/runtime/onert/backend/train/optimizer/Optimizers.test.cc
@@ -54,8 +54,6 @@ class MockUpTensor : public IPortableTensor
   template <typename T> const std::vector<T> &data() const { return _data; }
 
-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 private:
   using ITensor::setShape;
   using ITensor::set_dynamic;
@@ -89,8 +87,6 @@ class MockUpTrainableTensor : public backend::train::ITrainableTensor
     return const_cast<uint8_t *>(_data.data());
   }
 
-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 public:
   std::vector<ITensor *> optVars() override
   {
diff --git a/runtime/onert/backend/trix/Convert.h b/runtime/onert/backend/trix/Convert.h
index 662ed44b6b7..9359f0a5084 100644
--- a/runtime/onert/backend/trix/Convert.h
+++ b/runtime/onert/backend/trix/Convert.h
@@ -61,7 +61,7 @@ void setDataInfo(const std::vector<T *> &tensors, tensors_data_info *info)
 
   for (uint32_t idx = 0; idx < info->num_info; ++idx)
   {
-    info->info[idx].layout = convertDataLayout(tensors[idx]->layout());
+    info->info[idx].layout = DATA_LAYOUT_NHWC;
     info->info[idx].type = convertDataType(tensors[idx]->data_type());
  }
 }
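The trix hunk is the one place where a layout value still has to be produced. Since every IPortableTensor in onert is NHWC, the per-tensor query collapses to a constant. A compressed, self-contained sketch under that assumption (the struct and enum below are stand-ins for the real trix SDK types, and the type field is left as a placeholder for convertDataType):

```cpp
#include <cstdint>
#include <vector>

// Illustrative stand-ins for the trix SDK types used by setDataInfo();
// only the fields touched by the diff are modeled.
enum data_layout { DATA_LAYOUT_NHWC, DATA_LAYOUT_NCHW };
struct data_info { data_layout layout; int type; };
struct tensors_data_info { uint32_t num_info; data_info info[16]; };

template <typename TensorT>
void setDataInfo(const std::vector<TensorT *> &tensors, tensors_data_info *info)
{
  info->num_info = static_cast<uint32_t>(tensors.size());
  for (uint32_t idx = 0; idx < info->num_info; ++idx)
  {
    // All portable tensors are NHWC after this patch, so no per-tensor query.
    info->info[idx].layout = DATA_LAYOUT_NHWC;
    info->info[idx].type = 0; // placeholder for convertDataType(tensors[idx]->data_type())
  }
}
```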
diff --git a/runtime/onert/backend/xnnpack/KernelGenerator.cc b/runtime/onert/backend/xnnpack/KernelGenerator.cc
index 01408561f87..abdfbe914c1 100644
--- a/runtime/onert/backend/xnnpack/KernelGenerator.cc
+++ b/runtime/onert/backend/xnnpack/KernelGenerator.cc
@@ -70,12 +70,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex
   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
diff --git a/runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc b/runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc
index d08f7eac95d..e2e4e38253f 100644
--- a/runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc
+++ b/runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc
@@ -59,9 +59,6 @@ void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor
   _activation = activation;
   _output = output;
 
-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
diff --git a/runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc b/runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc
index 87617e10c0a..1fc04b32b6d 100644
--- a/runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc
+++ b/runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc
@@ -60,9 +60,6 @@ void DepthwiseConvolutionLayer::configure(
   _activation = activation;
   _output = output;
 
-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
diff --git a/runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc b/runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc
index 93885c619af..71f02b11091 100644
--- a/runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc
+++ b/runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc
@@ -44,9 +44,6 @@ void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor
   _activation = activation;
   _output = output;
 
-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
diff --git a/runtime/onert/core/include/backend/ITensor.h b/runtime/onert/core/include/backend/ITensor.h
index 56041626448..81113bf24fe 100644
--- a/runtime/onert/core/include/backend/ITensor.h
+++ b/runtime/onert/core/include/backend/ITensor.h
@@ -42,7 +42,6 @@ class ITensor
   virtual uint8_t *buffer() const = 0;
   virtual size_t total_size() const = 0;
   virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
-  virtual ir::Layout layout() const = 0;
   virtual ir::DataType data_type() const = 0;
   virtual float data_scale() const = 0;
   virtual int32_t data_zero_point() const = 0;
diff --git a/runtime/onert/core/include/backend/basic/Tensor.h b/runtime/onert/core/include/backend/basic/Tensor.h
index 92d98f82e13..5847dfdd82d 100644
--- a/runtime/onert/core/include/backend/basic/Tensor.h
+++ b/runtime/onert/core/include/backend/basic/Tensor.h
@@ -41,8 +41,8 @@ class Tensor : public IPortableTensor
 public:
   Tensor(const ir::OperandInfo &info, DynamicMemoryManager *dynamic_mem_mgr)
-    : IPortableTensor(info), _layout(ir::Layout::NHWC), _buffer(nullptr), _size(info.total_size()),
-      _num_references(0), _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
+    : IPortableTensor(info), _buffer(nullptr), _size(info.total_size()), _num_references(0),
+      _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
   {
     // DO NOTHING
   }
@@ -71,7 +71,6 @@ class Tensor : public IPortableTensor
 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   bool applyShape(const ir::Shape &new_shape) override;
@@ -126,7 +125,6 @@ class Tensor : public IPortableTensor
   void setShape(const ir::Shape &new_shape) override;
 
 protected:
-  const ir::Layout _layout;
   uint8_t *_buffer;
   size_t _size;
   int32_t _num_references;
diff --git a/runtime/onert/core/include/backend/basic/train/TrainableTensor.h b/runtime/onert/core/include/backend/basic/train/TrainableTensor.h
index 263b32962c8..6a5c41782a4 100644
--- a/runtime/onert/core/include/backend/basic/train/TrainableTensor.h
+++ b/runtime/onert/core/include/backend/basic/train/TrainableTensor.h
@@ -51,7 +51,6 @@ class TrainableTensor : public backend::train::ITrainableTensor
 public:
   uint8_t *buffer() const override { return _tensor.buffer(); }
-  ir::Layout layout() const override { return _tensor.layout(); }
 
 public:
   std::vector<ITensor *> optVars() override;
diff --git a/runtime/onert/core/src/backend/builtin/IOTensor.cc b/runtime/onert/core/src/backend/builtin/IOTensor.cc
index a467888a832..2097566fa81 100644
--- a/runtime/onert/core/src/backend/builtin/IOTensor.cc
+++ b/runtime/onert/core/src/backend/builtin/IOTensor.cc
@@ -40,7 +40,6 @@ void IOTensor::setTensor(IPortableTensor *tensor)
 {
   assert(tensor);
   assert(tensor != this);
-  assert(tensor->layout() == _orig->layout()); // Changing layout is not considered yet
   _tensor = tensor;
   if (_info.shape() != tensor->getShape())
   {
diff --git a/runtime/onert/core/src/backend/builtin/IOTensor.h b/runtime/onert/core/src/backend/builtin/IOTensor.h
index cb8307791dd..1467ca0d8ff 100644
--- a/runtime/onert/core/src/backend/builtin/IOTensor.h
+++ b/runtime/onert/core/src/backend/builtin/IOTensor.h
@@ -57,7 +57,7 @@ class IOTensor : public IPortableTensor
 public:
   uint8_t *buffer() const override { return _tensor->buffer(); }
-  ir::Layout layout() const override { return _orig->layout(); }
+  ir::Layout layout() const { return _orig->layout(); }
   void set_dynamic() override
   {
     _info.setDynamic();
diff --git a/runtime/onert/core/src/backend/builtin/UserTensor.h b/runtime/onert/core/src/backend/builtin/UserTensor.h
index b7f6ce091d1..9b6ffbd41a6 100644
--- a/runtime/onert/core/src/backend/builtin/UserTensor.h
+++ b/runtime/onert/core/src/backend/builtin/UserTensor.h
@@ -45,7 +45,7 @@ class UserTensor : public IPortableTensor
 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
   bool applyShape(const ir::Shape &) override;
diff --git a/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc b/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc
index 7d06af10fbe..2f0ce5e4ecf 100644
--- a/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc
+++ b/runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc
@@ -265,7 +265,7 @@ void PermuteLayer::run()
       // If dst is subtensor, we have to use clEnqueueMapBuffer instead of clEnqueueWirteBuffer
       else if (dst->needMemoryMap() && !dst->is_subtensor())
       {
-        if (!src->has_padding() && !dst->has_padding() && src->layout() == dst->layout())
+        if (!src->has_padding() && !dst->has_padding() && permute_type == ir::PermuteType::COPY)
         {
           // This is more effective than multi-threading
           src->access([&](backend::ITensor &) { dst->enqueueWriteBuffer(src->buffer(), false); });
@@ -281,7 +281,7 @@ void PermuteLayer::run()
         }
       }
       else if (src->needMemoryMap() && !src->is_subtensor() && !src->has_padding() &&
-               !dst->has_padding() && src->layout() == dst->layout())
+               !dst->has_padding() && permute_type == ir::PermuteType::COPY)
       {
         // This is more effective than multi-threading
         assert(!dst->needMemoryMap());
diff --git a/runtime/onert/core/src/exec/EdgeTensor.h b/runtime/onert/core/src/exec/EdgeTensor.h
index 8df79c3890d..5ea9c4ac47b 100644
--- a/runtime/onert/core/src/exec/EdgeTensor.h
+++ b/runtime/onert/core/src/exec/EdgeTensor.h
@@ -36,7 +36,7 @@ class EdgeTensor : public backend::IPortableTensor
   ~EdgeTensor() = default;
 
   uint8_t *buffer() const override { return _buffer.get(); }
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   bool applyShape(const ir::Shape &new_shape) override;
   void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
diff --git a/runtime/onert/core/src/exec/IPermuteFunction.cc b/runtime/onert/core/src/exec/IPermuteFunction.cc
index 95198373c16..6775c17d1a6 100644
--- a/runtime/onert/core/src/exec/IPermuteFunction.cc
+++ b/runtime/onert/core/src/exec/IPermuteFunction.cc
@@ -36,9 +36,6 @@ using namespace onert;
 inline nnfw::cker::Shape getShape(const backend::ITensor *tensor)
 {
   const ir::Shape shape = tensor->getShape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
diff --git a/runtime/onert/core/src/exec/IPermuteFunction.h b/runtime/onert/core/src/exec/IPermuteFunction.h
index 517d0dc6bee..604e0c72d8b 100644
--- a/runtime/onert/core/src/exec/IPermuteFunction.h
+++ b/runtime/onert/core/src/exec/IPermuteFunction.h
@@ -93,7 +93,7 @@ class IPermuteFunction : public IFunction
       // Now there is no case where both src and dst have cl buffer.
       assert(!src->needMemoryMap());
 
-      if (!src->has_padding() && !dst->has_padding() && src->layout() == dst->layout())
+      if (!src->has_padding() && !dst->has_padding() && permute_type == ir::PermuteType::COPY)
       {
         src->access([&](backend::ITensor &) { dst->enqueueWriteBuffer(src->buffer(), false); });
       }
@@ -110,7 +110,7 @@ class IPermuteFunction : public IFunction
       }
     }
     else if (src->needMemoryMap() && !src->is_subtensor() && !src->has_padding() &&
-             !dst->has_padding() && src->layout() == dst->layout())
+             !dst->has_padding() && permute_type == ir::PermuteType::COPY)
     {
       assert(!dst->needMemoryMap());
      dst->access([&](backend::ITensor &) { src->enqueueReadBuffer(dst->buffer(), true); });
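In PermuteLayer and IPermuteFunction, the fast-path guard src->layout() == dst->layout() is replaced by permute_type == ir::PermuteType::COPY: once tensors no longer report a layout, a permutation between identical layouts is exactly the COPY kind, so the two conditions select the same bulk-copy path. A minimal sketch of the equivalent predicate (canBulkCopy is an illustrative name, not a function in this patch):

```cpp
namespace ir
{
enum class PermuteType
{
  NHWC_TO_NCHW,
  NCHW_TO_NHWC,
  COPY
};
} // namespace ir

// Sketch: the bulk enqueueWriteBuffer/enqueueReadBuffer path is taken only
// when neither side is padded and no axis reordering is required, which is
// now expressed by the permutation kind instead of comparing tensor layouts.
bool canBulkCopy(bool src_padded, bool dst_padded, ir::PermuteType permute_type)
{
  return !src_padded && !dst_padded && permute_type == ir::PermuteType::COPY;
}
```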
diff --git a/runtime/onert/core/src/exec/IPermuteFunction.test.cc b/runtime/onert/core/src/exec/IPermuteFunction.test.cc
index fd5f73f7f3e..586e2305708 100644
--- a/runtime/onert/core/src/exec/IPermuteFunction.test.cc
+++ b/runtime/onert/core/src/exec/IPermuteFunction.test.cc
@@ -71,7 +71,7 @@ class MockUpTensor : public ITensor
 
   uint8_t *buffer() const override { return _data; }
 
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   ir::DataType data_type() const override { return _type_info.type(); }
   float data_scale() const override { return _type_info.scale(); }
   int32_t data_zero_point() const override { return _type_info.zero_point(); }
diff --git a/runtime/onert/core/src/exec/feature/MockTensor.test.h b/runtime/onert/core/src/exec/feature/MockTensor.test.h
index 1d2d375e2ae..bdddad99aae 100644
--- a/runtime/onert/core/src/exec/feature/MockTensor.test.h
+++ b/runtime/onert/core/src/exec/feature/MockTensor.test.h
@@ -47,7 +47,7 @@ template <typename T> class MockTensor : public onert::backend::ITensor
 public: // DUMMY methods
   size_t total_size() const override { return 0; }
-  onert::ir::Layout layout() const override { return _layout; }
+  onert::ir::Layout layout() const { return _layout; }
   onert::ir::DataType data_type() const override { return onert::ir::DataType::UINT8; }
   float data_scale() const override { return 0; }
   int32_t data_zero_point() const override { return 0; }
diff --git a/runtime/onert/core/src/exec/feature/nchw/Reader.h b/runtime/onert/core/src/exec/feature/nchw/Reader.h
index e1a963cbdc6..0519b995718 100644
--- a/runtime/onert/core/src/exec/feature/nchw/Reader.h
+++ b/runtime/onert/core/src/exec/feature/nchw/Reader.h
@@ -52,8 +52,6 @@ template <typename T> class Reader : public feature::Reader<T>
   Reader(backend::ITensor *tensor)
     : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
   {
-    assert(tensor->layout() == ir::Layout::NCHW);
-
     const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
     auto shape = tensor->getShape();
     _strides.W = shape.dim(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
diff --git a/runtime/onert/core/src/exec/feature/nhwc/Reader.h b/runtime/onert/core/src/exec/feature/nhwc/Reader.h
index 3e3c431bfa5..eec0675553a 100644
--- a/runtime/onert/core/src/exec/feature/nhwc/Reader.h
+++ b/runtime/onert/core/src/exec/feature/nhwc/Reader.h
@@ -53,8 +53,6 @@ template <typename T> class Reader : public feature::Reader<T>
   Reader(const backend::ITensor *tensor)
     : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
   {
-    assert(tensor->layout() == ir::Layout::NHWC);
-
     const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
     auto shape = tensor->getShape();
     _strides.C = shape.dim(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
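The Reader constructors keep deriving strides from calcOffset() deltas rather than from the now-removed layout query, which also works for padded tensors. A standalone example of the same derivation for a dense NHWC float tensor (illustrative only, not onert code):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

int main()
{
  const int H = 4, W = 4, C = 3;
  const size_t elem = sizeof(float);

  // Dense NHWC byte offset: offset(n,h,w,c) = ((n*H + h)*W + w)*C + c, times
  // the element size. This plays the role of ITensor::calcOffset().
  auto offset = [&](int n, int h, int w, int c) {
    return static_cast<size_t>(((n * H + h) * W + w) * C + c) * elem;
  };

  // Stride of an axis = offset delta between coordinates differing by one in
  // that axis, exactly how the Readers build _strides from calcOffset().
  const size_t stride_C = offset(0, 0, 0, 1) - offset(0, 0, 0, 0);
  const size_t stride_W = offset(0, 0, 1, 0) - offset(0, 0, 0, 0);
  const size_t stride_H = offset(0, 1, 0, 0) - offset(0, 0, 0, 0);

  assert(stride_C == elem);
  assert(stride_W == C * elem);
  assert(stride_H == W * C * elem);
  std::printf("strides in bytes: C=%zu W=%zu H=%zu\n", stride_C, stride_W, stride_H);
  return 0;
}
```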