diff --git a/runtime/onert/backend/cpu/BackendContext.cc b/runtime/onert/backend/cpu/BackendContext.cc
index 17a121a6a71..3a39df917c4 100644
--- a/runtime/onert/backend/cpu/BackendContext.cc
+++ b/runtime/onert/backend/cpu/BackendContext.cc
@@ -31,7 +31,11 @@ namespace backend
 namespace cpu
 {
 
-ITensorRegistry *BackendContext::genTensors() { return basic::genTensors(*this); }
+ITensorRegistry *BackendContext::genTensors()
+{
+  return basic::genTensors(tensor_builder, *graph(), external_operands(), tensor_registry,
+                           data().op_order, tensor_builder->getSharedMemoryOperandIndexes());
+}
 
 FunctionMap BackendContext::genKernels()
 {
@@ -43,7 +47,8 @@ FunctionMap BackendContext::genKernels()
     ret.emplace(op_ind, std::move(fn_seq));
   }
 
-  basic::initConsts(*this);
+  basic::initConsts(graph()->operands(), external_operands(), tensor_registry.get(),
+                    tensor_builder->getSharedMemoryOperandIndexes());
 
   // NOTE For memory optimization, we want to free some operand data
   const_cast<ir::Graph &>(*_data.graph)
diff --git a/runtime/onert/backend/cpu/BackendContext.h b/runtime/onert/backend/cpu/BackendContext.h
index 24aaeef2cd5..69ab30c827f 100644
--- a/runtime/onert/backend/cpu/BackendContext.h
+++ b/runtime/onert/backend/cpu/BackendContext.h
@@ -44,11 +44,6 @@ class BackendContext : public onert::backend::BackendContext
   ITensorRegistry *genTensors() override;
   FunctionMap genKernels() override;
 
-  const ir::OperandIndexMap<ir::OperandIndex> sharedMemoryOperandIndexes() override
-  {
-    return tensor_builder->getSharedMemoryOperandIndexes();
-  }
-
   std::shared_ptr<ExternalContext> external_context() { return _external_context; }
 
 public:
diff --git a/runtime/onert/core/include/backend/BackendContext.h b/runtime/onert/core/include/backend/BackendContext.h
index fd1f341b502..052809f7d11 100644
--- a/runtime/onert/core/include/backend/BackendContext.h
+++ b/runtime/onert/core/include/backend/BackendContext.h
@@ -64,7 +64,6 @@ class BackendContext
   const util::Set<ir::OperandIndex> &external_operands() const { return _data.external_operands; }
   const ContextData &data() const { return _data; }
 
-  virtual const ir::OperandIndexMap<ir::OperandIndex> sharedMemoryOperandIndexes() { return {}; }
   virtual ITensorRegistry *genTensors() = 0;
   virtual FunctionMap genKernels() = 0;
 
diff --git a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
index a5d38c73c6a..ce905ce7a46 100644
--- a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
+++ b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
@@ -34,19 +34,18 @@ namespace basic
 {
 
 // TODO Remove the template param BackendContext once unification of cpu backend context is done
-template <typename T_BackendContext> void planTensors(const T_BackendContext &ctx)
+template <typename T_TensorBuilder>
+void planTensors(const std::shared_ptr<T_TensorBuilder> &tensor_builder, const ir::Graph &graph,
+                 const util::Set<ir::OperandIndex> &external_operands,
+                 const std::vector<ir::OperationIndex> &op_order)
 {
-  const ir::Graph &graph = *ctx.graph();
-  const auto &order = ctx.data().op_order;
-  auto tensor_builder = ctx.tensor_builder;
-
   ir::OperandIndexMap<uint32_t> uses_map;
   ir::OperandIndexMap<uint32_t> def_map;
   ir::OperandIndexSequence constants;
 
   // Prepare scanning
   graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
-    if (ctx.external_operands().contains(ind))
+    if (external_operands.contains(ind))
       return;
 
     // TODO Check if we need to handle unused tensors
@@ -95,7 +94,7 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
   // 1. Scan DEF of outputs. If the DEF, allocate it
   // 2. Scan DEF of inputs. If variable tensor, allocate it
   // 3. Scan USE of inputs. Decrease the USE and deallocate if the USE is 0
-  for (const auto &op_ind : order)
+  for (const auto &op_ind : op_order)
   {
     const auto &op = graph.operations().at(op_ind);
     auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
@@ -104,7 +103,7 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
     // Define outputs
     for (const auto &ind : op_outputs)
     {
-      if (ctx.external_operands().contains(ind))
+      if (external_operands.contains(ind))
        continue;
       if (!tensor_builder->isRegistered(ind))
        continue;
@@ -121,7 +120,7 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
     // non-constant because of less memory usage by memory planning in here
     for (const auto &ind : op_inputs)
     {
-      if (ctx.external_operands().contains(ind))
+      if (external_operands.contains(ind))
        continue;
       if (!tensor_builder->isRegistered(ind))
        continue;
@@ -138,7 +137,7 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
 
     for (const auto &ind : op_inputs)
     {
-      if (ctx.external_operands().contains(ind))
+      if (external_operands.contains(ind))
        continue;
       if (!tensor_builder->isRegistered(ind))
        continue;
@@ -177,16 +176,19 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
                [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
 }
 
-template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContext &ctx)
+template <typename T_TensorBuilder>
+ITensorRegistry *genTensors(const std::shared_ptr<T_TensorBuilder> &tensor_builder,
+                            const ir::Graph &graph,
+                            const util::Set<ir::OperandIndex> &external_operands,
+                            const std::shared_ptr<ITensorRegistry> &tensor_registry,
+                            const std::vector<ir::OperationIndex> &op_order,
+                            const ir::OperandIndexMap<ir::OperandIndex> &shared_memory_operand_idx)
 {
-  const ir::Graph &graph = *ctx.graph();
-  auto tensor_builder = ctx.tensor_builder;
-
   // process source tensors for shared memory at first
   std::vector<ir::OperandIndex> registered_source_ind;
-  for (const auto &[_, source_ind] : tensor_builder->getSharedMemoryOperandIndexes())
+  for (const auto &[_, source_ind] : shared_memory_operand_idx)
   {
-    if (ctx.external_operands().contains(source_ind))
+    if (external_operands.contains(source_ind))
       continue;
     if (tensor_builder->isRegistered(source_ind)) // some tensors can have the same source
       continue;
@@ -195,7 +197,7 @@ template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContex
   }
 
   graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
-    if (ctx.external_operands().contains(ind))
+    if (external_operands.contains(ind))
       return;
     if (std::find(std::begin(registered_source_ind), std::end(registered_source_ind), ind) !=
         std::end(registered_source_ind)) // skip tensors already registered
@@ -206,7 +208,7 @@ template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContex
   // TODO Get compiler options from compiler, and use it rather than getting it from Env
   if (util::getConfigString(util::config::EXECUTOR) == "Linear")
   {
-    basic::planTensors(ctx);
+    basic::planTensors(tensor_builder, graph, external_operands, op_order);
   }
   else
   {
@@ -220,7 +222,13 @@ template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContex
 
   tensor_builder->allocate();
 
-  return ctx.tensor_registry.get();
+  return tensor_registry.get();
+}
+
+template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContext &ctx)
+{
+  return genTensors(ctx.tensor_builder, *ctx.graph(), ctx.external_operands(), ctx.tensor_registry,
+                    ctx.data().op_order, {});
 }
 
 inline void initConsts(const ir::Operands &operands,
@@ -263,8 +271,7 @@ inline void initConsts(const ir::Operands &operands,
 
 inline void initConsts(BackendContext &ctx)
 {
-  initConsts(ctx.graph()->operands(), ctx.external_operands(), ctx.tensor_registry.get(),
-             ctx.sharedMemoryOperandIndexes());
+  initConsts(ctx.graph()->operands(), ctx.external_operands(), ctx.tensor_registry.get(), {});
 }
 
 } // namespace basic
diff --git a/runtime/onert/core/src/backend/builtin/Backend.h b/runtime/onert/core/src/backend/builtin/Backend.h
index e36d9282f12..85d389505d3 100644
--- a/runtime/onert/core/src/backend/builtin/Backend.h
+++ b/runtime/onert/core/src/backend/builtin/Backend.h
@@ -66,7 +66,7 @@ class Backend : public ::onert::backend::Backend, public backend::train::ITraina
     // TODO Remove TensorBuilder and ConstantInitializer
     // TODO Support Consecutive controflow operation's intermediate tensor
     auto tr = std::make_shared<TensorRegistry>();
-    auto tb = std::make_shared<TensorBuilder>(tr, ir::OperandIndexMap<ir::OperandIndex>{});
+    auto tb = std::make_shared<TensorBuilder>(tr);
     context->tensor_registry = tr;
     context->tensor_builder = tb;
     context->kernel_gen = std::make_shared<KernelGenerator>(
diff --git a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
index 26534df531d..b6781061f2f 100644
--- a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
+++ b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
@@ -27,13 +27,10 @@ namespace backend
 namespace builtin
 {
 
-TensorBuilder::TensorBuilder(
-  const std::shared_ptr<TensorRegistry> &tensor_reg,
-  const ir::OperandIndexMap<ir::OperandIndex> &shared_memory_operand_indexes)
+TensorBuilder::TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg)
   : _tensor_reg{tensor_reg}, _dynamic_tensor_mgr{new DynamicTensorManager(_tensor_reg->base_reg())},
     _static_tensor_mgr{new basic::StaticTensorManager(
-      _tensor_reg->base_reg(), _dynamic_tensor_mgr.get(), shared_memory_operand_indexes)},
-    _shared_memory_operand_indexes{shared_memory_operand_indexes}
+      _tensor_reg->base_reg(), _dynamic_tensor_mgr.get(), ir::OperandIndexMap<ir::OperandIndex>{})}
 {
   /* empty */
 }
@@ -102,11 +99,6 @@ basic::Tensor *TensorBuilder::nativeOwnTensorAt(const ir::OperandIndex &ind)
   return _tensor_reg->getNativeOwnTensor(ind);
 }
 
-const ir::OperandIndexMap<ir::OperandIndex> &TensorBuilder::getSharedMemoryOperandIndexes() const
-{
-  return _shared_memory_operand_indexes;
-}
-
 } // namespace builtin
 } // namespace backend
 } // namespace onert
diff --git a/runtime/onert/core/src/backend/builtin/TensorBuilder.h b/runtime/onert/core/src/backend/builtin/TensorBuilder.h
index 77d182d779d..295e91da1fc 100644
--- a/runtime/onert/core/src/backend/builtin/TensorBuilder.h
+++ b/runtime/onert/core/src/backend/builtin/TensorBuilder.h
@@ -37,8 +37,7 @@ namespace builtin
 class TensorBuilder
 {
 public:
-  TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg,
-                const ir::OperandIndexMap<ir::OperandIndex> &shared_memory_operand_indexes);
+  TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg);
 
   /**
    * @brief Register tensor information to allocate on CPU backend
@@ -54,8 +53,6 @@ class TensorBuilder
 
   void allocate(void);
 
-  const ir::OperandIndexMap<ir::OperandIndex> &getSharedMemoryOperandIndexes() const;
-
   DynamicTensorManager *dynamicTensorManager(void);
 
   /**
@@ -71,7 +68,6 @@ class TensorBuilder
   std::unique_ptr<DynamicTensorManager> _dynamic_tensor_mgr;
   std::unique_ptr<basic::StaticTensorManager> _static_tensor_mgr;
   ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
-  ir::OperandIndexMap<ir::OperandIndex> _shared_memory_operand_indexes;
 };
 
 } // namespace builtin