Remove layout info in BackendContext
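
Every entry written into operand_layouts was ir::Layout::NHWC, and its only remaining read was an NHWC assert in planTensors, so the map carried no information. Remove the field, its operand_layouts() accessor, and the related asserts.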
hseok-oh committed Jul 31, 2024
1 parent 93a01d2 commit 2d802d5
Showing 3 changed files with 0 additions and 15 deletions.
runtime/onert/core/include/backend/BackendContext.h (3 changes: 0 additions & 3 deletions)
@@ -42,8 +42,6 @@ struct ContextData
   std::vector<onert::ir::OperationIndex> op_order;
   /* Operands that are defined by other backends */
   util::Set<ir::OperandIndex> external_operands;
-  /* Operand layout info */
-  ir::OperandIndexMap<ir::Layout> operand_layouts;
   /* Custom kernel builder */
   std::shared_ptr<custom::IKernelBuilder> custom_kernel_builder;
   /* Is linear executor or not */
@@ -64,7 +62,6 @@ class BackendContext
   const Backend *backend() const { return _backend; }
   const ir::Graph *graph() const { return _data.graph.get(); }
   const util::Set<ir::OperandIndex> &external_operands() const { return _data.external_operands; }
-  const ir::OperandIndexMap<ir::Layout> &operand_layouts() const { return _data.operand_layouts; }
   const ContextData &data() const { return _data; }

   virtual ITensorRegistry *genTensors() = 0;
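
For orientation, here is a minimal sketch of ContextData as it stands after this commit, reconstructed from the context lines above. The graph member and the type of the linear-executor flag are assumptions; the hunk only shows their accessors and comments.

struct ContextData
{
  /* Graph for this backend (assumed member; the diff only shows the graph() accessor) */
  std::unique_ptr<ir::Graph> graph;
  /* Operation ordering */
  std::vector<onert::ir::OperationIndex> op_order;
  /* Operands that are defined by other backends */
  util::Set<ir::OperandIndex> external_operands;
  /* Custom kernel builder */
  std::shared_ptr<custom::IKernelBuilder> custom_kernel_builder;
  /* Is linear executor or not (assumed bool) */
  bool is_linear_executor;
};
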
@@ -61,11 +61,6 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
   {
     // These tensors do not exist in any (No use and def)
     const auto &info = obj.info();
-    // NOTE Currently we only support NHWC tensors for cpu-common tensors.
-    //      There is no way to get the layout info from the backend context for now.
-    //      When we support NCHW tensors as well, we also need to change tensor info to be
-    //      permuted shape.
-    assert(ctx.operand_layouts().at(ind) == ir::Layout::NHWC);
     tensor_builder->registerTensorInfo(ind, info);
   }
 });
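
With the assert gone, registering an unused operand in planTensors reduces to reading its info and handing it to the tensor builder. A sketch of the surrounding iteration, assuming the usual iterate callback and an external-operand guard that the hunk does not show:

ctx.graph()->operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
  // Skip operands defined by other backends (assumed guard).
  if (ctx.external_operands().contains(ind))
    return;

  // These tensors do not exist in any (No use and def)
  const auto &info = obj.info();
  // Layout is implicitly NHWC now; no per-operand layout lookup or assert.
  tensor_builder->registerTensorInfo(ind, info);
});
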
runtime/onert/core/src/compiler/ExecutorFactory.cc (7 changes: 0 additions & 7 deletions)
@@ -169,9 +169,6 @@ createBackendContexts(compiler::ILoweredGraph &lgraph, bool linear_executor,
     init_context_data(backend);

     auto &partial_graph = *context_data_map[backend].graph;
-    auto &operand_layouts = context_data_map[backend].operand_layouts;
-    assert(operand_layouts.find(operand_ind) == operand_layouts.end());
-    operand_layouts[operand_ind] = ir::Layout::NHWC;

     // Copy the operand and insert it to the partial graph
     auto new_operand = std::make_unique<ir::Operand>(operand);
@@ -191,7 +188,6 @@ createBackendContexts(compiler::ILoweredGraph &lgraph, bool linear_executor,

     auto &partial_graph = *context_data_map[backend].graph;
     auto &external_operands = context_data_map[backend].external_operands;
-    auto &operand_layouts = context_data_map[backend].operand_layouts;

     {
       // Add missing operands (externals)
@@ -210,8 +206,6 @@ createBackendContexts(compiler::ILoweredGraph &lgraph, bool linear_executor,
       UNUSED_RELEASE(new_operand_ind);
       assert(new_operand_ind == operand_ind);

-      assert(operand_layouts.find(operand_ind) == operand_layouts.end());
-      operand_layouts[operand_ind] = ir::Layout::NHWC;
       external_operands.add(operand_ind);
     }

@@ -708,7 +702,6 @@ exec::IExecutor *ExecutorFactory::createTrainableExecutor(
   tdata.tgraph = std::move(tgraph);
   tdata.op_order = std::move(data.op_order);
   tdata.external_operands = std::move(external_operands);
-  tdata.operand_layouts = std::move(data.operand_layouts);
   tdata.custom_kernel_builder = std::move(data.custom_kernel_builder);
   tdata.is_linear_executor = data.is_linear_executor;
   tdata.optim_info = training_info.optimizerInfo();
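
After the removal, the external-operand path in createBackendContexts only copies the operand and records it as external. A sketch assembled from the context lines above; the index-taking addOperand overload is an assumption inferred from the surrounding assert:

// Copy the operand into this backend's partial graph at the same index.
auto new_operand = std::make_unique<ir::Operand>(operand);
auto new_operand_ind = partial_graph.addOperand(operand_ind, std::move(new_operand));
UNUSED_RELEASE(new_operand_ind);
assert(new_operand_ind == operand_ind);

// Only the external-operand set is maintained; there is no layout map
// left to keep in sync.
external_operands.add(operand_ind);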
