[onert-micro] Reduce Relu code duplication
This PR reduces code duplication for Relu.

ONE-DCO-1.0-Signed-off-by: Artem Balyshev <[email protected]>
Artem Balyshev committed Jun 26, 2024
1 parent 0a7eea9 commit 98f85b6
Showing 6 changed files with 135 additions and 155 deletions.
36 changes: 36 additions & 0 deletions onert-micro/onert-micro/include/execute/kernels/ReluCommon.h
@@ -0,0 +1,36 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ONERT_MICRO_EXECUTE_KERNELS_RELU_COMMON_H
#define ONERT_MICRO_EXECUTE_KERNELS_RELU_COMMON_H

#include "OMStatus.h"
#include "core/OMUtils.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMRuntimeKernel.h"

namespace onert_micro
{
namespace execute
{

OMStatus execute_relu_common(const OMExecuteArgs &execute_args, bool is_relu_6);

} // namespace execute
} // namespace onert_micro

#endif // ONERT_MICRO_EXECUTE_KERNELS_RELU_COMMON_H
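
With this shared declaration in place, each per-operator kernel becomes a thin wrapper that only selects the clamping mode, as the Relu.cpp and Relu6.cpp hunks below show. In outline:

```cpp
// Outline of the delegation pattern introduced by this commit
// (see the Relu.cpp and Relu6.cpp hunks below for the exact code).
OMStatus onert_micro::execute::execute_kernel_CircleRelu(const OMExecuteArgs &execute_args)
{
  return execute_relu_common(execute_args, /*is_relu_6=*/false);
}

OMStatus onert_micro::execute::execute_kernel_CircleRelu6(const OMExecuteArgs &execute_args)
{
  return execute_relu_common(execute_args, /*is_relu_6=*/true);
}
```
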
1 change: 1 addition & 0 deletions onert-micro/onert-micro/src/execute/CMakeLists.txt
@@ -14,6 +14,7 @@ set(SOURCES
OMKernelExecutionBuilder.cpp
OMRuntimeKernel.cpp
OMUtils.cpp
kernels/ReluCommon.cpp
kernels/ConvolutionCommon.cpp
kernels/PoolingCommon.cpp
kernels/ReadKernelDataCommon.cpp
76 changes: 3 additions & 73 deletions onert-micro/onert-micro/src/execute/kernels/Relu.cpp
@@ -14,84 +14,14 @@
* limitations under the License.
*/

#include "OMStatus.h"

#include "core/OMUtils.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMRuntimeKernel.h"

#include "PALReluCommon.h"
#include "execute/kernels/ReluCommon.h"

using namespace onert_micro;
using namespace onert_micro::execute;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

// NOTE: doesnt currently support dynamic shapes
OMStatus onert_micro::execute::execute_kernel_CircleRelu(const OMExecuteArgs &execute_args)
{
core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
uint16_t op_index = execute_args.kernel_index;

const circle::Tensor *input = nullptr;
const circle::Tensor *output = nullptr;

uint8_t *input_data = nullptr;
uint8_t *output_data = nullptr;

OMStatus status = Ok;

OMRuntimeKernel runtime_kernel;
runtime_kernel.readKernel(op_index, runtime_context);

input = runtime_kernel.inputs[inputTensorIdx];
output = runtime_kernel.outputs[outputTensorIdx];

assert(input != nullptr);
assert(output != nullptr);

status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
if (status != Ok)
return status;

input_data = runtime_kernel.inputs_data[inputTensorIdx];
output_data = runtime_kernel.outputs_data[outputTensorIdx];

assert(input_data != nullptr);
assert(output_data != nullptr);

switch (input->type())
{
#ifndef DIS_FLOAT
case circle::TensorType_FLOAT32:
{
core::OMRuntimeShape input_shape(input);
core::OMRuntimeShape output_shape(output);

const float *input_data_float = core::utils::castInputData<float>(input_data);
float *output_data_float = core::utils::castOutputData<float>(output_data);

assert(output_data_float);
const int flat_size = input_shape.flatSize();

status = pal::ReLUCommon(flat_size, input_data_float, output_data_float, 0.0f, false);
}
break;
#endif // DIS_FLOAT
default:
{
status = UnsupportedType;
assert(false && "Unsupported type.");
}
}

return status;
bool is_relu_6 = false;
return execute_relu_common(execute_args, is_relu_6);
}
77 changes: 3 additions & 74 deletions onert-micro/onert-micro/src/execute/kernels/Relu6.cpp
@@ -14,85 +14,14 @@
* limitations under the License.
*/

#include "OMStatus.h"

#include "core/OMUtils.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMRuntimeKernel.h"

#include "PALReluCommon.h"
#include "execute/kernels/ReluCommon.h"

using namespace onert_micro;
using namespace onert_micro::execute;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

// NOTE: doesnt currently support dynamic shapes
OMStatus onert_micro::execute::execute_kernel_CircleRelu6(const OMExecuteArgs &execute_args)
{
core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
uint16_t op_index = execute_args.kernel_index;

const circle::Tensor *input = nullptr;
const circle::Tensor *output = nullptr;

uint8_t *input_data = nullptr;
uint8_t *output_data = nullptr;

OMStatus status = Ok;

OMRuntimeKernel runtime_kernel;
runtime_kernel.readKernel(op_index, runtime_context);

input = runtime_kernel.inputs[inputTensorIdx];
output = runtime_kernel.outputs[outputTensorIdx];

assert(input != nullptr);
assert(output != nullptr);

status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
if (status != Ok)
return status;

input_data = runtime_kernel.inputs_data[inputTensorIdx];
output_data = runtime_kernel.outputs_data[outputTensorIdx];

assert(input_data != nullptr);
assert(output_data != nullptr);

switch (input->type())
{
#ifndef DIS_FLOAT
case circle::TensorType_FLOAT32:
{

core::OMRuntimeShape input_shape(input);
core::OMRuntimeShape output_shape(output);

const float *input_data_float = core::utils::castInputData<float>(input_data);
float *output_data_float = core::utils::castOutputData<float>(output_data);

assert(output_data_float);
const int flat_size = input_shape.flatSize();

status = pal::ReLUCommon(flat_size, input_data_float, output_data_float, 0.0f, true);
}
break;
#endif // DIS_FLOAT
default:
{
status = UnsupportedType;
assert(false && "Unsupported type.");
}
}

return status;
bool is_relu_6 = true;
return execute_relu_common(execute_args, is_relu_6);
}
92 changes: 92 additions & 0 deletions onert-micro/onert-micro/src/execute/kernels/ReluCommon.cpp
@@ -0,0 +1,92 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "execute/kernels/ReluCommon.h"
#include "PALReluCommon.h"

using namespace onert_micro;
using namespace onert_micro::execute;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

// NOTE: doesn't currently support dynamic shapes
OMStatus onert_micro::execute::execute_relu_common(const OMExecuteArgs &execute_args,
bool is_relu_6)
{
core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
uint16_t op_index = execute_args.kernel_index;

const circle::Tensor *input = nullptr;
const circle::Tensor *output = nullptr;

uint8_t *input_data = nullptr;
uint8_t *output_data = nullptr;

OMStatus status = Ok;

OMRuntimeKernel runtime_kernel;
runtime_kernel.readKernel(op_index, runtime_context);

input = runtime_kernel.inputs[inputTensorIdx];
output = runtime_kernel.outputs[outputTensorIdx];

assert(input != nullptr);
assert(output != nullptr);

status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
if (status != Ok)
return status;

input_data = runtime_kernel.inputs_data[inputTensorIdx];
output_data = runtime_kernel.outputs_data[outputTensorIdx];

assert(input_data != nullptr);
assert(output_data != nullptr);

switch (input->type())
{
#ifndef DIS_FLOAT
case circle::TensorType_FLOAT32:
{
core::OMRuntimeShape input_shape(input);
core::OMRuntimeShape output_shape(output);

const auto *input_data_float = core::utils::castInputData<float>(input_data);
auto *output_data_float = core::utils::castOutputData<float>(output_data);

assert(output_data_float);
const int flat_size = input_shape.flatSize();

status = pal::ReLUCommon(flat_size, input_data_float, output_data_float, 0.0f, is_relu_6);
}
break;
#endif // DIS_FLOAT
default:
{
status = UnsupportedType;
assert(false && "Unsupported type.");
}
}

return status;
}
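
The float path above delegates to pal::ReLUCommon, whose implementation is not part of this diff. Below is a minimal, self-contained sketch of such a helper, inferred only from the call site; the namespace, status type, and alpha handling are assumptions, not the actual PAL code.

```cpp
#include <algorithm>

namespace sketch_pal
{

enum Status
{
  Ok = 0
}; // stand-in for onert_micro::OMStatus, which the real helper returns

// flat_size : number of elements to process
// alpha     : lower bound (0.0f at the call site above)
// is_relu_6 : when true, additionally clamps the result to 6.0f
inline Status ReLUCommon(const int flat_size, const float *input_data, float *output_data,
                         const float alpha, const bool is_relu_6)
{
  for (int i = 0; i < flat_size; ++i)
  {
    float result = std::max(input_data[i], alpha); // plain ReLU lower clamp
    if (is_relu_6)
      result = std::min(result, 6.0f); // ReLU6 adds the upper clamp
    output_data[i] = result;
  }
  return Ok;
}

} // namespace sketch_pal
```
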
8 changes: 0 additions & 8 deletions onert-micro/onert-micro/src/import/kernels/Relu6.cpp
@@ -19,14 +19,6 @@
using namespace onert_micro;
using namespace onert_micro::core;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

OMStatus onert_micro::import::configure_kernel_CircleRelu6(const OMConfigureArgs &config_args)
{

