From 2043a7861a5d89e2e710bfea5554bc3c086b0977 Mon Sep 17 00:00:00 2001
From: Jiyoung Giuliana Yun
Date: Tue, 25 Jun 2024 19:44:36 +0900
Subject: [PATCH 01/16] [onert/nnfw_api] Specify loss per epoch in
GenModelTrain tests (#13271)
This commit updates the GenModelTrain code so that tests verify the loss
for each epoch.
ONE-DCO-1.0-Signed-off-by: Jiyoung Yun
Co-authored-by: Jang Jiseob
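With this change, `uniformTCD` takes one loss vector per epoch instead of a
single final-loss vector, and the epoch count must match. A minimal sketch of
the resulting test shape (the loss numbers here are placeholders, not values
taken from this patch):

  _context->addTrainCase(uniformTCD(
    {{{1, 3}}, {{2, 1}}},                // inputs, one entry per step
    {{{5}}, {{3}}},                      // expected outputs, one entry per step
    {{16.7f}, {15.5f}, {14.4f}, {13.3f}} // one loss vector per epoch
  ));
  _context->setEpoch(4);                 // must equal the number of loss vectors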
---
tests/nnfw_api/lib/GenModelTrain.h | 35 ++++++++++---------
.../GenModelTests/BranchModelTrain.test.cc | 6 ++--
.../nontrainable_op_trains/MaxPool2D.test.cc | 8 ++---
.../nontrainable_op_trains/Mean.test.cc | 3 +-
.../nontrainable_op_trains/Pad.test.cc | 2 +-
.../nontrainable_op_trains/Relu.test.cc | 2 +-
.../nontrainable_op_trains/Relu6.test.cc | 2 +-
.../nontrainable_op_trains/Reshape.test.cc | 4 +--
.../nontrainable_op_trains/Softmax.test.cc | 2 +-
.../one_op_trains/Conv2D.test.cc | 13 +++----
.../DepthwiseConvolution.test.cc | 12 +++----
.../one_op_trains/FullyConnected.test.cc | 8 ++---
12 files changed, 50 insertions(+), 47 deletions(-)
diff --git a/tests/nnfw_api/lib/GenModelTrain.h b/tests/nnfw_api/lib/GenModelTrain.h
index 3b7507a1e54..d717c7bb1e7 100644
--- a/tests/nnfw_api/lib/GenModelTrain.h
+++ b/tests/nnfw_api/lib/GenModelTrain.h
@@ -43,9 +43,9 @@ struct TrainCaseData
DataSet dataset;
/**
- * @brief A vector of loss buffer
+ * @brief A vector of loss buffers
*/
- std::vector<float> losses;
+ std::vector<std::vector<float>> losses;
/**
* @brief Append vector data list of inputs that are used in one step
@@ -84,10 +84,9 @@ struct TrainCaseData
* @tparam T Data type
* @param data vector data array
*/
- TrainCaseData &setLosses(std::vector<float> losses)
+ TrainCaseData &addLosses(const std::vector<float> &loss)
{
- this->losses = losses;
-
+ losses.emplace_back(loss);
return *this;
}
@@ -159,7 +158,7 @@ inline void TrainCaseData::addData(OneStepData &dest, const std::vector<T>
static TrainCaseData uniformTCD(const std::vector<std::vector<std::vector<float>>> &inputs_dataset,
const std::vector<std::vector<std::vector<float>>> &expects_dataset,
- const std::vector<float> &losses)
+ const std::vector<std::vector<float>> &losses)
{
assert(inputs_dataset.size() == expects_dataset.size());
@@ -167,11 +166,9 @@ static TrainCaseData uniformTCD(const std::vector<std::vector<std::vector<float>>> &
for (const auto &data : inputs_dataset)
ret.addInputs(data);
for (const auto &data : expects_dataset)
- {
- assert(data.size() == losses.size());
ret.addExpects(data);
- }
- ret.setLosses(losses);
+ for (const auto &loss : losses)
+ ret.addLosses(loss);
return ret;
}
@@ -360,6 +357,7 @@ class GenModelTrain : public ::testing::Test
// Prepare expected losses
const auto &ref_losses = train_case.losses;
+ ASSERT_EQ(ref_losses.size(), num_epoch);
std::vector<float> actual_losses(num_expecteds, 0.f);
for (uint32_t epoch = 0; epoch < num_epoch; ++epoch)
{
@@ -410,14 +408,17 @@ class GenModelTrain : public ::testing::Test
{
actual_losses[i] /= num_step;
}
- }
- // TODO better way for handling FP error?
- for (uint32_t i = 0; i < actual_losses.size(); i++)
- {
- const float actual = actual_losses[i];
- const float expected = ref_losses[i];
- EXPECT_NEAR(expected, actual, 0.001) << "Loss #" << i;
+ ASSERT_EQ(ref_losses[epoch].size(), actual_losses.size());
+
+ // TODO better way for handling FP error?
+ for (uint32_t i = 0; i < actual_losses.size(); i++)
+ {
+ const float actual = actual_losses[i];
+ const float expected = ref_losses[epoch][i];
+ EXPECT_NEAR(expected, actual, 0.001)
+ << "Loss " << epoch + 1 << "/" << num_epoch << " #" << i;
+ }
}
}
diff --git a/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc b/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc
index e01489f0b2b..e642eda8630 100644
--- a/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc
@@ -48,7 +48,7 @@ TEST_F(GenModelTrain, BranchOps_FC_Add)
_context->addTrainCase(uniformTCD(
{{{1, 3}, {0, 1, 2, 3, 4, 5, 6, 7}}, {{2, 1}, {7, 6, 5, 4, 3, 2, 1, 0}}}, // inputs
{{{2, 1, 5, 5, 2, 1, 5, 5}}, {{2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
- {8.4678f} // loss
+ {{9.2218f}, {8.9554f}, {8.7044f}, {8.4678f}} // loss
));
_context->setBackends({"train"});
@@ -90,7 +90,7 @@ TEST_F(GenModelTrain, BranchOps_FC_Sub)
_context->addTrainCase(uniformTCD(
{{{0, 1, 2, 3, 4, 5, 1, 3}, {6, 7}}, {{5, 4, 3, 2, 1, 0, 2, 1}, {7, 6}}}, // inputs
{{{2, 1, 5, 5, 2, 1, 5, 5}}, {{2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
- {3.2863f} // loss
+ {{7.3265f}, {4.6811f}, {3.6735f}, {3.2863f}} // loss
));
_context->setBackends({"train"});
@@ -145,7 +145,7 @@ TEST_F(GenModelTrain, BranchOps_FC_Mul)
_context->addTrainCase(
uniformTCD({{{0, 3}, {6, 7}}, {{5, 4}, {7, 6}}}, // inputs
{{{3, 2, 1, 2, 5, 6, 1, 0}}, {{2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
- {12.2822f} // loss
+ {{12.5488f}, {12.4590f}, {12.3701f}, {12.2822f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/MaxPool2D.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/MaxPool2D.test.cc
index 7d2357e11ef..5038c7aa4b8 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/MaxPool2D.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/MaxPool2D.test.cc
@@ -62,7 +62,7 @@ TEST_F(GenModelTrain, NonTrainableOps_Conv2D_MaxPool2D)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // inputs
{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1}}}, // expected
- {24.0089f} // loss
+ {{31.6667f}, {28.6837f}, {26.1765f}, {24.0089f}} // loss
));
_context->setBackends({"train"});
@@ -110,7 +110,7 @@ TEST_F(GenModelTrain, NonTrainableOps_Conv2D_MaxPool2D_Depth1_Filter2)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // inputs
{{{1, 2, 3, 4, 5, 6, 7, 8, 9}}}, // expected
- {8.4666} // loss
+ {{31.6667f}, {25.9453f}, {15.4067f}, {8.4666f}} // loss
));
_context->setBackends({"train"});
@@ -158,7 +158,7 @@ TEST_F(GenModelTrain, NonTrainableOps_Conv2D_MaxPool2D_Depth2_Filter2)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // inputs
{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1}}}, // expected
- {9.3556f} // loss
+ {{31.6667f}, {27.8823f}, {16.9743f}, {9.3556f}} // loss
));
_context->setBackends({"train"});
@@ -208,7 +208,7 @@ TEST_F(GenModelTrain, NonTrainableOps_Conv2D_MaxPool2D_Stride2Filter2)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // inputs
{{{1, 2, 3, 4, 5, 6, 7, 8}}}, // expected
- {9.0784f} // loss
+ {{25.5000f}, {19.2126f}, {12.9202f}, {9.0784f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Mean.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Mean.test.cc
index 2153c4e2e25..242c3ff2d4a 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Mean.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Mean.test.cc
@@ -46,7 +46,8 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Mean)
_context = std::make_unique<GenModelTrainContext>(cgen.finish());
_context->addTrainCase(uniformTCD({{{1, 3}}, {{2, 1}}}, // inputs
{{{5}}, {{3}}}, // expected
- {13.3691f} // loss
+ {{16.7778f}, {15.5544f}, {14.4203f}, {13.3691f}}
+ // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Pad.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Pad.test.cc
index 71cec482129..1a86f40619e 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Pad.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Pad.test.cc
@@ -45,7 +45,7 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Pad)
_context->addTrainCase(
uniformTCD({{{0, 1, 2, 3, 4, 5, 6, 7}}, {{7, 6, 5, 4, 3, 2, 1, 0}}}, // inputs
{{{0, 13, 52, 0}}, {{0, 31, 24, 0}}}, // expected
- {1.3900f} // loss
+ {{462.7862f}, {32.8115f}, {5.8401f}, {1.3900f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu.test.cc
index 35a0380542e..ece21ad8b71 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu.test.cc
@@ -50,7 +50,7 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Relu_FC)
_context->addTrainCase(
uniformTCD({{{1, 3}}, {{2, 1}}}, // inputs
{{{0, 1, 5, 5, 2, 1, 5, 5}}, {{2, 1, 5, 5, 0, 1, 5, 6}}}, // expected
- {13.5010f} // loss
+ {{13.9041f}, {13.7684f}, {13.6340f}, {13.5010f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu6.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu6.test.cc
index 7273c335c3c..b52f304d90a 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu6.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Relu6.test.cc
@@ -50,7 +50,7 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Relu6_FC)
_context->addTrainCase(
uniformTCD({{{1, 3}}, {{2, 1}}}, // inputs
{{{2, 0, 6, 5, 2, 1, 6, 5}}, {{2, 1, 6, 5, 0, 1, 6, 6}}}, // expected
- {16.3412f} // loss
+ {{16.8338f}, {16.6680f}, {16.5038f}, {16.3412f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Reshape.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Reshape.test.cc
index 57e9e17a630..eda2b75955b 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Reshape.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Reshape.test.cc
@@ -49,8 +49,8 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Reshape)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // input dataset
{{{47, -4, -25, 9, 10, 10, -13, 11, -14, -26, -12, 26, 20, 40, 1, 3, 11,
- 4}}}, // expected dataset
- {226.5260f} // last losses
+ 4}}}, // expected dataset
+ {{403.3333f}, {324.0978f}, {267.7882f}, {226.5260f}} // losses
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Softmax.test.cc b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Softmax.test.cc
index e9dcb239afd..a244ede941d 100644
--- a/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Softmax.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/nontrainable_op_trains/Softmax.test.cc
@@ -45,7 +45,7 @@ TEST_F(GenModelTrain, NonTrainableOps_FC_Softmax)
_context->addTrainCase(
uniformTCD({{{1, 3}}, {{2, 1}}}, // inputs
{{{0, 1, 0, 0, 0, 0, 0, 0}}, {{0, 0, 0, 0, 0, 1, 0, 0}}}, // expected
- {0.1092f} // loss
+ {{0.1094f}, {0.1093f}, {0.1092f}, {0.1092f}} // loss
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/one_op_trains/Conv2D.test.cc b/tests/nnfw_api/src/GenModelTests/one_op_trains/Conv2D.test.cc
index 9ffb44e1e63..1d65d8d2ead 100644
--- a/tests/nnfw_api/src/GenModelTests/one_op_trains/Conv2D.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/one_op_trains/Conv2D.test.cc
@@ -51,13 +51,13 @@ TEST_F(GenModelTrain, OneOp_Conv2D_training_enabled)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // input dataset
{{{47, -4, -25, 9, 10, 10, -13, 11, -14, -26, -12, 26, 20, 40, 1, 3, 11,
- 4}}}, // expected dataset
- {324.0978f} // last losses
+ 4}}}, // expected dataset
+ {{403.3333f}, {324.0978f}, {267.7882f}, {226.5260f}} // losses
));
_context->setBackends({"train"});
// To apply backward to loss, epoch should be >= 2
- _context->setEpoch(2);
+ _context->setEpoch(4);
SUCCEED();
}
@@ -70,13 +70,14 @@ TEST_F(GenModelTrain, OneOp_Conv2D_training_disabled)
uniformTCD({{{4, 0, -5, 1, 0, 4, -1, 1, -1, -3, 3, -2, -4,
1, -2, 2, 4, -4, 2, 2, 0, 4, -1, -2, 4}}}, // input dataset
{{{47, -4, -25, 9, 10, 10, -13, 11, -14, -26, -12, 26, 20, 40, 1, 3, 11,
- 4}}}, // expected dataset
- {403.333f} // gain of loss after each epoch is const (equal 403.333)
+ 4}}}, // expected dataset
+ {{403.333f}, {403.333f}, {403.333f}, {403.333f}}
+ // gain of loss after each epoch is const (equal 403.333)
));
_context->setBackends({"train"});
// To apply backward to loss, epoch should be >= 2
- _context->setEpoch(10);
+ _context->setEpoch(4);
SUCCEED();
}
diff --git a/tests/nnfw_api/src/GenModelTests/one_op_trains/DepthwiseConvolution.test.cc b/tests/nnfw_api/src/GenModelTests/one_op_trains/DepthwiseConvolution.test.cc
index e6972857975..83a969a4412 100644
--- a/tests/nnfw_api/src/GenModelTests/one_op_trains/DepthwiseConvolution.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/one_op_trains/DepthwiseConvolution.test.cc
@@ -41,9 +41,9 @@ TEST_F(GenModelTrain, OneOp_DepthwiseConv2D)
_context = std::make_unique<GenModelTrainContext>(cgen.finish());
_context->addTrainCase(
- uniformTCD({{{1, 2, 7, 8, 3, 4, 9, 1, 5, 6, 11, 2}}}, // input dataset
- {{{1, -4, 1, -3, 2, -2, 2, -4}}}, // expected dataset
- {1.1701f} // last losses
+ uniformTCD({{{1, 2, 7, 8, 3, 4, 9, 1, 5, 6, 11, 2}}}, // input dataset
+ {{{1, -4, 1, -3, 2, -2, 2, -4}}}, // expected dataset
+ {{6.8750f}, {2.5275f}, {1.6320f}, {1.1701f}} // losses
));
_context->setBackends({"train"});
@@ -78,7 +78,7 @@ TEST_F(GenModelTrain, OneOp_DepthwiseConv2D_No_Multiplier)
_context->addTrainCase(
uniformTCD({{{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}}}, // input dataset
{{{6.5f, 7.5f, 8.5f, 3.5f, 8.5f, 5.5f, 2.5f, 3.5f}}}, // expected dataset
- {15.5431f} // last losses
+ {{38.0000f}, {26.6868f}, {19.8101f}, {15.5431f}} // losses
));
_context->setBackends({"train"});
@@ -113,7 +113,7 @@ TEST_F(GenModelTrain, OneOp_DepthwiseConv2D_No_Multiplier_RELU6)
_context->addTrainCase(
uniformTCD({{{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}}}, // input dataset
{{{6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}}, // expected dataset
- {36.0000f} // last losses
+ {{36.0000f}, {36.0000f}, {36.0000f}, {36.0000f}} // losses
));
_context->setBackends({"train"});
@@ -148,7 +148,7 @@ TEST_F(GenModelTrain, OneOp_DepthwiseConv2D_3x3)
_context->addTrainCase(uniformTCD(
{{{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}}}, // input dataset
{{{6.0f, 16.0f, 8.0f, 16.0f, 10.0f, 16.0f, 12.0f, 16.0f}}}, // expected dataset
- {13.7338f} // last losses
+ {{171.0000f}, {69.5150f}, {29.9159f}, {13.7338f}} // losses
));
_context->setBackends({"train"});
diff --git a/tests/nnfw_api/src/GenModelTests/one_op_trains/FullyConnected.test.cc b/tests/nnfw_api/src/GenModelTests/one_op_trains/FullyConnected.test.cc
index d0b78ee25b5..9e95b90966b 100644
--- a/tests/nnfw_api/src/GenModelTests/one_op_trains/FullyConnected.test.cc
+++ b/tests/nnfw_api/src/GenModelTests/one_op_trains/FullyConnected.test.cc
@@ -42,7 +42,7 @@ TEST_F(GenModelTrain, OneOp_FullyConnected)
_context->addTrainCase(
uniformTCD({{{1, 3}}, {{2, 1}}}, // inputs
{{{2, 1, 5, 5, 2, 1, 5, 5}}, {{2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
- {11.4484f} // loss
+ {{14.2234f}, {13.2278f}, {12.3045f}, {11.4484f}} // loss
));
_context->setBackends({"train"});
@@ -72,9 +72,9 @@ TEST_F(GenModelTrain, OneOp_FullyConnected_OptionalBias)
_context = std::make_unique<GenModelTrainContext>(cgen.finish());
_context->addTrainCase(
- uniformTCD({{{1, 3, 2, 1}}}, // inputs
- {{{2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
- {{12.7512f}} // loss
+ uniformTCD({{{1, 3, 2, 1}}}, // inputs
+ {{{2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 6}}}, // expected
+ {{14.4375f}, {13.9950f}, {13.5668f}, {13.1523f}, {12.7512f}} // loss
));
_context->setBackends({"train"});
From 41cca4a2180baec190f7fd27b42f82424cae21b4 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Tue, 25 Jun 2024 20:12:59 +0900
Subject: [PATCH 02/16] [onert] Change API status condition (#13280)
This commit changes some API status conditions:
- `nnfw_set_workspace`: should be called before model loading to support cached optimized model auto loading later
- `nnfw_set_quantization_type`, `nnfw_set_quantized_model_path`, `nnfw_quantize`, `nnfw_set_codegen_model_path`, `nnfw_codegen`: allow to be called after prepare to support full quantization with minmax data collection
ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
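A sketch of the call order the new conditions permit (nnfw C API; error
handling is omitted, and the NNFW_QUANTIZE_TYPE_U8_ASYM enum value is an
assumption, not taken from this patch):

  nnfw_session *session = NULL;
  nnfw_create_session(&session);
  nnfw_set_workspace(session, ".");                 // now must precede model loading
  nnfw_load_model_from_file(session, "model.circle");
  nnfw_prepare(session);
  // the quantization APIs may now also be called here, after prepare:
  nnfw_set_quantization_type(session, NNFW_QUANTIZE_TYPE_U8_ASYM);
  nnfw_set_quantized_model_path(session, "quantized.circle");
  nnfw_quantize(session);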
---
runtime/onert/api/nnfw/include/nnfw.h | 2 +-
.../onert/api/nnfw/src/nnfw_api_internal.cc | 15 +++++-------
.../src/NNPackageTests/AddModelLoaded.test.cc | 20 ++--------------
.../src/NNPackageTests/SessionCreated.test.cc | 24 +++++++++++++++++++
4 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/runtime/onert/api/nnfw/include/nnfw.h b/runtime/onert/api/nnfw/include/nnfw.h
index 51def28a3ff..6008a7cfa02 100644
--- a/runtime/onert/api/nnfw/include/nnfw.h
+++ b/runtime/onert/api/nnfw/include/nnfw.h
@@ -511,7 +511,7 @@ NNFW_STATUS nnfw_query_info_u32(nnfw_session *session, NNFW_INFO_ID id, uint32_t
* This function sets the directory to be used as a workspace.
* System should allow read and write access to the directory for the runtime.
* Default workspace is running directory of the application.
- * This function should be called before {@link nnfw_prepare} is invoked.
+ * This function should be called before {@link nnfw_load_model_from_file} is invoked.
*
* @param[in] session session to be queried on.
* @param[in] dir workspace directory path
diff --git a/runtime/onert/api/nnfw/src/nnfw_api_internal.cc b/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
index acc446c6e8d..c7c335af939 100644
--- a/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
+++ b/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
@@ -898,14 +898,11 @@ NNFW_STATUS nnfw_session::set_workspace(const char *dir)
if (!dir)
return NNFW_STATUS_UNEXPECTED_NULL;
- if (isStatePreparedOrFinishedRun())
+ if (!isStateInitialized())
return NNFW_STATUS_INVALID_STATE;
_coptions->workspace_dir = std::string(dir);
- // TODO Set workspace dir to workspace user (ex. compiler, quantization manager, etc)
- // if model is already loaded
-
return NNFW_STATUS_NO_ERROR;
}
@@ -1683,7 +1680,7 @@ NNFW_STATUS nnfw_session::set_quantization_type(NNFW_QUANTIZE_TYPE qtype)
using onert::odc::QuantizeType;
try
{
- if (!isStateModelLoaded())
+ if (isStateInitialized() || isStateRunning())
{
std::cerr << "invalid state" << std::endl;
return NNFW_STATUS_INVALID_STATE;
@@ -1722,7 +1719,7 @@ NNFW_STATUS nnfw_session::set_quantized_model_path(const char *path)
{
try
{
- if (!isStateModelLoaded())
+ if (isStateInitialized() || isStateRunning())
{
std::cerr << "invalid state" << std::endl;
return NNFW_STATUS_INVALID_STATE;
@@ -1743,7 +1740,7 @@ NNFW_STATUS nnfw_session::quantize()
{
try
{
- if (!isStateModelLoaded())
+ if (isStateInitialized() || isStateRunning())
{
std::cerr << "invalid state" << std::endl;
return NNFW_STATUS_INVALID_STATE;
@@ -1774,7 +1771,7 @@ NNFW_STATUS nnfw_session::set_codegen_model_path(const char *path)
{
try
{
- if (!isStateModelLoaded())
+ if (isStateInitialized() || isStateRunning())
{
std::cerr << "invalid state" << std::endl;
return NNFW_STATUS_INVALID_STATE;
@@ -1796,7 +1793,7 @@ NNFW_STATUS nnfw_session::codegen(const char *target, NNFW_CODEGEN_PREF pref)
{
try
{
- if (!isStateModelLoaded())
+ if (isStateInitialized() || isStateRunning())
{
std::cerr << "Error during nnfw_session::codegen : Invalid state" << std::endl;
return NNFW_STATUS_INVALID_STATE;
diff --git a/tests/nnfw_api/src/NNPackageTests/AddModelLoaded.test.cc b/tests/nnfw_api/src/NNPackageTests/AddModelLoaded.test.cc
index f43da2f98c4..d0fe617a5ce 100644
--- a/tests/nnfw_api/src/NNPackageTests/AddModelLoaded.test.cc
+++ b/tests/nnfw_api/src/NNPackageTests/AddModelLoaded.test.cc
@@ -257,15 +257,10 @@ TEST_F(ValidationTestAddModelLoaded, neg_debug_get_config)
ASSERT_EQ(nnfw_get_config(_session, "BAD_KEY", buf, sizeof(buf)), NNFW_STATUS_ERROR);
}
-TEST_F(ValidationTestAddModelLoaded, set_workspace)
-{
- NNFW_ENSURE_SUCCESS(nnfw_set_workspace(_session, "."));
- SUCCEED();
-}
-
TEST_F(ValidationTestAddModelLoaded, neg_set_workspace)
{
- ASSERT_EQ(nnfw_set_workspace(_session, nullptr), NNFW_STATUS_UNEXPECTED_NULL);
+ // Call after prepare
+ EXPECT_EQ(nnfw_set_workspace(_session, "."), NNFW_STATUS_INVALID_STATE);
}
TEST_F(ValidationTestAddModelLoaded, set_prepare_config)
@@ -283,14 +278,3 @@ TEST_F(ValidationTestAddModelLoaded, neg_set_execute_config)
EXPECT_EQ(nnfw_set_execute_config(_session, NNFW_RUN_CONFIG_PROFILE, nullptr),
NNFW_STATUS_INVALID_STATE);
}
-
-TEST_F(ValidationTestAddModelLoaded, neg_set_execute_config_with_no_workspace)
-{
- NNFW_ENSURE_SUCCESS(nnfw_set_workspace(_session, ""));
- NNFW_ENSURE_SUCCESS(nnfw_prepare(_session));
-
- // Some execution config requires workspace
- EXPECT_EQ(nnfw_set_execute_config(_session, NNFW_RUN_CONFIG_DUMP_MINMAX, nullptr),
- NNFW_STATUS_ERROR);
- EXPECT_EQ(nnfw_set_execute_config(_session, NNFW_RUN_CONFIG_TRACE, nullptr), NNFW_STATUS_ERROR);
-}
diff --git a/tests/nnfw_api/src/NNPackageTests/SessionCreated.test.cc b/tests/nnfw_api/src/NNPackageTests/SessionCreated.test.cc
index 4576ce884eb..d410fe8055a 100644
--- a/tests/nnfw_api/src/NNPackageTests/SessionCreated.test.cc
+++ b/tests/nnfw_api/src/NNPackageTests/SessionCreated.test.cc
@@ -124,3 +124,27 @@ TEST_F(ValidationTestSessionCreated, neg_internal_set_config)
// All arguments are valid, but the session state is wrong
ASSERT_EQ(nnfw_set_config(_session, "GRAPH_DOT_DUMP", "0"), NNFW_STATUS_INVALID_STATE);
}
+
+TEST_F(ValidationTestSessionCreated, set_workspace)
+{
+ NNFW_ENSURE_SUCCESS(nnfw_set_workspace(_session, "."));
+ SUCCEED();
+}
+
+TEST_F(ValidationTestSessionCreated, neg_set_workspace)
+{
+ ASSERT_EQ(nnfw_set_workspace(_session, nullptr), NNFW_STATUS_UNEXPECTED_NULL);
+}
+
+TEST_F(ValidationTestSessionCreated, neg_set_execute_config_with_no_workspace)
+{
+ NNFW_ENSURE_SUCCESS(nnfw_set_workspace(_session, ""));
+ auto cbuf = genAddModel();
+ NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(_session, cbuf.buffer(), cbuf.size()));
+ NNFW_ENSURE_SUCCESS(nnfw_prepare(_session));
+
+ // Some execution config requires workspace
+ EXPECT_EQ(nnfw_set_execute_config(_session, NNFW_RUN_CONFIG_DUMP_MINMAX, nullptr),
+ NNFW_STATUS_ERROR);
+ EXPECT_EQ(nnfw_set_execute_config(_session, NNFW_RUN_CONFIG_TRACE, nullptr), NNFW_STATUS_ERROR);
+}
From 8befa00d46e700c755ee4e62d6ebfaf82ae4450b Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 06:17:30 +0900
Subject: [PATCH 03/16] [one-cmds] Add warning message on target related
process (#13285)
This commit adds warning messages to target-related processing.
ONE-DCO-1.0-Signed-off-by: seongwoo
---
.../dummy-driver/src/dummy-compile.cpp | 13 +-
.../dummy-driver/src/dummy-compiler.cpp | 12 +-
compiler/one-cmds/one-codegen | 4 +
compiler/one-cmds/one-profile | 4 +
compiler/one-cmds/onelib/argumentparse.py | 21 +++-
compiler/one-cmds/tests/onecc_066.cfg | 11 ++
compiler/one-cmds/tests/onecc_066.ini | 2 +
compiler/one-cmds/tests/onecc_066.py | 13 ++
compiler/one-cmds/tests/onecc_066.test | 116 ++++++++++++++++++
9 files changed, 193 insertions(+), 3 deletions(-)
create mode 100644 compiler/one-cmds/tests/onecc_066.cfg
create mode 100644 compiler/one-cmds/tests/onecc_066.ini
create mode 100644 compiler/one-cmds/tests/onecc_066.py
create mode 100644 compiler/one-cmds/tests/onecc_066.test
diff --git a/compiler/one-cmds/dummy-driver/src/dummy-compile.cpp b/compiler/one-cmds/dummy-driver/src/dummy-compile.cpp
index caf3913ebd6..93b2ab543f3 100644
--- a/compiler/one-cmds/dummy-driver/src/dummy-compile.cpp
+++ b/compiler/one-cmds/dummy-driver/src/dummy-compile.cpp
@@ -17,6 +17,7 @@
/**
* dummy-compile only tests its interface rather than its functionality.
*
+ * ./dummy-compile -h
* ./dummy-compile -o ${OUTPUT_NAME} ${INPUT_NAME}
* ./dummy-compile --target {TARGET_NAME} -o ${OUTPUT_NAME} ${INPUT_NAME}
*
@@ -29,9 +30,19 @@
int main(int argc, char **argv)
{
- if (argc != 4 and argc != 6)
+ if (argc != 2 and argc != 4 and argc != 6)
return EXIT_FAILURE;
+ if (argc == 2)
+ {
+ std::string help_o{"-h"};
+ std::string argv_1{argv[1]};
+ if (help_o != argv_1)
+ return EXIT_FAILURE;
+
+ std::cout << "HELP MESSAGE!!" << std::endl;
+ return EXIT_SUCCESS;
+ }
if (argc == 4)
{
std::string opt_o{"-o"};
diff --git a/compiler/one-cmds/dummy-driver/src/dummy-compiler.cpp b/compiler/one-cmds/dummy-driver/src/dummy-compiler.cpp
index 1ecb813946e..6ca4226eb50 100644
--- a/compiler/one-cmds/dummy-driver/src/dummy-compiler.cpp
+++ b/compiler/one-cmds/dummy-driver/src/dummy-compiler.cpp
@@ -26,8 +26,18 @@
int main(int argc, char **argv)
{
- if (argc != 6)
+ if (argc != 2 and argc != 6)
return EXIT_FAILURE;
+ if (argc == 2)
+ {
+ std::string help_o{"-h"};
+ std::string argv_1{argv[1]};
+ if (help_o != argv_1)
+ return EXIT_FAILURE;
+
+ std::cout << "HELP MESSAGE!!" << std::endl;
+ return EXIT_SUCCESS;
+ }
std::string target_name{argv[2]};
std::string output_name{argv[5]};
diff --git a/compiler/one-cmds/one-codegen b/compiler/one-cmds/one-codegen
index 6fe61167994..677962341fe 100644
--- a/compiler/one-cmds/one-codegen
+++ b/compiler/one-cmds/one-codegen
@@ -220,6 +220,10 @@ def main():
if oneutils.is_valid_attr(cfg_args, 'command'):
given_backends = [given_backend]
setattr(cfg_args, given_backend, cfg_args.command)
+ # If "command" key exists with target option, command schema is not used. ${BACKEND}-compile will be run as before.
+ print(
+ "WARNING: 'command' key in the [one-codegen] will be deprecated as of September 1, 2025."
+ )
else:
# DO NOTHING
pass
diff --git a/compiler/one-cmds/one-profile b/compiler/one-cmds/one-profile
index 615246e2899..85517b56e38 100644
--- a/compiler/one-cmds/one-profile
+++ b/compiler/one-cmds/one-profile
@@ -255,6 +255,10 @@ def main():
if oneutils.is_valid_attr(cfg_args, 'command'):
given_backends = [given_backend]
setattr(cfg_args, given_backend, cfg_args.command)
+ # If "command" key exists with target option, command schema is not used. ${BACKEND}-profile will be run as before.
+ print(
+ "WARNING: 'command' key in the [one-profile] will be deprecated as of September 1, 2025."
+ )
else:
# DO NOTHING
pass
diff --git a/compiler/one-cmds/onelib/argumentparse.py b/compiler/one-cmds/onelib/argumentparse.py
index 0b12316bdfd..6f722ed1a84 100644
--- a/compiler/one-cmds/onelib/argumentparse.py
+++ b/compiler/one-cmds/onelib/argumentparse.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import ntpath
from types import SimpleNamespace
from typing import List, Tuple, Union, Type
import shutil
@@ -46,6 +47,20 @@ def __init__(self):
self.driver: str = None
self.target: str = None
+ def print_help(self):
+ backends_list = backends.get_list(self.driver)
+ driver_path = None
+ for cand in backends_list:
+ if ntpath.basename(cand) == self.driver:
+ driver_path = cand
+ if not driver_path:
+ driver_path = shutil.which(self.driver)
+
+ if not driver_path:
+ raise FileNotFoundError(self.driver + ' not found')
+
+ oneutils.run([driver_path, '-h'], err_prefix=self.driver)
+
def add_argument(self, *args, **kwargs):
if not 'action' in kwargs:
raise RuntimeError('"action" keyword argument is required')
@@ -80,6 +95,7 @@ def make_cmd(self, cfg_args: SimpleNamespace) -> List:
# use first driver
driver_path = driver_list[0]
cmd: List = [driver_path]
+ invalid_options = list(cfg_args.__dict__.keys())
# traverse the action in order and make commands
for action in self._actions:
arg, act, dtype = action
@@ -94,7 +110,6 @@ def make_cmd(self, cfg_args: SimpleNamespace) -> List:
if act == NormalOption:
if not oneutils.is_valid_attr(cfg_args, option_name):
- # TODO raise error when invalid option is given in the cfg file.
continue
if dtype == bool and getattr(cfg_args, option_name).lower() == "false":
continue
@@ -106,4 +121,8 @@ def make_cmd(self, cfg_args: SimpleNamespace) -> List:
assert act == NormalOption
if dtype == str:
cmd += [getattr(cfg_args, option_name)]
+ invalid_options.remove(option_name)
+ if len(invalid_options):
+ print(f'WARNING: there are invalid options {invalid_options}')
+ self.print_help()
return cmd
diff --git a/compiler/one-cmds/tests/onecc_066.cfg b/compiler/one-cmds/tests/onecc_066.cfg
new file mode 100644
index 00000000000..f62a32da9f6
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_066.cfg
@@ -0,0 +1,11 @@
+[onecc]
+one-codegen=True
+
+[backend]
+target=onecc_066
+
+[one-codegen]
+invalid_option=onecc_066
+verbose=True
+input=onecc_066.circle
+output=onecc_066.tvn
diff --git a/compiler/one-cmds/tests/onecc_066.ini b/compiler/one-cmds/tests/onecc_066.ini
new file mode 100644
index 00000000000..2da9372d24e
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_066.ini
@@ -0,0 +1,2 @@
+TARGET=onecc_066
+BACKEND=dummy
diff --git a/compiler/one-cmds/tests/onecc_066.py b/compiler/one-cmds/tests/onecc_066.py
new file mode 100644
index 00000000000..50cbc3b7d9a
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_066.py
@@ -0,0 +1,13 @@
+from onelib import argumentparse
+from onelib.argumentparse import DriverName, NormalOption, TargetOption
+
+
+def command_schema():
+ parser = argumentparse.ArgumentParser()
+ parser.add_argument("dummy-compiler", action=DriverName)
+ parser.add_argument("--target", action=TargetOption)
+ parser.add_argument("--verbose", action=NormalOption, dtype=bool)
+ parser.add_argument("input", action=NormalOption)
+ parser.add_argument("output", action=NormalOption)
+
+ return parser
diff --git a/compiler/one-cmds/tests/onecc_066.test b/compiler/one-cmds/tests/onecc_066.test
new file mode 100644
index 00000000000..355a04f009c
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_066.test
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+# Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Have invalid options in the [one-codegen]
+
+: '
+This test assumes below directories.
+
+[one hierarchy]
+ one
+ ├── backends
+ │ └── command
+ │ └── codegen
+ ├── bin
+ ├── doc
+ ├── include
+ ├── lib
+ ├── optimization
+ ├── target
+ └── test # pwd
+'
+
+BACKENDS_ALREADY_EXIST=true
+CMD_ALREADY_EXIST=true
+DUMMY_ALREADY_EXIST=true
+TARGET_ALREADY_EXIST=true
+
+BACKEND_NAME="dummy"
+
+filename_ext="$(basename -- $0)"
+filename="${filename_ext%.*}"
+
+driver_name="dummy-compiler"
+configfile="onecc_066.cfg"
+outputfile="onecc_066.tvn"
+targetfile="onecc_066.ini"
+
+clean_envir()
+{
+ rm -rf ../bin/${driver_name}
+ rm -rf ../target/${targetfile}
+ rm -rf "../backends/command/${BACKEND_NAME}/codegen.py"
+ if [ "$TARGET_ALREADY_EXIST" = false ]; then
+ rm -rf ../target/
+ fi
+ if [ "$DUMMY_ALREADY_EXIST" = false ]; then
+ rm -rf "../backends/command/${BACKEND_NAME}/"
+ fi
+ if [ "$CMD_ALREADY_EXIST" = false ]; then
+ rm -rf ../backends/command/
+ fi
+ if [ "$BACKENDS_ALREADY_EXIST" = false ]; then
+ rm -rf ../backends/
+ fi
+}
+
+trap_err_onexit()
+{
+ echo "${filename_ext} FAILED"
+ clean_envir
+ exit 255
+}
+
+trap trap_err_onexit ERR
+
+rm -f ${filename}.log
+rm -rf ${outputfile}
+
+if [ ! -d "../target/" ]; then
+ mkdir -p ../target/
+ TARGET_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/" ]; then
+ mkdir -p ../backends/
+ BACKENDS_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/command/" ]; then
+ mkdir -p ../backends/command/
+ CMD_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/command/${BACKEND_NAME}/" ]; then
+ mkdir -p ../backends/command/${BACKEND_NAME}/
+ DUMMY_ALREADY_EXIST=false
+fi
+
+# copy dummy tools to bin folder
+cp ${driver_name} ../bin/
+cp ${targetfile} ../target/
+cp onecc_066.py "../backends/command/${BACKEND_NAME}/codegen.py"
+
+# run test
+onecc -C ${configfile} > ${filename}.log 2>&1
+
+clean_envir
+
+if grep -q "${driver_name} with onecc_066 target" "${outputfile}"; then
+ if grep -q "HELP MESSAGE!!" "${filename}.log"; then
+ echo "${filename_ext} SUCCESS"
+ exit 0
+ fi
+fi
+
+trap_err_onexit
From 9715a18453ee88f583607b8d3020e77aabf4508f Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 09:45:48 +0900
Subject: [PATCH 04/16] [luci] Use auto & instead of auto (#13295)
This commit uses `auto &` instead of `auto` to resolve a static analysis warning.
ONE-DCO-1.0-Signed-off-by: seongwoo
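For context, a minimal self-contained illustration of what the analyzer
flags (not code from this patch):

  #include <string>
  #include <unordered_map>

  void visit(const std::unordered_map<std::string, int> &m)
  {
    // `for (auto it : m)` copies every key/value pair on each iteration;
    // `const auto &it` binds to the stored element and avoids the copy.
    for (const auto &it : m)
      (void)it.first;
  }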
---
compiler/luci/export/src/CircleExporterImpl.cpp | 2 +-
compiler/luci/export/src/CircleTensorExporter.cpp | 2 +-
compiler/luci/pass/src/CircleQuantizer.cpp | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/compiler/luci/export/src/CircleExporterImpl.cpp b/compiler/luci/export/src/CircleExporterImpl.cpp
index 40c697912ed..014ef45d71c 100644
--- a/compiler/luci/export/src/CircleExporterImpl.cpp
+++ b/compiler/luci/export/src/CircleExporterImpl.cpp
@@ -76,7 +76,7 @@ Offset<Vector<Offset<OperatorCode>>>
encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<luci::OpCode, uint32_t> &opcodes)
{
std::vector<Offset<OperatorCode>> operator_codes_vec(opcodes.size());
- for (auto it : opcodes)
+ for (const auto &it : opcodes)
{
uint32_t idx = it.second;
int8_t dep_code = 127; // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES
diff --git a/compiler/luci/export/src/CircleTensorExporter.cpp b/compiler/luci/export/src/CircleTensorExporter.cpp
index 4306529ea1b..0022a0e57e8 100644
--- a/compiler/luci/export/src/CircleTensorExporter.cpp
+++ b/compiler/luci/export/src/CircleTensorExporter.cpp
@@ -506,7 +506,7 @@ encodeSparsityParameters(FlatBufferBuilder &builder, luci::SparsityParam *sparsi
std::vector<flatbuffers::Offset<circle::DimensionMetadata>> dim_metadata_vec;
auto luci_dim_metadata = sparsityparam->dim_metadata;
- for (auto it : luci_dim_metadata)
+ for (const auto &it : luci_dim_metadata)
{
// array_segments
auto circle_array_segments = to_circle_sparse_index_vector(builder, it.array_segments());
diff --git a/compiler/luci/pass/src/CircleQuantizer.cpp b/compiler/luci/pass/src/CircleQuantizer.cpp
index 87db0a0452e..86ada1f182e 100644
--- a/compiler/luci/pass/src/CircleQuantizer.cpp
+++ b/compiler/luci/pass/src/CircleQuantizer.cpp
@@ -444,7 +444,7 @@ void CircleQuantizer::quantize(loco::Graph *g) const
}
// Clear existing quantparams before doing fake quantization
- for (auto node : loco::active_nodes(loco::output_nodes(g)))
+ for (auto &node : loco::active_nodes(loco::output_nodes(g)))
{
auto circle_node = loco::must_cast<luci::CircleNode *>(node);
if (circle_node->quantparam() != nullptr)
From 9c4ff7c45562a4866a87be6a7cf6b1f5401c28d9 Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 09:45:59 +0900
Subject: [PATCH 05/16] [one-cmds] Fix target option (#13296)
This commit fixes the target option so that it applies only to backend tools.
ONE-DCO-1.0-Signed-off-by: seongwoo
---
compiler/one-cmds/onelib/CfgRunner.py | 2 +-
compiler/one-cmds/tests/onecc_067.cfg | 18 ++++
compiler/one-cmds/tests/onecc_067.ini | 2 +
compiler/one-cmds/tests/onecc_067.py | 13 +++
compiler/one-cmds/tests/onecc_067.test | 114 +++++++++++++++++++++++++
5 files changed, 148 insertions(+), 1 deletion(-)
create mode 100644 compiler/one-cmds/tests/onecc_067.cfg
create mode 100644 compiler/one-cmds/tests/onecc_067.ini
create mode 100644 compiler/one-cmds/tests/onecc_067.py
create mode 100644 compiler/one-cmds/tests/onecc_067.test
diff --git a/compiler/one-cmds/onelib/CfgRunner.py b/compiler/one-cmds/onelib/CfgRunner.py
index 477565638b8..c676216ba47 100644
--- a/compiler/one-cmds/onelib/CfgRunner.py
+++ b/compiler/one-cmds/onelib/CfgRunner.py
@@ -118,7 +118,7 @@ def run(self, working_dir, verbose=False):
options.append('--verbose')
if (section == 'one-codegen' or section == 'one-profile') and self.backend:
options += ['-b', self.backend]
- if self.target:
+ if (section == 'one-codegen' or section == 'one-profile') and self.target:
options += ['-T', self.target]
driver_path = os.path.join(working_dir, section)
cmd = [driver_path] + options
diff --git a/compiler/one-cmds/tests/onecc_067.cfg b/compiler/one-cmds/tests/onecc_067.cfg
new file mode 100644
index 00000000000..97395fd9b52
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_067.cfg
@@ -0,0 +1,18 @@
+[onecc]
+one-import-tf=True
+one-codegen=True
+
+[backend]
+target=onecc_067
+
+[one-import-tf]
+input_path=inception_v3.pb
+output_path=inception_v3.onecc_067.circle
+input_arrays=input
+input_shapes=1,299,299,3
+output_arrays=InceptionV3/Predictions/Reshape_1
+converter_version=v2
+
+[one-codegen]
+o=onecc_067.tvn
+input=inception_v3.onecc_067.circle
diff --git a/compiler/one-cmds/tests/onecc_067.ini b/compiler/one-cmds/tests/onecc_067.ini
new file mode 100644
index 00000000000..4abeb6fe1b9
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_067.ini
@@ -0,0 +1,2 @@
+TARGET=onecc_067
+BACKEND=dummy
diff --git a/compiler/one-cmds/tests/onecc_067.py b/compiler/one-cmds/tests/onecc_067.py
new file mode 100644
index 00000000000..ba44e83f494
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_067.py
@@ -0,0 +1,13 @@
+from onelib import argumentparse
+from onelib.argumentparse import DriverName, NormalOption, TargetOption
+
+
+def command_schema():
+ parser = argumentparse.ArgumentParser()
+ parser.add_argument("dummy-compile", action=DriverName)
+ parser.add_argument("--target", action=TargetOption)
+ parser.add_argument("--DSP-quota", action=NormalOption)
+ parser.add_argument("-o", action=NormalOption)
+ parser.add_argument("input", action=NormalOption)
+
+ return parser
diff --git a/compiler/one-cmds/tests/onecc_067.test b/compiler/one-cmds/tests/onecc_067.test
new file mode 100644
index 00000000000..4ea173dcc13
--- /dev/null
+++ b/compiler/one-cmds/tests/onecc_067.test
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+# Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# use command schema of codegen
+
+: '
+This test assumes below directories.
+
+[one hierarchy]
+ one
+ ├── backends
+ │ └── command
+ │ └── dummy (backend name)
+ │ └── codegen.py
+ ├── bin
+ ├── doc
+ ├── include
+ ├── lib
+ ├── optimization
+ ├── target
+ └── test # pwd
+'
+
+BACKENDS_ALREADY_EXIST=true
+CMD_ALREADY_EXIST=true
+DUMMY_ALREADY_EXIST=true
+TARGET_ALREADY_EXIST=true
+
+BACKEND_NAME="dummy"
+
+filename_ext="$(basename -- $0)"
+filename="${filename_ext%.*}"
+
+configfile="onecc_067.cfg"
+outputfile="onecc_067.tvn"
+targetfile="onecc_067.ini"
+
+clean_envir()
+{
+ rm -rf "../bin/${BACKEND_NAME}-compile"
+ rm -rf ../target/${targetfile}
+ rm -rf "../backends/command/${BACKEND_NAME}/codegen.py"
+ if [ "$TARGET_ALREADY_EXIST" = false ]; then
+ rm -rf ../target/
+ fi
+ if [ "$DUMMY_ALREADY_EXIST" = false ]; then
+ rm -rf "../backends/command/${BACKEND_NAME}/"
+ fi
+ if [ "$CMD_ALREADY_EXIST" = false ]; then
+ rm -rf ../backends/command/
+ fi
+ if [ "$BACKENDS_ALREADY_EXIST" = false ]; then
+ rm -rf ../backends/
+ fi
+}
+
+trap_err_onexit()
+{
+ echo "${filename_ext} FAILED"
+ clean_envir
+ exit 255
+}
+
+trap trap_err_onexit ERR
+
+rm -f ${filename}.log
+rm -rf ${outputfile}
+
+if [ ! -d "../target/" ]; then
+ mkdir -p ../target/
+ TARGET_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/" ]; then
+ mkdir -p ../backends/
+ BACKENDS_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/command/" ]; then
+ mkdir -p ../backends/command/
+ CMD_ALREADY_EXIST=false
+fi
+if [ ! -d "../backends/command/${BACKEND_NAME}/" ]; then
+ mkdir -p ../backends/command/${BACKEND_NAME}/
+ DUMMY_ALREADY_EXIST=false
+fi
+
+# copy dummy tools to bin folder
+cp ${BACKEND_NAME}-compile ../bin/
+cp ${targetfile} ../target/
+cp onecc_067.py "../backends/command/${BACKEND_NAME}/codegen.py"
+
+# run test
+onecc -C ${configfile} > ${filename}.log 2>&1
+
+clean_envir
+
+if grep -q "dummy-compile with onecc_067 target" "${outputfile}"; then
+ echo "${filename_ext} SUCCESS"
+ exit 0
+fi
+
+trap_err_onexit
From 3f6b137c3c1d33d9f603daf6d1a6ec3cadde3b3d Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 09:47:47 +0900
Subject: [PATCH 06/16] [luci] Add nullptr check (#13294)
This commit adds a nullptr check to resolve a static analysis error.
ONE-DCO-1.0-Signed-off-by: seongwoo
---
compiler/luci/pass/src/QuantizeWithPredecessorPass.cpp | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/compiler/luci/pass/src/QuantizeWithPredecessorPass.cpp b/compiler/luci/pass/src/QuantizeWithPredecessorPass.cpp
index 51caf8a5d42..352653b3e58 100644
--- a/compiler/luci/pass/src/QuantizeWithPredecessorPass.cpp
+++ b/compiler/luci/pass/src/QuantizeWithPredecessorPass.cpp
@@ -177,7 +177,10 @@ struct QuantizeWithPredecessor final : public luci::CircleNodeMutableVisitor<bool>
From: Hyeongseok Oh
Date: Wed, 26 Jun 2024 11:01:10 +0900
Subject: [PATCH 07/16] [onert] Update status on quantize and codegen (#13281)
This commit updates the session state and model path after quantize and codegen because these APIs load the updated model.
It will help to support full quantization after minmax data collection.
ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
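A sketch of the session flow this change enables (hypothetical usage,
continuing from an existing session; minmax collection setup and error
handling are omitted):

  nnfw_load_model_from_file(session, "model.circle");
  nnfw_prepare(session);
  // ... run inference here to collect minmax data ...
  nnfw_quantize(session);  // state returns to MODEL_LOADED and the model
                           // path now points at the quantized model
  nnfw_prepare(session);   // prepare again, this time with the quantized model
  nnfw_run(session);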
---
runtime/onert/api/nnfw/src/nnfw_api_internal.cc | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/runtime/onert/api/nnfw/src/nnfw_api_internal.cc b/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
index c7c335af939..aa11e339fc6 100644
--- a/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
+++ b/runtime/onert/api/nnfw/src/nnfw_api_internal.cc
@@ -1755,8 +1755,9 @@ NNFW_STATUS nnfw_session::quantize()
auto model = loadModel(_quant_manager->exportModelPath(), "circle");
if (model == nullptr)
return NNFW_STATUS_ERROR;
- // TODO: Update _model_path if necessary
_nnpkg->replaceModel(std::move(model));
+ _state = State::MODEL_LOADED;
+ _model_path = _quant_manager->exportModelPath();
}
catch (const std::exception &e)
{
@@ -1862,8 +1863,9 @@ NNFW_STATUS nnfw_session::codegen(const char *target, NNFW_CODEGEN_PREF pref)
if (model == nullptr)
return NNFW_STATUS_ERROR;
- // TODO: Update _model_path if necessary
_nnpkg->replaceModel(std::move(model));
+ _state = State::MODEL_LOADED;
+ _model_path = export_model_path;
}
catch (const std::exception &e)
{
From bf494963784fd017112853d4cea7533c5c01b1aa Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Wed, 26 Jun 2024 13:45:09 +0900
Subject: [PATCH 08/16] [onert] Add static_cast in CircleExporter (#13299)
This commit adds static_cast to std::streamsize in CircleExporter.
ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
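The pattern in isolation (a generic sketch, not the exporter code itself):

  #include <fstream>
  #include <string>

  void write_all(std::ofstream &dst, const std::string &buf)
  {
    // std::ostream::write takes std::streamsize, a signed type; casting
    // explicitly avoids sign/width conversion warnings from size_t sources.
    dst.write(buf.data(), static_cast<std::streamsize>(buf.size()));
  }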
---
runtime/onert/core/src/exporter/CircleExporter.cc | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/runtime/onert/core/src/exporter/CircleExporter.cc b/runtime/onert/core/src/exporter/CircleExporter.cc
index ac6fdc565f9..b9ac8d5bb65 100644
--- a/runtime/onert/core/src/exporter/CircleExporter.cc
+++ b/runtime/onert/core/src/exporter/CircleExporter.cc
@@ -41,7 +41,7 @@ CircleExporter::CircleExporter(const std::string &source, const std::string &pat
src.seekg(0, std::ios::end);
_data.resize(src.tellg());
src.seekg(0, std::ios::beg);
- src.read(&_data[0], _data.size());
+ src.read(&_data[0], static_cast<std::streamsize>(_data.size()));
src.close();
}
@@ -145,7 +145,8 @@ void CircleExporter::finish()
builder.Finish(::circle::Model::Pack(builder, _model.get()), ::circle::ModelIdentifier());
std::ofstream dst(_path.c_str(), std::ios::binary);
- dst.write(reinterpret_cast<const char *>(builder.GetBufferPointer()), builder.GetSize());
+ dst.write(reinterpret_cast<const char *>(builder.GetBufferPointer()),
+ static_cast<std::streamsize>(builder.GetSize()));
dst.close();
}
} // namespace exporter
From 1fce792a6888f766e12f778dc4d35c5ff2b8f6e2 Mon Sep 17 00:00:00 2001
From: SeungHui Youn <61981457+zetwhite@users.noreply.github.com>
Date: Wed, 26 Jun 2024 14:04:50 +0900
Subject: [PATCH 09/16] [tools/circle_plus_gen] support fine-tuning feature
(#13261)
This PR supports the fineTuning field in the input json file.
ONE-DCO-1.0-Signed-off-by: seunghui youn
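For reference, the fineTuning value maps onto the trainableOps list as
sketched below, a C++ rendering of the Python logic added in
lib/json_parser.py (illustration only):

  #include <numeric>
  #include <stdexcept>
  #include <vector>

  // fineTuning semantics: 0 trains every operator, -1 trains none,
  // and n > 0 trains only the last n operators of the model.
  std::vector<int> trainable_ops(int fine_tuning, int num_op)
  {
    if (fine_tuning == -1)
      return {};            // inference only
    if (fine_tuning == 0)
      fine_tuning = num_op; // full training
    if (fine_tuning < 0 || fine_tuning > num_op)
      throw std::invalid_argument("unsupported fineTuning value");
    std::vector<int> ops(fine_tuning); // indices of the last n operators
    std::iota(ops.begin(), ops.end(), num_op - fine_tuning);
    return ops;
  }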
---
tools/circle_plus_gen/README.md | 9 ++++--
.../example/tparam_sgd_scce.json | 3 +-
.../circle_plus_gen/example/train_tparam.json | 3 +-
.../how-to-write-tparam-json.md | 14 +++++++--
tools/circle_plus_gen/lib/json_parser.py | 22 ++++++++++++-
tools/circle_plus_gen/lib/train_param.py | 31 +++++++++++++++++--
tools/circle_plus_gen/main.py | 6 ++--
7 files changed, 76 insertions(+), 12 deletions(-)
diff --git a/tools/circle_plus_gen/README.md b/tools/circle_plus_gen/README.md
index 111aa376a7f..e79387dfc8c 100644
--- a/tools/circle_plus_gen/README.md
+++ b/tools/circle_plus_gen/README.md
@@ -49,7 +49,8 @@ cat example/train_tparam.json
# "reduction": "sum over batch size"
# }
# },
-# "batchSize": 32
+# "batchSize": 32,
+# "fineFuning": 0
# }
```
@@ -67,7 +68,8 @@ python3 main.py example/sample.circle example/train_tparam.json out.circle
# "args": {
# ...
# },
-# "batchSize": 32
+# "batchSize": 32,
+# "fineFuning": 0
# }
# successfully added hyperparameters to the circle file
# saved in out.circle
@@ -110,7 +112,8 @@ python3 main.py example/sample_tparam.circle
# "reduction": "SumOverBatchSize"
# }
# },
-# "batchSize": 64
+# "batchSize": 64,
+# "fineTuning": -1
# }
```
diff --git a/tools/circle_plus_gen/example/tparam_sgd_scce.json b/tools/circle_plus_gen/example/tparam_sgd_scce.json
index 9e0394574ab..34f511297d8 100644
--- a/tools/circle_plus_gen/example/tparam_sgd_scce.json
+++ b/tools/circle_plus_gen/example/tparam_sgd_scce.json
@@ -12,5 +12,6 @@
"reduction": "sum over batch size"
}
},
- "batchSize": 64
+ "batchSize": 64,
+ "fineFuning" : 2
}
diff --git a/tools/circle_plus_gen/example/train_tparam.json b/tools/circle_plus_gen/example/train_tparam.json
index f8eea0c4e38..60c64b21955 100644
--- a/tools/circle_plus_gen/example/train_tparam.json
+++ b/tools/circle_plus_gen/example/train_tparam.json
@@ -15,5 +15,6 @@
"reduction": "sum over batch size"
}
},
- "batchSize": 32
+ "batchSize": 32,
+ "fineFuning": 0
}
diff --git a/tools/circle_plus_gen/how-to-write-tparam-json.md b/tools/circle_plus_gen/how-to-write-tparam-json.md
index 204ea4355ee..9fd79c43240 100644
--- a/tools/circle_plus_gen/how-to-write-tparam-json.md
+++ b/tools/circle_plus_gen/how-to-write-tparam-json.md
@@ -9,14 +9,15 @@ The json file consists of **a single JSON object** containing the following keys
{
"optimizer" : {...},
"loss" : {...},
- "batchSize" : 32,
+ "batchSize" : 32,
+ "fineTuning" : 0,
}
```
- "optimizer" : refer [optimizer](#optimizer) for writing corresponding value
- "loss" : refer [loss](#loss) for writing corresponding value
- "batchSize" : a number of examples processeed during each iteration
-
+- "fineTuning" : refer [fineTuning] for writing corresponding value
## optimizer
@@ -82,3 +83,12 @@ An object describing the loss function. This should include two keys :
* sparse categorical crossentropy
* categorical crossentropy
* mean squared error
+
+## fineTuning
+
+An integer value describing how many operations will be trained.
+
+Also, 0 and -1 can be used to indicate the following meanings:
+
+* 0 : entire model will be trained
+* -1 : entire model will NOT be trained. This can be used for inference
diff --git a/tools/circle_plus_gen/lib/json_parser.py b/tools/circle_plus_gen/lib/json_parser.py
index 7f8c2222109..8c2c1ebd687 100644
--- a/tools/circle_plus_gen/lib/json_parser.py
+++ b/tools/circle_plus_gen/lib/json_parser.py
@@ -1,5 +1,5 @@
import re
-from typing import Tuple
+from typing import Tuple, List, Union
from lib import utils
from schema import circle_traininfo_generated as ctr_gen
@@ -148,3 +148,23 @@ def load_loss_reduction(s: str):
raise ValueError(f"not supported loss.args.reduction={s}")
return type
+
+
+def load_fine_tuning(n: int, num_op: int) -> List[int]:
+ '''Return [int] to set ModelTrainingT.trainableOps
+ '''
+ if n == 0: # full training
+ return list(range(0, num_op))
+
+ elif n == -1: # not training
+ return []
+
+ elif 0 < n and n <= num_op: # fine tuning
+ start_idx = num_op - n
+ return list(range(start_idx, num_op))
+
+ elif n > num_op:
+ raise ValueError(f"number of operators({num_op}) < fine_tuning({n})")
+
+ else:
+ raise ValueError(f"not supported train {s}")
diff --git a/tools/circle_plus_gen/lib/train_param.py b/tools/circle_plus_gen/lib/train_param.py
index e70887de2c6..092122e0653 100644
--- a/tools/circle_plus_gen/lib/train_param.py
+++ b/tools/circle_plus_gen/lib/train_param.py
@@ -27,7 +27,7 @@ def to_buff(self):
return builder.Output()
@classmethod
- def from_json(cls, json_file: str):
+ def from_json(cls, json_file: str, num_op: int):
'''Create TrainInfo from json file'''
with open(json_file, 'rt') as f:
json_obj = json.load(f)
@@ -52,11 +52,21 @@ def from_json(cls, json_file: str):
if "reduction" in json_obj["loss"].keys():
tparam.lossReductionType = load_loss_reduction(json_obj["loss"]["reduction"])
+ # load fine_tuning
+ fine_tuning = 0 # if not given, full training is default
+ if "fineTuning" in json_obj:
+ fine_tuning = json_obj["fineTuning"]
+ try:
+ tparam.trainableOps = load_fine_tuning(fine_tuning, num_op)
+ except ValueError as e:
+ print(e)
+ raise (f"failed to parse \'fineTuning\'")
+
new_tparam = cls()
new_tparam.train_param = tparam
return new_tparam
- def dump_as_json(self) -> str:
+ def dump_as_json(self, num_op: int) -> str:
'''Return JSON formatted string'''
tparam = self.train_param
name_opt = OptimizerNamer()
@@ -75,4 +85,21 @@ def dump_as_json(self) -> str:
json_form["loss"]["args"]["reduction"] = name_rdt(tparam.lossReductionType)[0]
json_form["batchSize"] = tparam.batchSize
+ ft = []
+ if tparam.trainableOps != None:
+ ft = list(tparam.trainableOps)
+ num_ft = len(ft)
+
+ if num_ft == 0:
+ json_form["fineTuning"] = -1
+
+ elif ft == list(range(0, num_op)):
+ json_form["fineTuning"] = 0
+
+ elif num_ft < num_op and list(range(num_op - num_ft, num_op)) == ft:
+ json_form["fineTuning"] = num_ft
+
+ else:
+ raise ValueError(f"fail to dump fineTuning{ft}")
+
return json.dumps(json_form, indent=4)
diff --git a/tools/circle_plus_gen/main.py b/tools/circle_plus_gen/main.py
index 45f0d04628e..82683ee278c 100644
--- a/tools/circle_plus_gen/main.py
+++ b/tools/circle_plus_gen/main.py
@@ -28,11 +28,12 @@ def print_training_hparameters(circle_model: CirclePlus):
if circle_model has training hyperparameters, print it out
'''
tinfo: typing.Union[TrainParam, None] = circle_model.get_train_param()
+ num_op = circle_model.get_number_of_operators()
if tinfo == None:
print("No hyperparameters")
else:
- print(tinfo.dump_as_json())
+ print(tinfo.dump_as_json(num_op))
# TODO print list of trainable operators
@@ -44,8 +45,9 @@ def inject_hparams(in_file, hparams_file, out_file=None) -> None:
if out_file is None:
out_file = in_file
- tparams: TrainParam = TrainParam.from_json(hparams_file)
circle_model: CirclePlus = CirclePlus.from_file(in_file)
+ num_op = circle_model.get_number_of_operators()
+ tparams: TrainParam = TrainParam.from_json(hparams_file, num_op)
circle_model.set_train_param(tparams)
print("succesfully add hyperparameters to the circle file")
From c0febf1b1c3a9c81a853f287e2e99f4b93858a77 Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 15:25:45 +0900
Subject: [PATCH 10/16] [ahub] Fix tc checker config (#13301)
This commit fixes tc checker config file.
ONE-DCO-1.0-Signed-off-by: seongwoo
---
.ahub/tcchecker-tca/config.yaml | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/.ahub/tcchecker-tca/config.yaml b/.ahub/tcchecker-tca/config.yaml
index 12fbabefd04..fac28216bfc 100644
--- a/.ahub/tcchecker-tca/config.yaml
+++ b/.ahub/tcchecker-tca/config.yaml
@@ -12,10 +12,12 @@ test:
- /tests/nnfw_api
testFile:
- - extension: test.cpp
- any: true
- - extension: test.cc
- any: true
+ - extension: cpp
+ ends:
+ - .test
+ - extension: cc
+ ends:
+ - .test
testCase:
- condition:
- functionName:
From 5a10842bac1517d54c13b93e451945d5d8ca8949 Mon Sep 17 00:00:00 2001
From: seongwoo chae
Date: Wed, 26 Jun 2024 15:40:31 +0900
Subject: [PATCH 11/16] [one-cmds] Add comments about command schema (#13298)
This commit adds comments about command schema.
ONE-DCO-1.0-Signed-off-by: seongwoo
---
compiler/one-cmds/onelib/argumentparse.py | 64 +++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/compiler/one-cmds/onelib/argumentparse.py b/compiler/one-cmds/onelib/argumentparse.py
index 6f722ed1a84..0927030c751 100644
--- a/compiler/one-cmds/onelib/argumentparse.py
+++ b/compiler/one-cmds/onelib/argumentparse.py
@@ -13,6 +13,70 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+This is for the command schema feature.
+
+_one-cmds_ has lots of tools such as one-import, one-optimize, etc.
+They have their own section in the configuration file and users can
+ give arguments with key-value pairs.
+
+But, backend tools such as one-codegen and one-profile don't have the same
+ mechanism. Rather, they should pass all the arguments with the `command` key
+ because _onecc_ can't know the backends' interface in advance.
+
+The command schema has been introduced for resolving these difficulties.
+If users provide _onecc_ with the command schema that describes the interface
+ of the backend, users can give arguments with key-value pairs like other tools.
+
+NOTE. The command schema feature works only when the target option is given.
+
+[AS-IS]
+
+# example.cfg
+[backend]
+target=my_target
+
+[one-codegen]
+backend=my_backend
+command=--output sample.tvn sample.circle
+
+[TO-BE]
+
+# /usr/share/one/backends/command/my_backend/codegen.py
+from onelib import argumentparse
+from onelib.argumentparse import DriverName, NormalOption, TargetOption
+
+
+def command_schema():
+ parser = argumentparse.ArgumentParser()
+ parser.add_argument("my_backend-compile", action=DriverName)
+ parser.add_argument("--output", action=NormalOption)
+ parser.add_argument("input", action=NormalOption)
+
+ return parser
+
+# /usr/share/one/target/my_target.ini
+TARGET=my_target
+BACKEND=my_backend
+
+# example.cfg
+[one-codegen]
+output=sample.tvn
+input=sample.circle
+
+
+---
+
+A command schema file should define a `command_schema` function. You can add
+ arguments by calling `add_argument`, specifying an action according to
+ the option category.
+
+[Action List]
+- DriverName: the name of backend driver
+- TargetOption: the target option of the driver.
+- NormalOption: the option of the driver. Starting with dash('-') implies the option
+ is optional rather than a positional one.
+"""
import ntpath
from types import SimpleNamespace
From 75ec6d8051b649145c0f1d7b5f9e95c446440a4c Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Wed, 26 Jun 2024 16:44:55 +0900
Subject: [PATCH 12/16] [onert] Add overflow check in MinMaxReader (#13297)
This commit adds num_run, num_op, and num_input overflow checks in MinMaxReader.
ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
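The guard in isolation (a generic sketch of the check the hunks below apply
to each count; the int64_t limit follows from data_size being int64_t):

  #include <cstdint>
  #include <limits>
  #include <stdexcept>

  // Reject a record count when count * record_size would overflow int64_t,
  // because the reader multiplies them later to compute file offsets.
  inline void check_no_overflow(uint32_t count, int64_t record_size)
  {
    if (count > std::numeric_limits<int64_t>::max() / record_size)
      throw std::runtime_error("minmax record count overflow");
  }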
---
runtime/onert/odc/MinMaxReader.cc | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/runtime/onert/odc/MinMaxReader.cc b/runtime/onert/odc/MinMaxReader.cc
index 65eead8f076..c9c92148eb5 100644
--- a/runtime/onert/odc/MinMaxReader.cc
+++ b/runtime/onert/odc/MinMaxReader.cc
@@ -18,6 +18,7 @@
#include <cstdio>
#include <stdexcept>
+#include <limits>
namespace
{
@@ -99,6 +100,10 @@ MinMaxVectors MinMaxReader::readOP(uint32_t model_idx, uint32_t subg_idx, uint32
float minmax[2];
const int64_t data_size = sizeof(float) * 2 + sizeof(uint32_t) * 3;
+ // Check num_run overflow
+ if (num_run > std::numeric_limits<int64_t>::max() / data_size)
+ throw std::runtime_error("num_run overflow");
+
for (uint32_t r = 0; r < num_run; ++r)
{
// Read num of operations and num of inputs
@@ -107,6 +112,13 @@ MinMaxVectors MinMaxReader::readOP(uint32_t model_idx, uint32_t subg_idx, uint32
uint32_t num_input = 0;
readMMFile(&num_input, sizeof(uint32_t), 1, file, "Cannot read num of inputs");
+ // Check num_op overflow
+ if (num_op > std::numeric_limits<int64_t>::max() / data_size / num_run)
+ throw std::runtime_error("num_op overflow");
+ // Check num_input overflow
+ if (num_input > std::numeric_limits<int64_t>::max() / data_size / num_run)
+ throw std::runtime_error("num_input overflow");
+
// Find operation
for (uint32_t i = 0; i < num_op; ++i)
{
@@ -165,6 +177,10 @@ MinMaxVectors MinMaxReader::readInput(uint32_t model_idx, uint32_t subg_idx,
float minmax[2];
const int64_t data_size = sizeof(float) * 2 + sizeof(uint32_t) * 3;
+ // Check num_run overflow
+ if (num_run > std::numeric_limits<int64_t>::max() / data_size)
+ throw std::runtime_error("num_run overflow");
+
for (uint32_t r = 0; r < num_run; ++r)
{
// Read num of operations and num of inputs
@@ -173,6 +189,13 @@ MinMaxVectors MinMaxReader::readInput(uint32_t model_idx, uint32_t subg_idx,
uint32_t num_input = 0;
readMMFile(&num_input, sizeof(uint32_t), 1, file, "Cannot read num of inputs");
+ // Check num_op overflow
+ if (num_op > std::numeric_limits<int64_t>::max() / data_size / num_run)
+ throw std::runtime_error("num_op overflow");
+ // Check num_input overflow
+ if (num_input > std::numeric_limits<int64_t>::max() / data_size / num_run)
+ throw std::runtime_error("num_input overflow");
+
// Skip operation minmax data
seekMMFile(file, static_cast<long>(data_size * num_op), SEEK_CUR,
"Cannot skip operation minmax data");
From 7ef85d0622161abf30af203d6fe5d8d8e3bb2d72 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Wed, 26 Jun 2024 16:48:14 +0900
Subject: [PATCH 13/16] [infra] Fix compiler TC checker config (#13302)
This commit fixes compiler TC checker config.
ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
---
.ahub/tcchecker-tca/config.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.ahub/tcchecker-tca/config.yaml b/.ahub/tcchecker-tca/config.yaml
index fac28216bfc..ecae5f5a3ba 100644
--- a/.ahub/tcchecker-tca/config.yaml
+++ b/.ahub/tcchecker-tca/config.yaml
@@ -138,9 +138,9 @@ test:
- /compiler/vconone
testFile:
- - extension: .test.cpp
- any: true
-
+ - extension: cpp
+ ends:
+ - .test
testCase:
- condition:
- functionName:
From 8c60ef7b407b37f46b2da6bdd0149211048d0b81 Mon Sep 17 00:00:00 2001
From: chunseoklee
Date: Wed, 26 Jun 2024 16:49:07 +0900
Subject: [PATCH 14/16] onert-micro-dev (#13286)
- onert-micro API and its implementation
ONE-DCO-1.0-Signed-off-by: Chunseok Lee
---
onert-micro/onert-micro/include/onert-micro.h | 359 ++++++++++++++
onert-micro/onert-micro/src/CMakeLists.txt | 6 +
.../onert-micro/src/api/CMakeLists.txt | 14 +
.../onert-micro/src/api/onert-micro.cpp | 436 ++++++++++++++++++
4 files changed, 815 insertions(+)
create mode 100644 onert-micro/onert-micro/include/onert-micro.h
create mode 100644 onert-micro/onert-micro/src/api/CMakeLists.txt
create mode 100644 onert-micro/onert-micro/src/api/onert-micro.cpp
diff --git a/onert-micro/onert-micro/include/onert-micro.h b/onert-micro/onert-micro/include/onert-micro.h
new file mode 100644
index 00000000000..e340e817561
--- /dev/null
+++ b/onert-micro/onert-micro/include/onert-micro.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ONERT_MICRO_H_
+#define _ONERT_MICRO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * typical training flow in onert-micro
+ *
+ * 1. load model or checkpoint
+ * 1-1. (optional) configure training options
+ * 2. feed training input / output (e.g. label) data (the unit of one step)
+ * 3. train a step
+ * 4. check loss
+ * 4-0. save checkpoint for recovery/resume training
+ * 4-1. no more training -> go to 5
+ * 4-2. more training -> go to 2
+ * 5. save current state to inference model
+ * 6. inference with inference model
+// sample usage
+// 0. create context
+nnfw_session *session;
+nnfw_create_session(&session);
+// 1. load model (and checkpoint if continue training)
+nnfw_load_model_from_file(session, MODEL_PATH);
+// 1-1. (optional, TBD) configure training options
+// (import a checkpoint if resuming training)
+nnfw_train_import_checkpoint(session, CKPT_PATH);
+nnfw_train_prepare(session);
+float training_input[BATCH_SIZE*INPUT_SIZE];
+float training_label[BATCH_SIZE*OUTPUT_SIZE];
+// main training loop
+for(int epoch=0; epoch < NUM_EPOCHS; epoch++) {
+ for(int step=0; step < NUM_BATCHES ; step++) {
+ // prepare this step's input/label
+ memcpy(training_input, train_input_data + THIS_BATCH_OFFSET, BATCH_SIZE*INPUT_SIZE*sizeof(float));
+ memcpy(training_label, train_output_data + THIS_BATCH_OFFSET, BATCH_SIZE*OUTPUT_SIZE*sizeof(float));
+ // 2. feed training input / expected output
+ nnfw_train_set_input(session, 0, training_input, NULL);
+ nnfw_train_set_expected(session, 0, training_label, NULL);
+ // 3. train a step
+ nnfw_train(session);
+ }
+ // 4. check loss
+ float loss;
+ nnfw_train_get_loss(session, 0, &loss);
+ if(loss > TARGET_LOSS) {
+ nnfw_train_export_checkpoint(session, CKPT_PATH);
+ }
+ else {
+ nnfw_train_export_circle(session, CIRCLE_PATH);
+ }
+}
+*/
+
+typedef struct nnfw_session nnfw_session;
+
+typedef enum
+{
+ /** A tensor of 32 bit floating point */
+ NNFW_TYPE_TENSOR_FLOAT32 = 0,
+ /** A tensor of 32 bit signed integer */
+ NNFW_TYPE_TENSOR_INT32 = 1,
+} NNFW_TYPE;
+
+/**
+ * @brief Result values returned from a call to an API function
+ */
+typedef enum
+{
+ /** Successful */
+ NNFW_STATUS_NO_ERROR = 0,
+ /**
+ * An error code for general use.
+ * Mostly used when there is no specific value for that certain situation.
+ */
+ NNFW_STATUS_ERROR = 1,
+ /** Unexpected null argument is given. */
+ NNFW_STATUS_UNEXPECTED_NULL = 2,
+ /** When a function was called but it is not valid for the current session state. */
+ NNFW_STATUS_INVALID_STATE = 3,
+ /** When it is out of memory */
+ NNFW_STATUS_OUT_OF_MEMORY = 4,
+ /** When it was given an insufficient output buffer */
+ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE = 5,
+ /** When API is deprecated */
+ NNFW_STATUS_DEPRECATED_API = 6,
+} NNFW_STATUS;
+
+/**
+ * @brief Maximum rank expressible with nnfw
+ */
+#define NNFW_MAX_RANK (6)
+
+/**
+ * @brief tensor info describes the type and shape of tensors
+ *
+ * This structure is used to describe input and output tensors.
+ * Application can get input and output tensor type and shape described in model by using
+ * {@link nnfw_input_tensorinfo} and {@link nnfw_output_tensorinfo}
+ *
+ * Maximum rank is 6 (NNFW_MAX_RANK), and the tensor's dimension values are filled in the 'dims'
+ * field from index 0.
+ * For example, if tensor's rank is 4,
+ * application can get dimension value from dims[0], dims[1], dims[2], and dims[3]
+ */
+typedef struct nnfw_tensorinfo
+{
+ /** The data type */
+ NNFW_TYPE dtype;
+ /** The number of dimensions (rank) */
+ int32_t rank;
+ /**
+ * The dimension of tensor.
+ * Maximum rank is 6 (NNFW_MAX_RANK).
+ */
+ int32_t dims[NNFW_MAX_RANK];
+} nnfw_tensorinfo;
+
+//////////////////////////////////////////////
+// Essential APIs for training
+//////////////////////////////////////////////
+typedef enum
+{
+ NNFW_TRAIN_LOSS_UNDEFINED = 0,
+ NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR = 1,
+ NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY = 2,
+} NNFW_TRAIN_LOSS;
+
+typedef enum
+{
+ /** Undefined */
+ NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED = 0,
+ /** Scalar sum divided by number of elements in losses */
+ NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE = 1,
+ /** Scalar sum of weighted losses */
+ NNFW_TRAIN_LOSS_REDUCTION_SUM = 2,
+} NNFW_TRAIN_LOSS_REDUCTION;
+
+typedef enum
+{
+ NNFW_TRAIN_OPTIMIZER_UNDEFINED = 0,
+ NNFW_TRAIN_OPTIMIZER_SGD = 1,
+ NNFW_TRAIN_OPTIMIZER_ADAM = 2,
+} NNFW_TRAIN_OPTIMIZER;
+
+typedef struct nnfw_loss_info
+{
+ NNFW_TRAIN_LOSS loss;
+ NNFW_TRAIN_LOSS_REDUCTION reduction_type;
+} nnfw_loss_info;
+
+typedef struct nnfw_adam_option
+{
+ float beta;
+ float beta2;
+ float epsilon;
+} nnfw_adam_option;
+
+/**
+ * @brief Maximum number of trainable operations
+ */
+#define NNFW_TRAINABLE_OPS_MAX_SIZE (256)
+
+/**
+ * @brief Training information to prepare training
+ * @todo Add more training information
+ * (e.g. optimizer, loss function, ...)
+ */
+typedef struct nnfw_train_info
+{
+ /** Learning rate */
+ float learning_rate = 0.001f;
+ /** Batch size */
+ uint32_t batch_size = 1;
+ /** loss info */
+ nnfw_loss_info loss_info{.loss = NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY,
+ .reduction_type = NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE};
+ /** optimizer type */
+ NNFW_TRAIN_OPTIMIZER opt = NNFW_TRAIN_OPTIMIZER_ADAM;
+
+ uint32_t num_trainable_ops = 0;
+
+ nnfw_adam_option adam_opt{.beta = 0.9f,
+ .beta2 = 0.999f,
+ .epsilon = 1e-7f};
+} nnfw_train_info;
+
+/**
+ * @brief Set training information
+ * @note This function should be called after calling {@link nnfw_load_model_from_file}
+ * and before calling {@link nnfw_train_prepare}
+ *
+ * @param[in] session The session to be set training information
+ * @param[in] info The training information
+ *
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info *info);
+
+/**
+ * @brief Create a new session instance.
+ *
+ * This only creates a session.
+ * Model is loaded after {@link nnfw_load_model_from_file} is invoked.
+ * And inference is performed after {@link nnfw_run} is invoked.
+ *
+ * {@link nnfw_close_session} should be called once
+ * if session is no longer needed
+ *
+ * @param[out] session The session to be created
+ * @return NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_create_session(nnfw_session **session);
+
+/**
+ * @brief Close a session instance
+ *
+ * After it is called, any application access to the closed session is invalid
+ *
+ * @param[in] session The session to be closed
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_close_session(nnfw_session *session);
+
+/**
+ * @brief Load model from nnpackage file or directory
+ *
+ * The length of \p package_file_path must not exceed 1024 bytes including zero at the end.
+ *
+ * @param[in] session nnfw_session loading the given nnpackage file/dir
+ * @param[in] package_file_path Path to the nnpackage file or unzipped directory to be loaded
+ *
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path);
+
+/**
+ * @brief Prepare session to be ready for training
+ * @note The session will be entered into training mode
+ *
+ * If training info is NOT set in session, this function returns @c NNFW_STATUS_ERROR .
+ * You should set training info using {@link nnfw_train_set_traininfo}.
+ *
+ * @param[in] session The session to be prepared for training
+ *
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_prepare(nnfw_session *session);
+
+/**
+ * @brief Train the model
+ * @note This function should be called after {@link nnfw_train_set_input} and
+ * {@link nnfw_train_set_expected} for each input and expected output
+ *
+ * To call this function with \p update_weights set to false, it should be called after
+ * {@link nnfw_train_set_output}.
+ *
+ * @param[in] session The session to be trained
+ * @param[in] update_weights If true, update weights of the model
+ * If false, do not update weights of the model (for validation)
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights);
+
+/**
+ * @brief Export current training model into circle model
+ * @note This function should be called in training mode
+ * This function should be called after {@link nnfw_train}
+ *
+ * @param[in] session The session to export inference model
+ * @param[in] path The path to export inference model
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path);
+
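+/**
+ * @brief Export the current training state to a checkpoint file
+ *
+ * @param[in] session The session whose state is exported
+ * @param[in] path The path to the checkpoint file
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */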
+NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path);
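+
+/**
+ * @brief Import a training state from a checkpoint file
+ *
+ * @param[in] session The session into which the state is imported
+ * @param[in] path The path to the checkpoint file
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */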
+NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path);
+
+/**
+ * @brief Set training input
+ * @note This function should be called after {@link nnfw_train_prepare}
+ *
+ * @param[in] session The session to be set training inputs and expected model outputs
+ * @param[in] index The index of training input
+ * @param[in] input The input buffers for training
+ * @param[in] input_info The shape and type of input buffer
+ * If it is nullptr, it will not change shape and batch size
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_set_input(nnfw_session *session, uint32_t index, void *input,
+ const nnfw_tensorinfo *input_info);
+
+/**
+ * @brief Set training expected output
+ * @note This function should be called after {@link nnfw_train_prepare}
+ *
+ * @param session The session to be set training inputs and expected model outputs
+ * @param index The index of training expected output
+ * @param expected The expected buffers for training
+ * @param expected_info The shape and type of expected buffer
+ * If it is nullptr, it will not change shape and batch size
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_set_expected(nnfw_session *session, uint32_t index, void *expected,
+ const nnfw_tensorinfo *expected_info);
+
+/**
+ * @brief Get loss value for expected output
+ * @note This function should be called after {@link nnfw_train}
+ *
+ * @param[in] session The session to get loss value
+ * @param[in] index The index of loss value [0, number of expected outputs)
+ * @param[out] loss The loss value
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss);
+
+/**
+ * @brief Set training output buffer
+ *
+ * This function must be called after {@link nnfw_train_prepare}. The \p buffer given to this
+ * function can be reused across training runs. \p length must be greater than or equal to what
+ * the operand requires. An output operand can have an unspecified shape that is deduced
+ * dynamically during execution, so \p buffer must be provided large enough.
+ *
+ * @param[in] session Session from which the output is to be extracted
+ * @param[in] index Index of output to be set (0-indexed)
+ * @param[in] type Type of the output
+ * @param[out] buffer Raw buffer for output
+ * @param[in] length Size in bytes of the output buffer
+ *
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+NNFW_STATUS nnfw_train_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type,
+ void *buffer, size_t length);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_ONERT_MICRO_H_
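Putting the declarations above together, configuring a session before training
would look roughly like the following. This is a minimal C++ sketch; the model
path and parameter values are illustrative assumptions, and error handling is
elided:

nnfw_session *session = NULL;
nnfw_create_session(&session);
nnfw_load_model_from_file(session, "model.circle"); // hypothetical path

nnfw_train_info info; // defaults: Adam, categorical cross-entropy, batch_size = 1
info.learning_rate = 0.01f;
info.batch_size = 32;
info.opt = NNFW_TRAIN_OPTIMIZER_SGD;
nnfw_train_set_traininfo(session, &info);

nnfw_train_prepare(session);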
diff --git a/onert-micro/onert-micro/src/CMakeLists.txt b/onert-micro/onert-micro/src/CMakeLists.txt
index 903e93e7e5a..6c2d7a8a733 100644
--- a/onert-micro/onert-micro/src/CMakeLists.txt
+++ b/onert-micro/onert-micro/src/CMakeLists.txt
@@ -13,6 +13,8 @@ set(OM_INCLUDE_OPTIMIZE_DIR "${OM_INCLUDE_DIR}/optimize")
#define train path
set(OM_SOURCE_TRAIN_DIR "${OM_SOURCE_DIR}/train")
set(OM_INCLUDE_TRAIN_DIR "${OM_INCLUDE_DIR}/train")
+#define dev path
+set(OM_SOURCE_DEV_DIR "${OM_SOURCE_DIR}/api")
#OM_Interpreter lib binary name
set(OM_INTERPRETER_LIB "onert_micro_interpreter")
@@ -30,6 +32,8 @@ set(OM_OPTIMIZE_LIB "onert_micro_optimize${OM_SUFFIX}")
set(OM_PAL_LIB "onert_micro_pal${OM_SUFFIX}")
#Train lib binary name
set(OM_TRAIN_LIB "onert_micro_train${OM_SUFFIX}")
+#dev name
+set(OM_DEV_LIB "onert_micro_dev${OM_SUFFIX}")
message(STATUS "ONERT MICRO BEGIN")
@@ -41,6 +45,8 @@ add_subdirectory(${OM_SOURCE_IMPORT_DIR})
add_subdirectory(${OM_SOURCE_EXECUTE_DIR})
#build optimize lib
add_subdirectory(${OM_SOURCE_OPTIMIZE_DIR})
+#build dev lib
+add_subdirectory(${OM_SOURCE_DEV_DIR})
target_link_libraries(${OM_CORE_LIB} PUBLIC ${OM_CIRCLE_SCHEMA})
target_link_libraries(${OM_CORE_LIB} PUBLIC ${OM_IMPORT_LIB})
diff --git a/onert-micro/onert-micro/src/api/CMakeLists.txt b/onert-micro/onert-micro/src/api/CMakeLists.txt
new file mode 100644
index 00000000000..98baf6e0938
--- /dev/null
+++ b/onert-micro/onert-micro/src/api/CMakeLists.txt
@@ -0,0 +1,14 @@
+message(STATUS "ONERT MICRO DEV BUILD BEGIN")
+
+set(SOURCES
+ onert-micro.cpp)
+
+add_library(${OM_DEV_LIB} STATIC ${SOURCES})
+target_compile_options(${OM_DEV_LIB} PRIVATE "-fexceptions")
+target_link_libraries(${OM_DEV_LIB} PUBLIC ${OM_TRAININFO_SCHEMA})
+target_include_directories(${OM_DEV_LIB} PUBLIC "${OM_INCLUDE_DIR}")
+target_link_libraries(${OM_DEV_LIB} PUBLIC ${OM_INTERPRETER_LIB})
+target_link_libraries(${OM_DEV_LIB} PUBLIC ${OM_TRAINING_INTERPRETER_LIB})
+target_link_libraries(${OM_DEV_LIB} PUBLIC onert_micro_coverage)
+
+message(STATUS "ONERT MICRO DEV BUILD FINISHED")
diff --git a/onert-micro/onert-micro/src/api/onert-micro.cpp b/onert-micro/onert-micro/src/api/onert-micro.cpp
new file mode 100644
index 00000000000..fabaeae7b37
--- /dev/null
+++ b/onert-micro/onert-micro/src/api/onert-micro.cpp
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <stdexcept>
+#include <vector>
+
+#include "OMTrainingInterpreter.h"
+#include "onert-micro.h"
+#include <circle-generated/circle/schema_generated.h>
+#include <circle-generated/circle/traininfo_generated.h>
+
+#define NNFW_RETURN_ERROR_IF_NULL(p) \
+ do \
+ { \
+ if ((p) == NULL) \
+ return NNFW_STATUS_UNEXPECTED_NULL; \
+ } while (0)
+
+// helper for file processing
+using DataBuffer = std::vector<char>;
+
+DataBuffer readFile(const char *path)
+{
+ std::ifstream file(path, std::ios::binary | std::ios::in);
+ if (!file.good())
+ {
+ std::string errmsg = "Failed to open file";
+ throw std::runtime_error(errmsg.c_str());
+ }
+
+ file.seekg(0, std::ios::end);
+ auto fileSize = file.tellg();
+ file.seekg(0, std::ios::beg);
+
+ // allocate a buffer of the file size
+ DataBuffer model_data(fileSize);
+
+ // read the data
+ file.read(model_data.data(), fileSize);
+ if (file.fail())
+ {
+ std::string errmsg = "Failed to read file";
+ throw std::runtime_error(errmsg.c_str());
+ }
+
+ return model_data;
+}
+
+struct nnfw_session
+{
+public:
+ /**
+ * @brief Factory method. It creates and initializes nnfw_session
+ *
+ * @note Use factory instead of constructor to get status
+ */
+ static NNFW_STATUS create(nnfw_session **session);
+
+private:
+ nnfw_session();
+
+public:
+ ~nnfw_session();
+
+ NNFW_STATUS load_model_from_file(const char *package_file_path);
+
+ NNFW_STATUS train_set_traininfo(const nnfw_train_info *info);
+ NNFW_STATUS train_prepare();
+ NNFW_STATUS train_input_tensorinfo(uint32_t index, nnfw_tensorinfo *ti);
+ NNFW_STATUS train_expected_tensorinfo(uint32_t index, nnfw_tensorinfo *ti);
+ NNFW_STATUS train_set_input(uint32_t index, void *input);
+ NNFW_STATUS train_set_expected(uint32_t index, void *expected);
+ NNFW_STATUS train_set_output(uint32_t index, NNFW_TYPE type, void *buffer, size_t length);
+ NNFW_STATUS train_run(bool update_weights);
+ NNFW_STATUS train_get_loss(uint32_t index, float *loss);
+ NNFW_STATUS train_export_circle(const char *path);
+
+ NNFW_STATUS train_export_checkpoint(const char *path);
+ NNFW_STATUS train_import_checkpoint(const char *path);
+
+private:
+ uint32_t getInputSize();
+ uint32_t getOutputSize();
+ NNFW_STATUS loadTrainingInfo(char *buf_ptr);
+ NNFW_STATUS loadOptimizerInfo(const circle::ModelTraining *circle_model);
+ NNFW_STATUS loadLossInfo(const circle::ModelTraining *circle_model);
+ NNFW_STATUS loadTrainableOps(const circle::ModelTraining *circle_model, int num_ops);
+
+private:
+ onert_micro::OMTrainingInterpreter *_train_interpreter;
+ onert_micro::OMConfig _config;
+ DataBuffer _model_buf;
+ std::string _model_path;
+ uint8_t *outputbuf;
+};
+
+nnfw_session::nnfw_session() : _train_interpreter{new onert_micro::OMTrainingInterpreter()}
+{
+ // TODO: Remove after implementing train_set_traininfo
+ // Set user defined training settings
+ const uint32_t training_epochs = 10;
+ const float learning_rate = 0.001f;
+ const uint32_t num_train_layers = 10;
+ const onert_micro::OMLoss loss = onert_micro::CROSS_ENTROPY;
+ const onert_micro::OMTrainOptimizer train_optim = onert_micro::ADAM;
+ const float beta = 0.9;
+ const float beta_squares = 0.999;
+ const float epsilon = 1e-07;
+
+ _config.train_mode = true;
+ {
+ onert_micro::OMTrainingContext train_context;
+ train_context.batch_size = 32;
+ train_context.num_of_train_layers = num_train_layers;
+ train_context.learning_rate = learning_rate;
+ train_context.loss = loss;
+ train_context.optimizer = train_optim;
+ train_context.beta = beta;
+ train_context.beta_squares = beta_squares;
+ train_context.epsilon = epsilon;
+ train_context.num_step = 0;
+
+ _config.training_context = train_context;
+ }
+
+ outputbuf = nullptr;
+}
+
+NNFW_STATUS nnfw_session::create(nnfw_session **session)
+{
+ if (session == nullptr)
+ return NNFW_STATUS_UNEXPECTED_NULL;
+
+ auto new_session = std::unique_ptr<nnfw_session>(new nnfw_session());
+ *session = new_session.release();
+
+ if (*session == nullptr)
+ {
+ return NNFW_STATUS_ERROR;
+ }
+
+ return NNFW_STATUS_NO_ERROR;
+}
+
+nnfw_session::~nnfw_session() { delete _train_interpreter; }
+
+NNFW_STATUS nnfw_session::loadOptimizerInfo(const circle::ModelTraining *circle_model)
+{
+ assert(circle_model != nullptr);
+
+ const circle::Optimizer circle_opt = circle_model->optimizer();
+
+ switch (circle_opt)
+ {
+ case circle::Optimizer_SGD:
+ _config.training_context.optimizer = onert_micro::SGD;
+ _config.training_context.learning_rate =
+ circle_model->optimizer_opt_as_SGDOptions()->learning_rate();
+ break;
+ case circle::Optimizer_ADAM:
+ _config.training_context.optimizer = onert_micro::ADAM;
+ _config.training_context.learning_rate =
+ circle_model->optimizer_opt_as_AdamOptions()->learning_rate();
+ _config.training_context.beta = circle_model->optimizer_opt_as_AdamOptions()->beta_1();
+ _config.training_context.beta_squares =
+ circle_model->optimizer_opt_as_AdamOptions()->beta_2();
+ _config.training_context.epsilon = circle_model->optimizer_opt_as_AdamOptions()->epsilon();
+ break;
+ default:
+ std::cerr << "unknown optimzer" << std::endl;
+ return NNFW_STATUS_ERROR;
+ }
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::loadLossInfo(const circle::ModelTraining *circle_model)
+{
+ assert(circle_model != nullptr);
+
+ const circle::LossFn circle_loss = circle_model->lossfn();
+
+ switch (circle_loss)
+ {
+ case circle::LossFn::LossFn_CATEGORICAL_CROSSENTROPY:
+ _config.training_context.loss = onert_micro::CROSS_ENTROPY;
+ break;
+ case circle::LossFn::LossFn_MEAN_SQUARED_ERROR:
+ _config.training_context.loss = onert_micro::MSE;
+ break;
+ case circle::LossFn::LossFn_SPARSE_CATEGORICAL_CROSSENTROPY:
+ // TODO enable this conversion after core supports sparse_categorical_crossentropy
+ std::cerr << "'sparse_categorical_crossentropy' is not supported yet" << std::endl;
+ return NNFW_STATUS_ERROR;
+ default:
+ std::cerr << "unknown loss function" << std::endl;
+ return NNFW_STATUS_ERROR;
+ }
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::loadTrainableOps(const circle::ModelTraining *circle_model, int num_ops)
+{
+ assert(circle_model != nullptr);
+
+ auto ops_list = circle_model->trainable_ops();
+ if (ops_list != nullptr)
+ _config.training_context.num_of_train_layers =
+ num_ops - ops_list->data()[0]; // simply assume ops[0] is the lowest node index
+ else
+ _config.training_context.num_of_train_layers = num_ops;
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::loadTrainingInfo(char *buf)
+{
+ auto model = circle::GetModel(buf);
+ auto num_ops = model->subgraphs()->Get(0)->operators()->size();
+ // Load Metadata
+ auto const metadata_list = model->metadata();
+ const uint8_t *data = nullptr;
+ if (metadata_list != nullptr)
+ {
+ for (uint32_t i = 0; i < metadata_list->size(); ++i)
+ {
+ const auto metadata = metadata_list->Get(i);
+ if (strcmp(metadata->name()->c_str(), "CIRCLE_TRAINING") != 0)
+ continue;
+ data = (model->buffers()->Get(metadata->buffer()))->data()->data();
+ }
+ const circle::ModelTraining *traininfo_model =
+ circle::GetModelTraining(static_cast<const void *>(data));
+ _config.training_context.batch_size = traininfo_model->batch_size();
+ loadOptimizerInfo(traininfo_model);
+ loadLossInfo(traininfo_model);
+ loadTrainableOps(traininfo_model, num_ops);
+ }
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::load_model_from_file(const char *file_path)
+{
+ _model_buf = readFile(file_path);
+ _config.model_ptr = _model_buf.data();
+ _config.model_size = _model_buf.size();
+ // load training info
+ loadTrainingInfo(_config.model_ptr);
+ // TODO: this import should start on nnfw_prepare if inference_interpreter is introduced
+ _train_interpreter->importTrainModel(_config.model_ptr, _config);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_prepare()
+{
+ // TODO: Implement remaining jobs if inference_interpreter is introduced
+ // (maybe interpreter initialization?)
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_run(bool update_weights)
+{
+ if (update_weights)
+ {
+ // TODO: should onert-micro support update_weights?
+ // Here we use this flag to distinguish inference and training in the training interpreter
+ _train_interpreter->trainSingleStep(_config);
+ _config.training_context.num_epoch =
+ _config.training_context.num_step / _config.training_context.batch_size + 1;
+ }
+ else
+ {
+ // TODO: support multiple input/output
+ assert(outputbuf != nullptr);
+ _train_interpreter->allocateInputs();
+ float *allocated_input_data = (float *)_train_interpreter->getInputDataAt(0);
+ float *user_input_data = (float *)_train_interpreter->getInputData(0);
+ memcpy(allocated_input_data, user_input_data,
+ sizeof(float) * _train_interpreter->getInputSizeAt(0));
+ _train_interpreter->run();
+ float *calculated_ptr = (float *)_train_interpreter->getOutputDataAt(0);
+ memcpy(outputbuf, calculated_ptr, sizeof(float) * _train_interpreter->getOutputSizeAt(0));
+ _train_interpreter->reset();
+ }
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_export_circle(const char *path)
+{
+ _train_interpreter->saveModel(_config, path);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_export_checkpoint(const char *path)
+{
+ _train_interpreter->saveCheckpoint(_config, path);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_import_checkpoint(const char *path)
+{
+ _train_interpreter->loadCheckpoint(_config, path);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+// TODO: onert's version of this function takes a const input
+NNFW_STATUS nnfw_session::train_set_input(uint32_t index, void *input)
+{
+ _train_interpreter->setInput((uint8_t *)input, index);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+// TODO: onert's version of this function takes a const expected buffer
+NNFW_STATUS nnfw_session::train_set_expected(uint32_t index, void *expected)
+{
+ _train_interpreter->setTarget((uint8_t *)expected, index);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_set_output(uint32_t index, NNFW_TYPE type, void *buffer,
+ size_t length)
+{
+ outputbuf = (uint8_t *)buffer;
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_set_traininfo(const nnfw_train_info *info)
+{
+ _config.training_context.learning_rate = info->learning_rate;
+ _config.training_context.batch_size = info->batch_size;
+ _config.training_context.optimizer =
+ (info->opt == NNFW_TRAIN_OPTIMIZER_ADAM) ? onert_micro::ADAM : onert_micro::SGD;
+ _config.training_context.beta = info->adam_opt.beta;
+ _config.training_context.beta_squares = info->adam_opt.beta2;
+ _config.training_context.epsilon = info->adam_opt.epsilon;
+ _config.training_context.num_of_train_layers = info->num_trainable_ops;
+ return NNFW_STATUS_NO_ERROR;
+}
+
+NNFW_STATUS nnfw_session::train_get_loss(uint32_t index, float *loss)
+{
+ onert_micro::OMMetrics m;
+ switch (_config.training_context.loss)
+ {
+ case onert_micro::CROSS_ENTROPY:
+ m = onert_micro::CROSS_ENTROPY_METRICS;
+ break;
+ default:
+ m = onert_micro::CROSS_ENTROPY_METRICS;
+ break;
+ }
+
+ _train_interpreter->evaluateMetric(m, reinterpret_cast(loss),
+ _config.training_context.batch_size);
+ return NNFW_STATUS_NO_ERROR;
+}
+
+// onert-micro.h implementation
+
+NNFW_STATUS nnfw_create_session(nnfw_session **session) { return nnfw_session::create(session); }
+
+NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path)
+{
+ return session->load_model_from_file(package_file_path);
+}
+
+NNFW_STATUS nnfw_train_prepare(nnfw_session *session) { return session->train_prepare(); }
+
+NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights)
+{
+ return session->train_run(update_weights);
+}
+
+NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path)
+{
+ return session->train_export_circle(path);
+}
+
+NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path)
+{
+ return session->train_export_checkpoint(path);
+}
+
+NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path)
+{
+ return session->train_import_checkpoint(path);
+}
+
+NNFW_STATUS nnfw_train_set_input(nnfw_session *session, uint32_t index, void *input,
+ const nnfw_tensorinfo *input_info)
+{
+ NNFW_RETURN_ERROR_IF_NULL(session);
+ return session->train_set_input(index, input);
+}
+
+NNFW_STATUS nnfw_train_set_expected(nnfw_session *session, uint32_t index, void *expected,
+ const nnfw_tensorinfo *expected_info)
+{
+ NNFW_RETURN_ERROR_IF_NULL(session);
+ return session->train_set_expected(index, expected);
+}
+
+NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss)
+{
+ NNFW_RETURN_ERROR_IF_NULL(session);
+ return session->train_get_loss(index, loss);
+}
+
+NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info *info)
+{
+ NNFW_RETURN_ERROR_IF_NULL(session);
+ return session->train_set_traininfo(info);
+}
+
+NNFW_STATUS nnfw_train_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type,
+ void *buffer, size_t length)
+{
+ NNFW_RETURN_ERROR_IF_NULL(session);
+ return session->train_set_output(index, type, buffer, length);
+}
From bef445f97d7e889c9f579f8bd4f603839cc09478 Mon Sep 17 00:00:00 2001
From: Balyshev Artem <43214667+BalyshevArtem@users.noreply.github.com>
Date: Wed, 26 Jun 2024 10:50:11 +0300
Subject: [PATCH 15/16] [onert-micro] Reduce pooling code duplication (#13289)
This PR reduces code duplication in the pooling import part.
ONE-DCO-1.0-Signed-off-by: Artem Balyshev
Co-authored-by: Artem Balyshev
---
.../include/import/helpers/OMPooingCommon.h | 38 ++++++++
.../onert-micro/src/import/CMakeLists.txt | 1 +
.../src/import/helpers/OMPoolingCommon.cpp | 94 +++++++++++++++++++
.../src/import/kernels/AveragePool2D.cpp | 80 +---------------
.../src/import/kernels/L2Pool2D.cpp | 68 +-------------
.../src/import/kernels/MaxPool2D.cpp | 80 +---------------
6 files changed, 139 insertions(+), 222 deletions(-)
create mode 100644 onert-micro/onert-micro/include/import/helpers/OMPooingCommon.h
create mode 100644 onert-micro/onert-micro/src/import/helpers/OMPoolingCommon.cpp
diff --git a/onert-micro/onert-micro/include/import/helpers/OMPooingCommon.h b/onert-micro/onert-micro/include/import/helpers/OMPooingCommon.h
new file mode 100644
index 00000000000..82d93551f47
--- /dev/null
+++ b/onert-micro/onert-micro/include/import/helpers/OMPooingCommon.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_POOLING_KERNEL_COMMON_H
+#define ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_POOLING_KERNEL_COMMON_H
+
+#include "import/OMKernelConfigureBuilder.h"
+#include "core/OMUtils.h"
+#include "OMStatus.h"
+#include "execute/OMRuntimeKernel.h"
+
+namespace onert_micro
+{
+namespace import
+{
+namespace helpers
+{
+
+OMStatus configure_pooling_kernel_common(const OMConfigureArgs &config_args);
+
+} // namespace helpers
+} // namespace import
+} // namespace onert_micro
+
+#endif // ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_POOLING_KERNEL_COMMON_H
diff --git a/onert-micro/onert-micro/src/import/CMakeLists.txt b/onert-micro/onert-micro/src/import/CMakeLists.txt
index b626bbbf4a3..6297f8feed1 100644
--- a/onert-micro/onert-micro/src/import/CMakeLists.txt
+++ b/onert-micro/onert-micro/src/import/CMakeLists.txt
@@ -7,6 +7,7 @@ set(SOURCES
helpers/OMConfigureSISOKernel.cpp
helpers/OMPadCommon.cpp
helpers/OMConfigureTISOKernel.cpp
+ helpers/OMPoolingCommon.cpp
)
# Add configure kernels
diff --git a/onert-micro/onert-micro/src/import/helpers/OMPoolingCommon.cpp b/onert-micro/onert-micro/src/import/helpers/OMPoolingCommon.cpp
new file mode 100644
index 00000000000..f44e53d3d59
--- /dev/null
+++ b/onert-micro/onert-micro/src/import/helpers/OMPoolingCommon.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "import/helpers/OMPooingCommon.h"
+
+using namespace onert_micro;
+using namespace onert_micro::core;
+
+namespace
+{
+
+constexpr uint32_t inputTensorIdx = 0;
+constexpr uint32_t outputTensorIdx = 0;
+
+} // namespace
+
+OMStatus
+onert_micro::import::helpers::configure_pooling_kernel_common(const OMConfigureArgs &config_args)
+{
+ OMRuntimeContext &runtime_context = config_args.runtime_context;
+ uint16_t op_index = config_args.kernel_index;
+
+ onert_micro::execute::OMRuntimeKernel runtime_kernel;
+
+ OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
+ if (status != Ok)
+ return status;
+
+ const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
+ const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
+
+ assert(input != nullptr);
+ assert(output != nullptr);
+
+ status = utils::checkCondition(input->type() == output->type());
+ if (status != Ok)
+ return status;
+
+ OMRuntimeShape input_shape(input);
+ OMRuntimeShape output_shape(output);
+
+ status = utils::checkCondition(input_shape.dimensionsCount() == output_shape.dimensionsCount());
+ if (status != Ok)
+ return status;
+
+ status = utils::checkCondition(input_shape.dimensionsCount() == 4);
+
+ auto option = runtime_kernel.first_operator->builtin_options_as_Pool2DOptions();
+
+ if (option == nullptr)
+ return UnknownError;
+
+ if (input->type() != circle::TensorType_INT8 and input->type() != circle::TensorType_INT16)
+ return status;
+
+ // Check quantization params
+ if (input->quantization() == nullptr)
+ {
+ return NoQuantization;
+ }
+
+ if (input->quantization()->scale()->size() != 1)
+ {
+ return UnsupportedType;
+ }
+
+ // Check quantization params
+ if (output->quantization() == nullptr)
+ {
+ return NoQuantization;
+ }
+
+ if (output->quantization()->scale()->size() != 1)
+ {
+ return UnsupportedType;
+ }
+
+ return status;
+}
diff --git a/onert-micro/onert-micro/src/import/kernels/AveragePool2D.cpp b/onert-micro/onert-micro/src/import/kernels/AveragePool2D.cpp
index bfba1332ab7..ea55814675a 100644
--- a/onert-micro/onert-micro/src/import/kernels/AveragePool2D.cpp
+++ b/onert-micro/onert-micro/src/import/kernels/AveragePool2D.cpp
@@ -14,89 +14,13 @@
* limitations under the License.
*/
-#include "OMStatus.h"
-
-#include "core/OMUtils.h"
-#include "core/OMKernelData.h"
-
-#include "import/OMKernelConfigureBuilder.h"
-
-#include "execute/OMRuntimeKernel.h"
-#include "execute/OMUtils.h"
+#include "import/helpers/OMPooingCommon.h"
using namespace onert_micro;
using namespace onert_micro::core;
-namespace
-{
-
-constexpr uint32_t inputTensorIdx = 0;
-constexpr uint32_t outputTensorIdx = 0;
-
-} // namespace
-
OMStatus
onert_micro::import::configure_kernel_CircleAveragePool2D(const OMConfigureArgs &config_args)
{
- OMRuntimeContext &runtime_context = config_args.runtime_context;
- uint16_t op_index = config_args.kernel_index;
-
- onert_micro::execute::OMRuntimeKernel runtime_kernel;
-
- OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
- if (status != Ok)
- return status;
-
- const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
- const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
-
- assert(input != nullptr);
- assert(output != nullptr);
-
- status = utils::checkCondition(input->type() == output->type());
- if (status != Ok)
- return status;
-
- OMRuntimeShape input_shape(input);
- OMRuntimeShape output_shape(output);
-
- status = utils::checkCondition(input_shape.dimensionsCount() == output_shape.dimensionsCount());
- if (status != Ok)
- return status;
-
- status = utils::checkCondition(input_shape.dimensionsCount() == 4);
-
- auto option = runtime_kernel.first_operator->builtin_options_as_Pool2DOptions();
-
- if (option == nullptr)
- return UnknownError;
-
- assert(option != nullptr);
-
- if (input->type() != circle::TensorType_INT8 and input->type() != circle::TensorType_INT16)
- return status;
-
- // Check quantization params
- if (input->quantization() == nullptr)
- {
- return NoQuantization;
- }
-
- if (input->quantization()->scale()->size() != 1)
- {
- return UnsupportedType;
- }
-
- // Check quantization params
- if (output->quantization() == nullptr)
- {
- return NoQuantization;
- }
-
- if (output->quantization()->scale()->size() != 1)
- {
- return UnsupportedType;
- }
-
- return status;
+ return import::helpers::configure_pooling_kernel_common(config_args);
}
diff --git a/onert-micro/onert-micro/src/import/kernels/L2Pool2D.cpp b/onert-micro/onert-micro/src/import/kernels/L2Pool2D.cpp
index eebcfcb2ddf..40b9dc427cb 100644
--- a/onert-micro/onert-micro/src/import/kernels/L2Pool2D.cpp
+++ b/onert-micro/onert-micro/src/import/kernels/L2Pool2D.cpp
@@ -14,76 +14,12 @@
* limitations under the License.
*/
-#include "OMStatus.h"
-
-#include "core/OMUtils.h"
-#include "core/OMKernelData.h"
-
-#include "import/OMKernelConfigureBuilder.h"
-
-#include "execute/OMRuntimeKernel.h"
+#include "import/helpers/OMPooingCommon.h"
using namespace onert_micro;
using namespace onert_micro::core;
-namespace
-{
-
-constexpr uint32_t inputTensorIdx = 0;
-constexpr uint32_t outputTensorIdx = 0;
-
-} // namespace
-
OMStatus onert_micro::import::configure_kernel_CircleL2Pool2D(const OMConfigureArgs &config_args)
{
- OMRuntimeContext &runtime_context = config_args.runtime_context;
- uint16_t op_index = config_args.kernel_index;
-
- onert_micro::execute::OMRuntimeKernel runtime_kernel;
-
- OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
- if (status != Ok)
- return status;
-
- const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
- const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
-
- assert(input != nullptr);
- assert(output != nullptr);
-
- status = utils::checkCondition(input->type() == output->type());
- if (status != Ok)
- return status;
-
- OMRuntimeShape input_shape(input);
- OMRuntimeShape output_shape(output);
-
- status = utils::checkCondition(input_shape.dimensionsCount() == output_shape.dimensionsCount());
- if (status != Ok)
- return status;
-
- status = utils::checkCondition(input_shape.dimensionsCount() == 4);
-
- auto option = runtime_kernel.first_operator->builtin_options_as_Pool2DOptions();
-
- if (option == nullptr)
- return UnknownError;
-
- assert(option != nullptr);
-
- if (input->type() != circle::TensorType_INT8 and input->type() != circle::TensorType_INT16)
- return status;
-
- // Check quantization params
- if (input->quantization() == nullptr)
- {
- return NoQuantization;
- }
-
- if (input->quantization()->scale()->size() != 1)
- {
- return UnsupportedType;
- }
-
- return status;
+ return helpers::configure_pooling_kernel_common(config_args);
}
diff --git a/onert-micro/onert-micro/src/import/kernels/MaxPool2D.cpp b/onert-micro/onert-micro/src/import/kernels/MaxPool2D.cpp
index b7c3a219362..68d4ee7828e 100644
--- a/onert-micro/onert-micro/src/import/kernels/MaxPool2D.cpp
+++ b/onert-micro/onert-micro/src/import/kernels/MaxPool2D.cpp
@@ -14,88 +14,12 @@
* limitations under the License.
*/
-#include "OMStatus.h"
-
-#include "core/OMUtils.h"
-#include "core/OMKernelData.h"
-
-#include "import/OMKernelConfigureBuilder.h"
-
-#include "execute/OMRuntimeKernel.h"
-#include "execute/OMUtils.h"
+#include "import/helpers/OMPooingCommon.h"
using namespace onert_micro;
using namespace onert_micro::core;
-namespace
-{
-
-constexpr uint32_t inputTensorIdx = 0;
-constexpr uint32_t outputTensorIdx = 0;
-
-} // namespace
-
OMStatus onert_micro::import::configure_kernel_CircleMaxPool2D(const OMConfigureArgs &config_args)
{
- OMRuntimeContext &runtime_context = config_args.runtime_context;
- uint16_t op_index = config_args.kernel_index;
-
- onert_micro::execute::OMRuntimeKernel runtime_kernel;
-
- OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
- if (status != Ok)
- return status;
-
- const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
- const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
-
- assert(input != nullptr);
- assert(output != nullptr);
-
- status = utils::checkCondition(input->type() == output->type());
- if (status != Ok)
- return status;
-
- OMRuntimeShape input_shape(input);
- OMRuntimeShape output_shape(output);
-
- status = utils::checkCondition(input_shape.dimensionsCount() == output_shape.dimensionsCount());
- if (status != Ok)
- return status;
-
- status = utils::checkCondition(input_shape.dimensionsCount() == 4);
-
- auto option = runtime_kernel.first_operator->builtin_options_as_Pool2DOptions();
-
- if (option == nullptr)
- return UnknownError;
-
- assert(option != nullptr);
-
- if (input->type() != circle::TensorType_INT8 and input->type() != circle::TensorType_INT16)
- return status;
-
- // Check quantization params
- if (input->quantization() == nullptr)
- {
- return NoQuantization;
- }
-
- if (input->quantization()->scale()->size() != 1)
- {
- return UnsupportedType;
- }
-
- // Check quantization params
- if (output->quantization() == nullptr)
- {
- return NoQuantization;
- }
-
- if (output->quantization()->scale()->size() != 1)
- {
- return UnsupportedType;
- }
-
- return status;
+ return helpers::configure_pooling_kernel_common(config_args);
}
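With the shared helper in place, each pooling kernel body reduces to a one-line
delegation, so a further pooling variant would reuse the same shape and
quantization validation for free. A sketch of what such a kernel would look
like (the kernel name CircleSomePool2D is hypothetical):

#include "import/helpers/OMPooingCommon.h"

using namespace onert_micro;

// Hypothetical pooling kernel: all validation delegates to the shared helper.
OMStatus onert_micro::import::configure_kernel_CircleSomePool2D(const OMConfigureArgs &config_args)
{
  return helpers::configure_pooling_kernel_common(config_args);
}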
From 1d53f9f96f63e8bd0ab58e950a027483586b369d Mon Sep 17 00:00:00 2001
From: Balyshev Artem <43214667+BalyshevArtem@users.noreply.github.com>
Date: Wed, 26 Jun 2024 11:15:22 +0300
Subject: [PATCH 16/16] [onert-micro] Reduce Reshape and ExpandDims code
duplication (#13292)
This PR reduces code duplication for Reshape and ExpandDims.
ONE-DCO-1.0-Signed-off-by: Artem Balyshev
Co-authored-by: Artem Balyshev
---
.../include/execute/kernels/ReshapeCommon.h | 41 +++++++++++
.../onert-micro/src/execute/CMakeLists.txt | 1 +
.../src/execute/kernels/ExpandDims.cpp | 54 +-------------
.../src/execute/kernels/Reshape.cpp | 46 +-----------
.../src/execute/kernels/ReshapeCommon.cpp | 70 +++++++++++++++++++
5 files changed, 116 insertions(+), 96 deletions(-)
create mode 100644 onert-micro/onert-micro/include/execute/kernels/ReshapeCommon.h
create mode 100644 onert-micro/onert-micro/src/execute/kernels/ReshapeCommon.cpp
diff --git a/onert-micro/onert-micro/include/execute/kernels/ReshapeCommon.h b/onert-micro/onert-micro/include/execute/kernels/ReshapeCommon.h
new file mode 100644
index 00000000000..d63736119fd
--- /dev/null
+++ b/onert-micro/onert-micro/include/execute/kernels/ReshapeCommon.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ONERT_MICRO_EXECUTE_KERNELS_RESHAPE_COMMON_H
+#define ONERT_MICRO_EXECUTE_KERNELS_RESHAPE_COMMON_H
+
+#include "OMStatus.h"
+
+#include "core/OMUtils.h"
+#include "core/OMKernelData.h"
+#include "core/OMDataType.h"
+
+#include "execute/OMKernelExecutionBuilder.h"
+#include "execute/OMUtils.h"
+#include "execute/OMRuntimeKernel.h"
+#include
+
+namespace onert_micro
+{
+namespace execute
+{
+
+OMStatus execute_reshape_common(const OMExecuteArgs &execute_args);
+
+} // namespace execute
+} // namespace onert_micro
+
+#endif // ONERT_MICRO_EXECUTE_KERNELS_RESHAPE_COMMON_H
diff --git a/onert-micro/onert-micro/src/execute/CMakeLists.txt b/onert-micro/onert-micro/src/execute/CMakeLists.txt
index 142e1cec337..912af3bd885 100644
--- a/onert-micro/onert-micro/src/execute/CMakeLists.txt
+++ b/onert-micro/onert-micro/src/execute/CMakeLists.txt
@@ -16,6 +16,7 @@ set(SOURCES
OMUtils.cpp
kernels/ConvolutionCommon.cpp
kernels/PoolingCommon.cpp
+ kernels/ReshapeCommon.cpp
)
# Add configure kernels
diff --git a/onert-micro/onert-micro/src/execute/kernels/ExpandDims.cpp b/onert-micro/onert-micro/src/execute/kernels/ExpandDims.cpp
index 52493c0fada..ee52d5bb50c 100644
--- a/onert-micro/onert-micro/src/execute/kernels/ExpandDims.cpp
+++ b/onert-micro/onert-micro/src/execute/kernels/ExpandDims.cpp
@@ -14,63 +14,13 @@
* limitations under the License.
*/
-#include "OMStatus.h"
-
-#include "core/OMUtils.h"
-#include "core/OMDataType.h"
-
-#include "execute/OMKernelExecutionBuilder.h"
-#include "execute/OMRuntimeKernel.h"
+#include "execute/kernels/ReshapeCommon.h"
using namespace onert_micro;
using namespace onert_micro::execute;
-namespace
-{
-
-constexpr uint32_t inputTensorIdx = 0;
-constexpr uint32_t outputTensorIdx = 0;
-
-} // namespace
-
// NOTE: doesn't currently support dynamic shapes
OMStatus onert_micro::execute::execute_kernel_CircleExpandDims(const OMExecuteArgs &execute_args)
{
- core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
- core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
- uint16_t op_index = execute_args.kernel_index;
-
- OMRuntimeKernel runtime_kernel;
- runtime_kernel.readKernel(op_index, runtime_context);
-
- const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
- const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
-
- assert(input != nullptr);
- assert(output != nullptr);
-
- OMStatus status = Ok;
-
- status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
- if (status != Ok)
- return status;
-
- uint8_t *input_data = runtime_kernel.inputs_data[inputTensorIdx];
- uint8_t *output_data = runtime_kernel.outputs_data[outputTensorIdx];
-
- assert(input_data != nullptr);
- assert(output_data != nullptr);
-
- // Check is it inplace kernel
- if (input_data == output_data)
- return Ok;
-
- const core::OMRuntimeShape shape(input);
-
- const size_t element_size =
- static_cast<size_t>(getOMDataTypeSize(core::onertMicroDatatype(input->type())));
- const int32_t num_elements = shape.flatSize();
- std::memcpy(output_data, input_data, num_elements * element_size);
-
- return status;
+ return execute_reshape_common(execute_args);
}
diff --git a/onert-micro/onert-micro/src/execute/kernels/Reshape.cpp b/onert-micro/onert-micro/src/execute/kernels/Reshape.cpp
index 5a1482b3a2e..5ddc9b050a1 100644
--- a/onert-micro/onert-micro/src/execute/kernels/Reshape.cpp
+++ b/onert-micro/onert-micro/src/execute/kernels/Reshape.cpp
@@ -14,13 +14,7 @@
* limitations under the License.
*/
-#include "OMStatus.h"
-
-#include "core/OMUtils.h"
-#include "core/OMDataType.h"
-
-#include "execute/OMKernelExecutionBuilder.h"
-#include "execute/OMRuntimeKernel.h"
+#include "execute/kernels/ReshapeCommon.h"
using namespace onert_micro;
using namespace onert_micro::execute;
@@ -36,41 +30,5 @@ constexpr uint32_t outputTensorIdx = 0;
// NOTE: doesn't currently support dynamic shapes
OMStatus onert_micro::execute::execute_kernel_CircleReshape(const OMExecuteArgs &execute_args)
{
- core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
- core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
- uint16_t op_index = execute_args.kernel_index;
-
- OMRuntimeKernel runtime_kernel;
- runtime_kernel.readKernel(op_index, runtime_context);
-
- const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
- const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
-
- assert(input != nullptr);
- assert(output != nullptr);
-
- OMStatus status = Ok;
-
- status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
- if (status != Ok)
- return status;
-
- uint8_t *input_data = runtime_kernel.inputs_data[inputTensorIdx];
- uint8_t *output_data = runtime_kernel.outputs_data[outputTensorIdx];
-
- assert(input_data != nullptr);
- assert(output_data != nullptr);
-
- // Check is it inplace kernel
- if (input_data == output_data)
- return Ok;
-
- const core::OMRuntimeShape shape(input);
-
- const size_t element_size =
- static_cast<size_t>(getOMDataTypeSize(core::onertMicroDatatype(input->type())));
- const int32_t num_elements = shape.flatSize();
- std::memcpy(output_data, input_data, num_elements * element_size);
-
- return status;
+ return execute_reshape_common(execute_args);
}
diff --git a/onert-micro/onert-micro/src/execute/kernels/ReshapeCommon.cpp b/onert-micro/onert-micro/src/execute/kernels/ReshapeCommon.cpp
new file mode 100644
index 00000000000..ad15cce6f6a
--- /dev/null
+++ b/onert-micro/onert-micro/src/execute/kernels/ReshapeCommon.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "execute/kernels/ReshapeCommon.h"
+
+using namespace onert_micro;
+using namespace onert_micro::execute;
+
+namespace
+{
+
+constexpr uint32_t inputTensorIdx = 0;
+constexpr uint32_t outputTensorIdx = 0;
+
+} // namespace
+
+// NOTE: doesn't currently support dynamic shapes
+OMStatus onert_micro::execute::execute_reshape_common(const OMExecuteArgs &execute_args)
+{
+ core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
+ core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
+ uint16_t op_index = execute_args.kernel_index;
+
+ OMRuntimeKernel runtime_kernel;
+ runtime_kernel.readKernel(op_index, runtime_context);
+
+ const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
+ const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
+
+ assert(input != nullptr);
+ assert(output != nullptr);
+
+ OMStatus status = Ok;
+
+ status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
+ if (status != Ok)
+ return status;
+
+ uint8_t *input_data = runtime_kernel.inputs_data[inputTensorIdx];
+ uint8_t *output_data = runtime_kernel.outputs_data[outputTensorIdx];
+
+ assert(input_data != nullptr);
+ assert(output_data != nullptr);
+
+ // Check whether this is an in-place kernel
+ if (input_data == output_data)
+ return Ok;
+
+ const core::OMRuntimeShape shape(input);
+
+ const size_t element_size =
+ static_cast<size_t>(getOMDataTypeSize(core::onertMicroDatatype(input->type())));
+ const int32_t num_elements = shape.flatSize();
+ std::memcpy(output_data, input_data, num_elements * element_size);
+
+ return status;
+}
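Reshape and ExpandDims can share one executor because neither changes element
values: both emit a tensor with the same flat contents under a different shape,
so the body reduces to a byte-for-byte copy (or a no-op when the kernel runs in
place). A minimal sketch of that core, assuming float data (names are
illustrative, not onert-micro API):

#include <cstddef>
#include <cstring>

// Flat copy shared by shape-only ops; skips the copy for in-place execution.
void reshapeCopy(const float *in, float *out, size_t num_elements)
{
  if (in == out)
    return; // in-place kernel: nothing to do
  std::memcpy(out, in, num_elements * sizeof(float));
}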