diff --git a/Applications/Custom/mae_loss.cpp b/Applications/Custom/mae_loss.cpp
index 092f762cf6..289311ac89 100644
--- a/Applications/Custom/mae_loss.cpp
+++ b/Applications/Custom/mae_loss.cpp
@@ -27,7 +27,7 @@ void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context,
   nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
   nntrainer::Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
 
-  if (!context.executeInPlace())
+  if (!context.getInPlace())
     output.fill(predicted);
 }
 
diff --git a/nntrainer/graph/network_graph.cpp b/nntrainer/graph/network_graph.cpp
index f29c536ea7..15eb5c4779 100644
--- a/nntrainer/graph/network_graph.cpp
+++ b/nntrainer/graph/network_graph.cpp
@@ -640,16 +640,16 @@ void NetworkGraph::addLayer(std::shared_ptr<LayerNode> layer) {
   graph.addNode(layer);
 }
 
-InPlace
+InPlaceType
 NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
 
   if (!lnode->supportInPlace()) {
-    return InPlace::NONE;
+    return InPlaceType::NONE;
   }
 
   if (lnode->getType() == InputLayer::type &&
       !istrequal(getTensorType()[2], "FP32")) {
-    return InPlace::NONE;
+    return InPlaceType::NONE;
   }
 
   /** layers which behave as a no-op - flatten */
@@ -692,11 +692,12 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
     for (auto i = 0u, num_node = lnode->getNumInputConnections(); i < num_node;
          ++i) {
       const auto &input_name = lnode->getInputConnectionName(i);
-      if (getLayerNode(input_name)->executeInPlace() == InPlace::RESTRICTING)
-        return InPlace::RESTRICTING;
+      if (getLayerNode(input_name)->getInPlaceType() ==
+          InPlaceType::RESTRICTING)
+        return InPlaceType::RESTRICTING;
     }
 
-    return InPlace::NON_RESTRICTING;
+    return InPlaceType::NON_RESTRICTING;
   }
 
   /**
@@ -710,7 +711,7 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
    * inplace, and then its restricting mode.
    */
   if (no_op_shared(lnode))
-    return InPlace::RESTRICTING;
+    return InPlaceType::RESTRICTING;
 
   /**
    * @note Conditions to decide if this layer node can be in-place:
@@ -740,9 +741,9 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
       lnode->getType() == LayerNormalizationLayer::type) {
     for (auto i = 0u, num_node = lnode->getNumInputConnections(); i < num_node;
          ++i) {
-      if (getLayerNode(lnode->getInputConnectionName(i))->executeInPlace() ==
-          InPlace::RESTRICTING)
-        return InPlace::NONE;
+      if (getLayerNode(lnode->getInputConnectionName(i))->getInPlaceType() ==
+          InPlaceType::RESTRICTING)
+        return InPlaceType::NONE;
     }
 
     /**
@@ -751,9 +752,9 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
      * layer.
      */
     if (io_independent_backwarding(lnode))
-      return InPlace::NON_RESTRICTING;
+      return InPlaceType::NON_RESTRICTING;
 
-    return InPlace::RESTRICTING;
+    return InPlaceType::RESTRICTING;
   }
 
   /**
@@ -762,17 +763,17 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
    */
   if (lnode->getInputConnections().empty()) {
     if (!istrequal(getTensorType()[2], "FP32"))
-      return InPlace::NONE;
+      return InPlaceType::NONE;
   }
 
-  return InPlace::NONE;
+  return InPlaceType::NONE;
 }
 
 void NetworkGraph::inPlaceOptimize() {
   if (optimize_memory) {
     for (unsigned int idx = 0; idx < graph.size(); ++idx) {
       auto const &lnode = getSortedLayerNode(idx);
-      lnode->executeInPlace(canExecuteInPlace(lnode));
+      lnode->setInPlaceType(canExecuteInPlace(lnode));
     }
   }
 }
@@ -842,7 +843,7 @@ NetworkGraph::finalizeContext(const std::shared_ptr<LayerNode> &lnode,
   /// @note try move inplace control to finalize
   bool shared_var = false, shared_grad = false;
 
-  if (lnode->executeInPlace() != InPlace::NONE && lnode->supportInPlace()) {
+  if (lnode->getInPlaceType() != InPlaceType::NONE && lnode->supportInPlace()) {
     setInplaceSharedMemoryConfigByLayer(lnode, shared_var, shared_grad);
     for (unsigned int i = 0; i < out_specs.size(); ++i) {
       auto &s = out_specs.at(i);
@@ -998,7 +999,7 @@ NetworkGraph::refinalizeContext(const std::shared_ptr<LayerNode> &lnode,
   auto out_specs = init_context.getOutSpecs();
   /// @note try move inplace control to finalize
   bool shared_var = false, shared_grad = false;
-  if (lnode->executeInPlace() != InPlace::NONE) {
+  if (lnode->getInPlaceType() != InPlaceType::NONE) {
     setInplaceSharedMemoryConfigByLayer(lnode, shared_var, shared_grad);
     for (unsigned int i = 0; i < out_specs.size(); ++i) {
       auto &s = out_specs.at(i);
diff --git a/nntrainer/graph/network_graph.h b/nntrainer/graph/network_graph.h
index cec0f0eab1..38f61e21af 100644
--- a/nntrainer/graph/network_graph.h
+++ b/nntrainer/graph/network_graph.h
@@ -609,7 +609,7 @@ class NetworkGraph {
    *
    * @return the mode of inplace for the layer
    */
-  InPlace canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode);
+  InPlaceType canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode);
 
   /**
    * @brief compute optimized backward end. This function calculated the valid
diff --git a/nntrainer/layers/acti_func.h b/nntrainer/layers/acti_func.h
index c6c3576414..f449d5064f 100644
--- a/nntrainer/layers/acti_func.h
+++ b/nntrainer/layers/acti_func.h
@@ -760,7 +760,7 @@ class ActiFunc {
    *
    * @param val True if execute in-place, else false
    */
-  void executeInPlace(bool val) {
+  void setInPlace(bool val) {
     if (val && !supportInPlace())
       throw std::runtime_error(
         "Error setting activation layer to work in-place");
diff --git a/nntrainer/layers/activation_layer.cpp b/nntrainer/layers/activation_layer.cpp
index 8fd59506cc..c7c343cca8 100644
--- a/nntrainer/layers/activation_layer.cpp
+++ b/nntrainer/layers/activation_layer.cpp
@@ -32,8 +32,7 @@
 
 namespace nntrainer {
 ActivationLayer::ActivationLayer() :
-  Layer(),
-  activation_props(new PropTypes(props::Activation())) {
+  Layer(), activation_props(new PropTypes(props::Activation())) {
   acti_func.setActiFunc(ActivationType::ACT_NONE);
 }
 
@@ -65,7 +64,7 @@ void ActivationLayer::finalize(InitLayerContext &context) {
     InitLayerContext::outSpec(context.getInputDimensions()[0], "out",
                               TensorLifespan::FORWARD_DERIV_LIFESPAN));
   context.requestOutputs(std::move(out_specs));
-  acti_func.executeInPlace(context.executeInPlace());
+  acti_func.setInPlace(context.getInPlace());
 }
 
 void ActivationLayer::forwarding(RunLayerContext &context, bool training) {
diff --git a/nntrainer/layers/cl_layers/reshape_cl.cpp b/nntrainer/layers/cl_layers/reshape_cl.cpp
index 7698966484..cd03c1bb88 100644
--- a/nntrainer/layers/cl_layers/reshape_cl.cpp
+++ b/nntrainer/layers/cl_layers/reshape_cl.cpp
@@ -75,7 +75,7 @@ void ReshapeLayerCl::finalize(InitLayerContext &context) {
 }
 
 void ReshapeLayerCl::forwarding(RunLayerContext &context, bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
     const Tensor &input = context.getInput(SINGLE_INOUT_IDX);
     ReshapeProcess(input, output);
@@ -85,7 +85,7 @@ void ReshapeLayerCl::forwarding(RunLayerContext &context, bool training) {
 void ReshapeLayerCl::incremental_forwarding(RunLayerContext &context,
                                             unsigned int from, unsigned int to,
                                             bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
     const Tensor &input = context.getInput(SINGLE_INOUT_IDX);
     if (from) {
@@ -294,7 +294,7 @@ void ReshapeLayerCl::copy_cl(const float *input, float *res,
 }
 
 void ReshapeLayerCl::calcDerivative(RunLayerContext &context) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     context.getOutgoingDerivative(SINGLE_INOUT_IDX)
       .copyData(context.getIncomingDerivative(SINGLE_INOUT_IDX));
   }
diff --git a/nntrainer/layers/identity_layer.cpp b/nntrainer/layers/identity_layer.cpp
index c267cad114..3000757c00 100644
--- a/nntrainer/layers/identity_layer.cpp
+++ b/nntrainer/layers/identity_layer.cpp
@@ -26,7 +26,7 @@ void IdentityLayer::finalize(InitLayerContext &context) {
 }
 
 void IdentityLayer::forwarding(RunLayerContext &context, bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     for (unsigned int i = 0, sz = context.getNumInputs(); i < sz; ++i) {
       Tensor &input_ = context.getInput(i);
       Tensor &hidden_ = context.getOutput(i);
@@ -36,7 +36,7 @@ void IdentityLayer::forwarding(RunLayerContext &context, bool training) {
 }
 
 void IdentityLayer::calcDerivative(RunLayerContext &context) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     for (unsigned int i = 0, sz = context.getNumInputs(); i < sz; ++i) {
       const Tensor &d_hidden = context.getIncomingDerivative(i);
       Tensor &d_input = context.getOutgoingDerivative(i);
diff --git a/nntrainer/layers/input_layer.cpp b/nntrainer/layers/input_layer.cpp
index 2a02e80038..c84fb450c2 100644
--- a/nntrainer/layers/input_layer.cpp
+++ b/nntrainer/layers/input_layer.cpp
@@ -46,7 +46,7 @@ void InputLayer::setProperty(const std::vector<std::string> &values) {
 
 void InputLayer::forwarding(RunLayerContext &context, bool training) {
   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
     hidden_.copyData(input_);
   }
diff --git a/nntrainer/layers/layer_context.h b/nntrainer/layers/layer_context.h
index 50e042e045..8e256027cc 100644
--- a/nntrainer/layers/layer_context.h
+++ b/nntrainer/layers/layer_context.h
@@ -383,7 +383,7 @@ class InitLayerContext {
    *
    * @return true if in-place, else false
    */
-  bool executeInPlace() const { return in_place; }
+  bool getInPlace() const { return in_place; }
 
   /**
    * @brief   get Initial value of Loss_Scale. This is set to RunLayerContext
@@ -889,7 +889,7 @@ class RunLayerContext {
    *
    * @return true if in-place, else false
    */
-  bool executeInPlace() const { return in_place; }
+  bool getInPlace() const { return in_place; }
 
   /**
    * @brief   get layer weights
diff --git a/nntrainer/layers/layer_node.cpp b/nntrainer/layers/layer_node.cpp
index 89728d3262..eefce7f5c7 100644
--- a/nntrainer/layers/layer_node.cpp
+++ b/nntrainer/layers/layer_node.cpp
@@ -186,7 +186,7 @@ createLayerNode(std::unique_ptr<nntrainer::Layer> &&layer,
 
 LayerNode::LayerNode(std::unique_ptr<nntrainer::Layer> &&l) :
   layer(std::move(l)),
-  inplace(InPlace::NONE),
+  inplace_type(InPlaceType::NONE),
   needs_calc_derivative(false),
   needs_calc_gradient(false),
 
@@ -689,8 +689,8 @@ InitLayerContext LayerNode::finalize(const std::vector<TensorDim> &input_dims,
   }
 
   auto context = InitLayerContext(
-    actual_input_dims, out_info, executeInPlace() != InPlace::NONE, getName(),
-    scope, max_norm, tensor_type, loss_scale, mode);
+    actual_input_dims, out_info, getInPlaceType() != InPlaceType::NONE,
+    getName(), scope, max_norm, tensor_type, loss_scale, mode);
 
   layer->finalize(context);
 
@@ -782,8 +782,8 @@ LayerNode::refinalize(const std::vector<TensorDim> &input_dims) {
   }
 
   auto context = InitLayerContext(actual_input_dims, out_info,
-                                  executeInPlace() != InPlace::NONE, getName(),
-                                  scope, max_norm);
+                                  getInPlaceType() != InPlaceType::NONE,
+                                  getName(), scope, max_norm);
 
   layer->finalize(context);
 
@@ -814,7 +814,7 @@ void LayerNode::forwarding(bool training) {
 
   PROFILE_TIME_START(forward_event_key);
   if (reStoreData()) {
-    if (executeInPlace() == InPlace::NONE) {
+    if (getInPlaceType() == InPlaceType::NONE) {
       for (unsigned int i = 0; i < run_context->getNumOutputs(); ++i) {
         run_context->getOutput(i).setValue(0);
         if (!run_context->getOutputGradUnsafe(i).isValid())
@@ -947,7 +947,7 @@ void LayerNode::configureRunContext(const std::vector<Weight *> &weights,
                                     const std::vector<Var_Grad *> &tensors,
                                     float loss_scale) {
   run_context = std::make_unique<RunLayerContext>(
-    getName(), getTrainable(), 0.0f, executeInPlace() != InPlace::NONE,
+    getName(), getTrainable(), 0.0f, getInPlaceType() != InPlaceType::NONE,
     loss_scale, false, weights, inputs, outputs, tensors);
 }
 
diff --git a/nntrainer/layers/layer_node.h b/nntrainer/layers/layer_node.h
index 6b93441d4f..ee70001b46 100644
--- a/nntrainer/layers/layer_node.h
+++ b/nntrainer/layers/layer_node.h
@@ -59,7 +59,7 @@ class LossScaleForMixed;
  * @brief Enum class for the various types of inplace modes supported by layer
  *
  */
-enum class InPlace {
+enum class InPlaceType {
   NONE,           /**< layer is not inplace */
   RESTRICTING,    /**< layer is in-place and does place restriction on layers
                     ahead of it to be in-place */
@@ -370,19 +370,19 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
    *
    * @param val in place state for the layer
    */
-  void executeInPlace(InPlace val) {
-    if (val != InPlace::NONE && !supportInPlace())
+  void setInPlaceType(InPlaceType val) {
+    if (val != InPlaceType::NONE && !supportInPlace())
       throw std::runtime_error("Error setting layer to work in-place");
 
-    inplace = val;
+    inplace_type = val;
   }
 
   /**
    * @brief   Get if the layer is going to execute in-place
    *
-   * @return InPlace type for the layer
+   * @return InPlaceType for the layer
    */
-  InPlace executeInPlace() const { return inplace; }
+  InPlaceType getInPlaceType() const { return inplace_type; }
 
   /**
    * @brief  check if this layer requires label to be passed
@@ -967,8 +967,8 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
   std::unique_ptr<nntrainer::Layer>
     layer; /**< The actual object in the graph node */
 
-  InPlace
-    inplace; /**< store if the current layer is going to operate in-place */
+  InPlaceType inplace_type; /**< store the in-place mode the current layer is
+                               going to operate with */
   bool needs_calc_derivative; /**< cache if this layer needs to do
                                  calcDerivative */
   bool needs_calc_gradient; /**< cache if this layer needs to do calcGradient */
diff --git a/nntrainer/layers/multiout_layer.cpp b/nntrainer/layers/multiout_layer.cpp
index a9c714e549..76264df361 100644
--- a/nntrainer/layers/multiout_layer.cpp
+++ b/nntrainer/layers/multiout_layer.cpp
@@ -31,7 +31,7 @@ void MultiOutLayer::finalize(InitLayerContext &context) {
 }
 
 void MultiOutLayer::forwarding(RunLayerContext &context, bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     const Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
     for (unsigned int idx = 0; idx < context.getNumOutputs(); ++idx) {
       context.getOutput(idx).fill(input_);
@@ -42,7 +42,7 @@ void MultiOutLayer::forwarding(RunLayerContext &context, bool training) {
 void MultiOutLayer::incremental_forwarding(RunLayerContext &context,
                                            unsigned int from, unsigned int to,
                                            bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     if (from) {
       NNTR_THROW_IF(to - from != 1, std::invalid_argument)
         << "incremental step size is not 1";
diff --git a/nntrainer/layers/reshape_layer.cpp b/nntrainer/layers/reshape_layer.cpp
index af4c254475..572b9a5da5 100644
--- a/nntrainer/layers/reshape_layer.cpp
+++ b/nntrainer/layers/reshape_layer.cpp
@@ -47,14 +47,14 @@ void ReshapeLayer::finalize(InitLayerContext &context) {
 }
 
 void ReshapeLayer::forwarding(RunLayerContext &context, bool training) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     context.getOutput(SINGLE_INOUT_IDX)
       .copyData(context.getInput(SINGLE_INOUT_IDX));
   }
 }
 
 void ReshapeLayer::calcDerivative(RunLayerContext &context) {
-  if (!context.executeInPlace()) {
+  if (!context.getInPlace()) {
     context.getOutgoingDerivative(SINGLE_INOUT_IDX)
       .copyData(context.getIncomingDerivative(SINGLE_INOUT_IDX));
   }
diff --git a/nntrainer/layers/time_dist.cpp b/nntrainer/layers/time_dist.cpp
index 0b32d77af1..b3f2814aa9 100644
--- a/nntrainer/layers/time_dist.cpp
+++ b/nntrainer/layers/time_dist.cpp
@@ -123,7 +123,7 @@ void TimeDistLayer::finalize(InitLayerContext &context) {
    */
   TensorDim dist_dim = input_dim;
   dist_dim.height(1);
-  InitLayerContext dist_context({dist_dim}, {}, context.executeInPlace(),
+  InitLayerContext dist_context({dist_dim}, {}, context.getInPlace(),
                                 context.getName());
 
   // During forwarding and backwarding, it set the input and output buffer of
@@ -255,7 +255,7 @@ void TimeDistLayer::forwarding(RunLayerContext &context, bool training) {
 
     RunLayerContext dist_context(
       context.getName(), context.getTrainable(), context.getLoss(),
-      context.executeInPlace(), context.getLossScale(), false,
+      context.getInPlace(), context.getLossScale(), false,
       getWeightsForContext(), {&in_var}, {&out_var}, getTensorsForContext());
 
     dist_layer->forwarding(dist_context, training);
@@ -302,7 +302,7 @@ void TimeDistLayer::calcDerivative(RunLayerContext &context) {
 
     RunLayerContext dist_context(
       context.getName(), context.getTrainable(), context.getLoss(),
-      context.executeInPlace(), context.getLossScale(), false,
+      context.getInPlace(), context.getLossScale(), false,
       getWeightsForContext(), {&in_var}, {&out_var}, getTensorsForContext());
 
     dist_layer->calcDerivative(dist_context);
@@ -353,7 +353,7 @@ void TimeDistLayer::calcGradient(RunLayerContext &context) {
 
     RunLayerContext dist_context(
       context.getName(), context.getTrainable(), context.getLoss(),
-      context.executeInPlace(), context.getLossScale(), false,
+      context.getInPlace(), context.getLossScale(), false,
       getWeightsForContext(), {&in_var}, {&out_var}, getTensorsForContext());
 
     dist_layer->calcGradient(dist_context);
@@ -395,7 +395,7 @@ void TimeDistLayer::setBatch(RunLayerContext &context, unsigned int batch) {
 
     RunLayerContext dist_context(
       context.getName(), context.getTrainable(), context.getLoss(),
-      context.executeInPlace(), context.getLossScale(), false,
+      context.getInPlace(), context.getLossScale(), false,
       getWeightsForContext(), {&in_var}, {&out_var}, getTensorsForContext());
 
     dist_layer->setBatch(dist_context, batch);
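
For reference, a minimal sketch of the renamed accessor in a custom layer's forwarding path, mirroring the pattern this patch applies in mae_loss.cpp and input_layer.cpp above; the class name MyPassThroughLayer is hypothetical and not part of this change:

// Illustrative only: hypothetical pass-through layer using the renamed
// context accessor. getInPlace(), getInput(), getOutput(), copyData() and
// SINGLE_INOUT_IDX are taken from the hunks above.
void MyPassThroughLayer::forwarding(nntrainer::RunLayerContext &context,
                                    bool training) {
  // Copy only when the graph did not schedule this node to run in-place;
  // when in-place, input and output already share the same tensor memory.
  if (!context.getInPlace()) {
    nntrainer::Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
    nntrainer::Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
    hidden_.copyData(input_);
  }
}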