[Graph] rename variable & function names for clarity @open sesame 11/07 21:30 #2789

Merged
merged 1 commit on Nov 11, 2024
2 changes: 1 addition & 1 deletion Applications/Custom/mae_loss.cpp
@@ -27,7 +27,7 @@ void MaeLossLayer::forwarding(nntrainer::RunLayerContext &context,
nntrainer::Tensor &predicted = context.getInput(SINGLE_INOUT_IDX);
nntrainer::Tensor &output = context.getOutput(SINGLE_INOUT_IDX);

if (!context.executeInPlace())
if (!context.getInPlace())
output.fill(predicted);
}

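The hunk above is representative of most of this PR: the boolean query `RunLayerContext::executeInPlace()` is renamed to `getInPlace()` with unchanged semantics. As a reading aid, here is a minimal sketch of the guard pattern the touched layers share, assuming the nntrainer layer headers; the layer class itself is hypothetical, not part of the PR:

// Sketch only: copy input to output unless the graph has mapped both tensors
// to the same buffer (in-place execution). Mirrors the guard in the diff above.
void HypotheticalCopyLayer::forwarding(nntrainer::RunLayerContext &context,
                                       bool training) {
  nntrainer::Tensor &input = context.getInput(SINGLE_INOUT_IDX);
  nntrainer::Tensor &output = context.getOutput(SINGLE_INOUT_IDX);

  if (!context.getInPlace()) // renamed from executeInPlace(); same bool query
    output.copyData(input);  // only copy when the buffers are distinct
}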
35 changes: 18 additions & 17 deletions nntrainer/graph/network_graph.cpp
@@ -640,16 +640,16 @@ void NetworkGraph::addLayer(std::shared_ptr<LayerNode> layer) {
graph.addNode(layer);
}

InPlace
InPlaceType
NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {

if (!lnode->supportInPlace()) {
return InPlace::NONE;
return InPlaceType::NONE;
}

if (lnode->getType() == InputLayer::type &&
!istrequal(getTensorType()[2], "FP32")) {
return InPlace::NONE;
return InPlaceType::NONE;
}

/** layers which behave as a no-op - flatten */
@@ -692,11 +692,12 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
for (auto i = 0u, num_node = lnode->getNumInputConnections(); i < num_node;
++i) {
const auto &input_name = lnode->getInputConnectionName(i);
if (getLayerNode(input_name)->executeInPlace() == InPlace::RESTRICTING)
return InPlace::RESTRICTING;
if (getLayerNode(input_name)->getInPlaceType() ==
InPlaceType::RESTRICTING)
return InPlaceType::RESTRICTING;
}

return InPlace::NON_RESTRICTING;
return InPlaceType::NON_RESTRICTING;
}

/**
@@ -710,7 +711,7 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
* inplace, and then its restricting mode.
*/
if (no_op_shared(lnode))
return InPlace::RESTRICTING;
return InPlaceType::RESTRICTING;

/**
* @note Conditions to decide if this layer node can be in-place:
@@ -740,9 +741,9 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
lnode->getType() == LayerNormalizationLayer::type) {
for (auto i = 0u, num_node = lnode->getNumInputConnections(); i < num_node;
++i) {
if (getLayerNode(lnode->getInputConnectionName(i))->executeInPlace() ==
InPlace::RESTRICTING)
return InPlace::NONE;
if (getLayerNode(lnode->getInputConnectionName(i))->getInPlaceType() ==
InPlaceType::RESTRICTING)
return InPlaceType::NONE;
}

/**
@@ -751,9 +752,9 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
* layer.
*/
if (io_independent_backwarding(lnode))
return InPlace::NON_RESTRICTING;
return InPlaceType::NON_RESTRICTING;

return InPlace::RESTRICTING;
return InPlaceType::RESTRICTING;
}

/**
@@ -762,17 +763,17 @@ NetworkGraph::canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode) {
*/
if (lnode->getInputConnections().empty()) {
if (!istrequal(getTensorType()[2], "FP32"))
return InPlace::NONE;
return InPlaceType::NONE;
}

return InPlace::NONE;
return InPlaceType::NONE;
}

void NetworkGraph::inPlaceOptimize() {
if (optimize_memory) {
for (unsigned int idx = 0; idx < graph.size(); ++idx) {
auto const &lnode = getSortedLayerNode(idx);
lnode->executeInPlace(canExecuteInPlace(lnode));
lnode->setInPlaceType(canExecuteInPlace(lnode));
}
}
}
@@ -842,7 +843,7 @@ NetworkGraph::finalizeContext(const std::shared_ptr<LayerNode> &lnode,
/// @note try move inplace control to finalize
bool shared_var = false, shared_grad = false;

if (lnode->executeInPlace() != InPlace::NONE && lnode->supportInPlace()) {
if (lnode->getInPlaceType() != InPlaceType::NONE && lnode->supportInPlace()) {
setInplaceSharedMemoryConfigByLayer(lnode, shared_var, shared_grad);
for (unsigned int i = 0; i < out_specs.size(); ++i) {
auto &s = out_specs.at(i);
@@ -998,7 +999,7 @@ NetworkGraph::refinalizeContext(const std::shared_ptr<LayerNode> &lnode,
auto out_specs = init_context.getOutSpecs();
/// @note try move inplace control to finalize
bool shared_var = false, shared_grad = false;
if (lnode->executeInPlace() != InPlace::NONE) {
if (lnode->getInPlaceType() != InPlaceType::NONE) {
setInplaceSharedMemoryConfigByLayer(lnode, shared_var, shared_grad);
for (unsigned int i = 0; i < out_specs.size(); ++i) {
auto &s = out_specs.at(i);
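For reviewers skimming the rename: the decision logic in `canExecuteInPlace()` is untouched; only the type and accessor names change. The input-walking loops above encode a propagation rule that can be restated as the following sketch; the free-standing helper is hypothetical and exists only to summarize the rule under the new names:

// Hypothetical restatement: a shareable/no-op node stays in-place, but becomes
// RESTRICTING if any of its producers is RESTRICTING; otherwise it is
// NON_RESTRICTING and places no constraint on downstream layers.
InPlaceType propagateFromInputs(NetworkGraph &graph,
                                const std::shared_ptr<LayerNode> &lnode) {
  for (auto i = 0u, n = lnode->getNumInputConnections(); i < n; ++i) {
    const auto &input_name = lnode->getInputConnectionName(i);
    if (graph.getLayerNode(input_name)->getInPlaceType() ==
        InPlaceType::RESTRICTING)
      return InPlaceType::RESTRICTING;
  }
  return InPlaceType::NON_RESTRICTING;
}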
2 changes: 1 addition & 1 deletion nntrainer/graph/network_graph.h
@@ -609,7 +609,7 @@ class NetworkGraph {
*
* @return the mode of inplace for the layer
*/
InPlace canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode);
InPlaceType canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode);

/**
* @brief compute optimized backward end. This function calculated the valid
2 changes: 1 addition & 1 deletion nntrainer/layers/acti_func.h
@@ -760,7 +760,7 @@ class ActiFunc {
*
* @param val True if execute in-place, else false
*/
void executeInPlace(bool val) {
void setInPlace(bool val) {
if (val && !supportInPlace())
throw std::runtime_error(
"Error setting activation layer to work in-place");
5 changes: 2 additions & 3 deletions nntrainer/layers/activation_layer.cpp
@@ -32,8 +32,7 @@

namespace nntrainer {
ActivationLayer::ActivationLayer() :
Layer(),
activation_props(new PropTypes(props::Activation())) {
Layer(), activation_props(new PropTypes(props::Activation())) {
acti_func.setActiFunc(ActivationType::ACT_NONE);
}

@@ -65,7 +64,7 @@ void ActivationLayer::finalize(InitLayerContext &context) {
InitLayerContext::outSpec(context.getInputDimensions()[0], "out",
TensorLifespan::FORWARD_DERIV_LIFESPAN));
context.requestOutputs(std::move(out_specs));
acti_func.executeInPlace(context.executeInPlace());
acti_func.setInPlace(context.getInPlace());
}

void ActivationLayer::forwarding(RunLayerContext &context, bool training) {
6 changes: 3 additions & 3 deletions nntrainer/layers/cl_layers/reshape_cl.cpp
@@ -75,7 +75,7 @@ void ReshapeLayerCl::finalize(InitLayerContext &context) {
}

void ReshapeLayerCl::forwarding(RunLayerContext &context, bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
const Tensor &input = context.getInput(SINGLE_INOUT_IDX);
ReshapeProcess(input, output);
@@ -85,7 +85,7 @@ void ReshapeLayerCl::forwarding(RunLayerContext &context, bool training) {
void ReshapeLayerCl::incremental_forwarding(RunLayerContext &context,
unsigned int from, unsigned int to,
bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
const Tensor &input = context.getInput(SINGLE_INOUT_IDX);
if (from) {
@@ -294,7 +294,7 @@ void ReshapeLayerCl::copy_cl(const float *input, float *res,
}

void ReshapeLayerCl::calcDerivative(RunLayerContext &context) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
context.getOutgoingDerivative(SINGLE_INOUT_IDX)
.copyData(context.getIncomingDerivative(SINGLE_INOUT_IDX));
}
4 changes: 2 additions & 2 deletions nntrainer/layers/identity_layer.cpp
@@ -26,7 +26,7 @@ void IdentityLayer::finalize(InitLayerContext &context) {
}

void IdentityLayer::forwarding(RunLayerContext &context, bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
for (unsigned int i = 0, sz = context.getNumInputs(); i < sz; ++i) {
Tensor &input_ = context.getInput(i);
Tensor &hidden_ = context.getOutput(i);
@@ -36,7 +36,7 @@ void IdentityLayer::forwarding(RunLayerContext &context, bool training) {
}

void IdentityLayer::calcDerivative(RunLayerContext &context) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
for (unsigned int i = 0, sz = context.getNumInputs(); i < sz; ++i) {
const Tensor &d_hidden = context.getIncomingDerivative(i);
Tensor &d_input = context.getOutgoingDerivative(i);
2 changes: 1 addition & 1 deletion nntrainer/layers/input_layer.cpp
@@ -46,7 +46,7 @@ void InputLayer::setProperty(const std::vector<std::string> &values) {

void InputLayer::forwarding(RunLayerContext &context, bool training) {
Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
hidden_.copyData(input_);
}
4 changes: 2 additions & 2 deletions nntrainer/layers/layer_context.h
@@ -383,7 +383,7 @@ class InitLayerContext {
*
* @return true if in-place, else false
*/
bool executeInPlace() const { return in_place; }
bool getInPlace() const { return in_place; }

/**
* @brief get Initial value of Loss_Scale. This is set to RunLayerContext
@@ -889,7 +889,7 @@ class RunLayerContext {
*
* @return true if in-place, else false
*/
bool executeInPlace() const { return in_place; }
bool getInPlace() const { return in_place; }

/**
* @brief get layer weights
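Both context classes keep a plain `bool in_place`; only the accessor spelling changes, and it changes to the same name in both, so code that is generic over the context type can query the flag uniformly. A small illustration; the helper is hypothetical and not part of this PR:

// Works for both InitLayerContext and RunLayerContext after the rename,
// since both now expose bool getInPlace() const.
template <typename ContextT>
bool runsInPlace(const ContextT &context) {
  return context.getInPlace();
}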
14 changes: 7 additions & 7 deletions nntrainer/layers/layer_node.cpp
@@ -186,7 +186,7 @@ createLayerNode(std::unique_ptr<nntrainer::Layer> &&layer,

LayerNode::LayerNode(std::unique_ptr<nntrainer::Layer> &&l) :
layer(std::move(l)),
inplace(InPlace::NONE),
inplace_type(InPlaceType::NONE),
needs_calc_derivative(false),
needs_calc_gradient(false),

@@ -689,8 +689,8 @@ InitLayerContext LayerNode::finalize(const std::vector<TensorDim> &input_dims,
}

auto context = InitLayerContext(
actual_input_dims, out_info, executeInPlace() != InPlace::NONE, getName(),
scope, max_norm, tensor_type, loss_scale, mode);
actual_input_dims, out_info, getInPlaceType() != InPlaceType::NONE,
getName(), scope, max_norm, tensor_type, loss_scale, mode);

layer->finalize(context);

@@ -782,8 +782,8 @@ LayerNode::refinalize(const std::vector<TensorDim> &input_dims) {
}

auto context = InitLayerContext(actual_input_dims, out_info,
executeInPlace() != InPlace::NONE, getName(),
scope, max_norm);
getInPlaceType() != InPlaceType::NONE,
getName(), scope, max_norm);

layer->finalize(context);

@@ -814,7 +814,7 @@ void LayerNode::forwarding(bool training) {

PROFILE_TIME_START(forward_event_key);
if (reStoreData()) {
if (executeInPlace() == InPlace::NONE) {
if (getInPlaceType() == InPlaceType::NONE) {
for (unsigned int i = 0; i < run_context->getNumOutputs(); ++i) {
run_context->getOutput(i).setValue(0);
if (!run_context->getOutputGradUnsafe(i).isValid())
@@ -947,7 +947,7 @@ void LayerNode::configureRunContext(const std::vector<Weight *> &weights,
const std::vector<Var_Grad *> &tensors,
float loss_scale) {
run_context = std::make_unique<RunLayerContext>(
getName(), getTrainable(), 0.0f, executeInPlace() != InPlace::NONE,
getName(), getTrainable(), 0.0f, getInPlaceType() != InPlaceType::NONE,
loss_scale, false, weights, inputs, outputs, tensors);
}

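The context constructions above also show how the two namings relate: the node keeps the three-valued `InPlaceType`, and it collapses to the single bool that `InitLayerContext`/`RunLayerContext` carry. Restated as a sketch of that relationship (the named local variable is illustrative, not in the diff):

// Inside LayerNode::finalize() (sketch; mirrors the call shown above):
// three-valued node mode  ->  boolean context flag
bool runs_in_place = getInPlaceType() != InPlaceType::NONE;
auto context = InitLayerContext(actual_input_dims, out_info, runs_in_place,
                                getName(), scope, max_norm, tensor_type,
                                loss_scale, mode);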
16 changes: 8 additions & 8 deletions nntrainer/layers/layer_node.h
@@ -59,7 +59,7 @@ class LossScaleForMixed;
* @brief Enum class for the various types of inplace modes supported by layer
*
*/
enum class InPlace {
enum class InPlaceType {
NONE, /**< layer is not inplace */
RESTRICTING, /**< layer is in-place and does place restriction on layers
ahead of it to be in-place */
@@ -370,19 +370,19 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
*
* @param val in place state for the layer
*/
void executeInPlace(InPlace val) {
if (val != InPlace::NONE && !supportInPlace())
void setInPlaceType(InPlaceType val) {
if (val != InPlaceType::NONE && !supportInPlace())
throw std::runtime_error("Error setting layer to work in-place");

inplace = val;
inplace_type = val;
}

/**
* @brief Get if the layer is going to execute in-place
*
* @return InPlace type for the layer
* @return Inplace type for the layer
*/
InPlace executeInPlace() const { return inplace; }
InPlaceType getInPlaceType() const { return inplace_type; }

/**
* @brief check if this layer requires label to be passed
@@ -967,8 +967,8 @@ class LayerNode final : public ml::train::Layer, public GraphNode {
std::unique_ptr<nntrainer::Layer>
layer; /**< The actual object in the graph node */

InPlace
inplace; /**< store if the current layer is going to operate in-place */
InPlaceType inplace_type; /**< store if the current layer is going to operate
in-place */
bool needs_calc_derivative; /**< cache if this layer needs to do
calcDerivative */
bool needs_calc_gradient; /**< cache if this layer needs to do calcGradient */
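Put together, the node-level API after this PR reads roughly as below. The `NON_RESTRICTING` enumerator is cut off by the diff view but is referenced throughout network_graph.cpp, so it is filled in here; treat this as a sketch, not the verbatim header:

// Sketch of layer_node.h after the rename.
enum class InPlaceType {
  NONE,           /**< layer is not inplace */
  RESTRICTING,    /**< layer is in-place and places restrictions on layers
                       ahead of it to be in-place */
  NON_RESTRICTING /**< layer is in-place and places no such restriction */
};

class LayerNode final : public ml::train::Layer, public GraphNode {
public:
  void setInPlaceType(InPlaceType val); // was executeInPlace(InPlace val)
  InPlaceType getInPlaceType() const;   // was executeInPlace()

private:
  InPlaceType inplace_type;             // was InPlace inplace
};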
4 changes: 2 additions & 2 deletions nntrainer/layers/multiout_layer.cpp
@@ -31,7 +31,7 @@ void MultiOutLayer::finalize(InitLayerContext &context) {
}

void MultiOutLayer::forwarding(RunLayerContext &context, bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
const Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
for (unsigned int idx = 0; idx < context.getNumOutputs(); ++idx) {
context.getOutput(idx).fill(input_);
@@ -42,7 +42,7 @@ void MultiOutLayer::incremental_forwarding(RunLayerContext &context,
void MultiOutLayer::incremental_forwarding(RunLayerContext &context,
unsigned int from, unsigned int to,
bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
if (from) {
NNTR_THROW_IF(to - from != 1, std::invalid_argument)
<< "incremental step size is not 1";
4 changes: 2 additions & 2 deletions nntrainer/layers/reshape_layer.cpp
@@ -47,14 +47,14 @@ void ReshapeLayer::finalize(InitLayerContext &context) {
}

void ReshapeLayer::forwarding(RunLayerContext &context, bool training) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
context.getOutput(SINGLE_INOUT_IDX)
.copyData(context.getInput(SINGLE_INOUT_IDX));
}
}

void ReshapeLayer::calcDerivative(RunLayerContext &context) {
if (!context.executeInPlace()) {
if (!context.getInPlace()) {
context.getOutgoingDerivative(SINGLE_INOUT_IDX)
.copyData(context.getIncomingDerivative(SINGLE_INOUT_IDX));
}