Make .clang-format compatible with version 6
Clang-format 6 is widely used. However,
`AllowAllConstructorInitializersOnNextLine`, added in #203, is only
supported from clang-format 9.

This PR reverts the use of `AllowAllConstructorInitializersOnNextLine`
while keeping a similar linting style.
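
For illustration, this is roughly how constructor initializer lists change throughout the diff below (a minimal sketch; the `Example` classes are hypothetical and exact output can vary slightly between clang-format versions):

```cpp
// Layout enforced before this commit (the configuration also set
// AllowAllConstructorInitializersOnNextLine, which requires clang-format >= 9):
class ExampleOld {
public:
  ExampleOld()
    : first_member(0),
      second_member(0),
      third_member(0) {}

private:
  int first_member, second_member, third_member;
};

// Layout enforced after this commit, using
// BreakConstructorInitializers: AfterColon (available in clang-format 6):
class ExampleNew {
public:
  ExampleNew() :
    first_member(0),
    second_member(0),
    third_member(0) {}

private:
  int first_member, second_member, third_member;
};
```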

**Self evaluation:**
1. Build test: [X] Passed [ ] Failed [ ] Skipped
2. Run test: [X] Passed [ ] Failed [ ] Skipped

Signed-off-by: Jihoon Lee <[email protected]>
zhoonit authored and jijoongmoon committed Jun 23, 2020
1 parent 260b691 commit a166b11
Showing 18 changed files with 117 additions and 113 deletions.
2 changes: 1 addition & 1 deletion .clang-format
@@ -34,12 +34,12 @@ BraceWrapping:
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: AfterColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
AllowAllConstructorInitializersOnNextLine: true
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
13 changes: 6 additions & 7 deletions nntrainer/include/activation_layer.h
@@ -25,7 +25,6 @@
#include <vector>

namespace nntrainer {


/**
* @class Activation Layer
@@ -37,7 +36,7 @@ class ActivationLayer : public Layer {
/**
* @brief Constructor of Activation Layer
*/
ActivationLayer() : Layer(){ this->type = LAYER_ACTIVATION; };
ActivationLayer() : Layer() { this->type = LAYER_ACTIVATION; };

/**
* @brief Destructor of Activation Layer
@@ -57,13 +56,13 @@ class ActivationLayer : public Layer {
* @brief Read Activation layer params. This is essentially noops for now.
* @param[in] file input stream file
*/
void read(std::ifstream &file) { /* noop */ };
void read(std::ifstream &file){/* noop */};

/**
* @brief Save Activation layer params. This is essentially noops for now.
* @param[in] file output stream file
*/
void save(std::ofstream &file) { /* noop */ };
void save(std::ofstream &file){/* noop */};

/**
* @brief forward propagation with input
@@ -109,9 +108,9 @@ class ActivationLayer : public Layer {
* activation_prime_function to be used
* @retval #ML_ERROR_NONE when successful
*/
int
setActivation(std::function<float(float const)> const &activation_fn,
std::function<float(float const)> const &activation_prime_fn);
int setActivation(
std::function<float(float const)> const &activation_fn,
std::function<float(float const)> const &activation_prime_fn);

/**
* @brief setActivation by preset actiType
14 changes: 7 additions & 7 deletions nntrainer/include/databuffer.h
@@ -116,13 +116,13 @@ class DataBuffer {
* @brief Create Buffer
* @retval DataBuffer
*/
DataBuffer()
: train_running(),
val_running(),
test_running(),
train_thread(),
val_thread(),
test_thread() {
DataBuffer() :
train_running(),
val_running(),
test_running(),
train_thread(),
val_thread(),
test_thread() {
SET_VALIDATION(false);
class_num = 0;
cur_train_bufsize = 0;
30 changes: 16 additions & 14 deletions nntrainer/include/layer.h
@@ -36,8 +36,10 @@ namespace nntrainer {
* @brief Enumeration of cost(loss) function type
* 0. MSR ( Mean Squared Roots )
* 1. ENTROPY ( Cross Entropy )
* 2. ENTROPY_SIGMOID (Cross Entropy amalgamated with sigmoid for stability)
* 2. ENTROPY_SOFTMAX (Cross Entropy amalgamated with softmax for stability)
* 2. ENTROPY_SIGMOID (Cross Entropy amalgamated with sigmoid for
* stability)
* 2. ENTROPY_SOFTMAX (Cross Entropy amalgamated with softmax for
* stability)
* 2. Unknown
*/
typedef enum {
@@ -115,18 +117,18 @@
*/
class Layer {
public:
Layer()
: last_layer(false),
init_zero(false),
type(LAYER_UNKNOWN),
activation(NULL),
activation_prime(NULL),
loss(0.0),
cost(COST_UNKNOWN),
activation_type(ACT_UNKNOWN),
bn_follow(false),
weight_decay(),
weight_ini_type(WEIGHT_UNKNOWN) {}
Layer() :
last_layer(false),
init_zero(false),
type(LAYER_UNKNOWN),
activation(NULL),
activation_prime(NULL),
loss(0.0),
cost(COST_UNKNOWN),
activation_type(ACT_UNKNOWN),
bn_follow(false),
weight_decay(),
weight_ini_type(WEIGHT_UNKNOWN) {}

/**
* @brief Destructor of Layer Class
9 changes: 5 additions & 4 deletions nntrainer/include/lazy_tensor.h
@@ -20,10 +20,11 @@

#define FWD(...) std::forward<decltype(__VA_ARGS__)>(__VA_ARGS__)

#define _LIFT(X) \
[](nntrainer::Tensor &t, auto &&... args) noexcept( \
noexcept(t.X(FWD(args)...))) -> decltype(t.X(FWD(args)...)) { \
return t.X(FWD(args)...); \
#define _LIFT(X) \
[](nntrainer::Tensor & t, \
auto &&... args) noexcept(noexcept(t.X(FWD(args)...))) \
->decltype(t.X(FWD(args)...)) { \
return t.X(FWD(args)...); \
}

namespace nntrainer {
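
For context, a minimal usage sketch of the `_LIFT` macro reformatted above (not part of this commit; it assumes `nntrainer::Tensor` exposes an `add` overload taking a float, as used elsewhere in this diff):

```cpp
#include <lazy_tensor.h> // defines FWD and _LIFT
#include <tensor.h>

// _LIFT(add) expands to a generic lambda that forwards its arguments to
// t.add(...), preserving the return type and noexcept specification.
nntrainer::Tensor add_one(nntrainer::Tensor &t) {
  auto lifted_add = _LIFT(add);
  return lifted_add(t, 1.0f); // equivalent to t.add(1.0f)
}
```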
2 changes: 1 addition & 1 deletion nntrainer/include/loss_layer.h
@@ -104,7 +104,7 @@ class LossLayer : public Layer {
* @brief update loss
* @param[in] l Tensor data to calculate
*/
void updateLoss(const Tensor& l);
void updateLoss(const Tensor &l);
};
} // namespace nntrainer

3 changes: 2 additions & 1 deletion nntrainer/include/neuralnet.h
@@ -336,7 +336,8 @@ class NeuralNetwork {
std::shared_ptr<DataBuffer> data_buffer;

/**
* @brief Continue train from the previous state of optimizer and iterations
* @brief Continue train from the previous state of optimizer and
* iterations
*/
bool continue_train;

14 changes: 10 additions & 4 deletions nntrainer/include/optimizer.h
@@ -64,10 +64,16 @@ typedef struct _OptParam {
double epsilon;
float decay_rate;
float decay_steps;
bool continue_train; /** Continue training with previous tensors for adam */

_OptParam() : learning_rate(0.0), beta1(0.0), beta2(0.0), epsilon(0.0),
decay_rate(0.0), decay_steps(0.0), continue_train(false) {}
bool continue_train; /** Continue training with previous tensors for adam */

_OptParam() :
learning_rate(0.0),
beta1(0.0),
beta2(0.0),
epsilon(0.0),
decay_rate(0.0),
decay_steps(0.0),
continue_train(false) {}
} OptParam;

class Optimizer {
2 changes: 0 additions & 2 deletions nntrainer/include/pooling2d_layer.h
@@ -119,7 +119,6 @@ class Pooling2DLayer : public Layer {
*/
int setProperty(std::vector<std::string> values);


/* TO DO : support keras type of padding */
enum class PaddingType {
full = 0,
@@ -143,7 +142,6 @@
* @retval Tensor outoput tensor
*/
Tensor pooling2d(unsigned int batch, Tensor in, int &status);

};

} // namespace nntrainer
5 changes: 2 additions & 3 deletions nntrainer/src/fc_layer.cpp
@@ -192,12 +192,11 @@ Tensor FullyConnectedLayer::backwarding(Tensor derivative, int iteration) {

case COST_ENTROPY:
throw std::runtime_error(
"Error: Cross Entropy not supported without softmax or sigmoid.");
"Error: Cross Entropy not supported without softmax or sigmoid.");
case COST_UNKNOWN:
/** Intended */
default:
throw std::runtime_error(
"Error: unknown cost.");
throw std::runtime_error("Error: unknown cost.");
}
}

7 changes: 3 additions & 4 deletions nntrainer/src/layer.cpp
@@ -107,16 +107,15 @@ Tensor Layer::initializeWeight(TensorDim w_dim, WeightIniType init_type,
w.setRandNormal(0, sqrt(2.0 / (dim.height())));
break;
case WEIGHT_LECUN_UNIFORM:
w.setRandUniform(-1.0 * sqrt(1.0 / dim.height()),
sqrt(1.0 / dim.height()));
w.setRandUniform(-1.0 * sqrt(1.0 / dim.height()), sqrt(1.0 / dim.height()));
break;
case WEIGHT_XAVIER_UNIFORM:
w.setRandUniform(-1.0 * sqrt(6.0 / (dim.height() + dim.width())),
sqrt(6.0 / (dim.height() + dim.width())));
sqrt(6.0 / (dim.height() + dim.width())));
break;
case WEIGHT_HE_UNIFORM:
w.setRandUniform(-1.0 * sqrt(6.0 / (dim.height())),
sqrt(6.0 / (dim.height())));
sqrt(6.0 / (dim.height())));
break;
default:
break;
18 changes: 10 additions & 8 deletions nntrainer/src/loss_layer.cpp
@@ -21,9 +21,9 @@
*
*/

#include <loss_layer.h>
#include <layer.h>
#include <lazy_tensor.h>
#include <loss_layer.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
@@ -67,12 +67,15 @@ Tensor LossLayer::forwarding(Tensor output, Tensor label, int &status) {
// @note: the output should be logit before applying sigmoid
// log(1 + exp(-abs(y))) + max(y, 0)
Tensor mid_term = y.apply(static_cast<float (*)(float)>(&std::fabs))
.multiply(-1.0).apply(static_cast<float (*)(float)>(&std::exp))
.add(1.0).apply(logFloat);
.multiply(-1.0)
.apply(static_cast<float (*)(float)>(&std::exp))
.add(1.0)
.apply(logFloat);
mid_term = mid_term.add(mid_term.apply(relu));

// loss = y * y2 - (log(1 + exp(-abs(y))) + max(y, 0))
l = y2.chain().multiply_i(y)
l = y2.chain()
.multiply_i(y)
.add_i(mid_term)
.multiply_i(-1.0 / y2.getWidth())
.run()
@@ -106,7 +109,7 @@ Tensor LossLayer::forwarding(Tensor output, Tensor label, int &status) {
return y;
}

void LossLayer::updateLoss(const Tensor& l) {
void LossLayer::updateLoss(const Tensor &l) {
float loss_sum = 0.0;
const float *data = l.getData();

@@ -117,8 +120,7 @@ void LossLayer::updateLoss(const Tensor& l) {
}

void LossLayer::copy(std::shared_ptr<Layer> l) {
std::shared_ptr<LossLayer> from =
std::static_pointer_cast<LossLayer>(l);
std::shared_ptr<LossLayer> from = std::static_pointer_cast<LossLayer>(l);
this->last_layer = from->last_layer;
this->input.copy(from->input);
this->cost = from->cost;
@@ -144,7 +146,7 @@ Tensor LossLayer::backwarding(Tensor derivative, int iteration) {
break;
case COST_ENTROPY:
throw std::runtime_error(
"Error: Cross Entropy not supported without softmax or sigmoid.");
"Error: Cross Entropy not supported without softmax or sigmoid.");
case COST_UNKNOWN:
/** intended */
default:
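
As a side note on the expression reformatted in this file, the commented formula `log(1 + exp(-abs(y))) + max(y, 0)` is the usual numerically stable form of cross entropy with sigmoid logits. A standalone sketch of the per-element identity (not part of this commit):

```cpp
#include <algorithm>
#include <cmath>

// Stable binary cross-entropy with logits for one element:
//   bce(y, t) = max(y, 0) - y * t + log(1 + exp(-|y|))
// Mathematically equal to -t*log(sigmoid(y)) - (1 - t)*log(1 - sigmoid(y)),
// but exp() never receives a large positive argument, so it cannot overflow.
float stable_bce_with_logits(float y /* logit */, float t /* target */) {
  return std::max(y, 0.0f) - y * t + std::log1p(std::exp(-std::fabs(y)));
}
```

Using `log1p` and `fabs` keeps the computation well-defined for large-magnitude logits.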
24 changes: 11 additions & 13 deletions nntrainer/src/neuralnet.cpp
@@ -93,7 +93,8 @@ std::vector<std::string> parseLayerName(std::string ll) {

NeuralNetwork::NeuralNetwork() : NeuralNetwork("") {}

NeuralNetwork::NeuralNetwork(std::string config) : batch_size(0),
NeuralNetwork::NeuralNetwork(std::string config) :
batch_size(0),
learning_rate(0.0),
decay_rate(0.0),
decay_steps(0.0),
@@ -294,26 +295,23 @@ int NeuralNetwork::init() {
ini, (layers_name[i] + ":kernel_size").c_str(), unknown),
(int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(
size, Layer::PropertyType::kernel_size);
status = conv2d_layer->setSize(size, Layer::PropertyType::kernel_size);
NN_INI_RETURN_STATUS();

status = getValues(
CONV2D_DIM,
iniparser_getstring(ini, (layers_name[i] + ":stride").c_str(), unknown),
(int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(
size, Layer::PropertyType::stride);
status = conv2d_layer->setSize(size, Layer::PropertyType::stride);
NN_INI_RETURN_STATUS();

status = getValues(CONV2D_DIM,
iniparser_getstring(
ini, (layers_name[i] + ":padding").c_str(), unknown),
(int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(
size, Layer::PropertyType::padding);
status = conv2d_layer->setSize(size, Layer::PropertyType::padding);
NN_INI_RETURN_STATUS();

status = conv2d_layer->setFilter(
@@ -586,7 +584,7 @@ int NeuralNetwork::init(std::shared_ptr<Optimizer> optimizer,
status = fc_layer->setOptimizer(opt);
NN_RETURN_STATUS();

} break;
} break;
case LAYER_BN:
layers[i]->setInputDimension(previous_dim);
status = layers[i]->initialize(last);
@@ -643,12 +641,12 @@ Tensor NeuralNetwork::forwarding(Tensor input, Tensor output, int &status) {
Tensor X = input;
Tensor Y2 = output;

X = forwarding (input, status);
X = forwarding(input, status);
if (status != ML_ERROR_NONE)
return X;

X = std::static_pointer_cast<LossLayer>(layers[layers.size() - 1])
->forwarding(X, Y2, status);
->forwarding(X, Y2, status);
return X;
}

@@ -674,7 +672,7 @@ int NeuralNetwork::backwarding(Tensor input, Tensor expected_output,

float NeuralNetwork::getLoss() {
loss = 0.0;
for (unsigned int i=0; i < layers.size(); i++) {
for (unsigned int i = 0; i < layers.size(); i++) {
loss += layers[i]->getLoss();
}

@@ -853,7 +851,7 @@ int NeuralNetwork::train_run() {
backwarding(nntrainer::Tensor(in), nntrainer::Tensor(label), iter++);
if (status != ML_ERROR_NONE) {
data_buffer->clear(nntrainer::BUF_TRAIN);
ml_loge ("Error: training error in #%d/%d.", i+1, epoch);
ml_loge("Error: training error in #%d/%d.", i + 1, epoch);
return status;
}
std::cout << "#" << i + 1 << "/" << epoch;
@@ -889,7 +887,7 @@ int NeuralNetwork::train_run() {
nntrainer::Tensor Y2 = nntrainer::Tensor({label[i]});
nntrainer::Tensor Y = forwarding(X, Y2, status);
if (status != ML_ERROR_NONE) {
ml_loge ("Error: forwarding the network resulted in error.");
ml_loge("Error: forwarding the network resulted in error.");
return status;
}
