diff --git a/nntrainer/layers/conv2d_layer.cpp b/nntrainer/layers/conv2d_layer.cpp
index ada8752f7c..83013f04ee 100644
--- a/nntrainer/layers/conv2d_layer.cpp
+++ b/nntrainer/layers/conv2d_layer.cpp
@@ -85,7 +85,7 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
   int h_stride_end = im_eff_height - eff_k_height - pt;
   int w_stride_end = im_eff_width - eff_k_width - pl;
 
-  auto apply_data = [&](T * val) {
+  auto apply_data = [&](T *val) {
     unsigned col_w = 0;
     for (int hs = -pt; hs <= h_stride_end; hs += hstride) {
       for (int ws = -pl; ws <= w_stride_end; ws += wstride) {
@@ -218,7 +218,7 @@ static void im2col(const Tensor &in, const TensorDim &kdim,
     TensorDim({out_height * out_width, in.channel() * k_height * k_width},
               in.getTensorType()));
 
-  auto apply_data = [&](T * out_data) {
+  auto apply_data = [&](T *out_data) {
     int h_stride_end = height - eff_k_height - pt;
     int w_stride_end = width - eff_k_width - pl;
 
diff --git a/nntrainer/layers/fc_layer.cpp b/nntrainer/layers/fc_layer.cpp
index 7fbcc4c467..57a8809d1a 100644
--- a/nntrainer/layers/fc_layer.cpp
+++ b/nntrainer/layers/fc_layer.cpp
@@ -39,8 +39,7 @@ static constexpr size_t SINGLE_INOUT_IDX = 0;
 enum FCParams { weight, bias };
 
 FullyConnectedLayer::FullyConnectedLayer() :
-  LayerImpl(),
-  fc_props(props::Unit()) {
+  LayerImpl(), fc_props(props::Unit()) {
   weight_idx.fill(std::numeric_limits<unsigned>::max());
 }
 
@@ -273,7 +272,6 @@ void FullyConnectedLayer::calcGradient(RunLayerContext &context) {
     }
     input_.dot_deriv_wrt_2(djdw_, derivative_, false, false, !wg_first_access);
   }
-
 }
 
 } /* namespace nntrainer */
diff --git a/nntrainer/layers/pooling2d_layer.cpp b/nntrainer/layers/pooling2d_layer.cpp
index 1638da9efe..bbd7bb6245 100644
--- a/nntrainer/layers/pooling2d_layer.cpp
+++ b/nntrainer/layers/pooling2d_layer.cpp
@@ -188,7 +188,7 @@ void Pooling2DLayer::calcDerivative(RunLayerContext &context) {
   unsigned int out_map_size = deriv.height() * deriv.width();
   unsigned int in_map_size = height * width;
 
-  auto apply_max = [&](T * result_data) {
+  auto apply_max = [&](T *result_data) {
     const int *iter = pool_helper.getData<int>();
     const T *deriv_data = deriv.getData<T>();
     for (unsigned int b = 0; b < batch; ++b) {
@@ -207,7 +207,7 @@
     }
   };
 
-  auto apply_average = [&](T * result_data) {
+  auto apply_average = [&](T *result_data) {
     int height_stride_end = height - p_height + pt;
     int width_stride_end = width - p_width + pl;
     const int *iter = pool_helper.getData<int>();
@@ -239,7 +239,7 @@
     }
   };
 
-  auto apply_global_max = [&](T * result_data) {
+  auto apply_global_max = [&](T *result_data) {
     const T *deriv_data = deriv.getData<T>();
     for (unsigned int b = 0; b < batch; b++) {
       for (unsigned int c = 0; c < channel; c++) {
@@ -372,8 +372,9 @@ void Pooling2DLayer::pooling2d(Tensor &in, bool training, Tensor &output,
     return max_val;
   };
 
-  auto pool_fn_global_max = [&, this ](
-    const T *in_data, int channel_idx, int start_h, int start_w) {
+  auto pool_fn_global_max = [&, this](const T *in_data,
+                                      int channel_idx, int start_h,
+                                      int start_w) {
     int end_h = start_h + patch_height;
     int end_w = start_w + patch_width;
 
diff --git a/nntrainer/tensor/tensor.cpp b/nntrainer/tensor/tensor.cpp
index 7fa61cf73b..9065192242 100644
--- a/nntrainer/tensor/tensor.cpp
+++ b/nntrainer/tensor/tensor.cpp
@@ -127,8 +127,7 @@ class SrcSharedTensor {
   SrcSharedTensor() : src(nullptr), off(0) {}
 
   SrcSharedTensor(const Tensor *tensor, size_t offset) :
-    src(tensor),
-    off(offset) {}
+    src(tensor), off(offset) {}
 
   /**
    * @brief Get the allocated src tensor