Commit

Merge branch 'main' into check

pallaviNNT authored Jul 2, 2024
2 parents 28e9ba7 + b9c27e9 commit ae375b0
Showing 39 changed files with 2,708 additions and 1,556 deletions.
7 changes: 4 additions & 3 deletions .github/workflows/cpp_linter.yml
@@ -12,16 +12,17 @@ jobs:
echo "Install mandatory dev packages to avoid false-positive reports from cpp-linter"
sudo apt-get update
sudo apt-get install libstdc++-*-dev
- uses: cpp-linter/cpp-linter-action@v2.8.0
- uses: cpp-linter/cpp-linter-action@v2.9.0
id: linter
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
style: ''
version: 16
style: file
version: 14
lines-changed-only: true
file-annotations: false
step-summary: true
format-review: true

- name: failing fast
if: steps.linter.outputs.clang-format-checks-failed > 0
9 changes: 9 additions & 0 deletions api/ccapi/include/layer.h
@@ -101,6 +101,7 @@ enum LayerType {
LAYER_LOSS_CONSTANT_DERIVATIVE, /**< Synthetic loss layer to feed constant
derivative */
LAYER_RMSNORM = ML_TRAIN_LAYER_TYPE_RMSNORM, /**<RMS NORM Layer */
LAYER_UPSAMPLE2D, /**< Upsample 2D Layer type */
LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN /**< Unknown */
};

@@ -557,6 +558,14 @@ Identity(const std::vector<std::string> &properties = {}) {
return createLayer(LayerType::LAYER_IDENTITY, properties);
}

/**
* @brief Helper function to create Upsample2d layer
*/
inline std::unique_ptr<Layer>
Upsample2D(const std::vector<std::string> &properties = {}) {
return createLayer(LayerType::LAYER_UPSAMPLE2D, properties);
}

/**
* @brief Helper function to create activation layer
*/
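
For context, a minimal sketch of how the new Upsample2D helper might be called from application code. It assumes the helper lives in the ml::train::layer namespace like the other creator functions in this header, and the "upsample=nearest" property string is an assumption taken from the UpsampleModeInfo enumeration added later in this commit; neither usage appears in the diff itself.

#include <layer.h> // api/ccapi/include/layer.h
#include <memory>

int main() {
  using namespace ml::train;

  // Create the new layer through the helper added in this commit. The
  // property string is an assumption based on UpsampleModeInfo
  // ("nearest"/"bilinear"); check the layer's accepted properties before
  // relying on it.
  std::unique_ptr<Layer> up = layer::Upsample2D({"upsample=nearest"});

  return up ? 0 : 1;
}
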
11 changes: 7 additions & 4 deletions nntrainer/app_context.cpp
@@ -73,6 +73,7 @@
#include <rnncell.h>
#include <split_layer.h>
#include <time_dist.h>
#include <upsample2d_layer.h>
#include <zoneout_lstmcell.h>

#ifdef ENABLE_TFLITE_BACKBONE
@@ -154,7 +155,7 @@ std::vector<std::string> getPluginPaths() {
* where you would like to look for the layers, while NNTRAINER_CONF_PATH is a
* (buildtime hardcoded @a file path) to locate configuration file *.ini file
*/
/*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than serise of path
/*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than series of path
* like PATH environment variable. this could be improved but for now, it is
* enough
*/
@@ -211,7 +212,7 @@ const std::string getFullPath(const std::string &path,
std::mutex factory_mutex;

/**
* @brief finialize global context
* @brief finalize global context
*
*/
static void fini_global_context_nntrainer(void) __attribute__((destructor));
@@ -221,7 +222,7 @@ static void fini_global_context_nntrainer(void) {}
std::once_flag global_app_context_init_flag;

static void add_default_object(AppContext &ac) {
/// @note all layers should be added to the app_context to gaurantee that
/// @note all layers should be added to the app_context to guarantee that
/// createLayer/createOptimizer class is created
using OptType = ml::train::OptimizerType;
ac.registerFactory(nntrainer::createOptimizer<SGD>, SGD::type, OptType::SGD);
@@ -306,6 +307,8 @@ static void add_default_object(AppContext &ac) {
LayerType::LAYER_POSITIONAL_ENCODING);
ac.registerFactory(nntrainer::createLayer<IdentityLayer>, IdentityLayer::type,
LayerType::LAYER_IDENTITY);
ac.registerFactory(nntrainer::createLayer<Upsample2dLayer>,
Upsample2dLayer::type, LayerType::LAYER_UPSAMPLE2D);

#ifdef ENABLE_NNSTREAMER_BACKBONE
ac.registerFactory(nntrainer::createLayer<NNStreamerLayer>,
@@ -319,7 +322,7 @@ static void add_default_object(AppContext &ac) {
ac.registerFactory(nntrainer::createLayer<CentroidKNN>, CentroidKNN::type,
LayerType::LAYER_CENTROID_KNN);

/** proprocess layers */
/** preprocess layers */
ac.registerFactory(nntrainer::createLayer<PreprocessFlipLayer>,
PreprocessFlipLayer::type,
LayerType::LAYER_PREPROCESS_FLIP);
2 changes: 1 addition & 1 deletion nntrainer/dataset/data_iteration.h
@@ -141,7 +141,7 @@ class Sample {
* @brief Construct a new Sample object
* @note the batch dimension will be ignored to make a single sample
*
* @param iter iteration obejcts
* @param iter iteration objects
* @param batch nth batch to create the sample
*/
Sample(const Iteration &iter, unsigned int batch);
6 changes: 3 additions & 3 deletions nntrainer/dataset/data_producer.h
@@ -109,18 +109,18 @@ class DataProducer {
}

/**
* @brief this function helps exporting the dataproducer in a predefined
* @brief this function helps exporting the data producer in a predefined
* format, while workarounding issue caused by templated function type eraser
*
* @param exporter exporter that conatins exporting logic
* @param exporter exporter that contains exporting logic
* @param method enum value to identify how it should be exported to
*/
virtual void exportTo(Exporter &exporter,
const ml::train::ExportMethods &method) const {}

/**
* @brief denote if given producer is thread safe and can be parallelized.
* @note if size() == SIZE_UNDEFIEND, thread safe shall be false
* @note if size() == SIZE_UNDEFINED, thread safe shall be false
*
* @return bool true if thread safe.
*/
2 changes: 1 addition & 1 deletion nntrainer/dataset/databuffer.h
@@ -139,7 +139,7 @@ class DataBuffer : public ml::train::Dataset {
* @brief this function helps exporting the dataset in a predefined format,
* while workarounding issue caused by templated function type eraser
*
* @param exporter exporter that conatins exporting logic
* @param exporter exporter that contains exporting logic
* @param method enum value to identify how it should be exported to
*/
void exportTo(Exporter &exporter,
6 changes: 2 additions & 4 deletions nntrainer/dataset/dir_data_producers.cpp
@@ -72,9 +72,7 @@ static void readImage(const std::string path, float *input, uint width,
namespace nntrainer {

DirDataProducer::DirDataProducer() :
dir_data_props(new Props()),
num_class(0),
num_data_total(0) {}
dir_data_props(new Props()), num_class(0), num_data_total(0) {}

DirDataProducer::DirDataProducer(const std::string &dir_path) :
dir_data_props(new Props(props::DirPath(dir_path))),
@@ -140,7 +138,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
auto sz = size(input_dims, label_dims);

NNTR_THROW_IF(sz == 0, std::invalid_argument)
<< "size is zero, dataproducer does not provide anything";
<< "size is zero, data producer does not provide anything";

return [sz, input_dims, this](unsigned int idx, std::vector<Tensor> &inputs,
std::vector<Tensor> &labels) {
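
A rough sketch of the generator pattern visible in this hunk: finalize() returns a callable that fills caller-provided buffers for the idx-th sample. The types below are simplified stand-ins (plain float vectors rather than nntrainer Tensors), and the return-true-on-last-sample convention is an assumption, not something this diff establishes.

#include <functional>
#include <vector>

using SimpleGenerator =
  std::function<bool(unsigned int idx, std::vector<float> &input,
                     std::vector<float> &label)>;

// Build a generator that closes over the dataset size, mirroring the
// [sz, input_dims, this] capture in DirDataProducer::finalize() above.
SimpleGenerator makeGenerator(unsigned int sz) {
  return [sz](unsigned int idx, std::vector<float> &input,
              std::vector<float> &label) {
    input.assign(8, static_cast<float>(idx));     // stand-in for image data
    label.assign(1, static_cast<float>(idx % 2)); // stand-in for a label
    return idx + 1 >= sz; // assumed convention: true marks the last sample
  };
}
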
2 changes: 1 addition & 1 deletion nntrainer/dataset/random_data_producers.cpp
@@ -113,7 +113,7 @@ RandomDataOneHotProducer::finalize(const std::vector<TensorDim> &input_dims,

/// @todo move this to higher order component
NNTR_THROW_IF(size(input_dims, label_dims) == 0, std::invalid_argument)
<< "size is zero, dataproducer does not provide anything";
<< "size is zero, data producer does not provide anything";

/** prepare states for the generator */
std::vector<std::uniform_int_distribution<unsigned int>> label_chooser_;
2 changes: 1 addition & 1 deletion nntrainer/layers/cl_layers/swiglu_cl.h
@@ -69,7 +69,7 @@ class SwiGLULayerCl final : public Layer {
/**
* @copydoc bool supportBackwarding() const
*/
bool supportBackwarding() const override { return true; };
bool supportBackwarding() const override { return false; };

/**
* @copydoc Layer::exportTo(Exporter &exporter, ExportMethods method)
35 changes: 31 additions & 4 deletions nntrainer/layers/common_properties.h
@@ -869,10 +869,9 @@ struct ActivationTypeInfo {
Enum::ACT_GELU, Enum::ACT_QUICK_GELU, Enum::ACT_NONE,
Enum::ACT_UNKNOWN};

static constexpr const char *EnumStr[] = {"tanh", "sigmoid", "relu",
"softmax", "leaky_relu", "swish",
"gelu", "quick_gelu", "none",
"unknown"};
static constexpr const char *EnumStr[] = {
"tanh", "sigmoid", "relu", "softmax", "leaky_relu",
"swish", "gelu", "quick_gelu", "none", "unknown"};
};

/**
@@ -1068,6 +1067,34 @@ class WeightRegularizer final : public BasicRegularizer {
static constexpr const char *key = "weight_regularizer";
};

/**
* @brief Enumeration of upsample type
* @todo Support torch and keras supported modes like bicubic
*/
struct UpsampleModeInfo {
/**
* @brief Upsampling operation type class
*/
enum class Interpolation { nearest, bilinear };

using Enum = Interpolation;

static constexpr std::initializer_list<Interpolation> EnumList = {
Interpolation::nearest, Interpolation::bilinear};

static constexpr const char *EnumStr[] = {"nearest", "bilinear"};
};

/**
* @brief Upsample Type Enumeration Information
*
*/
class UpsampleMode final : public EnumProperty<UpsampleModeInfo> {
public:
using prop_tag = enum_class_prop_tag;
static constexpr const char *key = "upsample";
};

/**
* @brief Enumeration of pooling type
*/
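
A standalone sketch (no nntrainer dependency) of the enum-info pattern used by UpsampleModeInfo above: EnumList and EnumStr are parallel sequences, so a property string such as "bilinear" resolves to its Interpolation value by index. The lookup helper below is illustrative; inside nntrainer the equivalent mapping is presumably handled by the EnumProperty machinery behind enum_class_prop_tag.

#include <cstddef>
#include <cstring>
#include <iostream>
#include <stdexcept>

enum class Interpolation { nearest, bilinear };

constexpr Interpolation EnumList[] = {Interpolation::nearest,
                                      Interpolation::bilinear};
constexpr const char *EnumStr[] = {"nearest", "bilinear"};

// Resolve a property string to its enum value by scanning the parallel arrays.
Interpolation upsampleModeFromString(const char *value) {
  for (std::size_t i = 0; i < sizeof(EnumStr) / sizeof(*EnumStr); ++i) {
    if (std::strcmp(EnumStr[i], value) == 0)
      return EnumList[i];
  }
  throw std::invalid_argument("unknown upsample mode");
}

int main() {
  std::cout << (upsampleModeFromString("bilinear") == Interpolation::bilinear)
            << '\n'; // prints 1
  return 0;
}
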
4 changes: 4 additions & 0 deletions nntrainer/layers/layer_context.cpp
@@ -703,6 +703,10 @@ std::string RunLayerContext::getKernelName(LayerKernel layerKernel) {
return "rmsnorm_cl";
case LayerKernel::RMSNORM_FP16:
return "rmsnorm_cl_fp16";
case LayerKernel::SSCAL:
return "sscal_cl";
case LayerKernel::SSCAL_FP16:
return "sscal_cl_fp16";
default:
return "";
}
6 changes: 4 additions & 2 deletions nntrainer/layers/layer_context.h
@@ -840,8 +840,10 @@ class RunLayerContext {
ADD_FP16 = 1 << 7, /**< placeholder for kernel name */
SWIGLU = 1 << 8, /**< placeholder for kernel name */
SWIGLU_FP16 = 1 << 9, /**< placeholder for kernel name */
RMSNORM = 1 << 10,
RMSNORM_FP16 = 1 << 11
SSCAL = 1 << 10, /**< placeholder for kernel name */
SSCAL_FP16 = 1 << 11, /**< placeholder for kernel name */
RMSNORM = 1 << 12,
RMSNORM_FP16 = 1 << 13
};

/**
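
The new SSCAL entries appear both in the LayerKernel enum and in getKernelName(), and the RMSNORM values move from 1 << 10 / 1 << 11 up to 1 << 12 / 1 << 13 to make room. Below is a standalone sketch of why the values are powers of two: each kernel occupies its own bit, so a set of required kernels fits in a single mask. The mask usage itself is an illustration, not something shown in this diff.

#include <cstdint>
#include <iostream>

// Values mirror the updated enum above.
enum LayerKernel : std::uint32_t {
  SSCAL = 1u << 10,
  SSCAL_FP16 = 1u << 11,
  RMSNORM = 1u << 12,
  RMSNORM_FP16 = 1u << 13,
};

int main() {
  std::uint32_t required = SSCAL | RMSNORM; // combine kernels in one bitmask
  std::cout << std::boolalpha;
  std::cout << ((required & RMSNORM) != 0) << '\n';    // true
  std::cout << ((required & SSCAL_FP16) != 0) << '\n'; // false
  return 0;
}
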
3 changes: 2 additions & 1 deletion nntrainer/layers/meson.build
@@ -42,7 +42,8 @@ layer_sources = [
'reshape_layer.cpp',
'reduce_mean_layer.cpp',
'positional_encoding_layer.cpp',
'identity_layer.cpp'
'identity_layer.cpp',
'upsample2d_layer.cpp'
]

layer_headers = [
2 changes: 1 addition & 1 deletion nntrainer/layers/preprocess_l2norm_layer.h
@@ -29,7 +29,7 @@ class PreprocessL2NormLayer : public Layer {
public:
/**
* @brief Construct a new L2norm Layer object
* that normlizes given feature with l2norm
* that normalizes given feature with l2norm
*/
PreprocessL2NormLayer() : Layer() {}

4 changes: 2 additions & 2 deletions nntrainer/layers/split_layer.cpp
@@ -44,7 +44,7 @@ void SplitLayer::finalize(InitLayerContext &context) {

/**
* The split is only done along the split_dimension dimension.
* (Assumes input data is continous)
* (Assumes input data is continuous)
* For example, consider input dimension [b,c,h,w], split_number = n
* 1. axis = 1, output_dim = [b,c//n,h,w], num_outputs = n
* 2. axis = 2, output_dim = [b,c,h//n,w], num_outputs = n
@@ -75,7 +75,7 @@ void SplitLayer::finalize(InitLayerContext &context) {
* to facilitate easier processing.
*
* The helper shape consolidates all the dimensions before the split_dimension
* together and all the dimensions after the split_dimension to faciliate
* together and all the dimensions after the split_dimension to facilitate
* easier splitting of the data.
*/
leading_helper_dim = 1;
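
A small worked example of the helper-shape idea described in the comment above, using illustrative numbers that are not taken from this commit: every dimension before the split axis is folded into a leading dimension and every dimension after it into a trailing one, so each output keeps dim[axis] / n slices of the helper view.

#include <array>
#include <functional>
#include <iostream>
#include <numeric>

int main() {
  std::array<unsigned int, 4> dim = {2, 6, 4, 4}; // [b, c, h, w]
  unsigned int axis = 1;                          // split along channels
  unsigned int n = 3;                             // split_number

  unsigned int leading = std::accumulate(dim.begin(), dim.begin() + axis, 1u,
                                         std::multiplies<unsigned int>());
  unsigned int trailing = std::accumulate(dim.begin() + axis + 1, dim.end(),
                                          1u, std::multiplies<unsigned int>());
  unsigned int per_output = dim[axis] / n; // each output keeps c // n channels

  // Helper view [leading, c, trailing] = [2, 6, 16]; each of the n outputs
  // covers a [2, 2, 16] sub-block, i.e. output_dim = [b, c//n, h, w] = [2, 2, 4, 4].
  std::cout << leading << " x " << per_output << " x " << trailing << '\n';
  return 0;
}
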