Connect layers implementation with graph model (#130)
Closes #33

---------

Co-authored-by: AndreySorokin7 <andrey_sorokin_nn@mail.ru>

AndreySorokin7 authored May 7, 2024
1 parent c9d9033 commit 885a556
Showing 12 changed files with 171 additions and 146 deletions.
84 changes: 25 additions & 59 deletions include/graph/graph.hpp
@@ -5,49 +5,18 @@
 #include <string>
 #include <vector>
 
-namespace itlab_2023 {
-
-enum LayerType {
-  kInput,
-  kPooling,
-  kNormalization,
-  kDropout,
-  kElementWise,
-  kConvolution,
-  kFullyConnected,
-  kOutput
-};
-
-class LayerExample {
- private:
-  int id_;
-  std::string name_;
-  LayerType type_;
-  std::string version_;
-  int numInputs_;
-  int numNeurons_;
-  std::vector<int> primer_;
-
- public:
-  LayerExample(LayerType type1) : type_(type1) {}
-  LayerType getType() { return type_; }
-  int getNumInputs() const { return numInputs_; }
-  int getNumNeurons() const { return numNeurons_; }
-  int checkID() const { return id_; }
-  void giveID(int id1) { id_ = id1; }
-  void In(const std::vector<int>& a) { primer_ = a; }
-  void Work() {}
-  std::vector<int> Out() { return primer_; }
-};
+#include "layers/Layer.hpp"
+
+namespace itlab_2023 {
 
 class Graph {
   int BiggestSize_;
   int V_;
-  std::vector<LayerExample> layers_;
+  std::vector<Layer*> layers_;
   std::vector<int> arrayV_;
   std::vector<int> arrayE_;
-  std::vector<int> startvec_;
-  std::vector<int>* outvector_;
+  Tensor inten_;
+  Tensor* outten_;
   int start_;
   int end_;
@@ -59,35 +28,34 @@ class Graph {
     arrayV_.push_back(0);
     V_ = 0;
   }
-  void setInput(LayerExample& lay, const std::vector<int>& vec) {
-    lay.giveID(0);
-    layers_.push_back(lay);
+  void setInput(Layer& lay, Tensor& vec) {
+    lay.setID(0);
+    layers_.push_back(&lay);
     arrayV_.push_back(0);
-    startvec_ = vec;
-    start_ = lay.checkID();
+    inten_ = vec;
+    start_ = lay.getID();
     V_++;
   }
-  void makeConnection(const LayerExample& layPrev, LayerExample& layNext) {
-    layNext.giveID(V_);
-    layers_.push_back(layNext);
+  void makeConnection(const Layer& layPrev, Layer& layNext) {
+    layNext.setID(V_);
+    layers_.push_back(&layNext);
     arrayV_[V_] = arrayV_[V_ - 1];
     arrayV_.push_back(static_cast<int>(arrayE_.size()));
-    if (layPrev.checkID() == layNext.checkID()) {
+    if (layPrev.getID() == layNext.getID()) {
       throw std::out_of_range("i=j cant add edge");
     }
     for (int ind = 1; ind < static_cast<int>(arrayV_.size()) -
-                                static_cast<int>(layPrev.checkID()) - 1;
+                                static_cast<int>(layPrev.getID()) - 1;
          ind++)
-      arrayV_[layPrev.checkID() + ind]++;
-    arrayE_.insert(arrayE_.begin() + arrayV_[layPrev.checkID()],
-                   layNext.checkID());
+      arrayV_[layPrev.getID() + ind]++;
+    arrayE_.insert(arrayE_.begin() + arrayV_[layPrev.getID()], layNext.getID());
     V_++;
     arrayV_[V_] = static_cast<int>(arrayE_.size());
   }
-  bool areLayerNext(const LayerExample& layPrev, const LayerExample& layNext) {
-    for (int i = arrayV_[layPrev.checkID()]; i < arrayV_[layPrev.checkID() + 1];
+  bool areLayerNext(const Layer& layPrev, const Layer& layNext) {
+    for (int i = arrayV_[layPrev.getID()]; i < arrayV_[layPrev.getID() + 1];
          i++) {
-      if (arrayE_[i] == layNext.checkID()) {
+      if (arrayE_[i] == layNext.getID()) {
         return true;
       }
     }
@@ -122,15 +90,13 @@ class Graph {
       }
     }
     for (int i : traversal) {
-      layers_[i].In(startvec_);
-      layers_[i].Work();
-      startvec_ = layers_[i].Out();
+      layers_[i]->run(inten_, *outten_);
+      inten_ = *outten_;
     }
-    outvector_->assign(startvec_.begin(), startvec_.end());
   }
-  void setOutput(const LayerExample& lay, std::vector<int>& vec) {
-    end_ = lay.checkID();
-    outvector_ = &vec;
+  void setOutput(const Layer& lay, Tensor& vec) {
+    end_ = lay.getID();
+    outten_ = &vec;
   }
 };
 }  // namespace itlab_2023
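
Taken together, the graph now stores Layer pointers and moves Tensors between them rather than LayerExample copies and int vectors. A minimal usage sketch, not part of the commit: the Graph constructor argument, the inference() entry point, and the make_tensor(vector, shape) helper are assumptions inferred from the surrounding diff.

#include "graph/graph.hpp"
#include "layers/FCLayer.hpp"
#include "layers/OutputLayer.hpp"

using namespace itlab_2023;

int main() {
  // 3-element input, 2x3 weights, 2-element bias (illustrative values);
  // make_tensor(vector, shape) is assumed from src/layers/FCLayer.cpp below.
  Tensor input = make_tensor(std::vector<float>{1.0F, 2.0F, 3.0F}, {3});
  Tensor weights = make_tensor(
      std::vector<float>{0.1F, 0.2F, 0.3F, 0.4F, 0.5F, 0.6F}, {2, 3});
  Tensor bias = make_tensor(std::vector<float>{0.0F, 0.0F}, {2});
  Tensor output;

  FCLayer fc(weights, bias);       // weights/bias now live inside the layer
  OutputLayer last({"cat", "dog"});

  Graph graph(5);                  // assumption: maximum vertex count
  graph.setInput(fc, input);       // fc becomes vertex 0, start of traversal
  graph.makeConnection(fc, last);  // CSR-style edge fc -> last
  graph.setOutput(last, output);   // traversal writes its result here
  graph.inference();               // assumption: name of the method whose
                                   // traversal loop is shown above
}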
7 changes: 5 additions & 2 deletions include/layers/ConvLayer.hpp
@@ -12,15 +12,18 @@ class ConvolutionalLayer : public Layer {
   size_t stride_;
   size_t pads_;
   size_t dilations_;
+  Tensor kernel_;
 
  public:
   ConvolutionalLayer() = default;
-  ConvolutionalLayer(size_t step, size_t pads, size_t dilations) {
+  ConvolutionalLayer(size_t step, size_t pads, size_t dilations,
+                     const Tensor& kernel) {
     stride_ = step;
     pads_ = pads;
     dilations_ = dilations;
+    kernel_ = kernel;
   }
-  void run(const Tensor& input, Tensor& output, const Tensor& kernel_) const;
+  void run(const Tensor& input, Tensor& output) override;
 };
 
 template <typename ValueType>
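
The kernel now travels with the layer instead of being passed on every run() call. A hypothetical construction sketch; the kernel values and shape are illustrative, and make_tensor(vector, shape) is an assumption carried over from src/layers/FCLayer.cpp below.

// 2x2 kernel, purely illustrative.
Tensor kernel = make_tensor(std::vector<float>{1.0F, 0.0F, 0.0F, 1.0F}, {2, 2});
ConvolutionalLayer conv(/*step=*/1, /*pads=*/0, /*dilations=*/1, kernel);

Tensor image;  // assume an input tensor prepared elsewhere
Tensor out;
conv.run(image, out);  // kernel_ is a member now, not a per-call argument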
11 changes: 9 additions & 2 deletions include/layers/FCLayer.hpp
@@ -8,11 +8,18 @@
 namespace itlab_2023 {
 
 class FCLayer : public Layer {
+ private:
+  Tensor weights_;
+  Tensor bias_;
+
  public:
   FCLayer() = default;
+  FCLayer(const Tensor& weights, const Tensor& bias) {
+    weights_ = weights;
+    bias_ = bias;
+  }
   static std::string get_name() { return "Fully-connected layer"; }
-  static void run(const Tensor& input, Tensor& output, const Tensor& weights,
-                  const Tensor& bias);
+  void run(const Tensor& input, Tensor& output) override;
 };
 
 template <typename ValueType>
2 changes: 1 addition & 1 deletion include/layers/InputLayer.hpp
@@ -26,7 +26,7 @@ class InputLayer : public Layer {
     mean_ = mean;
     std_ = std;
   }  // layout = kNchw(0), kNhwc(1)
-  void run(Tensor& input, Tensor& output) const {
+  void run(const Tensor& input, Tensor& output) override {
     switch (input.get_type()) {
       case Type::kInt: {
         std::vector<int> in = *input.as<int>();
20 changes: 20 additions & 0 deletions include/layers/Layer.hpp
@@ -10,9 +10,29 @@
 
 namespace itlab_2023 {
 
+enum LayerType {
+  kInput,
+  kPooling,
+  kNormalization,
+  kDropout,
+  kElementWise,
+  kConvolution,
+  kFullyConnected,
+  kOutput,
+};
+
 class Layer {
  public:
   Layer() = default;
+  int getID() const { return id_; }
+  void setID(int id) { id_ = id; }
+  LayerType getName() const { return type_; }
+  void setName(LayerType type) { type_ = type; }
+  virtual void run(const Tensor& input, Tensor& output) = 0;
+
+ private:
+  int id_;
+  LayerType type_;
 };
 
 template <typename ValueType>
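
With LayerType and the ID plumbing moved into the abstract base, any subclass that overrides run(const Tensor&, Tensor&) can be wired into Graph through a Layer*. A sketch of a custom layer against the new interface (not part of the commit; the class name is hypothetical):

class IdentityLayer : public Layer {
 public:
  IdentityLayer() { setName(kElementWise); }
  // Pure pass-through, matching the pure-virtual signature above.
  void run(const Tensor& input, Tensor& output) override { output = input; }
};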
2 changes: 1 addition & 1 deletion include/layers/OutputLayer.hpp
@@ -11,7 +11,7 @@ class OutputLayer : public Layer {
   OutputLayer() = default;
   OutputLayer(const std::vector<std::string>& labels) : labels_(labels) {}
   static std::string get_name() { return "Output layer"; }
-  static void run(const Tensor& input, Tensor& output) { output = input; }
+  void run(const Tensor& input, Tensor& output) override { output = input; }
   std::vector<std::string> get_labels() const { return labels_; }
   std::pair<std::vector<std::string>, Tensor> top_k(const Tensor& input,
                                                     size_t k) const;
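
run() is now virtual, so OutputLayer dispatches through the common Layer interface while keeping its label helpers. A hypothetical call sequence, where logits is assumed to be a Tensor of scores prepared elsewhere:

OutputLayer out({"cat", "dog", "bird"});
Tensor scores;
out.run(logits, scores);          // same copy-through behaviour, now via Layer*
auto top = out.top_k(scores, 2);  // two best labels paired with their scores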
3 changes: 1 addition & 2 deletions src/layers/ConvLayer.cpp
@@ -1,8 +1,7 @@
 #include "layers/ConvLayer.hpp"
 namespace itlab_2023 {
 
-void ConvolutionalLayer::run(const Tensor& input, Tensor& output,
-                             const Tensor& kernel_) const {
+void ConvolutionalLayer::run(const Tensor& input, Tensor& output) {
   switch (input.get_type()) {
     case Type::kInt: {
       ConvImpl<int> used_impl(
15 changes: 7 additions & 8 deletions src/layers/FCLayer.cpp
@@ -2,25 +2,24 @@
 
 namespace itlab_2023 {
 
-void FCLayer::run(const Tensor& input, Tensor& output, const Tensor& weights,
-                  const Tensor& bias) {
-  if (input.get_type() != weights.get_type()) {
+void FCLayer::run(const Tensor& input, Tensor& output) {
+  if (input.get_type() != weights_.get_type()) {
     throw std::invalid_argument("Input and weights data type aren't same");
   }
-  if (bias.get_type() != weights.get_type()) {
+  if (bias_.get_type() != weights_.get_type()) {
     throw std::invalid_argument("Bias and weights data type aren't same");
   }
   switch (input.get_type()) {
     case Type::kInt: {
-      FCLayerImpl<int> used_impl(*weights.as<int>(), weights.get_shape(),
-                                 *bias.as<int>());
+      FCLayerImpl<int> used_impl(*weights_.as<int>(), weights_.get_shape(),
+                                 *bias_.as<int>());
       output = make_tensor(used_impl.run(*input.as<int>()),
                            used_impl.get_output_shape());
       break;
     }
     case Type::kFloat: {
-      FCLayerImpl<float> used_impl(*weights.as<float>(), weights.get_shape(),
-                                   *bias.as<float>());
+      FCLayerImpl<float> used_impl(*weights_.as<float>(), weights_.get_shape(),
+                                   *bias_.as<float>());
       output = make_tensor(used_impl.run(*input.as<float>()),
                            used_impl.get_output_shape());
       break;
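
Because weights_ and bias_ are now members, the type checks guard the layer's own state against the incoming tensor. A hypothetical probe of the new error path (values illustrative; make_tensor(vector, shape) as used above):

Tensor weights = make_tensor(std::vector<float>{0.5F, 0.5F}, {1, 2});
Tensor bias = make_tensor(std::vector<float>{0.0F}, {1});
FCLayer fc(weights, bias);

Tensor bad_input = make_tensor(std::vector<int>{1, 2}, {2});
Tensor out;
try {
  fc.run(bad_input, out);  // kInt input against kFloat weights_
} catch (const std::invalid_argument& e) {
  // message: "Input and weights data type aren't same"
}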