Connect layers implementation with graph model #130

Merged (10 commits, May 7, 2024)
Changes from 5 commits
63 changes: 15 additions & 48 deletions include/graph/graph.hpp
@@ -5,49 +5,18 @@
 #include <string>
 #include <vector>
 
-namespace itlab_2023 {
-
-enum LayerType {
-  kInput,
-  kPooling,
-  kNormalization,
-  kDropout,
-  kElementWise,
-  kConvolution,
-  kFullyConnected,
-  kOutput
-};
-
-class LayerExample {
- private:
-  int id_;
-  std::string name_;
-  LayerType type_;
-  std::string version_;
-  int numInputs_;
-  int numNeurons_;
-  std::vector<int> primer_;
+#include "layers/Layer.hpp"
 
- public:
-  LayerExample(LayerType type1) : type_(type1) {}
-  LayerType getType() { return type_; }
-  int getNumInputs() const { return numInputs_; }
-  int getNumNeurons() const { return numNeurons_; }
-  int checkID() const { return id_; }
-  void giveID(int id1) { id_ = id1; }
-  void In(const std::vector<int>& a) { primer_ = a; }
-  void Work() {}
-  std::vector<int> Out() { return primer_; }
-};
+namespace itlab_2023 {
 
 class Graph {
   int BiggestSize_;
   int V_;
-  std::vector<LayerExample> layers_;
+  std::vector<Layer*> layers_;
   std::vector<int> arrayV_;
   std::vector<int> arrayE_;
-  std::vector<int> startvec_;
-  std::vector<int>* outvector_;
+  Tensor startten_;
+  Tensor* outten_;
   int start_;
   int end_;
@@ -59,17 +28,17 @@ class Graph {
     arrayV_.push_back(0);
     V_ = 0;
   }
-  void setInput(LayerExample& lay, const std::vector<int>& vec) {
+  void setInput(Layer& lay, Tensor& vec) {
     lay.giveID(0);
-    layers_.push_back(lay);
+    layers_.push_back(&lay);
     arrayV_.push_back(0);
-    startvec_ = vec;
+    startten_ = vec;
     start_ = lay.checkID();
     V_++;
   }
-  void makeConnection(const LayerExample& layPrev, LayerExample& layNext) {
+  void makeConnection(const Layer& layPrev, Layer& layNext) {
     layNext.giveID(V_);
-    layers_.push_back(layNext);
+    layers_.push_back(&layNext);
     arrayV_[V_] = arrayV_[V_ - 1];
     arrayV_.push_back(static_cast<int>(arrayE_.size()));
     if (layPrev.checkID() == layNext.checkID()) {
@@ -84,7 +53,7 @@
     V_++;
     arrayV_[V_] = static_cast<int>(arrayE_.size());
   }
-  bool areLayerNext(const LayerExample& layPrev, const LayerExample& layNext) {
+  bool areLayerNext(const Layer& layPrev, const Layer& layNext) {
     for (int i = arrayV_[layPrev.checkID()]; i < arrayV_[layPrev.checkID() + 1];
          i++) {
       if (arrayE_[i] == layNext.checkID()) {
@@ -122,15 +91,13 @@ class Graph {
      }
    }
    for (int i : traversal) {
-      layers_[i].In(startvec_);
-      layers_[i].Work();
-      startvec_ = layers_[i].Out();
+      layers_[i]->run(startten_, *outten_);
+      startten_ = *outten_;
    }
-    outvector_->assign(startvec_.begin(), startvec_.end());
  }
-  void setOutput(const LayerExample& lay, std::vector<int>& vec) {
+  void setOutput(const Layer& lay, Tensor& vec) {
    end_ = lay.checkID();
-    outvector_ = &vec;
+    outten_ = &vec;
  }
};
}  // namespace itlab_2023
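
With this change the graph stores Layer pointers and moves Tensors between layers instead of std::vector<int> buffers. A minimal usage sketch against the new API (the name of the public method that runs the traversal loop is not visible in the hunk above, so inference() below is an assumption; FCLayer and make_tensor come from later files in this PR, and the tensor shapes are illustrative):

#include "graph/graph.hpp"
#include "layers/FCLayer.hpp"

using namespace itlab_2023;

int main() {
  // 3x2 weight matrix and 3-element bias, mirroring the shapes in test_graph.cpp.
  Tensor weights = make_tensor<float>({2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F}, {3, 2});
  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
  Tensor input = make_tensor<float>({1.0F, 2.0F}, {2});
  Tensor output;

  Graph graph(5);
  FCLayer fc1(weights, bias);
  FCLayer fc2(weights, bias);
  graph.setInput(fc1, input);      // layer 0 plus the tensor fed into it
  graph.makeConnection(fc1, fc2);  // edge 0 -> 1 in the adjacency arrays
  graph.setOutput(fc2, output);    // the traversal writes its result here
  graph.inference();               // assumed method name (truncated in the diff)
}

Note that the graph keeps raw pointers to caller-owned layers, so each layer must outlive the Graph object that references it.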
7 changes: 5 additions & 2 deletions include/layers/ConvLayer.hpp
@@ -12,15 +12,18 @@ class ConvolutionalLayer : public Layer {
   size_t stride_;
   size_t pads_;
   size_t dilations_;
+  Tensor kernel_;
 
  public:
   ConvolutionalLayer() = default;
-  ConvolutionalLayer(size_t step, size_t pads, size_t dilations) {
+  ConvolutionalLayer(size_t step, size_t pads, size_t dilations,
+                     const Tensor& kernel) {
     stride_ = step;
     pads_ = pads;
     dilations_ = dilations;
+    kernel_ = kernel;
   }
-  void run(const Tensor& input, Tensor& output, const Tensor& kernel_) const;
+  void run(const Tensor& input, Tensor& output) override;
 };
 
 template <typename ValueType>
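
Storing the kernel at construction time is what lets run() match the uniform base-class signature that Graph invokes through a Layer pointer. A minimal sketch (the kernel values, shapes, and the image tensor are hypothetical, not taken from this PR):

Tensor kernel = make_tensor<float>({1.0F, 0.0F, 0.0F, 1.0F}, {2, 2});
ConvolutionalLayer conv(/*step=*/1, /*pads=*/0, /*dilations=*/1, kernel);
Tensor image = make_tensor<float>({0.5F, 0.25F, 0.75F, 1.0F}, {2, 2});  // placeholder input
Tensor out;
conv.run(image, out);  // dispatches to ConvImpl<int> or ConvImpl<float> by tensor type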
11 changes: 9 additions & 2 deletions include/layers/FCLayer.hpp
@@ -8,11 +8,18 @@
 namespace itlab_2023 {
 
 class FCLayer : public Layer {
+ private:
+  Tensor weights_;
+  Tensor bias_;
+
  public:
   FCLayer() = default;
+  FCLayer(const Tensor& weights, const Tensor& bias) {
+    weights_ = weights;
+    bias_ = bias;
+  }
   static std::string get_name() { return "Fully-connected layer"; }
-  static void run(const Tensor& input, Tensor& output, const Tensor& weights,
-                  const Tensor& bias);
+  void run(const Tensor& input, Tensor& output) override;
 };
 
 template <typename ValueType>
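
The same refactoring applies here: weights and bias become layer state instead of per-call arguments, and run() turns from a static helper into a virtual override the graph can call polymorphically. A minimal sketch (shapes mirror the test file; the 2-element input is an assumption chosen to pair with a 3x2 weight matrix):

Tensor weights = make_tensor<float>({2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F}, {3, 2});
Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
FCLayer fc(weights, bias);

Tensor in = make_tensor<float>({1.0F, 2.0F}, {2});
Tensor out;
fc.run(in, out);  // throws std::invalid_argument if input, weights, and bias element types differ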
2 changes: 1 addition & 1 deletion include/layers/InputLayer.hpp
@@ -26,7 +26,7 @@ class InputLayer : public Layer {
     mean_ = mean;
     std_ = std;
   }  // layout = kNchw(0), kNhwc(1)
-  void run(Tensor& input, Tensor& output) const {
+  void run(const Tensor& input, Tensor& output) override {
     switch (input.get_type()) {
       case Type::kInt: {
         std::vector<int> in = *input.as<int>();
20 changes: 20 additions & 0 deletions include/layers/Layer.hpp
@@ -10,9 +10,29 @@
 
 namespace itlab_2023 {
 
+enum LayerType {
+  kInput,
+  kPooling,
+  kNormalization,
+  kDropout,
+  kElementWise,
+  kConvolution,
+  kFullyConnected,
+  kOutput
+};
+
 class Layer {
  public:
   Layer() = default;
+  int checkID() const { return id_; }
+  void giveID(int id1) { id_ = id1; }
+  LayerType checkType() const { return type_; }
+  void giveType(LayerType type1) { type_ = type1; }
+  virtual void run(const Tensor& input, Tensor& output) = 0;
+
+ private:
+  int id_;
+  LayerType type_;
 };
 
 template <typename ValueType>
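
Layer is now the single extension point: it carries the id and type bookkeeping that Graph's adjacency arrays are keyed on, plus the pure virtual run() that every concrete layer overrides. A minimal sketch of adding a new layer against this base (IdentityLayer is an illustrative name, not part of the PR):

class IdentityLayer : public Layer {
 public:
  IdentityLayer() { giveType(kElementWise); }
  // Pass the tensor through unchanged; Graph calls this via a Layer*.
  void run(const Tensor& input, Tensor& output) override { output = input; }
};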
2 changes: 1 addition & 1 deletion include/layers/OutputLayer.hpp
@@ -11,7 +11,7 @@
   OutputLayer() = default;
   OutputLayer(const std::vector<std::string>& labels) : labels_(labels) {}
   static std::string get_name() { return "Output layer"; }
-  static void run(const Tensor& input, Tensor& output) { output = input; }
+  void run(const Tensor& input, Tensor& output) override { output = input; }
   std::vector<std::string> get_labels() const { return labels_; }
   std::pair<std::vector<std::string>, Tensor> top_k(const Tensor& input,
                                                     size_t k) const;

Codecov warning: the added line (include/layers/OutputLayer.hpp#L14, the new run() override) is not covered by tests.
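
Beyond the pass-through run(), OutputLayer keeps its label utilities. A hypothetical use of top_k, whose declaration appears above (the label strings and scores are made up, and the "k highest-scoring labels" semantics is an assumption from the name):

OutputLayer last({"cat", "dog", "bird"});
Tensor scores = make_tensor<float>({0.1F, 0.7F, 0.2F});
auto [labels, values] = last.top_k(scores, 2);  // presumably the two best-scoring labels and their scores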
3 changes: 1 addition & 2 deletions src/layers/ConvLayer.cpp
@@ -1,8 +1,7 @@
 #include "layers/ConvLayer.hpp"
 namespace itlab_2023 {
 
-void ConvolutionalLayer::run(const Tensor& input, Tensor& output,
-                             const Tensor& kernel_) const {
+void ConvolutionalLayer::run(const Tensor& input, Tensor& output) {
   switch (input.get_type()) {
     case Type::kInt: {
       ConvImpl<int> used_impl(
15 changes: 7 additions & 8 deletions src/layers/FCLayer.cpp
@@ -2,25 +2,24 @@
 
 namespace itlab_2023 {
 
-void FCLayer::run(const Tensor& input, Tensor& output, const Tensor& weights,
-                  const Tensor& bias) {
-  if (input.get_type() != weights.get_type()) {
+void FCLayer::run(const Tensor& input, Tensor& output) {
+  if (input.get_type() != weights_.get_type()) {
     throw std::invalid_argument("Input and weights data type aren't same");
   }
-  if (bias.get_type() != weights.get_type()) {
+  if (bias_.get_type() != weights_.get_type()) {
     throw std::invalid_argument("Bias and weights data type aren't same");
   }
   switch (input.get_type()) {
     case Type::kInt: {
-      FCLayerImpl<int> used_impl(*weights.as<int>(), weights.get_shape(),
-                                 *bias.as<int>());
+      FCLayerImpl<int> used_impl(*weights_.as<int>(), weights_.get_shape(),
+                                 *bias_.as<int>());
       output = make_tensor(used_impl.run(*input.as<int>()),
                            used_impl.get_output_shape());
       break;
     }
     case Type::kFloat: {
-      FCLayerImpl<float> used_impl(*weights.as<float>(), weights.get_shape(),
-                                   *bias.as<float>());
+      FCLayerImpl<float> used_impl(*weights_.as<float>(), weights_.get_shape(),
+                                   *bias_.as<float>());
       output = make_tensor(used_impl.run(*input.as<float>()),
                            used_impl.get_output_shape());
       break;
104 changes: 61 additions & 43 deletions test/graph/test_graph.cpp
@@ -2,81 +2,99 @@
 
 #include "graph/graph.hpp"
 #include "gtest/gtest.h"
+#include "layers/FCLayer.hpp"
 
 using namespace itlab_2023;
 
 TEST(graph, check_connection) {
-  const std::vector<int> vec = {1, 2, 3, 4};
-  std::vector<int> vec_out(4);
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  const std::vector<float> vec2 = {9.0F, 6.4F, 17.5F};
+  Tensor weights = make_tensor<float>(vec1, {3, 2});
+  Tensor output;
+  Shape wshape({3, 2});
+  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
   Graph graph(5);
-  LayerExample a1(kInput);
-  LayerExample a2(kFullyConnected);
-  LayerExample a3(kFullyConnected);
-  LayerExample a4(kDropout);
-  graph.setInput(a1, vec);
+  FCLayer a1;
+  FCLayer a2;
+  FCLayer a3;
+  graph.setInput(a1, bias);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
-  graph.makeConnection(a1, a4);
-  graph.setOutput(a4, vec_out);
   ASSERT_EQ(graph.areLayerNext(a1, a2), 1);
 }
 TEST(graph, check_connection1) {
-  const std::vector<int> vec = {1, 2, 3, 4};
-  std::vector<int> vec_out(4);
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  const std::vector<float> vec2 = {9.0F, 6.4F, 17.5F};
+  Tensor weights = make_tensor<float>(vec1, {3, 2});
+  Tensor output;
+  Shape wshape({3, 2});
+  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
   Graph graph(5);
-  LayerExample a1(kInput);
-  LayerExample a2(kFullyConnected);
-  LayerExample a3(kFullyConnected);
-  LayerExample a4(kDropout);
-  graph.setInput(a1, vec);
+  FCLayer a1;
+  FCLayer a2;
+  FCLayer a3;
+  FCLayer a4;
+  graph.setInput(a1, bias);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
   graph.makeConnection(a1, a4);
-  graph.setOutput(a4, vec_out);
+  graph.setOutput(a4, bias);
   ASSERT_EQ(graph.areLayerNext(a1, a4), 1);
 }
 TEST(graph, check_connection_when_not_connection) {
-  const std::vector<int> vec = {1, 2, 3, 4};
-  std::vector<int> vec_out(4);
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  const std::vector<float> vec2 = {9.0F, 6.4F, 17.5F};
+  Tensor weights = make_tensor<float>(vec1, {3, 2});
+  Tensor output;
+  Shape wshape({3, 2});
+  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
   Graph graph(5);
-  LayerExample a1(kInput);
-  LayerExample a2(kFullyConnected);
-  LayerExample a3(kFullyConnected);
-  LayerExample a4(kDropout);
-  graph.setInput(a1, vec);
+  FCLayer a1;
+  FCLayer a2;
+  FCLayer a3;
+  FCLayer a4;
+  graph.setInput(a1, bias);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
   graph.makeConnection(a1, a4);
-  graph.setOutput(a4, vec_out);
+  graph.setOutput(a4, bias);
   ASSERT_EQ(graph.areLayerNext(a1, a3), 0);
 }
-TEST(graph, check_connection_when_not_connection2) {
-  const std::vector<int> vec = {1, 2, 3, 4};
-  std::vector<int> vec_out(4);
+TEST(graph, check_connection_when_not_connection1) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  const std::vector<float> vec2 = {9.0F, 6.4F, 17.5F};
+  Tensor weights = make_tensor<float>(vec1, {3, 2});
+  Tensor output;
+  Shape wshape({3, 2});
+  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
   Graph graph(5);
-  LayerExample a1(kInput);
-  LayerExample a2(kFullyConnected);
-  LayerExample a3(kFullyConnected);
-  LayerExample a4(kDropout);
-  graph.setInput(a1, vec);
+  FCLayer a1;
+  FCLayer a2;
+  FCLayer a3;
+  FCLayer a4;
+  graph.setInput(a1, bias);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
   graph.makeConnection(a1, a4);
-  graph.setOutput(a4, vec_out);
+  graph.setOutput(a4, bias);
   ASSERT_EQ(graph.areLayerNext(a1, a1), 0);
 }
-TEST(graph, check_connection_when_not_connection3) {
-  const std::vector<int> vec = {1, 2, 3, 4};
-  std::vector<int> vec_out(4);
+TEST(graph, check_connection_when_not_connection2) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  const std::vector<float> vec2 = {9.0F, 6.4F, 17.5F};
+  Tensor weights = make_tensor<float>(vec1, {3, 2});
+  Tensor output;
+  Shape wshape({3, 2});
+  Tensor bias = make_tensor<float>({0.5F, 0.5F, 1.0F});
   Graph graph(5);
-  LayerExample a1(kInput);
-  LayerExample a2(kFullyConnected);
-  LayerExample a3(kFullyConnected);
-  LayerExample a4(kDropout);
-  graph.setInput(a1, vec);
+  FCLayer a1;
+  FCLayer a2;
+  FCLayer a3;
+  FCLayer a4;
+  graph.setInput(a1, bias);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
   graph.makeConnection(a1, a4);
-  graph.setOutput(a4, vec_out);
+  graph.setOutput(a4, bias);
   ASSERT_EQ(graph.areLayerNext(a2, a4), 0);
 }