Changes from all commits
41 commits
ac67f77
added a graph class
Dec 18, 2024
5f36230
gtest dir fix
Dec 19, 2024
4867937
CMake fix 1.0
Feb 25, 2025
fb57554
CMake fix 2.0
Feb 25, 2025
f2afeea
CMake fix 2.0
Feb 25, 2025
82f0dc4
clang-tidy, clang-format and ubuntu-build fix 1.0
Feb 27, 2025
2f1141a
clang-tidy, clang-format, ubuntu-build and cmake fix 2.0
Feb 27, 2025
fdfe528
CMake, builts, clang format/tidy fix 3.0
Feb 27, 2025
116cf4a
tensor add 1.0
Mar 4, 2025
55a9ee8
graph pr fix
Mar 23, 2025
b3e62d4
tensor class v0.1
Mar 23, 2025
f65be87
tensor v0.2
Mar 24, 2025
7e9a389
tensor v0.3
Mar 24, 2025
f2547da
tensor v0.3
Mar 25, 2025
a89b0ba
tensor v0.4
Mar 25, 2025
9dcf39d
tensor built fix
Mar 25, 2025
fbf472e
tensor fix
Mar 25, 2025
ad52dd0
codecov fix
Mar 30, 2025
2fc6832
macos-clang-build
Mar 30, 2025
0efcb04
clang-format
Mar 30, 2025
4d68495
clang-format 2
Mar 30, 2025
2c2d708
clang-tidy
Mar 30, 2025
7d84878
errors fix
Mar 30, 2025
d07c3d8
errors fix 2
Mar 30, 2025
96b403c
error fix 3
Mar 30, 2025
7be04f0
error fix 4
Mar 30, 2025
aaa5d80
errors fix 5
Mar 30, 2025
0ca0331
errors fix 7
Mar 30, 2025
ff1a8f8
add layer, change graph
Mar 31, 2025
abd51c9
clang-tidy fix
Mar 31, 2025
e901675
clang-tidy and clang-format fix
Mar 31, 2025
f379e51
clang-format and clang-tidy fix 2
Mar 31, 2025
b185542
clang-format fix
Mar 31, 2025
5ae7ad8
clang-format fix 2
Mar 31, 2025
e1350f0
tensor fix
Apr 7, 2025
3155cd8
clang-tidy fix
Apr 7, 2025
9de22dc
add layer's mocks, change graph
Lepsps May 13, 2025
d64ad06
throws fix
Jul 8, 2025
7bba704
build and clang fix, add test with grath branching
Oct 20, 2025
9ee9fe3
clang-tidy fix 1.0
Oct 20, 2025
5009b33
clang-tidy fix 1.01
Oct 21, 2025
5 changes: 4 additions & 1 deletion .github/workflows/main.yml
@@ -15,6 +15,7 @@ jobs:
submodules: recursive
- name: Install dependencies
run: |
+ sudo apt-get update
sudo apt-get install -y cmake ninja-build ccache scons
- name: ccache
uses: hendrikmuhs/[email protected]
@@ -37,6 +38,7 @@ jobs:
submodules: recursive
- name: Install dependencies
run: |
+ sudo apt-get update
sudo apt-get install -y cmake ninja-build ccache scons
- name: ccache
uses: hendrikmuhs/[email protected]
@@ -81,6 +83,7 @@ jobs:
submodules: true
- name: Install dependencies
run: |
+ sudo apt-get update
sudo apt-get install -y cmake ninja-build ccache gcovr lcov scons
- uses: actions/checkout@v4
with:
@@ -102,7 +105,7 @@ jobs:
cmake --build build --parallel
- name: Test
run: |
- build/bin/run_tests
+ build/test/run_test
env:
CTEST_OUTPUT_ON_FAILURE: 1
- name: Generate lcov Coverage Data
27 changes: 21 additions & 6 deletions CMakeLists.txt
@@ -1,15 +1,30 @@
cmake_minimum_required(VERSION 3.20)
set(CMAKE_CXX_STANDARD 11)

project(cpp_template)

include(cmake/configure.cmake)
set(ProjectName "itlab")
project(${ProjectName})

include_directories(include)

enable_testing()

add_subdirectory(3rdparty)
add_subdirectory(app)
add_subdirectory(include)
include(FetchContent)
FetchContent_Declare(
googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG v1.14.0
)
FetchContent_MakeAvailable(googletest)


#add_subdirectory(3rdparty/googletest)
add_subdirectory(src)
add_subdirectory(test)

# REPORT
message( STATUS "")
message( STATUS "General configuration for ${PROJECT_NAME}")
message( STATUS "======================================")
message( STATUS "")
message( STATUS " Configuration: ${CMAKE_BUILD_TYPE}")
message( STATUS "")
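
Since the CI step above now runs build/test/run_test and googletest is pulled in via FetchContent, add_subdirectory(test) is expected to define that target. A minimal sketch of what test/CMakeLists.txt could look like under those assumptions (the target name matches the workflow path, but the source glob and everything else here is illustrative, not taken from this PR):

# Hypothetical test/CMakeLists.txt -- a sketch only; the real file is not shown in this diff.
file(GLOB TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
add_executable(run_test ${TEST_SOURCES})                    # binary lands in build/test/run_test by default
target_link_libraries(run_test PRIVATE GTest::gtest_main)   # target provided by the FetchContent'd googletest
include(GoogleTest)
gtest_discover_tests(run_test)                              # registers the tests with CTest (enable_testing() is on)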
41 changes: 41 additions & 0 deletions include/graph/graph.h
@@ -0,0 +1,41 @@
#ifndef GRAPH_H
#define GRAPH_H

#include <string>
#include <unordered_map>
#include <vector>

#include "./layer/layer.h"
#include "./tensor/tensor.h"

class Network {
private:
std::unordered_map<int, Layer*> layers_;
Tensor<double> inputTensor_;
Tensor<double>* outputTensor_;
int start_ = -1;
int end_ = -1;
bool bfs_helper(int start, int vert, bool flag,
std::vector<int>* v_ord) const;

public:
Network();

bool addLayer(Layer& lay, const std::vector<int>& inputs = {},
const std::vector<int>& outputs = {});
void addEdge(Layer& layPrev, Layer& layNext);
void removeEdge(Layer& layPrev, Layer& layNext);
void removeLayer(Layer& lay);
int getLayers() const;
int getEdges() const;
bool isEmpty() const;
bool hasPath(Layer& layPrev, Layer& layNext) const;
std::vector<int> inference(int start) const;
void setInput(Layer& lay, Tensor<double>& vec);
void setOutput(Layer& lay, Tensor<double>& vec);
void run();
std::vector<std::string> getLayersTypeVector() const;
~Network();
};

#endif
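
For orientation, a minimal usage sketch of the Network interface declared above (not part of the PR; it assumes Tensor<double> is default-constructible and that the mock layers have been configure()d elsewhere):

#include "graph/graph.h"
#include "layer/ConvLayer.h"
#include "layer/PoolingLayer.h"

int main() {
  ConvolutionLayerMock conv(0);
  PoolingLayerMock pool(1, PoolingLayerInfo{});
  // configure(...) calls for both mocks are assumed to happen here.

  Network net;
  net.addLayer(conv);       // presumably the first layer added becomes the entry point
  net.addLayer(pool);
  net.addEdge(conv, pool);  // conv -> pool

  Tensor<double> in;        // assumption: Tensor<double> has a default constructor
  Tensor<double> out;
  net.setInput(conv, in);
  net.setOutput(pool, out);
  net.run();                // traverses the graph (the bfs_helper member suggests BFS order)
  return 0;
}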
30 changes: 30 additions & 0 deletions include/layer/ConcatenateLayer.h
@@ -0,0 +1,30 @@
#ifndef CONCATENATE_LAYER_H
#define CONCATENATE_LAYER_H

#include <string>
#include <vector>

#include "layer/layer.h"
#include "tensor/tensor.h"

class ConcatenateLayerMock : public Layer {
private:
std::vector<Shape> input_shapes_config_;
Shape output_shape_computed_;
unsigned int concatenation_axis_;
bool configured_ = false;

public:
explicit ConcatenateLayerMock(int id);

void configure(const std::vector<Shape>& inputs_shapes, unsigned int axis,
Shape& output_shape_ref);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
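
The shape computation ConcatenateLayerMock::configure would typically perform: all input shapes agree except along the concatenation axis, whose extents are summed. A standalone sketch using plain vectors, since the Shape type lives elsewhere in the repo and its interface is not shown here:

#include <cstddef>
#include <stdexcept>
#include <vector>

std::vector<std::size_t> concat_out_shape(
    const std::vector<std::vector<std::size_t>>& inputs, unsigned int axis) {
  std::vector<std::size_t> out = inputs.at(0);
  for (std::size_t i = 1; i < inputs.size(); ++i) {
    const std::vector<std::size_t>& s = inputs[i];
    if (s.size() != out.size()) throw std::invalid_argument("rank mismatch");
    for (std::size_t d = 0; d < s.size(); ++d) {
      if (d == axis) out[d] += s[d];  // extents add up along the concatenation axis
      else if (s[d] != out[d]) throw std::invalid_argument("off-axis dims must match");
    }
  }
  return out;
}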
44 changes: 44 additions & 0 deletions include/layer/ConvLayer.h
@@ -0,0 +1,44 @@
#ifndef CONV_LAYER_H
#define CONV_LAYER_H

#include <string>

#include "layer/layer.h"
#include "tensor/tensor.h"

struct ConvPadStrideInfo {
unsigned int stride_x{1};
unsigned int stride_y{1};
unsigned int pad_x{0};
unsigned int pad_y{0};

ConvPadStrideInfo(unsigned int sx = 1, unsigned int sy = 1,
unsigned int px = 0, unsigned int py = 0)
: stride_x(sx), stride_y(sy), pad_x(px), pad_y(py) {}
};

class ConvolutionLayerMock : public Layer {
private:
ConvPadStrideInfo conv_info_;
Shape input_shape_config_;
Shape weights_shape_config_;
Shape biases_shape_config_;
Shape output_shape_computed_;
bool has_biases_ = false;
bool configured_ = false;

public:
explicit ConvolutionLayerMock(int id);

void configure(const Shape& input_s, const Shape& weights_s,
const Shape* biases_s, Shape& output_s_ref,
const ConvPadStrideInfo& info);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
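
For reference, the output spatial size that ConvolutionLayerMock::configure is expected to derive follows the standard convolution rule; a standalone sketch in plain integers (the mock's real implementation lives in src/ and is not part of this header):

#include <cstdio>

unsigned int conv_out_dim(unsigned int in, unsigned int kernel,
                          unsigned int pad, unsigned int stride) {
  // floor((in + 2*pad - kernel) / stride) + 1
  return (in + 2 * pad - kernel) / stride + 1;
}

int main() {
  // e.g. a 224x224 input, 3x3 kernel, stride 2, pad 1 -> 112x112
  std::printf("%u x %u\n", conv_out_dim(224, 3, 1, 2), conv_out_dim(224, 3, 1, 2));
  return 0;
}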
39 changes: 39 additions & 0 deletions include/layer/ElementwiseLayer.h
@@ -0,0 +1,39 @@
#ifndef ELEMENTWISE_LAYER_H
#define ELEMENTWISE_LAYER_H

#include <cstdint>
#include <string>

#include "layer/layer.h"
#include "tensor/tensor.h"

enum class ElementwiseOp : std::uint8_t {
kAdd,
kSub,
kMul,
kDiv,
kMax,
kMin,
kSquaredDiff
};

class ElementwiseLayerMock : public Layer {
private:
ElementwiseOp op_type_;
Shape common_shape_;
bool configured_ = false;

public:
ElementwiseLayerMock(int id, ElementwiseOp op);

void configure(const Shape& input1_shape, const Shape& input2_shape,
Shape& output_shape_ref);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
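
The per-element semantics implied by the ElementwiseOp values above; ElementwiseLayerMock::exec would apply something like this over every element of the common shape (a sketch, not the implementation from src/):

#include <algorithm>
#include <stdexcept>

#include "layer/ElementwiseLayer.h"

double apply_elementwise(ElementwiseOp op, double a, double b) {
  switch (op) {
    case ElementwiseOp::kAdd: return a + b;
    case ElementwiseOp::kSub: return a - b;
    case ElementwiseOp::kMul: return a * b;
    case ElementwiseOp::kDiv: return a / b;  // division by zero is left to the caller
    case ElementwiseOp::kMax: return std::max(a, b);
    case ElementwiseOp::kMin: return std::min(a, b);
    case ElementwiseOp::kSquaredDiff: return (a - b) * (a - b);
  }
  throw std::invalid_argument("unknown ElementwiseOp");
}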
35 changes: 35 additions & 0 deletions include/layer/MatMulLayer.h
@@ -0,0 +1,35 @@
#ifndef MATMUL_LAYER_H
#define MATMUL_LAYER_H

#include <string>

#include "layer/layer.h"
#include "tensor/tensor.h"

struct MatMulInfo {
bool transpose_x{false};
bool transpose_y{false};
};

class MatMulLayerMock : public Layer {
private:
MatMulInfo matmul_info_;
Shape input_x_shape_;
Shape input_y_shape_;
Shape output_shape_;
bool configured_ = false;

public:
MatMulLayerMock(int id, const MatMulInfo& info);

void configure(const Shape& input_x_shape, const Shape& input_y_shape,
Shape& output_shape_ref);

void exec(const Tensor<double>& input_x, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
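
The shape rule MatMulLayerMock::configure is expected to enforce for 2-D operands, including the transpose flags; a sketch with plain arrays (the [rows, cols] ordering is an assumption, since Shape is defined elsewhere):

#include <array>
#include <cstddef>
#include <stdexcept>
#include <utility>

#include "layer/MatMulLayer.h"

std::array<std::size_t, 2> matmul_out_shape(std::array<std::size_t, 2> x,
                                            std::array<std::size_t, 2> y,
                                            const MatMulInfo& info) {
  if (info.transpose_x) std::swap(x[0], x[1]);
  if (info.transpose_y) std::swap(y[0], y[1]);
  if (x[1] != y[0]) throw std::invalid_argument("inner dimensions must match");
  return {x[0], y[1]};  // [M, K] x [K, N] -> [M, N]
}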
45 changes: 45 additions & 0 deletions include/layer/PoolingLayer.h
@@ -0,0 +1,45 @@
#ifndef POOLING_LAYER_H
#define POOLING_LAYER_H

#include <cstddef>
#include <cstdint>
#include <string>

#include "layer/layer.h"
#include "tensor/tensor.h"

enum class PoolingType : std::uint8_t { kMax, kAvg, kL2 };

struct PoolingLayerInfo {
PoolingType pool_type{PoolingType::kMax};
int pool_size_x{2};
int pool_size_y{2};
int stride_x{1};
int stride_y{1};
int pad_x{0};
int pad_y{0};
bool exclude_padding{true};
};

class PoolingLayerMock : public Layer {
private:
PoolingLayerInfo pool_info_;
Shape input_shape_;
Shape output_shape_;
size_t h_in_idx_ = 0;
size_t w_in_idx_ = 0;
bool configured_ = false;

public:
PoolingLayerMock(int id, const PoolingLayerInfo& info);

void configure(const Shape& input_shape, Shape& output_shape_ref);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
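
The exclude_padding flag above usually matters only for average pooling: it decides whether the divisor is the full window size or just the elements that fall inside the unpadded input. A sketch of that convention (an assumption about the mock's intended semantics, not code from this PR):

#include <algorithm>

#include "layer/PoolingLayer.h"

// win_start_* may be negative when the window hangs over the left/top padding.
int avg_pool_divisor(int win_start_x, int win_start_y, int in_w, int in_h,
                     const PoolingLayerInfo& info) {
  if (!info.exclude_padding) return info.pool_size_x * info.pool_size_y;
  int x0 = std::max(win_start_x, 0);
  int y0 = std::max(win_start_y, 0);
  int x1 = std::min(win_start_x + info.pool_size_x, in_w);
  int y1 = std::min(win_start_y + info.pool_size_y, in_h);
  return std::max(x1 - x0, 0) * std::max(y1 - y0, 0);  // count of valid (non-padding) cells
}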
28 changes: 28 additions & 0 deletions include/layer/ReshapeLayer.h
@@ -0,0 +1,28 @@
#ifndef RESHAPE_LAYER_H
#define RESHAPE_LAYER_H

#include <string>

#include "layer/layer.h"
#include "tensor/tensor.h"

class ReshapeLayerMock : public Layer {
private:
Shape input_shape_config_;
Shape target_output_shape_config_;
bool configured_ = false;

public:
explicit ReshapeLayerMock(int id);

void configure(const Shape& input_shape, const Shape& target_output_shape,
Shape& output_shape_ref);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
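
The only constraint ReshapeLayerMock::configure needs to check is that the target shape preserves the element count; a one-function sketch with plain vectors (Shape's interface is not shown in this PR):

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

bool reshape_is_valid(const std::vector<std::size_t>& input,
                      const std::vector<std::size_t>& target) {
  auto count = [](const std::vector<std::size_t>& s) {
    return std::accumulate(s.begin(), s.end(), std::size_t{1},
                           std::multiplies<std::size_t>());
  };
  return count(input) == count(target);  // a reshape may not add or drop elements
}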
31 changes: 31 additions & 0 deletions include/layer/SliceLayer.h
@@ -0,0 +1,31 @@
#ifndef SLICE_LAYER_H
#define SLICE_LAYER_H

#include <string>
#include <vector>

#include "layer/layer.h"
#include "tensor/tensor.h"

class SliceLayerMock : public Layer {
private:
Shape input_shape_config_;
Shape output_shape_computed_;
std::vector<int> slice_starts_;
std::vector<int> slice_sizes_;
bool configured_ = false;

public:
explicit SliceLayerMock(int id);

void configure(const Shape& input_shape, const std::vector<int>& starts,
const std::vector<int>& sizes, Shape& output_shape_ref);

void exec(const Tensor<double>& input, Tensor<double>& output) override;

Shape get_output_shape() override;

std::string get_type_name() const override;
};

#endif
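
SliceLayerMock's output shape is just the requested sizes, provided each start/size window fits inside the input; a sketch of that check with plain vectors (again an assumption, since Shape is defined elsewhere):

#include <cstddef>
#include <stdexcept>
#include <vector>

std::vector<int> slice_out_shape(const std::vector<int>& input_dims,
                                 const std::vector<int>& starts,
                                 const std::vector<int>& sizes) {
  if (starts.size() != input_dims.size() || sizes.size() != input_dims.size())
    throw std::invalid_argument("starts/sizes must match the input rank");
  for (std::size_t d = 0; d < input_dims.size(); ++d) {
    if (starts[d] < 0 || sizes[d] < 0 || starts[d] + sizes[d] > input_dims[d])
      throw std::invalid_argument("slice window out of range");
  }
  return sizes;  // each output extent is exactly the requested size
}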