diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt
index a5c4dce..b704bc8 100644
--- a/app/CMakeLists.txt
+++ b/app/CMakeLists.txt
@@ -1 +1,2 @@
 add_subdirectory(example)
+add_subdirectory(layer_example)
\ No newline at end of file
diff --git a/app/example/CMakeLists.txt b/app/example/CMakeLists.txt
index 033d285..5ac1ad8 100644
--- a/app/example/CMakeLists.txt
+++ b/app/example/CMakeLists.txt
@@ -1 +1 @@
-add_executable(example main.cpp)
+add_executable(example main.cpp)
\ No newline at end of file
diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt
new file mode 100644
index 0000000..f47ced7
--- /dev/null
+++ b/app/layer_example/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary")
+
+add_executable(Concat ConcatLayer.cpp)
+
+include_directories(${ARM_DIR})
+include_directories(${ARM_DIR}/include)
+target_link_directories(Concat PUBLIC ${ARM_DIR}/build)
+
+target_link_libraries(Concat arm_compute)
+
+add_dependencies(Concat build_compute_library)
\ No newline at end of file
diff --git a/app/layer_example/ConcatLayer.cpp b/app/layer_example/ConcatLayer.cpp
new file mode 100644
index 0000000..7a17ab0
--- /dev/null
+++ b/app/layer_example/ConcatLayer.cpp
@@ -0,0 +1,42 @@
+#include <iostream>
+#include <vector>
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+int main() {
+  Tensor input1, input2;
+  Tensor output;
+  std::vector<const ITensor*> input;
+
+  const int input_width = 3;
+  const int input_height = 3;
+  const int axis = 2;
+
+  input1.allocator()->init(
+      TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
+  input2.allocator()->init(
+      TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32));
+
+  input1.allocator()->allocate();
+  input2.allocator()->allocate();
+
+  fill_random_tensor(input1, 0.F, 1.F);
+  fill_random_tensor(input2, 0.F, 1.F);
+
+  input.push_back(&input1);
+  input.push_back(&input2);
+
+  NEConcatenateLayer concat;
+  concat.configure(input, &output, axis);
+  output.allocator()->allocate();
+
+  concat.run();
+
+  output.print(std::cout);
+}
\ No newline at end of file
diff --git a/include/layer/layer.h b/include/layer/layer.h
new file mode 100644
index 0000000..973cac8
--- /dev/null
+++ b/include/layer/layer.h
@@ -0,0 +1,35 @@
+#ifndef LAYER_H
+#define LAYER_H
+
+#include <list>
+
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+struct LayerAttributes {
+  int id = -1;
+};
+
+class Layer {
+ protected:
+  int id_;
+
+ public:
+  Layer() = default;
+  explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {}
+  explicit Layer(int id) : id_(id) {}
+  virtual ~Layer() = default;
+  void setID(int id) { id_ = id; }
+  int getID() const { return id_; }
+  virtual std::string getInfoString() const;
+  virtual void exec() = 0;
+
+  virtual std::string get_type_name() const = 0;
+  void addNeighbor(Layer* neighbor);
+  void removeNeighbor(Layer* neighbor);
+  std::list<Layer*> neighbors_;
+};
+#endif
\ No newline at end of file
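For reference, a minimal sketch of what a concrete subclass of Layer has to provide; exec() and get_type_name() are the only pure-virtual members. IdentityLayer is hypothetical and not part of this patch:

    #include "layer/layer.h"

    // Hypothetical no-op layer: the smallest possible Layer subclass.
    class IdentityLayer : public Layer {
     public:
      explicit IdentityLayer(int id) : Layer(id) {}
      void exec() override {}  // nothing to run
      std::string get_type_name() const override { return "IdentityLayer"; }
    };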
diff --git a/src/layer/ConcatenateLayer.cpp b/src/layer/ConcatenateLayer.cpp
new file mode 100644
index 0000000..7111d41
--- /dev/null
+++ b/src/layer/ConcatenateLayer.cpp
@@ -0,0 +1,60 @@
+#ifndef ACL_CONCATENATE_LAYER_H
+#define ACL_CONCATENATE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <vector>
+
+#include "layer/layer.h"
+
+class ConcatenateLayer : public Layer {
+ private:
+  NEConcatenateLayer concat_;
+  bool configured_ = false;
+
+ public:
+  ConcatenateLayer(int id) : Layer(id) {}
+
+  void configure(const std::vector<TensorShape>& inputs_shapes,
+                 unsigned int axis, TensorShape& output_shape,
+                 std::vector<Tensor*>& input, Tensor& output) {
+    try {
+      std::vector<const ITensor*> inpcopy;
+      std::vector<const ITensorInfo*> inp_info;
+
+      for (size_t i = 0; i < input.size(); i++) {
+        input[i]->allocator()->init(
+            TensorInfo(inputs_shapes[i], 1, DataType::F32));
+        input[i]->allocator()->allocate();
+        inp_info.push_back(input[i]->info());
+        inpcopy.push_back(input[i]);
+      }
+
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NEConcatenateLayer::validate(inp_info, output.info(), axis)) {
+        throw std::runtime_error("ConcatenateLayer: Validation failed");
+      }
+
+      output.allocator()->allocate();
+      concat_.configure(inpcopy, &output, axis);
+
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ConcatenateLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error("ConcatenateLayer: Layer not configured.");
+    }
+    concat_.run();
+  }
+
+  std::string get_type_name() const override { return "ConcatenateLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ConvLayer.cpp b/src/layer/ConvLayer.cpp
new file mode 100644
index 0000000..53dcb3d
--- /dev/null
+++ b/src/layer/ConvLayer.cpp
@@ -0,0 +1,58 @@
+#ifndef ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
+#define ACL_CONVOLUTION_LAYER_SIMPLIFIED_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+class ConvolutionLayer : public Layer {
+ private:
+  NEConvolutionLayer conv_;
+  bool configured_ = false;
+
+ public:
+  ConvolutionLayer(int id) : Layer(id) {}
+
+  void configure(const TensorShape& input_shape,
+                 const TensorShape& weights_shape,
+                 const TensorShape& biases_shape, TensorShape& output_shape,
+                 const PadStrideInfo& info, Tensor& input, Tensor& weights,
+                 Tensor& biases, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32));
+      biases.allocator()->init(TensorInfo(biases_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NEConvolutionLayer::validate(input.info(), weights.info(),
+                                        biases.info(), output.info(), info)) {
+        throw std::runtime_error("ConvolutionLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      weights.allocator()->allocate();
+      biases.allocator()->allocate();
+      output.allocator()->allocate();
+
+      conv_.configure(&input, &weights, &biases, &output, info);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ConvolutionLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error("ConvolutionLayer: Layer not configured.");
+    }
+    conv_.run();
+  }
+
+  std::string get_type_name() const override { return "ConvolutionLayer"; }
+};
+
+#endif
\ No newline at end of file
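A minimal usage sketch for the ConvolutionLayer wrapper above; the shapes, stride, and layer id are made up for illustration, and the same ACL setup as app/layer_example is assumed:

    // 16x16x3 input, eight 3x3x3 filters, stride 1, no padding -> 14x14x8.
    Tensor in, w, b, out;
    TensorShape out_shape(14, 14, 8);
    ConvolutionLayer conv(0);
    conv.configure(TensorShape(16, 16, 3), TensorShape(3, 3, 3, 8),
                   TensorShape(8), out_shape, PadStrideInfo(1, 1, 0, 0),
                   in, w, b, out);
    fill_random_tensor(in, -1.F, 1.F);  // configure() already allocated
    fill_random_tensor(w, -1.F, 1.F);
    fill_random_tensor(b, -1.F, 1.F);
    conv.exec();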
diff --git a/src/layer/ElementwiseLayer.cpp b/src/layer/ElementwiseLayer.cpp
new file mode 100644
index 0000000..728590d
--- /dev/null
+++ b/src/layer/ElementwiseLayer.cpp
@@ -0,0 +1,191 @@
+#ifndef ACL_ELEMENTWISE_LAYER_H
+#define ACL_ELEMENTWISE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+enum class ElementwiseOp { kAdd, kDiv, kAbs, kSigm, kSwish, kSquaredDiff };
+
+class ElementwiseLayer : public Layer {
+ private:
+  ElementwiseOp op_type_;
+  NEActivationLayer act_;
+  NEArithmeticAddition add_;
+  NEElementwiseDivision div_;
+  NEElementwiseSquaredDiff sqdiff_;
+  bool configured_ = false;
+
+ public:
+  ElementwiseLayer(int id, ElementwiseOp op) : Layer(id), op_type_(op) {}
+
+  ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::kAdd) {}
+
+  void configure(const TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+      switch (op_type_) {
+        case ElementwiseOp::kAbs: {
+          if (!NEActivationLayer::validate(
+                  input.info(), output.info(),
+                  ActivationLayerInfo(
+                      ActivationLayerInfo::ActivationFunction::ABS))) {
+            throw std::runtime_error("AbsLayer: Validation failed");
+          }
+
+          act_.configure(&input, &output,
+                         ActivationLayerInfo(
+                             ActivationLayerInfo::ActivationFunction::ABS));
+          break;
+        }
+        case ElementwiseOp::kSigm: {
+          if (!NEActivationLayer::validate(
+                  input.info(), output.info(),
+                  ActivationLayerInfo(
+                      ActivationLayerInfo::ActivationFunction::LOGISTIC))) {
+            throw std::runtime_error("SigmoidLayer: Validation failed");
+          }
+
+          act_.configure(
+              &input, &output,
+              ActivationLayerInfo(
+                  ActivationLayerInfo::ActivationFunction::LOGISTIC));
+          break;
+        }
+        case ElementwiseOp::kSwish: {
+          if (!NEActivationLayer::validate(
+                  input.info(), output.info(),
+                  ActivationLayerInfo(
+                      ActivationLayerInfo::ActivationFunction::SWISH))) {
+            throw std::runtime_error("SwishLayer: Validation failed");
+          }
+
+          act_.configure(&input, &output,
+                         ActivationLayerInfo(
+                             ActivationLayerInfo::ActivationFunction::SWISH));
+          break;
+        }
+        default:
+          throw std::runtime_error(
+              "ElementwiseLayer: This operation requires two inputs");
+      }
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ElementwiseLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void configure(const TensorShape& input1_shape,
+                 const TensorShape& input2_shape, TensorShape& output_shape,
+                 Tensor& input1, Tensor& input2, Tensor& output) {
+    try {
+      input1.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32));
+      input2.allocator()->init(TensorInfo(input2_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      input1.allocator()->allocate();
+      input2.allocator()->allocate();
+      output.allocator()->allocate();
+      switch (op_type_) {
+        case ElementwiseOp::kAdd: {
+          if (!NEArithmeticAddition::validate(input1.info(), input2.info(),
+                                              output.info(),
+                                              ConvertPolicy::WRAP)) {
+            throw std::runtime_error("AddLayer: Validation failed");
+          }
+
+          add_.configure(&input1, &input2, &output, ConvertPolicy::WRAP);
+          break;
+        }
+        case ElementwiseOp::kDiv: {
+          if (!NEElementwiseDivision::validate(input1.info(), input2.info(),
+                                               output.info())) {
+            throw std::runtime_error("DivLayer: Validation failed");
+          }
+
+          div_.configure(&input1, &input2, &output);
+          break;
+        }
+        case ElementwiseOp::kSquaredDiff: {
+          if (!NEElementwiseSquaredDiff::validate(input1.info(), input2.info(),
+                                                  output.info())) {
+            throw std::runtime_error("SquaredDiffLayer: Validation failed");
+          }
+
+          sqdiff_.configure(&input1, &input2, &output);
+          break;
+        }
+        default:
+          throw std::runtime_error(
+              "ElementwiseLayer: This operation requires a single input");
+      }
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ElementwiseLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error(
+          "ElementwiseLayer: Layer not configured before exec.");
+    }
+    switch (op_type_) {
+      case ElementwiseOp::kAbs:
+      case ElementwiseOp::kSigm:
+      case ElementwiseOp::kSwish:
+        act_.run();
+        break;
+      case ElementwiseOp::kAdd: {
+        add_.run();
+        break;
+      }
+      case ElementwiseOp::kDiv: {
+        div_.run();
+        break;
+      }
+      case ElementwiseOp::kSquaredDiff: {
+        sqdiff_.run();
+        break;
+      }
+      default:
+        throw std::runtime_error("ElementwiseLayer: Unknown operation");
+    }
+  }
+
+  std::string get_type_name() const override {
+    switch (op_type_) {
+      case ElementwiseOp::kAdd:
+        return "ElementwiseAddLayer";
+      case ElementwiseOp::kDiv:
+        return "ElementwiseDivLayer";
+      case ElementwiseOp::kAbs:
+        return "ElementwiseAbsLayer";
+      case ElementwiseOp::kSigm:
+        return "ElementwiseSigmoidLayer";
+      case ElementwiseOp::kSwish:
+        return "ElementwiseSwishLayer";
+      case ElementwiseOp::kSquaredDiff:
+        return "ElementwiseSquaredDiffLayer";
+      default:
+        return "ElementwiseUnknownLayer";
+    }
+  }
+};
+
+#endif
\ No newline at end of file
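A usage sketch for the two-input path of ElementwiseLayer above (arbitrary 4x4 shapes, hypothetical id):

    Tensor a, b, out;
    TensorShape shape(4, 4);
    TensorShape out_shape(4, 4);
    ElementwiseLayer add(1, ElementwiseOp::kAdd);
    add.configure(shape, shape, out_shape, a, b, out);
    fill_random_tensor(a, 0.F, 1.F);
    fill_random_tensor(b, 0.F, 1.F);
    add.exec();
    out.print(std::cout);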
input"); + } + configured_ = true; + } catch (const std::exception& e) { + configured_ = false; + std::cerr << "ElementwiseLayer configuration error: " << e.what() + << std::endl; + } + } + + void exec() override { + if (!configured_) { + throw std::runtime_error( + "ElementwiseLayer: Layer not configured before exec."); + } + switch (op_type_) { + case ElementwiseOp::kAbs: + case ElementwiseOp::kSigm: + case ElementwiseOp::kSwish: + act_.run(); + break; + case ElementwiseOp::kAdd: { + add_.run(); + break; + } + case ElementwiseOp::kDiv: { + div_.run(); + break; + } + case ElementwiseOp::kSquaredDiff: { + sqdiff_.run(); + break; + } + default: + throw std::runtime_error( + "ElementwiseLayer: This operation requires single input"); + } + } + + std::string get_type_name() const override { + switch (op_type_) { + case ElementwiseOp::kAdd: + return "ElementwiseAddLayer"; + case ElementwiseOp::kDiv: + return "ElementwiseDivLayer"; + case ElementwiseOp::kAbs: + return "ElementwiseAbsLayer"; + case ElementwiseOp::kSigm: + return "ElementwiseSigmoidLayer"; + case ElementwiseOp::kSwish: + return "ElementwiseSwishLayer"; + case ElementwiseOp::kSquaredDiff: + return "ElementwiseSquaredDiffLayer"; + default: + return "ElementwiseUnknownLayer"; + } + } +}; + +#endif \ No newline at end of file diff --git a/src/layer/MatMulLayer.cpp b/src/layer/MatMulLayer.cpp new file mode 100644 index 0000000..7a21d97 --- /dev/null +++ b/src/layer/MatMulLayer.cpp @@ -0,0 +1,59 @@ +#ifndef ACL_MATMUL_LAYER_H +#define ACL_MATMUL_LAYER_H + +#include +#include +#include + +#include "layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +class MatMulLayer : public Layer { + private: + NEMatMul m_; + bool configured_ = false; + + public: + MatMulLayer(int id) : Layer(id) {}; + + void configure(TensorShape& input_x_shape, TensorShape& input_y_shape, + TensorShape& output_shape, Tensor& input_x, Tensor& input_y, + Tensor& output) { + try { + input_x.allocator()->init(TensorInfo(input_x_shape, 1, DataType::F32)); + input_y.allocator()->init(TensorInfo(input_y_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + + if (!NEMatMul::validate(input_x.info(), input_y.info(), output.info(), + MatMulInfo(), CpuMatMulSettings(), + ActivationLayerInfo())) { + throw std::runtime_error("MatMulLayer: Validation failed"); + } + + input_x.allocator()->allocate(); + input_y.allocator()->allocate(); + output.allocator()->allocate(); + + m_.configure(&input_x, &input_y, &output, MatMulInfo(), + CpuMatMulSettings(), ActivationLayerInfo()); + configured_ = true; + } catch (const std::exception& e) { + configured_ = false; + std::cerr << "MatMulLayer configuration error: " << e.what() << std::endl; + } + } + + void exec() override { + if (!configured_) { + throw std::runtime_error( + "MatMulLayer: Layer not configured before exec."); + } + m_.run(); + } + + std::string get_type_name() const override { return "MatMulLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp new file mode 100644 index 0000000..8fbca02 --- /dev/null +++ b/src/layer/PoolingLayer.cpp @@ -0,0 +1,55 @@ +#ifndef ACL_POOLING_LAYER_H +#define ACL_POOLING_LAYER_H + +#include +#include +#include + +#include "layer/layer.h" + +class PoolingLayer : public Layer { + private: + NEPoolingLayer pool_; + bool configured_ = false; + + public: + PoolingLayer(int id) : Layer(id) {} + + void configure(TensorShape& input_shape, TensorShape& output_shape, + 
diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp
new file mode 100644
index 0000000..8fbca02
--- /dev/null
+++ b/src/layer/PoolingLayer.cpp
@@ -0,0 +1,55 @@
+#ifndef ACL_POOLING_LAYER_H
+#define ACL_POOLING_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+class PoolingLayer : public Layer {
+ private:
+  NEPoolingLayer pool_;
+  bool configured_ = false;
+
+ public:
+  PoolingLayer(int id) : Layer(id) {}
+
+  void configure(TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NEPoolingLayer::validate(
+              input.info(), output.info(),
+              PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC))) {
+        throw std::runtime_error("PoolingLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      pool_.configure(&input, &output,
+                      PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC));
+
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "PoolingLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error(
+          "PoolingLayer: Layer not configured before exec.");
+    }
+    pool_.run();
+  }
+
+  std::string get_type_name() const override { return "PoolingLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/ReshapeLayer.cpp b/src/layer/ReshapeLayer.cpp
new file mode 100644
index 0000000..96c6523
--- /dev/null
+++ b/src/layer/ReshapeLayer.cpp
@@ -0,0 +1,50 @@
+#ifndef ACL_RESHAPE_LAYER_H
+#define ACL_RESHAPE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+class ReshapeLayer : public Layer {
+ private:
+  NEReshapeLayer reshape_;
+  bool configured_ = false;
+
+ public:
+  ReshapeLayer(int id) : Layer(id) {}
+
+  void configure(const TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NEReshapeLayer::validate(input.info(), output.info())) {
+        throw std::runtime_error("ReshapeLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      reshape_.configure(&input, &output);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ReshapeLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error("ReshapeLayer: Layer not configured.");
+    }
+    reshape_.run();
+  }
+
+  std::string get_type_name() const override { return "ReshapeLayer"; }
+};
+
+#endif
\ No newline at end of file
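A usage sketch for the ReshapeLayer wrapper above (hypothetical shapes; the element counts must match, here 4x4 = 16):

    Tensor in, out;
    TensorShape in_shape(4, 4);
    TensorShape out_shape(16);
    ReshapeLayer reshape(3);
    reshape.configure(in_shape, out_shape, in, out);
    fill_random_tensor(in, 0.F, 1.F);
    reshape.exec();
    out.print(std::cout);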
diff --git a/src/layer/ResizeLayer.cpp b/src/layer/ResizeLayer.cpp
new file mode 100644
index 0000000..15ebaf1
--- /dev/null
+++ b/src/layer/ResizeLayer.cpp
@@ -0,0 +1,59 @@
+#ifndef ACL_RESIZE_LAYER_H
+#define ACL_RESIZE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class ResizeLayer : public Layer {
+ private:
+  NEScale resize_;
+  bool configured_ = false;
+
+ public:
+  ResizeLayer(int id) : Layer(id) {}
+  void configure(TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NEScale::validate(
+              input.info(), output.info(),
+              ScaleKernelInfo{InterpolationPolicy::NEAREST_NEIGHBOR,
+                              BorderMode::REPLICATE, PixelValue(),
+                              SamplingPolicy::CENTER})) {
+        throw std::runtime_error("ResizeLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      resize_.configure(&input, &output,
+                        ScaleKernelInfo{InterpolationPolicy::NEAREST_NEIGHBOR,
+                                        BorderMode::REPLICATE, PixelValue(),
+                                        SamplingPolicy::CENTER});
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "ResizeLayer configuration error: " << e.what() << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error(
+          "ResizeLayer: Layer not configured before exec.");
+    }
+    resize_.run();
+  }
+
+  std::string get_type_name() const override { return "ResizeLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/SliceLayer.cpp b/src/layer/SliceLayer.cpp
new file mode 100644
index 0000000..468c835
--- /dev/null
+++ b/src/layer/SliceLayer.cpp
@@ -0,0 +1,49 @@
+#ifndef ACL_SLICE_LAYER_H
+#define ACL_SLICE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+class SliceLayer : public Layer {
+ private:
+  NESlice slice_;
+  bool configured_ = false;
+
+ public:
+  SliceLayer(int id) : Layer(id) {}
+
+  void configure(const TensorShape& input_shape, Coordinates starts,
+                 Coordinates ends, TensorShape& output_shape, Tensor& input,
+                 Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NESlice::validate(input.info(), output.info(), starts, ends)) {
+        throw std::runtime_error("SliceLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      slice_.configure(&input, &output, starts, ends);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "SliceLayer configuration error: " << e.what() << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error("SliceLayer: Layer not configured.");
+    }
+    slice_.run();
+  }
+  std::string get_type_name() const override { return "SliceLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/SoftmaxLayer.cpp b/src/layer/SoftmaxLayer.cpp
new file mode 100644
index 0000000..76b8287
--- /dev/null
+++ b/src/layer/SoftmaxLayer.cpp
@@ -0,0 +1,54 @@
+#ifndef ACL_SOFTMAX_LAYER_H
+#define ACL_SOFTMAX_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class SoftmaxLayer : public Layer {
+ private:
+  NESoftmaxLayer sml_;
+  bool configured_ = false;
+
+ public:
+  SoftmaxLayer(int id) : Layer(id) {}
+
+  void configure(TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NESoftmaxLayer::validate(input.info(), output.info())) {
+        throw std::runtime_error("SoftmaxLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      sml_.configure(&input, &output);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "SoftmaxLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error(
+          "SoftmaxLayer: Layer not configured before exec.");
+    }
+    sml_.run();
+  }
+
+  std::string get_type_name() const override { return "SoftmaxLayer"; }
+};
+
+#endif
\ No newline at end of file
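A usage sketch for the SliceLayer wrapper above; NESlice treats starts as inclusive and ends as exclusive, so the hypothetical window below has shape (2, 3):

    Tensor in, out;
    TensorShape in_shape(5, 5);
    TensorShape out_shape(2, 3);
    SliceLayer slice(4);
    slice.configure(in_shape, Coordinates(1, 0), Coordinates(3, 3),
                    out_shape, in, out);
    fill_random_tensor(in, 0.F, 1.F);
    slice.exec();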
diff --git a/src/layer/SplitLayer.cpp b/src/layer/SplitLayer.cpp
new file mode 100644
index 0000000..d36af7e
--- /dev/null
+++ b/src/layer/SplitLayer.cpp
@@ -0,0 +1,53 @@
+#ifndef ACL_SPLIT_LAYER_H
+#define ACL_SPLIT_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <vector>
+
+#include "layer/layer.h"
+
+class SplitLayer : public Layer {
+ private:
+  NESplit split_;
+  bool configured_ = false;
+
+ public:
+  SplitLayer(int id) : Layer(id) {}
+
+  void configure(const TensorShape& input_shape, unsigned int axis,
+                 Tensor& input, std::vector<ITensor*>& outputs) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      input.allocator()->allocate();
+
+      std::vector<ITensorInfo*> outputs_info;
+      for (auto& output : outputs) {
+        if (output != nullptr) {
+          outputs_info.push_back(output->info());
+        }
+      }
+
+      if (!NESplit::validate(input.info(), outputs_info, axis)) {
+        throw std::runtime_error("SplitLayer: Validation failed");
+      }
+
+      split_.configure(&input, outputs, axis);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "SplitLayer configuration error: " << e.what() << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error("SplitLayer: Layer not configured.");
+    }
+    split_.run();
+  }
+
+  std::string get_type_name() const override { return "SplitLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/TransposeLayer.cpp b/src/layer/TransposeLayer.cpp
new file mode 100644
index 0000000..a13e6f4
--- /dev/null
+++ b/src/layer/TransposeLayer.cpp
@@ -0,0 +1,54 @@
+#ifndef ACL_TRANSPOSE_LAYER_H
+#define ACL_TRANSPOSE_LAYER_H
+
+#include <iostream>
+#include <stdexcept>
+#include <string>
+
+#include "layer/layer.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+class TransposeLayer : public Layer {
+ private:
+  NETranspose t_;
+  bool configured_ = false;
+
+ public:
+  TransposeLayer(int id) : Layer(id) {}
+
+  void configure(TensorShape& input_shape, TensorShape& output_shape,
+                 Tensor& input, Tensor& output) {
+    try {
+      input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
+      output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32));
+
+      if (!NETranspose::validate(input.info(), output.info())) {
+        throw std::runtime_error("TransposeLayer: Validation failed");
+      }
+
+      input.allocator()->allocate();
+      output.allocator()->allocate();
+
+      t_.configure(&input, &output);
+      configured_ = true;
+    } catch (const std::exception& e) {
+      configured_ = false;
+      std::cerr << "TransposeLayer configuration error: " << e.what()
+                << std::endl;
+    }
+  }
+
+  void exec() override {
+    if (!configured_) {
+      throw std::runtime_error(
+          "TransposeLayer: Layer not configured before exec.");
+    }
+    t_.run();
+  }
+
+  std::string get_type_name() const override { return "TransposeLayer"; }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/layer/layer.cpp b/src/layer/layer.cpp
new file mode 100644
index 0000000..42842eb
--- /dev/null
+++ b/src/layer/layer.cpp
@@ -0,0 +1,13 @@
+#include "layer/layer.h"
+
+void Layer::addNeighbor(Layer* neighbor) {
+  if (neighbor != nullptr) {
+    neighbors_.push_back(neighbor);
+  }
+}
+
+void Layer::removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); }
+
+std::string Layer::getInfoString() const {
+  return "Layer (ID: " + std::to_string(id_) + ")";
+}
\ No newline at end of file
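Finally, a sketch of how layers might be chained through the neighbor list from layer.h; nothing in this patch walks the graph, so each node's exec() still has to be called explicitly, and both layers would need to be configured with compatible shapes first (omitted here):

    ReshapeLayer reshape(0);
    SoftmaxLayer softmax(1);
    reshape.addNeighbor(&softmax);
    // ... configure both layers with compatible shapes ...
    std::cout << reshape.getInfoString() << " -> " << softmax.get_type_name()
              << std::endl;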