CPP interface - Tyill/sunnet GitHub Wiki

The interface is available here.

Operators

Examples of use


Create net

Creating a Network Architecture

        /// create net
        /// @param[in] jnNet - network architecture in JSON
        /// @param[in] weightPath - path to file with weight
        Net(const std::string& jnNet = "", const std::string& weightPath = ""){
        
            if (!jnNet.empty())
                createNet(jnNet);

            if (net_ && !weightPath.empty())
                loadAllWeightFromFile(weightPath);            
        };

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "FC1")
        .addNode("FC1", sn::FullyConnected(125), "FC2")
        .addNode("FC2", sn::FullyConnected(10), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Training net

You can train a network in two ways:

  • by calling a single function, 'snTraining'
  • or in the standard way: call 'snForward', compute your own error, then pass it back with 'snBackward'

Let's look at the first option; a sketch of the second is shown at the end of this section.

        /// training action - cycle forward-backward
        /// @param[in] lr - learning rate
        /// @param[in] inTns - in tensor NCHW(bsz, ch, h, w)
        /// @param[inout] outTns - out tensor NCHW(bsz, ch, h, w)
        /// @param[in] targetTns - target tensor
        /// @param[inout] outAccurate - accuracy estimate
        /// @return true - ok
        bool training(snFloat lr, Tensor& inTns, Tensor& outTns, Tensor& targetTns, snFloat& outAccurate){

            if (!net_ && !createNet()) return false;

            return snTraining(net_, lr, inTns.size(), inTns.data(), 
                outTns.size(), outTns.data(),
                targetTns.data(), &outAccurate);
        }

Example:

        float accurat = 0;
        sn::Tensor inTns(sn::snLSize(w, h, d, batchSz), inLayer);
        sn::Tensor outTns(sn::snLSize(w1, h1, d, batchSz), outLayer);
        sn::Tensor targetTns(sn::snLSize(w1, h1, d, batchSz), targetLayer);

        snet.training(lr, inTns, outTns, targetTns, accurat);

        accuratSumm += accurat;     
        cout << k << " metrix " << accuratSumm / k << " " << snet.getLastErrorStr() << endl;

The function takes a batch of input data and the target result.
It returns the network output and an accuracy estimate for the batch.

The accuracy estimate is calculated as:

    snFloat* targetData = targetTens->getData();
    snFloat* outData = outTens->getData();
    
    size_t accCnt = 0, osz = outTens->size().size();
    for (size_t i = 0; i < osz; ++i){

        if (abs(outData[i] - targetData[i]) < 0.1)
            ++accCnt; 
    }

    return (accCnt * 1.F) / osz;
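
For the second option you drive the cycle yourself. A minimal sketch, assuming the wrapper exposes forward() and backward() that mirror snForward/snBackward (forward() appears the same way in the ResNet50 example below), with a plain MSE gradient as an illustration:

    namespace sn = SN_API;

    // net and tensors are built as in the example above
    sn::Tensor gradTns(outTns.size());

    snet.forward(true, inTns, outTns);        // lern == true while training

    // compute your own error gradient w.r.t. the output (here a plain MSE)
    sn::snFloat* out = outTns.data();
    sn::snFloat* tar = targetTns.data();
    sn::snFloat* grd = gradTns.data();
    size_t osz = outTns.size().size();
    for (size_t i = 0; i < osz; ++i)
        grd[i] = out[i] - tar[i];

    snet.backward(lr, gradTns);               // pass the gradient back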

Architecture of net

Getting the network structure as JSON.

        /// architecture of net in json
        /// @return jn arch
        std::string getArchitecNetJN(){

            if (!net_ && !createNet()) return "";

            char* arch = nullptr;
            snGetArchitecNet(net_, &arch);
            
            std::string ret = arch;

            snFreeResources(0, arch);

            return ret;
        }
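
Example of use:

    // print the current architecture as JSON
    std::cout << snet.getArchitecNetJN() << std::endl;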

Save and load weights

        /// save all weights to file
        /// @param[in] path - file path
        /// @return true - ok
        bool saveAllWeightToFile(const std::string& path){

            if (!net_) return false;

            return snSaveAllWeightToFile(net_, path.c_str());
        }

        /// load all weights from file
        /// @param[in] path - file path
        /// @return true - ok
        bool loadAllWeightFromFile(const std::string& path){

            if (!net_ && !createNet()) return false;

            return snLoadAllWeightFromFile(net_, path.c_str());
        }
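
Both calls report failure through their return value; the ResNet50 example below checks it like this:

    if (!snet.saveAllWeightToFile("c:/cpp/w.dat"))
        std::cout << "error saveAllWeightToFile: " << snet.getLastErrorStr() << std::endl;

    if (!snet.loadAllWeightFromFile("c:/cpp/w.dat"))
        std::cout << "error loadAllWeightFromFile: " << snet.getLastErrorStr() << std::endl;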

Set and get params of layer

        /// add node (layer)
        /// @param[in] name - name node in architecture of net
        /// @param[in] nd - tensor node
        /// @param[in] nextNodes - next nodes, separated by a space
        /// @return ref Net
        template<typename T> 
        Net& addNode(const std::string& name, T& nd, const std::string& nextNodes){
                        
            nodes_.push_back(node{ name, nd.name(), nd.getParamsJn(), nextNodes });
            
            return *this;
        }

        /// update param node (layer)
        /// @param[in] name - name node in architecture of net
        /// @param[in] nd - tensor node
        /// @return true - ok
        template<typename T>
        bool updateNode(const std::string& name, const T& nd){

            bool ok = false;
            if (net_)
                ok = snSetParamNode(net_, name.c_str(), nd.getParamsJn());
        .......

        /// get output of node
        /// @param[in] name - name node in architecture of net
        /// @param[out] output - output tensor NCHW(bsz, ch, h, w)
        /// @return true - ok
        bool getOutputNode(const std::string& name, Tensor& output){

            if (!net_) return false;
        .......
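
A sketch of both calls (the new dropout value is hypothetical, and a default-constructed Tensor is assumed for the output):

    namespace sn = SN_API;

    // change a parameter of an existing layer at run time
    sn::FullyConnected fc(125);
    fc.dropOut = 0.5F;                       // hypothetical new value
    snet.updateNode("FC1", fc);

    // read the output of a layer after a forward pass
    sn::Tensor fc1Out;
    snet.getOutputNode("FC1", fc1Out);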
         

Monitoring gradients and weights

You can specify your own callback function, and insert your 'UserLayer' node after the node of interest.

        /// add user callback
        /// @param[in] name - name userCBack in architecture of net
        /// @param[in] cback - call back function
        /// @param[in] udata - aux data
        /// @return true - ok
        bool addUserCBack(const std::string& name, snUserCBack cback, snUData udata){

            bool ok = true;
            if (net_)
                ok = snAddUserCallBack(net_, name.c_str(), cback, udata);
            else
                ucb_.push_back(uCBack{ name, cback, udata });

            return ok;
        }

Input

The input node receives the user data, and transmits further along the chain.

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "FC2")
    ....

Output

This operator has no interface of its own, since none is needed:
for the last node, the next node is simply specified as "Output".

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "FC1")
        .addNode("FC1", sn::FullyConnected(125), "FC2")
        ...
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

FullyConnected

   
    class FullyConnected{

    public:

        uint32_t units;                            ///< Number of out neurons. !Required parameter [0..)
        active act = active::relu;                 ///< Activation function type. Optional parameter
        optimizer opt = optimizer::adam;           ///< Optimizer of weights. Optional parameter
        snFloat dropOut = 0.0;                     ///< Random disconnection of neurons. Optional parameter [0..1.F]
        batchNormType bnorm = batchNormType::none; ///< Type of batch norm. Optional parameter
        uint32_t gpuDeviceId = 0;                  ///< GPU Id
        bool freeze = false;                       ///< Do not change weights. Optional parameter
        bool useBias = true;                       ///< +bias. Optional parameter
        weightInit wini = weightInit::he;          ///< Type of initialization of weights. Optional parameter
        snFloat decayMomentDW = 0.9;               ///< Optimizer of weights moment change. Optional parameter [0..1.F]
        snFloat decayMomentWGr = 0.99;             ///< Optimizer of weights moment change of prev. Optional parameter [0..1.F]
        snFloat lmbRegular = 0.001;                ///< Optimizer of weights l2Norm. Optional parameter [0..1.F]
        snFloat batchNormLr = 0.001;               ///< Learning rate for batch norm coef. Optional parameter [0..)
        
        FullyConnected(uint32_t units_,                          
                       active act_ = active::relu,                
                       optimizer opt_ = optimizer::adam,    
        .... 
 }        

The default parameters are specified.

Convolution

   
    class Convolution{

    public:
        
        uint32_t filters;                          ///< Number of output layers. !Required parameter [0..)
        active act = active::relu;                 ///< Activation function type. Optional parameter
        optimizer opt = optimizer::adam;           ///< Optimizer of weights. Optional parameter
        snFloat dropOut = 0.0;                     ///< Random disconnection of neurons. Optional parameter [0..1.F]
        batchNormType bnorm = batchNormType::none; ///< Type of batch norm. Optional parameter
        uint32_t fWidth = 3;                       ///< Width of mask. Optional parameter(> 0)
        uint32_t fHeight = 3;                      ///< Height of mask. Optional parameter(> 0)
        uint32_t padding = 0;                      ///< Padding around the edges. Optional parameter
        uint32_t stride = 1;                       ///< Mask movement step. Optional parameter(> 0)
        uint32_t dilate = 1;                       ///< Mask dilation. Optional parameter(> 0)
        uint32_t gpuDeviceId = 0;                  ///< GPU Id
        bool freeze = false;                       ///< Do not change weights. Optional parameter
        bool useBias= true;                        ///< +bias. Optional parameter
        weightInit wini = weightInit::he;          ///< Type of initialization of weights. Optional parameter
        snFloat decayMomentDW = 0.9;               ///< Optimizer of weights moment change. Optional parameter [0..1.F]
        snFloat decayMomentWGr = 0.99;             ///< Optimizer of weights moment change of prev. Optional parameter [0..1.F]
        snFloat lmbRegular = 0.001;                ///< Optimizer of weights l2Norm. Optional parameter [0..1.F]
        snFloat batchNormLr = 0.001;               ///< Learning rate for batch norm coef. Optional parameter [0..)

        Convolution(uint32_t kernel_,              
            active act_ = active::relu,      
        ....

The default parameters are specified.

Deconvolution

   
    class Deconvolution{

    public:

        uint32_t filters;                          ///< Number of output layers. !Required parameter [0..)
        active act = active::relu;                 ///< Activation function type. Optional parameter
        optimizer opt = optimizer::adam;           ///< Optimizer of weights. Optional parameter
        snFloat dropOut = 0.0;                     ///< Random disconnection of neurons. Optional parameter [0..1.F]
        batchNormType bnorm = batchNormType::none; ///< Type of batch norm. Optional parameter
        uint32_t fWidth = 3;                       ///< Width of mask. Optional parameter(> 0)
        uint32_t fHeight = 3;                      ///< Height of mask. Optional parameter(> 0)
        uint32_t stride = 2;                       ///< Mask movement step. Optional parameter(> 0)
        uint32_t gpuDeviceId = 0;                  ///< GPU Id
        bool freeze = false;                       ///< Do not change weights. Optional parameter
        weightInit wini = weightInit::he;          ///< Type of initialization of weights. Optional parameter
        snFloat decayMomentDW = 0.9;               ///< Optimizer of weights moment change. Optional parameter [0..1.F]
        snFloat decayMomentWGr = 0.99;             ///< Optimizer of weights moment change of prev. Optional parameter [0..1.F]
        snFloat lmbRegular = 0.001;                ///< Optimizer of weights l2Norm. Optional parameter [0..1.F]
        snFloat batchNormLr = 0.001;               ///< Learning rate for batch norm coef. Optional parameter [0..)
        
        Deconvolution(uint32_t filters_,
            active act_ = active::relu,
            optimizer opt_ = optimizer::adam,
        .....

The default parameters are specified.

Pooling

    
    class Pooling{

    public:
            
        uint32_t kernel = 2;              ///< Square Mask Size. Optional parameter (> 0) 
        uint32_t stride = 2;              ///< Mask movement step. Optional parameter(> 0)
        poolType pool = poolType::max;    ///< Operator Type. Optional parameter 
        uint32_t gpuDeviceId = 0;         ///< GPU Id
      
        Pooling(uint32_t kernel_ = 2,
            poolType pool_ = poolType::max          
        .... 

The default parameters are specified.
If the mask does not fit within the image, the image is automatically padded around the edges.

LossFunction

Operator for automatic error calculation.
Depending on the task the network solves, it supports the following error types:

  • "softMaxACrossEntropy" - for multiclass classification
  • "binaryCrossEntropy" - for binary classification
  • "regressionMSE" - regression of a function with a least-squares estimate
  • "userLoss" - user-defined operator
   
    class LossFunction{

    public:
           
        lossType loss;

        LossFunction(lossType loss_) : loss(loss_){};

Switch

Operator for transferring data to several nodes at once.
While running, you can change the output path with net.updateNode() (see the sketch after the example below).
Data can be received from one node only.

    
    class Switch{

    public:

        std::string nextWay;
       
        Switch(const std::string& nextWay_) :nextWay(nextWay_){};

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "SW")
        .addNode("SW", sn::Switch(), "FC1 FC2")
        .addNode("FC1", sn::FullyConnected(10), "Sum")
        .addNode("FC2", sn::FullyConnected(10), "Sum")
        .addNode("Sum", sn::Summator(), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Lock

Operator to block further calculation at the current location.
While running, you can change its state with net.updateNode() (see the sketch after the example below).
This makes it possible to dynamically disconnect parallel branches of the network during operation.

   
    class Lock{

    public:
             
        lockType lockTp;    ///< Blocking activity. Optional parameter

        Lock(lockType lockTp_) : lockTp(lockTp_){} 

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "SW")
        .addNode("SW", sn::Switch(), "FC1 FC2")
        .addNode("FC1", sn::FullyConnected(10), "LC")
        .addNode("LC", sn::Lock(sn::lockType::unlock), "Sum")
        .addNode("FC2", sn::FullyConnected(10), "Sum")
        .addNode("Sum", sn::Summator(), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Summator

The operator is designed to combine the values of two layers.
The combination can be performed with the following options: "summ", "diff", "mean".
The dimensions of the input layers must be the same.

    class Summator{

    public:
         
        summatorType summType;

        Summator(summatorType summType_ = summatorType::summ) : summType(summType_){};

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "FC1 FC2")
        .addNode("FC1", sn::FullyConnected(10), "Sum")
        .addNode("FC2", sn::FullyConnected(10), "Sum")
        .addNode("Sum", sn::Summator(), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Crop

ROI clipping in each image of each channel.

    class Crop{

    public:
        
        rect rct;

        Crop(const rect& rct_) : rct(rct_){};
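
Example (the same pattern appears in the UNET-tiny listing below, where the crop feeds a Resize ahead of a Concat):

    snet.addNode("C2", sn::Convolution(10, 0), "P1 Crop1")
        .addNode("Crop1", sn::Crop(sn::rect(0, 0, 487, 487)), "Rsz1")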

Concat

The operator concatenates the channels of multiple layers.

    class Concat{

    public:
              
        std::string sequence;

        Concat(const std::string& sequence_) : sequence(sequence_){};

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "C1 C2")

        .addNode("C1", sn::Convolution(20), "R1")
        .addNode("R1", sn::Resize(sn::diap(0 20), sn::diap(0 20), "Conc")

        .addNode("C2", sn::Convolution(20), "R2")
        .addNode("R2", sn::Resize(sn::diap(0 20), sn::diap(20 40)), "Conc")

        .addNode("Conc", sn::Concat("R1 R2"), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Resize

Change the number of channels.
Works in conjunction with "Concat".

    class Resize{

    public:
               
        diap fwdDiap, bwdDiap;

        Resize(const diap& fwdDiap_, const diap& bwdDiap_) :
            fwdDiap(fwdDiap_), bwdDiap(bwdDiap_){};

Example:

    namespace sn = SN_API;

    sn::Net snet;
    
    snet.addNode("Input", sn::Input(), "C1 C2")

        .addNode("C1", sn::Convolution(20), "R1")
        .addNode("R1", sn::Resize(sn::diap(0 20), sn::diap(0 20), "Conc")

        .addNode("C2", sn::Convolution(20), "R2")
        .addNode("R2", sn::Resize(sn::diap(0 20), sn::diap(20 40)), "Conc")

        .addNode("Conc", sn::Concat("R1 R2"), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

Activation

Activation function operator.

    /*
    Activation function operator
    */
    class Activation{

    public:

        active act = active::relu;                 ///< Activation function type. Optional parameter

        Activation(const active& act_) : act(act_){};
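
Example (the ResNet50 listing below wires Activation after each summator in the same way):

    snet.addNode("Sum", sn::Summator(), "Act")
        .addNode("Act", sn::Activation(sn::active::relu), "Output");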

BatchNorm

    /*
    Batch norm
    */
    class BatchNormLayer{

    public:

        BatchNormLayer(){};
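
A short usage sketch (node names are hypothetical; the layer is assumed to be wired like any other node):

    snet.addNode("FC1", sn::FullyConnected(125), "BN1")
        .addNode("BN1", sn::BatchNormLayer(), "FC2")
        .addNode("FC2", sn::FullyConnected(10), "Output");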

UserLayer

Custom layer.
The callback is set by the user via the 'net.addUserCBack' function.

    class UserLayer{

    public:
       
        std::string cbackName;

        UserLayer(const std::string& cbackName_) : cbackName(cbackName_){};

Example:

    namespace sn = SN_API;

    sn::Net snet;

    snet.addUserCBack("myLayer", myLayer, 0);
    
    snet.addNode("Input", sn::Input(), "C1 C2")
        .addNode("C1", sn::Convolution(20), "UL")
        .addNode("UL", sn::UserLayer("myLayer"), "C2")
        .addNode("C2", sn::Convolution(20), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

MNIST

#include <string>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <map>
#include <filesystem>

#include "../cpp/snNet.h"
#include "../cpp/snTensor.h"
#include "../cpp/snOperator.h"

#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"

using namespace std;
namespace sn = SN_API;

bool loadImage(string& imgPath, int classCnt, vector<vector<string>>& imgName, vector<int>& imgCntDir, map<string, cv::Mat>& images){

    for (int i = 0; i < classCnt; ++i){

        namespace fs = std::tr2::sys;

        if (!fs::exists(fs::path(imgPath + to_string(i) + "/"))) continue;

        fs::directory_iterator it(imgPath + to_string(i) + "/"); int cnt = 0;
        while (it != fs::directory_iterator()){

            fs::path p = it->path();
            if (fs::is_regular_file(p) && (p.extension() == ".png")){

                imgName[i].push_back(p.filename());
            }
            ++it;
            ++cnt;
        }

        imgCntDir[i] = cnt;
    }

    return true;
}

int main(int argc, char* argv[]){
       
    sn::Net snet;
        
    snet.addNode("Input", sn::Input(), "C1")
        .addNode("C1", sn::Convolution(15, 3, 0, 1, sn::batchNormType::none, sn::active::relu), "C2")
        .addNode("C2", sn::Convolution(15, 3, 0, 1, sn::batchNormType::none, sn::active::relu), "P1")
        .addNode("P1", sn::Pooling(), "FC1")
        .addNode("FC1", sn::FullyConnected(128, sn::batchNormType::none), "FC2")
        .addNode("FC2", sn::FullyConnected(10), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

    string imgPath = "c://cpp//other//sunnet//example//mnist//images//";
    
    int batchSz = 100, classCnt = 10, w = 28, h = 28; float lr = 0.001F;
    vector<vector<string>> imgName(classCnt);
    vector<int> imgCntDir(classCnt);
    map<string, cv::Mat> images;
       
    if (!loadImage(imgPath, classCnt, imgName, imgCntDir, images)){
        cout << "Error 'loadImage' path: " << imgPath << endl;
        system("pause");
        return -1;
    }

    //snet.loadAllWeightFromFile("c:\\cpp\\w.dat");


    sn::Tensor inLayer(sn::snLSize(w, h, 1, batchSz));
    sn::Tensor targetLayer(sn::snLSize(classCnt, 1, 1, batchSz));
    sn::Tensor outLayer(sn::snLSize(classCnt, 1, 1, batchSz));
       
    size_t sum_metric = 0;
    size_t num_inst = 0;
    float accuratSumm = 0;
    for (int k = 0; k < 10; ++k){

        targetLayer.clear();
       
        srand(clock());
                
        for (int i = 0; i < batchSz; ++i){

            // directory
            int ndir = rand() % classCnt;
            while (imgCntDir[ndir] == 0) ndir = rand() % classCnt;

            // image
            int nimg = rand() % imgCntDir[ndir];

            // read
            cv::Mat img; string nm = imgName[ndir][nimg];
            if (images.find(nm) != images.end())
                img = images[nm];
            else{
                img = cv::imread(imgPath + to_string(ndir) + "/" + nm, CV_LOAD_IMAGE_UNCHANGED);
                images[nm] = img;
            }

            float* refData = inLayer.data() + i * w * h;
           
            double mean = cv::mean(img)[0];
            size_t nr = img.rows, nc = img.cols;
            for (size_t r = 0; r < nr; ++r){
                uchar* pt = img.ptr<uchar>(r);
                for (size_t c = 0; c < nc; ++c)
                    refData[r * nc + c] = pt[c] - mean;
            } 

            float* tarData = targetLayer.data() + classCnt * i;

            tarData[ndir] = 1;
        }

        // training
        float accurat = 0;
        snet.training(lr,
            inLayer,
            outLayer,
            targetLayer,
            accurat);
     
        // calc error
        sn::snFloat* targetData = targetLayer.data();
        sn::snFloat* outData = outLayer.data();
        size_t accCnt = 0, bsz = batchSz;
        for (int i = 0; i < bsz; ++i){

            float* refTarget = targetData + i * classCnt;
            float* refOutput = outData + i * classCnt;

            int maxOutInx = distance(refOutput, max_element(refOutput, refOutput + classCnt)),
                maxTargInx = distance(refTarget, max_element(refTarget, refTarget + classCnt));

            if (maxTargInx == maxOutInx)
                ++accCnt;
        }
              
        accuratSumm += (accCnt * 1.F) / bsz;

        cout << k << " accurate " << accuratSumm / (k + 1) << " " << snet.getLastErrorStr() << endl;        
    }
    
    snet.saveAllWeightToFile("c:\\cpp\\w.dat");

    system("pause");
    return 0;
}

CIFAR-10

#include <string>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <map>
#include <filesystem>

#include "../cpp/snNet.h"
#include "../cpp/snTensor.h"
#include "../cpp/snOperator.h"

#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"

using namespace std;
namespace sn = SN_API;

bool loadImage(string& imgPath, int classCnt, vector<vector<string>>& imgName, vector<int>& imgCntDir, map<string, cv::Mat>& images){

    for (int i = 0; i < classCnt; ++i){

        namespace fs = std::tr2::sys;

        if (!fs::exists(fs::path(imgPath + to_string(i) + "/"))) continue;

        fs::directory_iterator it(imgPath + to_string(i) + "/"); int cnt = 0;
        while (it != fs::directory_iterator()){

            fs::path p = it->path();
            if (fs::is_regular_file(p) && (p.extension() == ".png")){

                imgName[i].push_back(p.filename());
            }
            ++it;
            ++cnt; if (cnt > 1000) break;
        }

        imgCntDir[i] = cnt;
    }

    return true;
}

int main(int argc, char* argv[]){
       
    sn::Net snet;   
    snet.addNode("Input", sn::Input(), "C1")
        .addNode("C1", sn::Convolution(15, 3, -1, 1,  sn::batchNormType::beforeActive), "C2")
        .addNode("C2", sn::Convolution(15, 3, 0, 1, sn::batchNormType::beforeActive), "P1")
        .addNode("P1", sn::Pooling(), "C3")
     
        .addNode("C3", sn::Convolution(25, 3, -1, 1, sn::batchNormType::beforeActive), "C4")
        .addNode("C4", sn::Convolution(25, 3, 0, 1, sn::batchNormType::beforeActive), "P2")
        .addNode("P2", sn::Pooling(), "C5")
     
        .addNode("C5", sn::Convolution(40, 3, -1, 1, sn::batchNormType::beforeActive), "C6")
        .addNode("C6", sn::Convolution(40, 3, 0, 1, sn::batchNormType::beforeActive), "P3")
        .addNode("P3", sn::Pooling(), "FC1")
    
        .addNode("FC1", sn::FullyConnected(2048, sn::batchNormType::beforeActive), "FC2")
        .addNode("FC2", sn::FullyConnected(128, sn::batchNormType::beforeActive), "FC3")
        .addNode("FC3", sn::FullyConnected(10), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

    string imgPath = "c://cpp//other//sunnet//example//cifar10//images//";
    
    int batchSz = 100, classCnt = 10, w = 32, h = 32, d = 3; float lr = 0.001F;
    vector<vector<string>> imgName(classCnt);
    vector<int> imgCntDir(classCnt);
    map<string, cv::Mat> images;
       
    if (!loadImage(imgPath, classCnt, imgName, imgCntDir, images)){
        cout << "Error 'loadImage' path: " << imgPath << endl;
        system("pause");
        return -1;
    }

    sn::Tensor inLayer(sn::snLSize(w, h, d, batchSz));
    sn::Tensor targetLayer(sn::snLSize(classCnt, 1, 1, batchSz));
    sn::Tensor outLayer(sn::snLSize(classCnt, 1, 1, batchSz));
       
    size_t sum_metric = 0;
    size_t num_inst = 0;
    float accuratSumm = 0;
    for (int k = 0; k < 1000; ++k){

        targetLayer.clear();
       
        srand(clock());
                
        for (int i = 0; i < batchSz; ++i){

            // directory
            int ndir = rand() % classCnt;
            while (imgCntDir[ndir] == 0) ndir = rand() % classCnt;

            // image
            int nimg = rand() % imgCntDir[ndir];

            // read
            cv::Mat img; string nm = imgName[ndir][nimg];
            if (images.find(nm) != images.end())
                img = images[nm];
            else{
                img = cv::imread(imgPath + to_string(ndir) + "/" + nm, CV_LOAD_IMAGE_UNCHANGED);
                images[nm] = img;
            }

            float* refData = inLayer.data() + i * w * h * d;
           
            // copy the interleaved BGR image into the planar NCHW layout
            size_t nr = img.rows, nc = img.cols;
            for (size_t r = 0; r < nr; ++r){
                uchar* pt = img.ptr<uchar>(r);
                for (size_t c = 0; c < nc; ++c){
                    refData[r * nc + c] = pt[c * 3];                   // B plane
                    refData[nr * nc + r * nc + c] = pt[c * 3 + 1];     // G plane
                    refData[2 * nr * nc + r * nc + c] = pt[c * 3 + 2]; // R plane
                }
            } 

            float* tarData = targetLayer.data() + classCnt * i;

            tarData[ndir] = 1;
        }

        // training
        float accurat = 0;
        snet.training(lr,
            inLayer,
            outLayer,
            targetLayer,
            accurat);

        // calc error
        sn::snFloat* targetData = targetLayer.data();
        sn::snFloat* outData = outLayer.data();
        size_t accCnt = 0, bsz = batchSz;
        for (int i = 0; i < bsz; ++i){

            float* refTarget = targetData + i * classCnt;
            float* refOutput = outData + i * classCnt;

            int maxOutInx = distance(refOutput, max_element(refOutput, refOutput + classCnt)),
                maxTargInx = distance(refTarget, max_element(refTarget, refTarget + classCnt));

            if (maxTargInx == maxOutInx)
                ++accCnt;
        }
              
        accuratSumm += (accCnt * 1.F) / bsz;

        cout << k << " accurate " << accuratSumm / (k + 1) << " " << snet.getLastErrorStr() << endl;        
    }
       
    system("pause");
    return 0;
}

UNET-tiny

#include <string>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <map>
#include <filesystem>

#include "../cpp/snNet.h"
#include "../cpp/snTensor.h"
#include "../cpp/snOperator.h"

#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"

using namespace std;
namespace sn = SN_API;

bool loadImage(string& imgPath, int classCnt, vector<string>& imgName, int& imgCntDir){
      
    namespace fs = std::tr2::sys;

    if (!fs::exists(fs::path(imgPath))) return false;

    fs::directory_iterator it(imgPath); int cnt = 0;
    while (it != fs::directory_iterator()){

        fs::path p = it->path();
        if (fs::is_regular_file(p) && (p.extension() == ".png")){

            imgName.push_back(p.filename());
        }
        ++it;
        ++cnt;
    }

    imgCntDir = cnt;
   
    return true;
}

int main(int argc, char* argv[]){
       
    sn::Net snet;   
 
    snet.addNode("In", sn::Input(), "C1")
        .addNode("C1", sn::Convolution(10, -1), "C2")
        .addNode("C2", sn::Convolution(10, 0), "P1 Crop1")
        .addNode("Crop1", sn::Crop(sn::rect(0, 0, 487, 487)), "Rsz1")
        .addNode("Rsz1", sn::Resize(sn::diap(0, 10), sn::diap(0, 10)), "Conc1")
        .addNode("P1", sn::Pooling(), "C3")

        .addNode("C3", sn::Convolution(10, -1), "C4")
        .addNode("C4", sn::Convolution(10, 0), "P2 Crop2")
        .addNode("Crop2", sn::Crop(sn::rect(0, 0, 247, 247)), "Rsz2")
        .addNode("Rsz2", sn::Resize(sn::diap(0, 10), sn::diap(0, 10)), "Conc2")
        .addNode("P2", sn::Pooling(), "C5")

        .addNode("C5", sn::Convolution(10, 0), "C6")
        .addNode("C6", sn::Convolution(10, 0), "DC1")
        .addNode("DC1", sn::Deconvolution(10, sn::active::relu), "Rsz3")
        .addNode("Rsz3", sn::Resize(sn::diap(0, 10), sn::diap(10, 20)), "Conc2")

        .addNode("Conc2", sn::Concat("Rsz2 Rsz3"), "C7")

        .addNode("C7", sn::Convolution(10, 0), "C8")
        .addNode("C8", sn::Convolution(10, 0), "DC2")
        .addNode("DC2", sn::Deconvolution(10, sn::active::relu), "Rsz4")
        .addNode("Rsz4", sn::Resize(sn::diap(0, 10), sn::diap(10, 20)), "Conc1")

        .addNode("Conc1", sn::Concat("Rsz1 Rsz4"), "C9")

        .addNode("C9", sn::Convolution(10, 0), "C10");

    sn::Convolution convOut(1, 0);
    convOut.act = sn::active::sigmoid;
    snet.addNode("C10", convOut, "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::binaryCrossEntropy), "Output");
    
    string imgPath = "c://cpp//other//sunnet//example//unet//images//";
    string labelPath = "c://cpp//other//sunnet//example//unet//labels//";

    int batchSz = 10, w = 512, h = 512, wo = 483, ho = 483; float lr = 0.001F;
    vector<string> imgName;
    int imgCntDir = 0;
       
    if (!loadImage(imgPath, 1, imgName, imgCntDir)){
        cout << "Error loadImage path: " << imgPath << endl;
        system("pause");
        return -1;
    }

    sn::Tensor inLayer(sn::snLSize(w, h, 1, batchSz));
    sn::Tensor targetLayer(sn::snLSize(wo, ho, 1, batchSz));
    sn::Tensor outLayer(sn::snLSize(wo, ho, 1, batchSz));
       
    size_t sum_metric = 0;
    size_t num_inst = 0;
    float accuratSumm = 0;
    for (int k = 0; k < 1000; ++k){
               
        srand(clock());
                
        for (int i = 0; i < batchSz; ++i){
                       
            // image
            int nimg = rand() % imgCntDir;

            // read
            string nm = imgName[nimg];
            cv::Mat img = cv::imread(imgPath + nm, CV_LOAD_IMAGE_UNCHANGED);

            float* refData = inLayer.data() + i * w * h;
           
            for (size_t r = 0; r < h; ++r){
                uchar* pt = img.ptr<uchar>(r);
                for (size_t c = 0; c < w; c += 1){
                    refData[r * w + c] = pt[c];                   
                }
            } 

            cv::Mat imgLabel = cv::imread(labelPath + nm, CV_LOAD_IMAGE_UNCHANGED);

            cv::resize(imgLabel, imgLabel, cv::Size(wo, ho));

            refData = targetLayer.data() + i * wo * ho;

            for (size_t r = 0; r < ho; ++r){
                uchar* pt = imgLabel.ptr<uchar>(r);
                for (size_t c = 0; c < wo; c += 1){
                    refData[r * wo + c] = pt[c] / 255.;
                }
            }

        }

        // training
        float accurat = 0;
        snet.training(lr,
            inLayer,
            outLayer,
            targetLayer,
            accurat);

        // calc error                     
        accuratSumm += accurat;

        cout << k << " accurate " << accuratSumm / (k + 1) << " " << snet.getLastErrorStr() << endl;        
    }
       
    system("pause");
    return 0;
}

ResNet50

A network creation example is shown below.

#include <string>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <map>
#include <filesystem>


#include <omp.h>
#include "../cpp/snNet.h"
#include "../cpp/snTensor.h"
#include "../cpp/snOperator.h"

#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"

using namespace std;
namespace sn = SN_API;


void idntBlock(sn::Net& net, vector<uint32_t>&& filters, uint32_t kernelSize, string oprName, string nextOprName){
    
    net.addNode(oprName + "2a", sn::Convolution(filters[0], 1, 0, 1, sn::batchNormType::beforeActive, sn::active::relu), oprName + "2b")
       .addNode(oprName + "2b", sn::Convolution(filters[1], kernelSize, -1, 1, sn::batchNormType::beforeActive, sn::active::relu), oprName + "2c")
       .addNode(oprName + "2c", sn::Convolution(filters[2], 1, 0, 1, sn::batchNormType::beforeActive, sn::active::none), oprName + "Sum");

    net.addNode(oprName + "Sum", sn::Summator(sn::summatorType::summ), oprName + "Act")
       .addNode(oprName + "Act", sn::Activation(sn::active::relu), nextOprName);
}

void convBlock(sn::Net& net, vector<uint32_t>&& filters, uint32_t kernelSize, uint32_t stride, string oprName, string nextOprName){

    net.addNode(oprName + "2a", sn::Convolution(filters[0], 1, 0, stride, sn::batchNormType::beforeActive, sn::active::relu), oprName + "2b")
       .addNode(oprName + "2b", sn::Convolution(filters[1], kernelSize, -1, 1, sn::batchNormType::beforeActive, sn::active::relu), oprName + "2c")
       .addNode(oprName + "2c", sn::Convolution(filters[2], 1, 0, 1, sn::batchNormType::beforeActive, sn::active::none), oprName + "Sum");

    // shortcut
    net.addNode(oprName + "1", sn::Convolution(filters[2], 1, 0, stride, sn::batchNormType::beforeActive, sn::active::none), oprName + "Sum");

    // summator
    net.addNode(oprName + "Sum", sn::Summator(sn::summatorType::summ), oprName + "Act")
       .addNode(oprName + "Act", sn::Activation(sn::active::relu), nextOprName);
}

sn::Net createNet(){

    auto net = sn::Net();

    net.addNode("In", sn::Input(), "conv1")
       .addNode("conv1", sn::Convolution(64, 7, 3, 2, sn::batchNormType::beforeActive, sn::active::none), "pool1_pad")
       .addNode("pool1_pad", sn::Pooling(3, 2, sn::poolType::max), "res2a_branch1 res2a_branch2a");
    
    convBlock(net, vector<uint32_t>{ 64, 64, 256 }, 3, 1, "res2a_branch", "res2b_branch2a res2b_branchSum");
    idntBlock(net, vector<uint32_t>{ 64, 64, 256 }, 3, "res2b_branch", "res2c_branch2a res2c_branchSum");
    idntBlock(net, vector<uint32_t>{ 64, 64, 256}, 3, "res2c_branch", "res3a_branch1 res3a_branch2a");

    convBlock(net, vector<uint32_t>{ 128, 128, 512 }, 3, 2, "res3a_branch", "res3b_branch2a res3b_branchSum");
    idntBlock(net, vector<uint32_t>{ 128, 128, 512 }, 3, "res3b_branch", "res3c_branch2a res3c_branchSum");
    idntBlock(net, vector<uint32_t>{ 128, 128, 512 }, 3, "res3c_branch", "res3d_branch2a res3d_branchSum");
    idntBlock(net, vector<uint32_t>{ 128, 128, 512 }, 3, "res3d_branch", "res4a_branch1 res4a_branch2a");

    convBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, 2, "res4a_branch", "res4b_branch2a res4b_branchSum");
    idntBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, "res4b_branch", "res4c_branch2a res4c_branchSum");
    idntBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, "res4c_branch", "res4d_branch2a res4d_branchSum");
    idntBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, "res4d_branch", "res4e_branch2a res4e_branchSum");
    idntBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, "res4e_branch", "res4f_branch2a res4f_branchSum");
    idntBlock(net, vector<uint32_t>{ 256, 256, 1024 }, 3, "res4f_branch", "res5a_branch1 res5a_branch2a");

    convBlock(net, vector<uint32_t>{ 512, 512, 2048 }, 3, 2, "res5a_branch", "res5b_branch2a res5b_branchSum");
    idntBlock(net, vector<uint32_t>{ 512, 512, 2048 }, 3, "res5b_branch", "res5c_branch2a res5c_branchSum");
    idntBlock(net, vector<uint32_t>{ 512, 512, 2048 }, 3, "res5c_branch", "avg_pool");

    net.addNode("avg_pool", sn::Pooling(7, 7, sn::poolType::avg), "fc1000")
       .addNode("fc1000", sn::FullyConnected(1000, sn::active::none), "LS")
       .addNode("LS", sn::LossFunction(sn::lossType::softMaxToCrossEntropy), "Output");

    return net;
}

int main(int argc, char* argv[]){
       
    sn::Net snet = createNet();
  
    // using python for create file 'resNet50Weights.dat' as: 
    // CMD: cd c:\cpp\other\sunnet\example\resnet50\
    // CMD: python createNet.py  
    
    if (!snet.loadAllWeightFromFile("c:/cpp/other/sunnet/example/resnet50/resNet50Weights.dat")){
        cout << "error loadAllWeightFromFile: " << snet.getLastErrorStr() << endl;
        system("pause");
        return -1;
    }
    
    string imgPath = "c:/cpp/other/sunnet/example/resnet50/images/elephant.jpg";
    
    int classCnt = 1000, w = 224, h = 224;
  
    sn::Tensor inLayer(sn::snLSize(w, h, 3, 1));
    sn::Tensor outLayer(sn::snLSize(classCnt, 1, 1, 1));
  
    cv::Mat img = cv::imread(imgPath, CV_LOAD_IMAGE_UNCHANGED);

    if ((img.cols != w) || (img.rows != h))
       cv::resize(img, img, cv::Size(w, h));

    vector<cv::Mat> channels(3);
    split(img, channels);    // BGR order 

    float* refData = inLayer.data();
      
    // B
    for (size_t r = 0; r < h; ++r){
        uchar* pt = channels[0].ptr<uchar>(r);
        for (size_t c = 0; c < w; ++c)
            refData[r * w + c] = pt[c];
    } 

    // G
    refData += h * w;
    for (size_t r = 0; r < h; ++r){
        uchar* pt = channels[1].ptr<uchar>(r);
        for (size_t c = 0; c < w; ++c)
            refData[r * w + c] = pt[c];
    }

    // R
    refData += h * w;   
    for (size_t r = 0; r < h; ++r){
        uchar* pt = channels[2].ptr<uchar>(r);
        for (size_t c = 0; c < w; ++c)
            refData[r * w + c] = pt[c];
    }


    for (int i = 0; i < 100; ++i){
        double ctm = omp_get_wtime();        
        snet.forward(false, inLayer, outLayer);
        cout << std::to_string(omp_get_wtime() - ctm) << endl;
    
        float* refOutput = outLayer.data();

        int maxInx = distance(refOutput, max_element(refOutput, refOutput + classCnt));

        // for check: c:\cpp\other\sunnet\example\resnet50\imagenet_class_index.json
    
        cout << "inx " << maxInx << " accurate " << refOutput[maxInx] << snet.getLastErrorStr() << endl;
    }

    system("pause");
    return 0;
}

AutoEncoder

#include <string>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <map>
#include <filesystem>

#include "../cpp/snNet.h"
#include "../cpp/snTensor.h"
#include "../cpp/snOperator.h"

#include "Lib/OpenCV_3.3.0/opencv2/core/core_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/core/core.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/imgproc/imgproc.hpp"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui_c.h"
#include "Lib/OpenCV_3.3.0/opencv2/highgui/highgui.hpp"

using namespace std;
namespace sn = SN_API;

bool loadImage(string& imgPath, int classCnt, vector<vector<string>>& imgName, vector<int>& imgCntDir, map<string, cv::Mat>& images){

    for (int i = 0; i < classCnt; ++i){

        namespace fs = std::tr2::sys;

        if (!fs::exists(fs::path(imgPath + to_string(i) + "/"))) continue;

        fs::directory_iterator it(imgPath + to_string(i) + "/"); int cnt = 0;
        while (it != fs::directory_iterator()){

            fs::path p = it->path();
            if (fs::is_regular_file(p) && (p.extension() == ".png")){

                imgName[i].push_back(p.filename());
            }
            ++it;
            ++cnt;
        }

        imgCntDir[i] = cnt;
    }

    return true;
}

int main(int argc, char* argv[]){

    sn::Net snet;

    snet.addNode("Input", sn::Input(), "FC1")
        .addNode("FC1", sn::FullyConnected(256, sn::active::relu), "FC2")
        .addNode("FC2", sn::FullyConnected(128, sn::active::relu), "FC3")
        .addNode("FC3", sn::FullyConnected(32, sn::active::relu), "FC4")
        .addNode("FC4", sn::FullyConnected(128, sn::active::relu), "FC5")
        .addNode("FC5", sn::FullyConnected(256, sn::active::relu), "FC6")
        .addNode("FC6", sn::FullyConnected(784, sn::active::sigmoid), "LS")
        .addNode("LS", sn::LossFunction(sn::lossType::binaryCrossEntropy), "Output");

    string imgPath = "c://cpp//sunnet//example//autoEncoder//images//";

    int classCnt = 5, batchSz = 100, w = 28, h = 28;
    float lr = 0.001F;

    vector<vector<string>> imgName(classCnt);
    vector<int> imgCntDir(classCnt);
    map<string, cv::Mat> images;

    if (!loadImage(imgPath, classCnt, imgName, imgCntDir, images)){
        cout << "Error 'loadImage' path: " << imgPath << endl;
        system("pause");
        return -1;
    }

    //snet.loadAllWeightFromFile("c:\\cpp\\w.dat");


    sn::Tensor inLayer(sn::snLSize(w, h, 1, batchSz));
    sn::Tensor outLayer(sn::snLSize(w * h, 1, 1, batchSz));

    size_t sum_metric = 0;
    size_t num_inst = 0;
    float accuratSumm = 0;
    for (int k = 0; k < 1000; ++k){

        srand(clock());

        for (int i = 0; i < batchSz; ++i){

            // directory
            int ndir = rand() % classCnt;
            while (imgCntDir[ndir] == 0)
                ndir = rand() % classCnt;

            // image
            int nimg = rand() % imgCntDir[ndir];

            // read
            cv::Mat img;
            string nm = imgName[ndir][nimg];
            if (images.find(nm) != images.end())
                img = images[nm];
            else{
                img = cv::imread(imgPath + to_string(ndir) + "/" + nm, CV_LOAD_IMAGE_UNCHANGED);
                images[nm] = img;
            }

            float* refData = inLayer.data() + i * w * h;
   
            size_t nr = img.rows, nc = img.cols;
            for (size_t r = 0; r < nr; ++r){
                uchar* pt = img.ptr<uchar>(r);
                for (size_t c = 0; c < nc; ++c)
                    refData[r * nc + c] = pt[c] / 255.0;
            }
        }

        // training
        float accurat = 0;
        snet.training(lr,
                      inLayer,
                      outLayer,
                      inLayer,
                      accurat);

        float* refData = outLayer.data();

        cv::Mat img(h, w, CV_8U);   // cv::Mat takes (rows, cols)
        for (size_t r = 0; r < h; ++r){
            uchar* pt = img.ptr<uchar>(r);
            for (size_t c = 0; c < w; ++c)
                pt[c] = refData[r * w + c] * 255.0;
        }

        cv::namedWindow("1", 0);
        cv::imshow("1", img);
        cv::waitKey(1);

        accuratSumm += accurat;

        cout << k << " accurate " << accuratSumm / (k + 1) << " " << snet.getLastErrorStr() << endl;
    }
    
    snet.saveAllWeightToFile("c:\\cpp\\w.dat");

    system("pause");
    return 0;
}