@@ -0,0 +1,262 @@
/***********************************************************************************************************************
* 文件名称:layer.h
* 摘 要:定义网络参数
*
* 当前版本:0.1.0
* 作 者:徐博文
* 日 期:2017-01-12
* 备 注:创建
***********************************************************************************************************************/
#ifndef _LAYER_H_
#define _LAYER_H_

#include "tree.h"

/* Discriminates the kind of a network layer; stored in layer.type and used
 * to select the layer-specific forward/backward/update implementations.
 * Values are assigned explicitly but match the original implicit numbering,
 * so serialized models and switch tables remain compatible. */
typedef enum {
    CONVOLUTIONAL   = 0,
    DECONVOLUTIONAL = 1,
    CONNECTED       = 2,
    MAXPOOL         = 3,
    SOFTMAX         = 4,
    DETECTION       = 5,
    DROPOUT         = 6,
    CROP            = 7,
    ROUTE           = 8,
    COST            = 9,
    NORMALIZATION   = 10,
    AVGPOOL         = 11,
    LOCAL           = 12,
    SHORTCUT        = 13,
    ACTIVE          = 14,
    RNN             = 15,
    GRU             = 16,
    CRNN            = 17,
    BATCHNORM       = 18,
    NETWORK         = 19,
    XNOR            = 20,
    REGION          = 21,
    REORG           = 22,
    BLANK           = 23   /* sentinel / unrecognized layer */
} LAYER_TYPE;

/* layer: parameters and buffers for one network layer (darknet-style design).
 * A single struct describes every supported LAYER_TYPE; only the fields
 * relevant to a given type are populated.  ACTIVATION, COST_TYPE and
 * struct network_state are declared in other project headers.
 * NOTE(review): per-field comments below are inferred from field names and
 * common darknet usage — confirm against the layer implementations.
 * FIX: the original definition was missing the terminating ';' after the
 * closing brace, which is a syntax error. */
struct layer{
    LAYER_TYPE type;            /* which kind of layer this is */
    ACTIVATION activation;      /* activation function applied to the output */
    COST_TYPE cost_type;        /* loss flavor (COST layers) */

    /* Type-specific entry points, dispatched at runtime. */
    void (*forward)      (struct layer, struct network_state);
    void (*backward)     (struct layer, struct network_state);
    void (*update)       (struct layer, int, float, float, float);
    void (*forward_gpu)  (struct layer, struct network_state);
    void (*backward_gpu) (struct layer, struct network_state);
    void (*update_gpu)   (struct layer, int, float, float, float);

    int batch_normalize;        /* nonzero: apply batch normalization */
    int shortcut;
    int batch;                  /* mini-batch size */
    int forced;
    int flipped;
    int inputs;                 /* input values per batch item */
    int outputs;                /* output values per batch item */
    int truths;                 /* ground-truth values per batch item */
    int h, w, c;                /* input height / width / channels */
    int out_h, out_w, out_c;    /* output height / width / channels */
    int n;                      /* e.g. filter count — TODO confirm per layer type */
    int max_boxes;
    int groups;
    int size;                   /* kernel size */
    int side;
    int stride;
    int reverse;
    int pad;                    /* spatial padding */
    int sqrt;
    int flip;
    int index;                  /* source-layer index (ROUTE/SHORTCUT) — TODO confirm */
    int binary;
    int xnor;
    int steps;                  /* time steps (recurrent layers) */
    int hidden;
    float dot;
    /* Data-augmentation parameters (CROP layers) — TODO confirm. */
    float angle;
    float jitter;
    float saturation;
    float exposure;
    float shift;
    float ratio;
    int softmax;
    int classes;                /* number of classes (detection/softmax) */
    int coords;
    int background;
    int rescore;
    int objectness;
    int does_cost;
    int joint;
    int noadjust;
    int reorg;
    int log;

    /* Adam optimizer state. */
    int adam;                   /* nonzero: use Adam for this layer */
    float B1;                   /* Adam beta1 */
    float B2;                   /* Adam beta2 */
    float eps;
    float *m_gpu;
    float *v_gpu;
    int t;                      /* Adam time step */
    float *m;
    float *v;

    tree *softmax_tree;         /* optional class hierarchy for hierarchical softmax */
    int *map;

    /* Local response normalization parameters. */
    float alpha;
    float beta;
    float kappa;

    /* Detection loss weighting. */
    float coord_scale;
    float object_scale;
    float noobject_scale;
    float class_scale;
    int bias_match;
    int random;
    float thresh;
    int classfix;
    int absolute;

    /* Weight-loading control flags. */
    int dontload;
    int dontloadscales;

    float temperature;          /* softmax temperature */
    float probability;          /* dropout probability — TODO confirm keep vs. drop */
    float scale;

    /* CPU-side buffers; sizes depend on layer type. */
    int *indexes;
    float *rand;
    float *cost;
    char *cweights;
    float *state;
    float *prev_state;
    float *forgot_state;
    float *forgot_delta;
    float *state_delta;

    float *concat;
    float *concat_delta;

    float *binary_weights;

    float *biases;
    float *bias_updates;

    float *scales;
    float *scale_updates;

    float *weights;
    float *weight_updates;

    float *col_image;
    int *input_layers;          /* ROUTE: indices of source layers */
    int *input_sizes;
    float *delta;               /* gradient w.r.t. this layer's output */
    float *output;              /* layer output activations */
    float *squared;
    float *norms;

    /* Batch-norm statistics. */
    float *spatial_mean;
    float *mean;
    float *variance;

    float *mean_delta;
    float *variance_delta;

    float *rolling_mean;        /* running statistics used at inference time */
    float *rolling_variance;

    float *x;                   /* pre-normalization values saved for backward */
    float *x_norm;

    /* Sub-layers composing recurrent layer types (RNN/CRNN). */
    struct layer *input_layer;
    struct layer *self_layer;
    struct layer *output_layer;

    /* Sub-layers for gated recurrent variants. */
    struct layer *input_gate_layer;
    struct layer *state_gate_layer;
    struct layer *input_save_layer;
    struct layer *state_save_layer;
    struct layer *input_state_layer;
    struct layer *state_state_layer;

    /* GRU gates: update (z), reset (r), candidate (h). */
    struct layer *input_z_layer;
    struct layer *state_z_layer;

    struct layer *input_r_layer;
    struct layer *state_r_layer;

    struct layer *input_h_layer;
    struct layer *state_h_layer;

    float *z_cpu;
    float *r_cpu;
    float *h_cpu;

    float *binary_input;

    size_t workspace_size;      /* scratch space this layer needs, in bytes */

#ifdef GPU
    /* Device-side mirrors of the CPU buffers above. */
    float *z_gpu;
    float *r_gpu;
    float *h_gpu;

    int *indexes_gpu;
    float *prev_state_gpu;
    float *forgot_state_gpu;
    float *forgot_delta_gpu;
    float *state_gpu;
    float *state_delta_gpu;
    float *gate_gpu;
    float *gate_delta_gpu;
    float *save_gpu;
    float *save_delta_gpu;
    float *concat_gpu;
    float *concat_delta_gpu;

    float *binary_input_gpu;
    float *binary_weights_gpu;

    float *mean_gpu;
    float *variance_gpu;

    float *rolling_mean_gpu;
    float *rolling_variance_gpu;

    float *variance_delta_gpu;
    float *mean_delta_gpu;

    float *col_image_gpu;

    float *x_gpu;
    float *x_norm_gpu;
    float *weights_gpu;
    float *weight_updates_gpu;

    float *biases_gpu;
    float *bias_updates_gpu;

    float *scales_gpu;
    float *scale_updates_gpu;

    float *output_gpu;
    float *delta_gpu;
    float *rand_gpu;
    float *squared_gpu;
    float *norms_gpu;
#ifdef CUDNN
    /* cuDNN descriptors and algorithm choices for convolutional layers. */
    cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
    cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
    cudnnFilterDescriptor_t weightDesc;
    cudnnFilterDescriptor_t dweightDesc;
    cudnnConvolutionDescriptor_t convDesc;
    cudnnConvolutionFwdAlgo_t fw_algo;
    cudnnConvolutionBwdDataAlgo_t bd_algo;
    cudnnConvolutionBwdFilterAlgo_t bf_algo;
#endif
};
#endif
@@ -0,0 +1,79 @@
/***********************************************************************************************************************
* 文件名称:net_def.h
* 摘 要:规定net中的返回值
*
* 当前版本:0.1.0
* 作 者:徐博文
* 日 期:2017-01-12
* 备 注:创建
***********************************************************************************************************************/
#ifndef _NET_DEF_H_
#define _NET_DEF_H_

/***************************************************************************************************
* Status codes
***************************************************************************************************/
//Status-code data type: every component interface function returns a value of this type.
typedef int NET_STATUS;

//Function return status values.

//Algorithm libraries may define their own status codes in their library headers;
//custom status values must be < -1000 so they never collide with the codes below.
typedef enum _NET_STATUS_CODE
{
//CPU instruction-set support errors
NET_STS_ERR_CPUID = -29, //CPU does not support the instruction set required by the optimized code

//Basic error types returned by internal modules
NET_STS_ERR_STEP = -28, //incorrect data step (applies to everything except the NET_IMAGE struct)
NET_STS_ERR_DATA_SIZE = -27, //incorrect data size (len for 1-D data, NET_SIZE for 2-D data)
NET_STS_ERR_BAD_ARG = -26, //argument value out of range

//Licensing / encryption related error codes
NET_STS_ERR_EXPIRE = -25, //library usage period invalid or expired
NET_STS_ERR_ENCRYPT = -24, //encryption error

//Error types used by the component interface functions
NET_STS_ERR_CALL_BACK = -23, //callback function failed
NET_STS_ERR_OVER_MAX_MEM = -22, //exceeds the maximum memory allowed by NET
NET_STS_ERR_NULL_PTR = -21, //NULL pointer passed as a function argument (shared)

//Error types for validating NET_KEY_PARAM / NET_KEY_PARAM_LIST members
NET_STS_ERR_PARAM_NUM = -20, //invalid param_num
NET_STS_ERR_PARAM_VALUE = -19, //invalid or out-of-range value
NET_STS_ERR_PARAM_INDEX = -18, //invalid index

//Checks on cfg_type, cfg_size, prc_type, in_size, out_size, func_type
NET_STS_ERR_FUNC_SIZE = -17, //wrong input/output size for a sub-processing call
NET_STS_ERR_FUNC_TYPE = -16, //wrong sub-processing type
NET_STS_ERR_PRC_SIZE = -15, //wrong input/output size for a processing call
NET_STS_ERR_PRC_TYPE = -14, //wrong processing type
NET_STS_ERR_CFG_SIZE = -13, //wrong input/output struct size when setting/getting parameters
NET_STS_ERR_CFG_TYPE = -12, //wrong type when setting/getting parameters

//Error types for validating NET_IMAGE members
NET_STS_ERR_IMG_DATA_NULL = -11, //image data pointer is NULL (for some plane/component)
NET_STS_ERR_IMG_STEP = -10, //image width/height does not match the step parameter
NET_STS_ERR_IMG_SIZE = -9, //image width/height invalid or out of range
NET_STS_ERR_IMG_FORMAT = -8, //image format invalid or unsupported

//Error types for validating NET_MEM_TAB members
NET_STS_ERR_MEM_ADDR_ALIGN = -7, //memory address does not meet alignment requirements
NET_STS_ERR_MEM_SIZE_ALIGN = -6, //memory size does not meet alignment requirements
NET_STS_ERR_MEM_LACK = -5, //memory region too small
NET_STS_ERR_MEM_ALIGN = -4, //memory alignment requirement not met
NET_STS_ERR_MEM_NULL = -3, //memory address is NULL

//Error type for validating the ability member
NET_STS_ERR_ABILITY_ARG = -2, //invalid parameter in ABILITY

//Generic results
NET_STS_ERR = -1, //unspecified error (shared by all interface functions)
NET_STS_OK = 0, //success (shared by all interface functions)
NET_STS_WARNING = 1 //warning

}NET_STATUS_CODE;


#endif //_NET_DEF_H_

@@ -0,0 +1,31 @@
/***********************************************************************************************************************
* 文件名称:tree.h
* 摘 要:构建tree结构体以及相关方法
*
* 当前版本:0.1.0
* 作 者:徐博文
* 日 期:2017-01-12
* 备 注:创建
***********************************************************************************************************************/

#ifndef TREE_H
#define TREE_H

// Class-hierarchy tree, e.g. for hierarchical softmax over a label taxonomy.
// NOTE(review): field semantics are inferred from the field names only — the
// implementations (read_tree etc.) are not visible here; confirm before relying
// on them.  Arrays below are presumably of length n unless noted otherwise.
typedef struct{
int *leaf;          // per-node flag: nonzero if the node is a leaf — TODO confirm
int n;              // total number of nodes
int *parent;        // per-node parent index — TODO confirm root sentinel value
int *group;         // per-node index of the sibling group the node belongs to
char **name;        // per-node label string

int groups;         // number of sibling groups
int *group_size;    // per-group: number of nodes in the group
int *group_offset;  // per-group: index of the group's first node — TODO confirm
} tree;

/* Parse a hierarchy tree from a text file.  Presumably returns a heap-allocated
 * tree owned by the caller — implementation not shown; confirm ownership. */
tree *read_tree(char *filename);
/* Combine the n raw predictions along the hierarchy `hier`; when only_leaves is
 * nonzero, presumably restricts the result to leaf nodes — TODO confirm. */
void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves);
/* Restrict the tree's leaf set to the names listed in leaf_list —
 * TODO confirm the expected list format (file path vs. comma-separated?). */
void change_leaves(tree *t, char *leaf_list);
/* Probability of class c under the hierarchy, presumably the product of
 * conditional probabilities on the root-to-c path — TODO confirm. */
float get_hierarchy_probability(float *x, tree *hier, int c);

#endif //TREE_H
@@ -0,0 +1,25 @@
/***********************************************************************************************************************
* File:    network training implementation (NOTE: this banner previously said "net_def.h" — a copy-paste
*          from the header above; the actual filename of this .c file is not shown in this diff)
* Summary: network training entry points (train_net)
*
* Version: 0.1.0
* Author:  Xu Bowen (徐博文)
* Date:    2017-01-12
* Notes:   created
***********************************************************************************************************************/
#include "net_def.h"
#include "cnn.h"

/*************************************************************************************************
* Function: train_net
*
* Purpose:  Train a network described by a .cfg file, optionally starting from
*           pre-trained weights.  Currently a stub — the training loop has not
*           been implemented yet, so the function intentionally does nothing.
* Params:   cfgfile    - path to the network configuration file (unused for now)
*           weightfile - path to the initial weights file (unused for now)
* Returns:  none
*************************************************************************************************/
void train_net(char *cfgfile, char *weightfile)
{
    /* Silence unused-parameter warnings until the training loop exists.
     * (Also removed the original uninitialized, unused local `train_images`.) */
    (void)cfgfile;
    (void)weightfile;

    /* TODO: load the network from cfgfile, restore weights from weightfile,
     * load the training image list, and run the optimization loop. */
}

/* NOTE: the remaining files of this changeset were omitted by the diff viewer
 * ("Large diffs are not rendered by default") and are not part of this excerpt. */