added methods to read and write matrices and tensors, assert functions, and tools to format the code

giovastabile committed Apr 10, 2019
1 parent 73654a9 commit 2c1a6a2
Showing 30 changed files with 6,614 additions and 4,886 deletions.
79 changes: 79 additions & 0 deletions code_formatter.sh
@@ -0,0 +1,79 @@
#!/bin/bash

#######################################

required_command="astyle"
code_directory="include/"
tutorial_directory="tutorials/"

#######################################

usage() {
    echo
    echo -e "\tUsage: $(basename $0) [files]"
    echo
    echo -e "\tIf files are not specified, $(basename $0) formats all \".h\", \".H\" and \".cpp\" files"
    echo -e "\tin the source and tutorial directories; otherwise, it formats all given files."
    echo
    echo -e "\tRequired command: $required_command"
    echo
    exit 0
}


[[ $1 == "-h" ]] && usage

# Test for required program
for comm in $required_command; do
    command -v $comm >/dev/null 2>&1 || {
        echo "I require $comm but it's not installed. Aborting." >&2
        exit 1
    }
done

# Set the files to format
[[ $# != 0 ]] && src_files=$@ || src_files="--recursive $code_directory**.h,**.H"
[[ $# != 0 ]] && tutorial_files=$@ || tutorial_files="--recursive $tutorial_directory**.cpp,**.H"

echo $tutorial_files
echo $src_files

# Here is the important part: astyle formats the source files.
astyle --style=bsd \
    --indent=spaces=4 \
    --indent-classes \
    --indent-switches \
    --indent-col1-comments \
    --break-blocks \
    --pad-oper \
    --pad-comma \
    --pad-header \
    --delete-empty-lines \
    --align-pointer=type \
    --align-reference=type \
    --add-braces \
    --convert-tabs \
    --close-templates \
    --max-code-length=80 \
    --mode=c \
    $src_files

# Here is the important part: astyle formats the tutorial files.
astyle --style=bsd \
    --indent=spaces=4 \
    --indent-classes \
    --indent-switches \
    --indent-col1-comments \
    --break-blocks \
    --pad-oper \
    --pad-comma \
    --pad-header \
    --delete-empty-lines \
    --align-pointer=type \
    --align-reference=type \
    --add-braces \
    --convert-tabs \
    --close-templates \
    --max-code-length=80 \
    --mode=c \
    $tutorial_files
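
In short (behavior read off the script above): run ./code_formatter.sh with no arguments to reformat, in place, every ".h"/".H" header under include/ and every ".cpp"/".H" source under tutorials/; pass explicit file names, e.g. ./code_formatter.sh include/Callback.h, to restrict both astyle passes to exactly those files.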
44 changes: 23 additions & 21 deletions include/Activation/Identity.h
@@ -4,7 +4,8 @@
#include <Eigen/Core>
#include "../Config.h"

-namespace MiniDNN {
+namespace MiniDNN
+{


///
@@ -18,26 +19,27 @@ namespace MiniDNN {
///
class Identity
{
-private:
-    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
-
-public:
-    // a = activation(z) = z
-    // Z = [z1, ..., zn], A = [a1, ..., an], n observations
-    static inline void activate(const Matrix& Z, Matrix& A)
-    {
-        A.noalias() = Z;
-    }
-
-    // Apply the Jacobian matrix J to a vector f
-    // J = d_a / d_z = I
-    // g = J * f = f
-    // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
-    // Note: When entering this function, Z and G may point to the same matrix
-    static inline void apply_jacobian(const Matrix& Z, const Matrix& A, const Matrix& F, Matrix& G)
-    {
-        G.noalias() = F;
-    }
+    private:
+        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
+
+    public:
+        // a = activation(z) = z
+        // Z = [z1, ..., zn], A = [a1, ..., an], n observations
+        static inline void activate(const Matrix& Z, Matrix& A)
+        {
+            A.noalias() = Z;
+        }
+
+        // Apply the Jacobian matrix J to a vector f
+        // J = d_a / d_z = I
+        // g = J * f = f
+        // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
+        // Note: When entering this function, Z and G may point to the same matrix
+        static inline void apply_jacobian(const Matrix& Z, const Matrix& A,
+                                          const Matrix& F, Matrix& G)
+        {
+            G.noalias() = F;
+        }
};
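
For orientation, a minimal driver showing how these static activation classes are called (a hypothetical example, not part of this commit; it assumes include/ is on the include path and that Scalar from Config.h resolves to double):

#include <Eigen/Core>
#include <iostream>
#include "Activation/Identity.h"

int main()
{
    // Columns are observations, rows are features (the convention used in
    // the comments above).
    Eigen::MatrixXd Z = Eigen::MatrixXd::Random(3, 2), A(3, 2);
    MiniDNN::Identity::activate(Z, A);              // forward: A = Z

    Eigen::MatrixXd F = Eigen::MatrixXd::Ones(3, 2), G(3, 2);
    MiniDNN::Identity::apply_jacobian(Z, A, F, G);  // backward: G = J * F = F

    std::cout << (A - Z).norm() << " " << (G - F).norm() << std::endl;  // 0 0
    return 0;
}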


44 changes: 23 additions & 21 deletions include/Activation/ReLU.h
@@ -4,7 +4,8 @@
#include <Eigen/Core>
#include "../Config.h"

-namespace MiniDNN {
+namespace MiniDNN
+{


///
@@ -14,26 +15,27 @@ namespace MiniDNN {
///
class ReLU
{
-private:
-    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
-
-public:
-    // a = activation(z) = max(z, 0)
-    // Z = [z1, ..., zn], A = [a1, ..., an], n observations
-    static inline void activate(const Matrix& Z, Matrix& A)
-    {
-        A.array() = Z.array().cwiseMax(Scalar(0));
-    }
-
-    // Apply the Jacobian matrix J to a vector f
-    // J = d_a / d_z = diag(sign(a)) = diag(a > 0)
-    // g = J * f = (a > 0) .* f
-    // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
-    // Note: When entering this function, Z and G may point to the same matrix
-    static inline void apply_jacobian(const Matrix& Z, const Matrix& A, const Matrix& F, Matrix& G)
-    {
-        G.array() = (A.array() > Scalar(0)).select(F, Scalar(0));
-    }
+    private:
+        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
+
+    public:
+        // a = activation(z) = max(z, 0)
+        // Z = [z1, ..., zn], A = [a1, ..., an], n observations
+        static inline void activate(const Matrix& Z, Matrix& A)
+        {
+            A.array() = Z.array().cwiseMax(Scalar(0));
+        }
+
+        // Apply the Jacobian matrix J to a vector f
+        // J = d_a / d_z = diag(sign(a)) = diag(a > 0)
+        // g = J * f = (a > 0) .* f
+        // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
+        // Note: When entering this function, Z and G may point to the same matrix
+        static inline void apply_jacobian(const Matrix& Z, const Matrix& A,
+                                          const Matrix& F, Matrix& G)
+        {
+            G.array() = (A.array() > Scalar(0)).select(F, Scalar(0));
+        }
};
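
A quick check of the Jacobian used above: with $a_i = \max(z_i, 0)$,

$$\frac{\partial a_i}{\partial z_i} = \begin{cases} 1, & z_i > 0, \\ 0, & z_i \le 0, \end{cases} \qquad\text{so}\qquad g_i = \mathbf{1}[a_i > 0]\, f_i,$$

which is exactly what (A.array() > Scalar(0)).select(F, Scalar(0)) computes; the subgradient at $z_i = 0$ is taken to be $0$.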


44 changes: 23 additions & 21 deletions include/Activation/Sigmoid.h
@@ -4,7 +4,8 @@
#include <Eigen/Core>
#include "../Config.h"

-namespace MiniDNN {
+namespace MiniDNN
+{


///
@@ -14,26 +15,27 @@ namespace MiniDNN {
///
class Sigmoid
{
-private:
-    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
-
-public:
-    // a = activation(z) = 1 / (1 + exp(-z))
-    // Z = [z1, ..., zn], A = [a1, ..., an], n observations
-    static inline void activate(const Matrix& Z, Matrix& A)
-    {
-        A.array() = Scalar(1) / (Scalar(1) + (-Z.array()).exp());
-    }
-
-    // Apply the Jacobian matrix J to a vector f
-    // J = d_a / d_z = diag(a .* (1 - a))
-    // g = J * f = a .* (1 - a) .* f
-    // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
-    // Note: When entering this function, Z and G may point to the same matrix
-    static inline void apply_jacobian(const Matrix& Z, const Matrix& A, const Matrix& F, Matrix& G)
-    {
-        G.array() = A.array() * (Scalar(1) - A.array()) * F.array();
-    }
+    private:
+        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
+
+    public:
+        // a = activation(z) = 1 / (1 + exp(-z))
+        // Z = [z1, ..., zn], A = [a1, ..., an], n observations
+        static inline void activate(const Matrix& Z, Matrix& A)
+        {
+            A.array() = Scalar(1) / (Scalar(1) + (-Z.array()).exp());
+        }
+
+        // Apply the Jacobian matrix J to a vector f
+        // J = d_a / d_z = diag(a .* (1 - a))
+        // g = J * f = a .* (1 - a) .* f
+        // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
+        // Note: When entering this function, Z and G may point to the same matrix
+        static inline void apply_jacobian(const Matrix& Z, const Matrix& A,
+                                          const Matrix& F, Matrix& G)
+        {
+            G.array() = A.array() * (Scalar(1) - A.array()) * F.array();
+        }
};
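
The a .* (1 - a) factor in the comment is the sigmoid derivative: writing $a = \sigma(z) = (1 + e^{-z})^{-1}$,

$$\frac{da}{dz} = \frac{e^{-z}}{(1 + e^{-z})^{2}} = \sigma(z)\bigl(1 - \sigma(z)\bigr) = a(1 - a),$$

so applying the diagonal Jacobian reduces to the elementwise product computed in apply_jacobian.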


52 changes: 27 additions & 25 deletions include/Activation/Softmax.h
@@ -1,7 +1,8 @@
#ifndef ACTIVATION_SOFTMAX_H_
#define ACTIVATION_SOFTMAX_H_

-namespace MiniDNN {
+namespace MiniDNN
+{


///
@@ -11,30 +12,31 @@ namespace MiniDNN {
///
class Softmax
{
-private:
-    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
-    typedef Eigen::Array<Scalar, 1, Eigen::Dynamic> RowArray;
-
-public:
-    // a = activation(z) = softmax(z)
-    // Z = [z1, ..., zn], A = [a1, ..., an], n observations
-    static inline void activate(const Matrix& Z, Matrix& A)
-    {
-        A.array() = (Z.rowwise() - Z.colwise().maxCoeff()).array().exp();
-        RowArray colsums = A.colwise().sum();
-        A.array().rowwise() /= colsums;
-    }
-
-    // Apply the Jacobian matrix J to a vector f
-    // J = d_a / d_z = diag(a) - a * a'
-    // g = J * f = a .* f - a * (a' * f) = a .* (f - a'f)
-    // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
-    // Note: When entering this function, Z and G may point to the same matrix
-    static inline void apply_jacobian(const Matrix& Z, const Matrix& A, const Matrix& F, Matrix& G)
-    {
-        RowArray a_dot_f = A.cwiseProduct(F).colwise().sum();
-        G.array() = A.array() * (F.array().rowwise() - a_dot_f);
-    }
+    private:
+        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
+        typedef Eigen::Array<Scalar, 1, Eigen::Dynamic> RowArray;
+
+    public:
+        // a = activation(z) = softmax(z)
+        // Z = [z1, ..., zn], A = [a1, ..., an], n observations
+        static inline void activate(const Matrix& Z, Matrix& A)
+        {
+            A.array() = (Z.rowwise() - Z.colwise().maxCoeff()).array().exp();
+            RowArray colsums = A.colwise().sum();
+            A.array().rowwise() /= colsums;
+        }
+
+        // Apply the Jacobian matrix J to a vector f
+        // J = d_a / d_z = diag(a) - a * a'
+        // g = J * f = a .* f - a * (a' * f) = a .* (f - a'f)
+        // Z = [z1, ..., zn], G = [g1, ..., gn], F = [f1, ..., fn]
+        // Note: When entering this function, Z and G may point to the same matrix
+        static inline void apply_jacobian(const Matrix& Z, const Matrix& A,
+                                          const Matrix& F, Matrix& G)
+        {
+            RowArray a_dot_f = A.cwiseProduct(F).colwise().sum();
+            G.array() = A.array() * (F.array().rowwise() - a_dot_f);
+        }
};
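
The maxCoeff() subtraction in activate is the standard numerical-stability trick: softmax is invariant under shifting its input by a constant, so subtracting each column's maximum before exponentiating avoids overflow without changing the result:

$$\operatorname{softmax}(z)_i = \frac{e^{z_i}}{\sum_j e^{z_j}} = \frac{e^{z_i - m}}{\sum_j e^{z_j - m}}, \qquad m = \max_j z_j.$$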


55 changes: 30 additions & 25 deletions include/Callback.h
@@ -4,7 +4,8 @@
#include <Eigen/Core>
#include "Config.h"

-namespace MiniDNN {
+namespace MiniDNN
+{


class Network;
@@ -27,30 +28,34 @@
///
class Callback
{
-protected:
-    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
-    typedef Eigen::RowVectorXi IntegerVector;
-
-public:
-    // Public members that will be set by the network during the training process
-    int m_nbatch;   // Number of total batches
-    int m_batch_id; // The index for the current mini-batch (0, 1, ..., m_nbatch-1)
-    int m_nepoch;   // Total number of epochs (one run on the whole data set) in the training process
-    int m_epoch_id; // The index for the current epoch (0, 1, ..., m_nepoch-1)
-
-    Callback() :
-        m_nbatch(0), m_batch_id(0), m_nepoch(0), m_epoch_id(0)
-    {}
-
-    virtual ~Callback() {}
-
-    // Before training a mini-batch
-    virtual void pre_training_batch(const Network* net, const Matrix& x, const Matrix& y) {}
-    virtual void pre_training_batch(const Network* net, const Matrix& x, const IntegerVector& y) {}
-
-    // After a mini-batch is trained
-    virtual void post_training_batch(const Network* net, const Matrix& x, const Matrix& y) {}
-    virtual void post_training_batch(const Network* net, const Matrix& x, const IntegerVector& y) {}
+    protected:
+        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
+        typedef Eigen::RowVectorXi IntegerVector;
+
+    public:
+        // Public members that will be set by the network during the training process
+        int m_nbatch;   // Number of total batches
+        int m_batch_id; // The index for the current mini-batch (0, 1, ..., m_nbatch-1)
+        int m_nepoch;   // Total number of epochs (one run on the whole data set) in the training process
+        int m_epoch_id; // The index for the current epoch (0, 1, ..., m_nepoch-1)
+
+        Callback() :
+            m_nbatch(0), m_batch_id(0), m_nepoch(0), m_epoch_id(0)
+        {}
+
+        virtual ~Callback() {}
+
+        // Before training a mini-batch
+        virtual void pre_training_batch(const Network* net, const Matrix& x,
+                                        const Matrix& y) {}
+        virtual void pre_training_batch(const Network* net, const Matrix& x,
+                                        const IntegerVector& y) {}
+
+        // After a mini-batch is trained
+        virtual void post_training_batch(const Network* net, const Matrix& x,
+                                         const Matrix& y) {}
+        virtual void post_training_batch(const Network* net, const Matrix& x,
+                                         const IntegerVector& y) {}
};
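
As a sketch of how these hooks are meant to be used, here is a hypothetical subclass (not part of this commit) that reports progress after every trained mini-batch; the m_* counters are filled in by the network, as noted above:

#include <iostream>
#include "Callback.h"

class ProgressCallback : public MiniDNN::Callback
{
    public:
        // Matrix and IntegerVector are the protected typedefs inherited
        // from Callback. Regression-style (Matrix) labels print directly.
        void post_training_batch(const MiniDNN::Network* net, const Matrix& x,
                                 const Matrix& y) override
        {
            std::cout << "epoch " << m_epoch_id + 1 << "/" << m_nepoch
                      << ", batch " << m_batch_id + 1 << "/" << m_nbatch
                      << std::endl;
        }

        // Classification-style (IntegerVector) labels reuse the same printer.
        void post_training_batch(const MiniDNN::Network* net, const Matrix& x,
                                 const IntegerVector& y) override
        {
            post_training_batch(net, x, Matrix());
        }
};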


