Commit

Merge branch 'master' into num_classes

rcurtin committed Jul 12, 2017
2 parents b62d2e5 + 57badcf commit 82d8aa1
Showing 114 changed files with 1,446 additions and 1,739 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -14,7 +14,7 @@ install:
- mkdir build && cd build && cmake -DDEBUG=OFF -DPROFILE=OFF .. && make -j2

script:
- travis_wait 30 ./bin/mlpack_test -p
- CTEST_OUTPUT_ON_FAILURE=1 travis_wait 30 ctest -j2

notifications:
email:
3 changes: 2 additions & 1 deletion COPYRIGHT.txt
@@ -80,7 +80,8 @@ Copyright:
Copyright 2017, Parminder Singh <parmsingh101@gmail.com>
Copyright 2017, CodeAi <benjamin.bales@assrc.us>
Copyright 2017, Franciszek Stokowacki <franek.stokowacki@gmail.com>

Copyright 2017, Samikshya Chand <samikshya289@gmail.com>

License: BSD-3-clause
All rights reserved.
.
1 change: 1 addition & 0 deletions src/mlpack/core.hpp
@@ -221,6 +221,7 @@
* - Parminder Singh <parmsingh101@gmail.com>
* - CodeAi (deep learning bug detector) <benjamin.bales@assrc.us>
* - Franciszek Stokowacki <franek.stokowacki@gmail.com>
* - Samikshya Chand <samikshya289@gmail.com>
*/

// First, include all of the prerequisites.
6 changes: 2 additions & 4 deletions src/mlpack/core/data/binarize.hpp
@@ -44,12 +44,11 @@ void Binarize(const arma::Mat<T>& input,
{
output.copy_size(input);

const int totalElems = static_cast<int>(input.n_elem);
const T *inPtr = input.memptr();
T *outPtr = output.memptr();

#pragma omp parallel for
for (int i = 0; i < totalElems; ++i)
for (omp_size_t i = 0; i < (omp_size_t) input.n_elem; ++i)
outPtr[i] = inPtr[i] > threshold;
}

@@ -81,10 +80,9 @@ void Binarize(const arma::Mat<T>& input,
const size_t dimension)
{
output = input;
const int totalCols = static_cast<int>(input.n_cols);

#pragma omp parallel for
for (int i = 0; i < totalCols; ++i)
for (omp_size_t i = 0; i < (omp_size_t) input.n_cols; ++i)
output(dimension, i) = input(dimension, i) > threshold;
}

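
Both loops in this file now use mlpack's omp_size_t for the OpenMP loop counter and cast the Armadillo element count to it, rather than narrowing to int by hand. As a rough illustration of why such an alias exists -- the definition below is an assumption for this sketch, not mlpack's actual one -- the counter type can follow the OpenMP version in use:

// Illustrative stand-in for an omp_size_t alias; not mlpack's definition.
// OpenMP 3.0 (_OPENMP >= 200805) allows unsigned loop counters, so a
// size_t-like type can index all n_elem elements without a narrowing cast;
// older OpenMP versions still require a signed counter.
#include <cstddef>

#if defined(_OPENMP) && (_OPENMP < 200805)
typedef long omp_size_t;          // signed counter for pre-3.0 OpenMP
#else
typedef std::size_t omp_size_t;   // unsigned counter, wide enough for n_elem
#endif
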
2 changes: 1 addition & 1 deletion src/mlpack/core/optimizers/ada_delta/CMakeLists.txt
@@ -1,6 +1,6 @@
set(SOURCES
ada_delta.hpp
ada_delta_impl.hpp
ada_delta.cpp
ada_delta_update.hpp
)

src/mlpack/core/optimizers/ada_delta/ada_delta_impl.hpp → src/mlpack/core/optimizers/ada_delta/ada_delta.cpp
@@ -11,24 +11,19 @@
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#ifndef MLPACK_CORE_OPTIMIZERS_ADA_DELTA_ADA_DELTA_IMPL_HPP
#define MLPACK_CORE_OPTIMIZERS_ADA_DELTA_ADA_DELTA_IMPL_HPP

#include "ada_delta.hpp"

namespace mlpack {
namespace optimization {

template<typename DecomposableFunctionType>
AdaDelta<DecomposableFunctionType>::AdaDelta(DecomposableFunctionType& function,
const double stepSize,
const double rho,
const double epsilon,
const size_t maxIterations,
const double tolerance,
const bool shuffle) :
optimizer(function,
stepSize,
AdaDelta::AdaDelta(const double stepSize,
const double rho,
const double epsilon,
const size_t maxIterations,
const double tolerance,
const bool shuffle) :
optimizer(stepSize,
maxIterations,
tolerance,
shuffle,
@@ -37,5 +32,3 @@ AdaDelta<DecomposableFunctionType>::AdaDelta(DecomposableFunctionType& function,

} // namespace optimization
} // namespace mlpack

#endif
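
The constructor above now just forwards its parameters into an SGD instance parameterized by an update policy (AdaDeltaUpdate). As a rough sketch of the shape such a policy takes -- the method signatures below are an assumption for illustration, not copied from mlpack headers -- a plain-SGD policy could look like this:

// Hypothetical update policy, shaped like the policies SGD<UpdatePolicy>
// composes with (AdaDeltaUpdate, AdaGradUpdate, ...).  Signatures are an
// assumption, not taken from this diff.
#include <mlpack/core.hpp>

class VanillaUpdateSketch
{
 public:
  // Called once before optimization so the policy can size any state it
  // keeps (AdaDelta, for instance, stores running averages of squared
  // gradients and squared updates).
  void Initialize(const size_t rows, const size_t cols)
  {
    (void) rows; (void) cols;  // plain SGD keeps no per-element state
  }

  // Called at every step: apply the gradient to the iterate.
  void Update(arma::mat& iterate,
              const double stepSize,
              const arma::mat& gradient)
  {
    iterate -= stepSize * gradient;
  }
};
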
36 changes: 4 additions & 32 deletions src/mlpack/core/optimizers/ada_delta/ada_delta.hpp
@@ -57,11 +57,7 @@ namespace optimization {
* of points in the dataset, and Evaluate(coordinates, 0) will evaluate the
* objective function on the first point in the dataset (presumably, the dataset
* is held internally in the DecomposableFunctionType).
*
* @tparam DecomposableFunctionType Decomposable objective function type to be
* minimized.
*/
template<typename DecomposableFunctionType>
class AdaDelta
{
public:
@@ -73,7 +69,6 @@ class AdaDelta
* are processed (i.e., one iteration equals one point; one iteration does not
* equal one pass over the dataset).
*
* @param function Function to be optimized (minimized).
* @param stepSize Step size for each iteration.
* @param rho Smoothing constant.
* @param epsilon Value used to initialise the mean squared gradient
@@ -84,8 +79,7 @@ class AdaDelta
* @param shuffle If true, the function order is shuffled; otherwise, each
* function is visited in linear order.
*/
AdaDelta(DecomposableFunctionType& function,
const double stepSize = 1.0,
AdaDelta(const double stepSize = 1.0,
const double rho = 0.95,
const double epsilon = 1e-6,
const size_t maxIterations = 100000,
@@ -97,36 +91,17 @@ class AdaDelta
* be modified to store the finishing point of the algorithm, and the final
* objective value is returned.
*
* @tparam DecomposableFunctionType Type of the function to optimize.
* @param function Function to optimize.
* @param iterate Starting point (will be modified).
* @return Objective value of the final point.
*/
template<typename DecomposableFunctionType>
double Optimize(DecomposableFunctionType& function, arma::mat& iterate)
{
return optimizer.Optimize(function, iterate);
}

/**
* Optimize the given function using AdaDelta. The given starting point will
* be modified to store the finishing point of the algorithm, and the final
* objective value is returned.
*
* @param iterate Starting point (will be modified).
* @return Objective value of the final point.
*/
double Optimize(arma::mat& iterate)
{
return optimizer.Optimize(iterate);
}

//! Get the instantiated function to be optimized.
const DecomposableFunctionType& Function() const
{
return optimizer.Function();
}
//! Modify the instantiated function.
DecomposableFunctionType& Function() { return optimizer.Function(); }

//! Get the step size.
double StepSize() const { return optimizer.StepSize(); }
//! Modify the step size.
@@ -159,13 +134,10 @@ class AdaDelta

private:
//! The Stochastic Gradient Descent object with AdaDelta policy.
SGD<DecomposableFunctionType, AdaDeltaUpdate> optimizer;
SGD<AdaDeltaUpdate> optimizer;
};

} // namespace optimization
} // namespace mlpack

// Include implementation.
#include "ada_delta_impl.hpp"

#endif
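
With this change AdaDelta itself is no longer templated on the function being optimized; the DecomposableFunctionType parameter moves onto Optimize(). A minimal usage sketch under that reading of the diff (the helper function below is invented for illustration):

// Hypothetical usage of the de-templatized interface shown above; f may be
// any type meeting the DecomposableFunctionType requirements described in
// the class documentation.
#include <mlpack/core.hpp>
#include <mlpack/core/optimizers/ada_delta/ada_delta.hpp>

template<typename DecomposableFunctionType>
double RunAdaDelta(DecomposableFunctionType& f, arma::mat& coordinates)
{
  // stepSize = 1.0, rho = 0.95, epsilon = 1e-6; remaining parameters keep
  // their defaults.
  mlpack::optimization::AdaDelta optimizer(1.0, 0.95, 1e-6);

  // The function is supplied at Optimize() time, so one optimizer object can
  // be reused across different functions.
  return optimizer.Optimize(f, coordinates);
}
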
2 changes: 1 addition & 1 deletion src/mlpack/core/optimizers/ada_grad/CMakeLists.txt
@@ -1,6 +1,6 @@
set(SOURCES
ada_grad.hpp
ada_grad_impl.hpp
ada_grad.cpp
ada_grad_update.hpp
)

31 changes: 31 additions & 0 deletions src/mlpack/core/optimizers/ada_grad/ada_grad.cpp
@@ -0,0 +1,31 @@
/**
* @file ada_grad_impl.hpp
* @author Abhinav Moudgil
*
* Implementation of AdaGrad optimizer.
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/

#include "ada_grad.hpp"

namespace mlpack {
namespace optimization {

AdaGrad::AdaGrad(const double stepSize,
const double epsilon,
const size_t maxIterations,
const double tolerance,
const bool shuffle) :
optimizer(stepSize,
maxIterations,
tolerance,
shuffle,
AdaGradUpdate(epsilon))
{ /* Nothing to do. */ }

} // namespace optimization
} // namespace mlpack
37 changes: 4 additions & 33 deletions src/mlpack/core/optimizers/ada_grad/ada_grad.hpp
@@ -56,11 +56,7 @@ namespace optimization {
* of points in the dataset, and Evaluate(coordinates, 0) will evaluate the
* objective function on the first point in the dataset (presumably, the dataset
* is held internally in the DecomposableFunctionType).
*
* @tparam DecomposableFunctionType Decomposable objective function type to be
* minimized.
*/
template<typename DecomposableFunctionType>
class AdaGrad
{
public:
@@ -72,7 +68,6 @@ class AdaGrad
* are processed (i.e., one iteration equals one point; one iteration does not
* equal one pass over the dataset).
*
* @param function Function to be optimized (minimized).
* @param stepSize Step size for each iteration
* @param epsilon Value used to initialise the squared gradient parameter.
* @param maxIterations Maximum number of iterations allowed (0 means no
@@ -81,8 +76,7 @@ class AdaGrad
* @param shuffle If true, the function order is shuffled; otherwise, each
* function is visited in linear order.
*/
AdaGrad(DecomposableFunctionType& function,
const double stepSize = 0.01,
AdaGrad(const double stepSize = 0.01,
const double epsilon = 1e-8,
const size_t maxIterations = 100000,
const double tolerance = 1e-5,
@@ -93,37 +87,17 @@ class AdaGrad
* be modified to store the finishing point of the algorithm, and the final
* objective value is returned.
*
* @tparam DecomposableFunctionType Type of the function to optimize.
* @param function Function to optimize.
* @param iterate Starting point (will be modified).
* @return Objective value of the final point.
*/
template<typename DecomposableFunctionType>
double Optimize(DecomposableFunctionType& function, arma::mat& iterate)
{
return optimizer.Optimize(function, iterate);
}

/**
* Optimize the given function using AdaGrad. The given starting point will
* be modified to store the finishing point of the algorithm, and the final
* objective value is returned.
*
* @param iterate Starting point (will be modified).
* @return Objective value of the final point.
*/
double Optimize(arma::mat& iterate)
{
return optimizer.Optimize(iterate);
}

//! Get the instantiated function to be optimized.
const DecomposableFunctionType& Function() const
{
return optimizer.Function();
}

//! Modify the instantiated function.
DecomposableFunctionType& Function() { return optimizer.Function(); }

//! Get the step size.
double StepSize() const { return optimizer.StepSize(); }
//! Modify the step size.
@@ -151,13 +125,10 @@ class AdaGrad

private:
//! The Stochastic Gradient Descent object with AdaGrad policy.
SGD<DecomposableFunctionType, AdaGradUpdate> optimizer;
SGD<AdaGradUpdate> optimizer;
};

} // namespace optimization
} // namespace mlpack

// Include implementation.
#include "ada_grad_impl.hpp"

#endif
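
The class documentation for both AdaGrad and AdaDelta describes what a DecomposableFunctionType must provide: NumFunctions(), Evaluate(coordinates, i), and Gradient(coordinates, i, gradient). A toy example of such a function -- invented here purely for illustration, not part of this commit -- is a sum of squared distances to a fixed set of points, decomposed one point per term:

// Toy DecomposableFunctionType sketch (not part of mlpack).  Coordinates are
// a single d x 1 column; each column of 'points' contributes one separable
// term to the objective.
#include <mlpack/core.hpp>

class ExampleSquaredErrorFunction
{
 public:
  explicit ExampleSquaredErrorFunction(const arma::mat& points) :
      points(points) { }

  // Number of separable terms (one per point).
  size_t NumFunctions() const { return points.n_cols; }

  // Objective contribution of point i at the given coordinates.
  double Evaluate(const arma::mat& coordinates, const size_t i) const
  {
    const arma::vec diff = coordinates - points.col(i);
    return arma::dot(diff, diff);
  }

  // Gradient of term i with respect to the coordinates.
  void Gradient(const arma::mat& coordinates,
                const size_t i,
                arma::mat& gradient) const
  {
    gradient = 2.0 * (coordinates - points.col(i));
  }

 private:
  const arma::mat& points;
};
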
39 changes: 0 additions & 39 deletions src/mlpack/core/optimizers/ada_grad/ada_grad_impl.hpp

This file was deleted.
