diff --git a/HISTORY.md b/HISTORY.md
index ab7a041408a..b117ee68958 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,5 +1,12 @@
### mlpack 2.0.2
###### 2016-??-??
+ * Added the function LSHSearch::Projections(), which returns an arma::cube
+ with each projection table in a slice (#663). Instead of Projection(i), you
+ should now use Projections().slice(i).
+
+ * A new constructor has been added to LSHSearch that creates objects using
+ projection tables provided in an arma::cube (#663).
+
* Handle zero-variance dimensions in DET (#515).
* Add MiniBatchSGD optimizer (src/mlpack/core/optimizers/minibatch_sgd/) and
@@ -22,6 +29,9 @@
mlpack_allknn and mlpack_allkfn programs will remain as copies until mlpack
3.0.0.
+ * Add --random_initialization option to mlpack_hmm_train, for use when no
+ labels are provided.
+
### mlpack 2.0.1
###### 2016-02-04
* Fix CMake to properly detect when MKL is being used with Armadillo.
diff --git a/doc/guide/formats.hpp b/doc/guide/formats.hpp
index 4b2d7378c2b..24e9a1b43ae 100644
--- a/doc/guide/formats.hpp
+++ b/doc/guide/formats.hpp
@@ -16,9 +16,10 @@ suitable numeric representation. Therefore, in general, datasets on disk should
contain only numeric features in order to be loaded successfully by mlpack.
The types of datasets that mlpack can load are roughly the same as the types of
-matrices that Armadillo can load. When datasets are loaded by mlpack, \b the
-\b "file's type is detected using the file's extension". mlpack supports the
-following file types:
+matrices that Armadillo can load. However, the load functionality that mlpack
+provides only supports loading dense datasets. When datasets are loaded
+by mlpack, the file's type is detected using the file's extension.
+mlpack supports the following file types:
- csv (comma-separated values), denoted by .csv or .txt
- tsv (tab-separated values), denoted by .tsv, .csv, or .txt
@@ -101,6 +102,42 @@ As with the command-line programs, the type of data to be loaded is
automatically detected from the filename extension. For more details, see the
mlpack::data::Load() and mlpack::data::Save() documentation.
+@section sparseload Dealing with sparse matrices
+
+As mentioned earlier, support for loading sparse matrices in mlpack is not
+available at this time. To use a sparse matrix with mlpack code, you will have
+to write a C++ program instead of using any of the command-line tools, because
+the command-line tools all use dense datasets internally. (There is one
+exception: the \c mlpack_cf program, for collaborative filtering, loads sparse
+coordinate lists.)
+
+In addition, the \c mlpack::data::Load() function does not support loading any
+sparse format; so the best idea is to use undocumented Armadillo functionality
+to load coordinate lists. Suppose you have a coordinate list file like the one
+below:
+
+\code
+$ cat cl.csv
+0 0 0.332
+1 3 3.126
+4 4 1.333
+\endcode
+
+This represents a 5x5 matrix with three nonzero elements. We can load this
+using Armadillo:
+
+\code
+arma::sp_mat matrix;
+matrix.load("cl.csv", arma::coord_ascii);
+matrix = matrix.t(); // We must transpose after load!
+\endcode
+
+The transposition after loading is necessary if the coordinate list is in
+row-major format (that is, if each row in the matrix represents a point and each
+column represents a feature). Be sure that the matrix you use with mlpack
+methods has points as columns and features as rows! See \ref matrices for more
+information.
+
@section formatcat Categorical features and command line programs
In some situations it is useful to represent data not just as a numeric matrix
diff --git a/doc/guide/iodoc.hpp b/doc/guide/iodoc.hpp
index 33eda5eccf2..8cdf417b7ec 100644
--- a/doc/guide/iodoc.hpp
+++ b/doc/guide/iodoc.hpp
@@ -132,7 +132,7 @@ Documentation is automatically generated using those macros, and when the
program is run with --help the following is displayed:
@code
-$ pca --help
+$ mlpack_pca --help
Principal Components Analysis
This program performs principal components analysis on the given dataset. It
diff --git a/doc/tutorials/range_search/range_search.txt b/doc/tutorials/range_search/range_search.txt
index a1a3a979e64..277a8444682 100644
--- a/doc/tutorials/range_search/range_search.txt
+++ b/doc/tutorials/range_search/range_search.txt
@@ -67,7 +67,7 @@ option is used so that output is given. Further documentation on each
individual option can be found by typing
@code
-$ range_search --help
+$ mlpack_range_search --help
@endcode
@subsection cli_ex1_rstut One dataset, points with distance <= 0.01
@@ -147,7 +147,7 @@ empty.
@subsection cli_ex2_rstut Query and reference dataset, range [1.0, 1.5]
@code
-$ range_search -q query_dataset.csv -r reference_dataset.csv -n \
+$ mlpack_range_search -q query_dataset.csv -r reference_dataset.csv -n \
> neighbors_out.csv -d distances_out.csv -L 1.0 -U 1.5 -v
[INFO ] Loading 'reference_dataset.csv' as CSV data. Size is 3 x 1000.
[INFO ] Loaded reference data from 'reference_dataset.csv' (3x1000).
@@ -196,8 +196,8 @@ faster computation. The leaf size is modifiable through the command-line
interface, as shown below.
@code
-$ range_search -r dataset.csv -n neighbors_out.csv -d distances_out.csv -L 0.7 \
-> -U 0.8 -l 15 -v
+$ mlpack_range_search -r dataset.csv -n neighbors_out.csv -d distances_out.csv \
+> -L 0.7 -U 0.8 -l 15 -v
[INFO ] Loading 'dataset.csv' as CSV data. Size is 3 x 1000.
[INFO ] Loaded reference data from 'dataset.csv' (3x1000).
[INFO ] Building reference tree...
diff --git a/src/mlpack/core/data/CMakeLists.txt b/src/mlpack/core/data/CMakeLists.txt
index ee4f8a8634e..ea87d0f13ab 100644
--- a/src/mlpack/core/data/CMakeLists.txt
+++ b/src/mlpack/core/data/CMakeLists.txt
@@ -14,6 +14,7 @@ set(SOURCES
save.hpp
save_impl.hpp
serialization_shim.hpp
+ split_data.hpp
)
# add directory name to sources
diff --git a/src/mlpack/core/data/load_impl.hpp b/src/mlpack/core/data/load_impl.hpp
index 947b3600e97..5479bab17d5 100644
--- a/src/mlpack/core/data/load_impl.hpp
+++ b/src/mlpack/core/data/load_impl.hpp
@@ -29,6 +29,75 @@
namespace mlpack {
namespace data {
+namespace details{
+
+template<typename Tokenizer>
+std::vector<std::string> ToTokens(Tokenizer &lineTok)
+{
+  std::vector<std::string> tokens;
+  std::transform(std::begin(lineTok), std::end(lineTok),
+                 std::back_inserter(tokens),
+                 [&tokens](std::string const &str)
+  {
+    std::string trimmedToken(str);
+    boost::trim(trimmedToken);
+    return std::move(trimmedToken);
+  });
+
+  return tokens;
+}
+
+inline
+void TransPoseTokens(std::vector<std::vector<std::string>> const &input,
+                     std::vector<std::string> &output,
+                     size_t index)
+{
+  output.clear();
+  for(size_t i = 0; i != input.size(); ++i)
+  {
+    output.emplace_back(input[i][index]);
+  }
+}
+
+template<typename eT>
+void MapToNumerical(const std::vector<std::string> &tokens,
+                    size_t &row,
+                    DatasetInfo &info,
+                    arma::Mat<eT> &matrix)
+{
+  auto notNumber = [](const std::string &str)
+  {
+    eT val(0);
+    std::stringstream token;
+    token.str(str);
+    token>>val;
+    return token.fail();
+  };
+
+  const bool notNumeric = std::any_of(std::begin(tokens),
+                                      std::end(tokens), notNumber);
+  if(notNumeric)
+  {
+    for(size_t i = 0; i != tokens.size(); ++i)
+    {
+      const eT val = static_cast<eT>(info.MapString(tokens[i], row));
+      matrix.at(row, i) = val;
+    }
+  }
+  else
+  {
+    std::stringstream token;
+    for(size_t i = 0; i != tokens.size(); ++i)
+    {
+      token.str(tokens[i]);
+      token>>matrix.at(row, i);
+      token.clear();
+    }
+  }
+}
+
+}
+
 template<typename eT>
 bool inline inplace_transpose(arma::Mat<eT>& X)
{
@@ -37,7 +106,7 @@ bool inline inplace_transpose(arma::Mat& X)
X = arma::trans(X);
return false;
}
- catch (std::bad_alloc& exception)
+ catch (std::bad_alloc&)
{
#if (ARMA_VERSION_MAJOR >= 4) || \
((ARMA_VERSION_MAJOR == 3) && (ARMA_VERSION_MINOR >= 930))
@@ -386,85 +455,42 @@ bool Load(const std::string& filename,
}
stream.close();
- stream.open(filename, std::fstream::in);
+ stream.open(filename, std::fstream::in);
- // Extract line by line.
- std::stringstream token;
- size_t row = 0;
- while (!stream.bad() && !stream.fail() && !stream.eof())
+ if(transpose)
{
- std::getline(stream, buffer, '\n');
-
- // Look at each token. Unfortunately we have to do this character by
- // character, because things may be escaped in quotes.
- Tokenizer lineTok(buffer, sep);
- size_t col = 0;
- for (Tokenizer::iterator it = lineTok.begin(); it != lineTok.end(); ++it)
+ std::vector> tokensArray;
+ std::vector tokens;
+ while (!stream.bad() && !stream.fail() && !stream.eof())
{
- // Attempt to extract as type eT. If that fails, we'll assume it's a
- // string and map it (which may involve retroactively mapping everything
- // we've seen so far).
- token.clear();
- token.str(*it);
-
- eT val = eT(0);
- token >> val;
-
- if (token.fail())
+ // Extract line by line.
+ std::getline(stream, buffer, '\n');
+ Tokenizer lineTok(buffer, sep);
+ tokens = details::ToTokens(lineTok);
+ if(tokens.size() == cols)
{
- // Conversion failed; but it may be a NaN or inf. Armadillo has
- // convenient functions to check.
- if (!arma::diskio::convert_naninf(val, token.str()))
- {
- // We need to perform a mapping.
- const size_t dim = (transpose) ? col : row;
- if (info.Type(dim) == Datatype::numeric)
- {
- // We must map everything we have seen up to this point and change
- // the values in the matrix.
- if (transpose)
- {
- // Whatever we've seen so far has successfully mapped to an eT.
- // So we need to print it back to a string. We'll use
- // Armadillo's functionality for that.
- for (size_t i = 0; i < row; ++i)
- {
- std::stringstream sstr;
- arma::arma_ostream::print_elem(sstr, matrix.at(i, col),
- false);
- eT newVal = info.MapString(sstr.str(), col);
- matrix.at(i, col) = newVal;
- }
- }
- else
- {
- for (size_t i = 0; i < col; ++i)
- {
- std::stringstream sstr;
- arma::arma_ostream::print_elem(sstr, matrix.at(row, i),
- false);
- eT newVal = info.MapString(sstr.str(), row);
- matrix.at(row, i) = newVal;
- }
- }
- }
-
- // Strip whitespace from either side of the string.
- std::string trimmedToken(token.str());
- boost::trim(trimmedToken);
- val = info.MapString(trimmedToken, dim);
- }
+ tokensArray.emplace_back(std::move(tokens));
}
-
- if (transpose)
- matrix(col, row) = val;
- else
- matrix(row, col) = val;
-
- ++col;
}
-
- ++row;
+ for(size_t i = 0; i != cols; ++i)
+ {
+ details::TransPoseTokens(tokensArray, tokens, i);
+ details::MapToNumerical(tokens, i,
+ info, matrix);
+ }
+ }
+ else
+ {
+ size_t row = 0;
+ while (!stream.bad() && !stream.fail() && !stream.eof())
+ {
+ // Extract line by line.
+ std::getline(stream, buffer, '\n');
+ Tokenizer lineTok(buffer, sep);
+ details::MapToNumerical(details::ToTokens(lineTok), row,
+ info, matrix);
+ ++row;
+ }
}
}
else if (extension == "arff")
diff --git a/src/mlpack/core/data/serialization_template_version.hpp b/src/mlpack/core/data/serialization_template_version.hpp
new file mode 100644
index 00000000000..6b617a8e1d1
--- /dev/null
+++ b/src/mlpack/core/data/serialization_template_version.hpp
@@ -0,0 +1,37 @@
+/**
+ * @file serialization_template_version.hpp
+ * @author Ryan Curtin
+ *
+ * A better version of the BOOST_CLASS_VERSION() macro that supports templated
+ * classes.
+ */
+#ifndef MLPACK_CORE_DATA_SERIALIZATION_TEMPLATE_VERSION_HPP
+#define MLPACK_CORE_DATA_SERIALIZATION_TEMPLATE_VERSION_HPP
+
+/**
+ * Use this like BOOST_CLASS_VERSION(), but for templated classes. The first
+ * argument is the signature for the template. Here is an example for
+ * math::Range:
+ *
+ * BOOST_TEMPLATE_CLASS_VERSION(template<typename T>, math::Range<T>, 1);
+ */
+#define BOOST_TEMPLATE_CLASS_VERSION(SIGNATURE, T, N) \
+namespace boost { \
+namespace serialization { \
+SIGNATURE \
+struct version<T> \
+{ \
+  typedef mpl::int_<N> type; \
+  typedef mpl::integral_c_tag tag; \
+  BOOST_STATIC_CONSTANT(int, value = version<T>::type::value); \
+  BOOST_MPL_ASSERT(( \
+      boost::mpl::less< \
+          boost::mpl::int_<N>, \
+          boost::mpl::int_<256> \
+      > \
+  )); \
+}; \
+} \
+}
+
+#endif
diff --git a/src/mlpack/core/data/split_data.hpp b/src/mlpack/core/data/split_data.hpp
index 38196fdbe00..d02f6c663e4 100644
--- a/src/mlpack/core/data/split_data.hpp
+++ b/src/mlpack/core/data/split_data.hpp
@@ -1,18 +1,17 @@
/**
* @file split_data.hpp
- * @author Tham Ngap Wei
+ * @author Tham Ngap Wei, Keon Kim
*
- * Defines TrainTestSplit(), a utility function to split a dataset into a
+ * Defines Split(), a utility function to split a dataset into a
* training set and a test set.
*/
-#ifndef MLPACK_CORE_UTIL_SPLIT_DATA_HPP
-#define MLPACK_CORE_UTIL_SPLIT_DATA_HPP
+#ifndef MLPACK_CORE_DATA_SPLIT_DATA_HPP
+#define MLPACK_CORE_DATA_SPLIT_DATA_HPP
 #include <mlpack/core.hpp>
namespace mlpack {
namespace data {
-
/**
* Given an input dataset and labels, split into a training set and test set.
* Example usage below. This overload places the split dataset into the four
@@ -29,7 +28,7 @@ namespace data {
*
* // Split the dataset into a training and test set, with 30% of the data being
* // held out for the test set.
- * TrainTestSplit(input, label, trainData,
+ * Split(input, label, trainData,
* testData, trainLabel, testLabel, 0.3);
* @endcode
*
@@ -42,13 +41,13 @@ namespace data {
* @param testRatio Percentage of dataset to use for test set (between 0 and 1).
*/
 template<typename T, typename U>
-void TrainTestSplit(const arma::Mat<T>& input,
-                    const arma::Row<U>& inputLabel,
-                    arma::Mat<T>& trainData,
-                    arma::Mat<T>& testData,
-                    arma::Row<U>& trainLabel,
-                    arma::Row<U>& testLabel,
-                    const double testRatio)
+void Split(const arma::Mat<T>& input,
+           const arma::Row<U>& inputLabel,
+           arma::Mat<T>& trainData,
+           arma::Mat<T>& testData,
+           arma::Row<U>& trainLabel,
+           arma::Row<U>& testLabel,
+           const double testRatio)
 {
   const size_t testSize = static_cast<size_t>(input.n_cols * testRatio);
   const size_t trainSize = input.n_cols - testSize;
@@ -74,6 +73,52 @@ void TrainTestSplit(const arma::Mat& input,
}
}
+/**
+ * Given an input dataset, split into a training set and test set.
+ * Example usage below. This overload places the split dataset into the two
+ * output parameters given (trainData, testData).
+ *
+ * @code
+ * arma::mat input = loadData();
+ * arma::mat trainData;
+ * arma::mat testData;
+ * math::RandomSeed(100); // Set the seed if you like.
+ *
+ * // Split the dataset into a training and test set, with 30% of the data being
+ * // held out for the test set.
+ * Split(input, trainData, testData, 0.3);
+ * @endcode
+ *
+ * @param input Input dataset to split.
+ * @param trainData Matrix to store training data into.
+ * @param testData Matrix to store test data into.
+ * @param testRatio Percentage of dataset to use for test set (between 0 and 1).
+ */
+template<typename T>
+void Split(const arma::Mat<T>& input,
+           arma::Mat<T>& trainData,
+           arma::Mat<T>& testData,
+           const double testRatio)
+{
+  const size_t testSize = static_cast<size_t>(input.n_cols * testRatio);
+  const size_t trainSize = input.n_cols - testSize;
+  trainData.set_size(input.n_rows, trainSize);
+  testData.set_size(input.n_rows, testSize);
+
+  const arma::Col<size_t> order =
+      arma::shuffle(arma::linspace<arma::Col<size_t>>(0, input.n_cols - 1,
+                                                      input.n_cols));
+
+ for (size_t i = 0; i != trainSize; ++i)
+ {
+ trainData.col(i) = input.col(order[i]);
+ }
+ for (size_t i = 0; i != testSize; ++i)
+ {
+ testData.col(i) = input.col(order[i + trainSize]);
+ }
+}
+
/**
* Given an input dataset and labels, split into a training set and test set.
* Example usage below. This overload returns the split dataset as a std::tuple
@@ -84,34 +129,62 @@ void TrainTestSplit(const arma::Mat& input,
* @code
* arma::mat input = loadData();
* arma::Row label = loadLabel();
- * auto splitResult = TrainTestSplit(input, label, 0.2);
+ * auto splitResult = Split(input, label, 0.2);
* @endcode
*
* @param input Input dataset to split.
* @param label Input labels to split.
- * @param trainData Matrix to store training data into.
- * @param testData Matrix to store test data into.
- * @param trainLabel Vector to store training labels into.
- * @param testLabel Vector to store test labels into.
* @param testRatio Percentage of dataset to use for test set (between 0 and 1).
* @return std::tuple containing trainData (arma::Mat), testData
* (arma::Mat), trainLabel (arma::Row), and testLabel (arma::Row).
*/
 template<typename T, typename U>
 std::tuple<arma::Mat<T>, arma::Mat<T>, arma::Row<U>, arma::Row<U>>
-TrainTestSplit(const arma::Mat<T>& input,
-               const arma::Row<U>& inputLabel,
-               const double testRatio)
+Split(const arma::Mat<T>& input,
+      const arma::Row<U>& inputLabel,
+      const double testRatio)
{
arma::Mat trainData;
arma::Mat testData;
arma::Row trainLabel;
arma::Row testLabel;
- TrainTestSplit(input, inputLabel, trainData, testData, trainLabel, testLabel,
+ Split(input, inputLabel, trainData, testData, trainLabel, testLabel,
testRatio);
- return std::make_tuple(trainData, testData, trainLabel, testLabel);
+ return std::make_tuple(std::move(trainData),
+ std::move(testData),
+ std::move(trainLabel),
+ std::move(testLabel));
+}
+
+/**
+ * Given an input dataset, split into a training set and test set.
+ * Example usage below. This overload returns the split dataset as a std::tuple
+ * with two elements: an arma::Mat containing the training data and an
+ * arma::Mat containing the test data.
+ *
+ * @code
+ * arma::mat input = loadData();
+ * auto splitResult = Split(input, 0.2);
+ * @endcode
+ *
+ * @param input Input dataset to split.
+ * @param testRatio Percentage of dataset to use for test set (between 0 and 1).
+ * @return std::tuple containing trainData (arma::Mat)
+ * and testData (arma::Mat).
+ */
+template<typename T>
+std::tuple<arma::Mat<T>, arma::Mat<T>>
+Split(const arma::Mat<T>& input,
+      const double testRatio)
+{
+ arma::Mat trainData;
+ arma::Mat testData;
+ Split(input, trainData, testData, testRatio);
+
+ return std::make_tuple(std::move(trainData),
+ std::move(testData));
}
} // namespace data
diff --git a/src/mlpack/core/dists/gaussian_distribution.cpp b/src/mlpack/core/dists/gaussian_distribution.cpp
index cd9559fd58f..b8d1b042b63 100644
--- a/src/mlpack/core/dists/gaussian_distribution.cpp
+++ b/src/mlpack/core/dists/gaussian_distribution.cpp
@@ -6,6 +6,7 @@
* Implementation of Gaussian distribution class.
*/
#include "gaussian_distribution.hpp"
+#include <mlpack/methods/gmm/positive_definite_constraint.hpp>
using namespace mlpack;
using namespace mlpack::distribution;
@@ -116,18 +117,7 @@ void GaussianDistribution::Train(const arma::mat& observations)
covariance /= (observations.n_cols - 1);
// Ensure that the covariance is positive definite.
- if (det(covariance) <= 1e-50)
- {
- Log::Debug << "GaussianDistribution::Train(): Covariance matrix is not "
- << "positive definite. Adding perturbation." << std::endl;
-
- double perturbation = 1e-30;
- while (det(covariance) <= 1e-50)
- {
- covariance.diag() += perturbation;
- perturbation *= 10; // Slow, but we don't want to add too much.
- }
- }
+ gmm::PositiveDefiniteConstraint::ApplyConstraint(covariance);
FactorCovariance();
}
@@ -173,7 +163,8 @@ void GaussianDistribution::Train(const arma::mat& observations,
}
// Normalize.
- mean /= sumProb;
+ if (sumProb > 0)
+ mean /= sumProb;
// Now find the covariance.
for (size_t i = 0; i < observations.n_cols; i++)
@@ -183,21 +174,11 @@ void GaussianDistribution::Train(const arma::mat& observations,
}
// This is probably biased, but I don't know how to unbias it.
- covariance /= sumProb;
+ if (sumProb > 0)
+ covariance /= sumProb;
// Ensure that the covariance is positive definite.
- if (det(covariance) <= 1e-50)
- {
- Log::Debug << "GaussianDistribution::Train(): Covariance matrix is not "
- << "positive definite. Adding perturbation." << std::endl;
-
- double perturbation = 1e-30;
- while (det(covariance) <= 1e-50)
- {
- covariance.diag() += perturbation;
- perturbation *= 10; // Slow, but we don't want to add too much.
- }
- }
+ gmm::PositiveDefiniteConstraint::ApplyConstraint(covariance);
FactorCovariance();
}
diff --git a/src/mlpack/core/tree/ballbound.hpp b/src/mlpack/core/tree/ballbound.hpp
index 68a16d759dd..14f289a14ee 100644
--- a/src/mlpack/core/tree/ballbound.hpp
+++ b/src/mlpack/core/tree/ballbound.hpp
@@ -16,14 +16,14 @@ namespace bound {
/**
* Ball bound encloses a set of points at a specific distance (radius) from a
- * specific point (center). TMetricType is the custom metric type that defaults
+ * specific point (center). MetricType is the custom metric type that defaults
* to the Euclidean (L2) distance.
*
+ * @tparam MetricType metric type used in the distance measure.
* @tparam VecType Type of vector (arma::vec or arma::sp_vec or similar).
- * @tparam TMetricType metric type used in the distance measure.
*/
-template<typename VecType = arma::vec,
-         typename TMetricType = metric::LMetric<2, true>>
+template<typename MetricType = metric::LMetric<2, true>,
+         typename VecType = arma::vec>
class BallBound
{
public:
@@ -31,8 +31,6 @@ class BallBound
typedef typename VecType::elem_type ElemType;
//! A public version of the vector type.
typedef VecType Vec;
- //! Needed for BinarySpaceTree.
- typedef TMetricType MetricType;
private:
//! The radius of the ball bound.
@@ -40,7 +38,7 @@ class BallBound
//! The center of the ball bound.
VecType center;
//! The metric used in this bound.
- TMetricType* metric;
+ MetricType* metric;
/**
* To know whether this object allocated memory to the metric member
@@ -179,9 +177,9 @@ class BallBound
ElemType Diameter() const { return 2 * radius; }
//! Returns the distance metric used in this bound.
- const TMetricType& Metric() const { return *metric; }
+ const MetricType& Metric() const { return *metric; }
//! Modify the distance metric used in this bound.
- TMetricType& Metric() { return *metric; }
+ MetricType& Metric() { return *metric; }
//! Serialize the bound.
template
@@ -189,8 +187,8 @@ class BallBound
};
//! A specialization of BoundTraits for this bound type.
-template<typename VecType, typename TMetricType>
-struct BoundTraits<BallBound<VecType, TMetricType>>
+template<typename MetricType, typename VecType>
+struct BoundTraits<BallBound<MetricType, VecType>>
{
//! These bounds are potentially loose in some dimensions.
const static bool HasTightBounds = false;
diff --git a/src/mlpack/core/tree/ballbound_impl.hpp b/src/mlpack/core/tree/ballbound_impl.hpp
index 8e0e658fab5..885acb5a8e0 100644
--- a/src/mlpack/core/tree/ballbound_impl.hpp
+++ b/src/mlpack/core/tree/ballbound_impl.hpp
@@ -18,10 +18,10 @@ namespace mlpack {
namespace bound {
//! Empty Constructor.
-template
-BallBound::BallBound() :
+template
+BallBound::BallBound() :
radius(std::numeric_limits::lowest()),
- metric(new TMetricType()),
+ metric(new MetricType()),
ownsMetric(true)
{ /* Nothing to do. */ }
@@ -30,11 +30,11 @@ BallBound::BallBound() :
*
* @param dimension Dimensionality of ball bound.
*/
-template
-BallBound::BallBound(const size_t dimension) :
+template
+BallBound::BallBound(const size_t dimension) :
radius(std::numeric_limits::lowest()),
center(dimension),
- metric(new TMetricType()),
+ metric(new MetricType()),
ownsMetric(true)
{ /* Nothing to do. */ }
@@ -44,18 +44,18 @@ BallBound::BallBound(const size_t dimension) :
* @param radius Radius of ball bound.
* @param center Center of ball bound.
*/
-template
-BallBound::BallBound(const ElemType radius,
+template
+BallBound::BallBound(const ElemType radius,
const VecType& center) :
radius(radius),
center(center),
- metric(new TMetricType()),
+ metric(new MetricType()),
ownsMetric(true)
{ /* Nothing to do. */ }
//! Copy Constructor. To prevent memory leaks.
-template
-BallBound::BallBound(const BallBound& other) :
+template
+BallBound::BallBound(const BallBound& other) :
radius(other.radius),
center(other.center),
metric(other.metric),
@@ -63,8 +63,8 @@ BallBound::BallBound(const BallBound& other) :
{ /* Nothing to do. */ }
//! For the same reason as the copy constructor: to prevent memory leaks.
-template
-BallBound& BallBound::operator=(
+template
+BallBound& BallBound::operator=(
const BallBound& other)
{
radius = other.radius;
@@ -74,8 +74,8 @@ BallBound& BallBound::operator=(
}
//! Move constructor.
-template
-BallBound::BallBound(BallBound&& other) :
+template
+BallBound::BallBound(BallBound&& other) :
radius(other.radius),
center(other.center),
metric(other.metric),
@@ -89,17 +89,17 @@ BallBound::BallBound(BallBound&& other) :
}
//! Destructor to release allocated memory.
-template
-BallBound::~BallBound()
+template
+BallBound::~BallBound()
{
if (ownsMetric)
delete metric;
}
//! Get the range in a certain dimension.
-template
-math::RangeType::ElemType>
-BallBound::operator[](const size_t i) const
+template
+math::RangeType::ElemType>
+BallBound::operator[](const size_t i) const
{
if (radius < 0)
return math::Range();
@@ -110,8 +110,8 @@ BallBound::operator[](const size_t i) const
/**
* Determines if a point is within the bound.
*/
-template
-bool BallBound::Contains(const VecType& point) const
+template
+bool BallBound::Contains(const VecType& point) const
{
if (radius < 0)
return false;
@@ -122,10 +122,10 @@ bool BallBound::Contains(const VecType& point) const
/**
* Calculates minimum bound-to-point squared distance.
*/
-template
+template
template
-typename BallBound::ElemType
-BallBound::MinDistance(
+typename BallBound::ElemType
+BallBound::MinDistance(
const OtherVecType& point,
typename boost::enable_if>* /* junk */) const
{
@@ -138,9 +138,9 @@ BallBound::MinDistance(
/**
* Calculates minimum bound-to-bound squared distance.
*/
-template
-typename BallBound::ElemType
-BallBound::MinDistance(const BallBound& other)
+template
+typename BallBound::ElemType
+BallBound::MinDistance(const BallBound& other)
const
{
if (radius < 0)
@@ -156,10 +156,10 @@ BallBound::MinDistance(const BallBound& other)
/**
* Computes maximum distance.
*/
-template
+template
template
-typename BallBound::ElemType
-BallBound::MaxDistance(
+typename BallBound::ElemType
+BallBound::MaxDistance(
const OtherVecType& point,
typename boost::enable_if >* /* junk */) const
{
@@ -172,9 +172,9 @@ BallBound::MaxDistance(
/**
* Computes maximum distance.
*/
-template
-typename BallBound::ElemType
-BallBound::MaxDistance(const BallBound& other)
+template
+typename BallBound::ElemType
+BallBound::MaxDistance(const BallBound& other)
const
{
if (radius < 0)
@@ -188,10 +188,10 @@ BallBound::MaxDistance(const BallBound& other)
*
* Example: bound1.MinDistanceSq(other) for minimum squared distance.
*/
-template
+template
template
-math::RangeType::ElemType>
-BallBound::RangeDistance(
+math::RangeType::ElemType>
+BallBound::RangeDistance(
const OtherVecType& point,
typename boost::enable_if >* /* junk */) const
{
@@ -206,9 +206,9 @@ BallBound::RangeDistance(
}
}
-template
-math::RangeType::ElemType>
-BallBound::RangeDistance(
+template
+math::RangeType::ElemType>
+BallBound::RangeDistance(
const BallBound& other) const
{
if (radius < 0)
@@ -226,9 +226,9 @@ BallBound::RangeDistance(
/**
* Expand the bound to include the given bound.
*
-template
+template
const BallBound&
-BallBound::operator|=(
+BallBound::operator|=(
const BallBound& other)
{
double dist = metric->Evaluate(center, other);
@@ -246,10 +246,10 @@ BallBound::operator|=(
* The difference lies in the way we initialize the ball bound. The way we
* expand the bound is same.
*/
-template
+template
template
-const BallBound&
-BallBound::operator|=(const MatType& data)
+const BallBound&
+BallBound::operator|=(const MatType& data)
{
if (radius < 0)
{
@@ -277,9 +277,9 @@ BallBound::operator|=(const MatType& data)
}
//! Serialize the BallBound.
-template
+template
template
-void BallBound::Serialize(
+void BallBound::Serialize(
Archive& ar,
const unsigned int /* version */)
{
diff --git a/src/mlpack/core/tree/binary_space_tree/typedef.hpp b/src/mlpack/core/tree/binary_space_tree/typedef.hpp
index 7d58f6750e7..28145d11bb2 100644
--- a/src/mlpack/core/tree/binary_space_tree/typedef.hpp
+++ b/src/mlpack/core/tree/binary_space_tree/typedef.hpp
@@ -103,7 +103,7 @@ template
using BallTree = BinarySpaceTree;
/**
@@ -132,7 +132,7 @@ template
using MeanSplitBallTree = BinarySpaceTree;
} // namespace tree
diff --git a/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
index 49adbe8377f..44dbf95fe77 100644
--- a/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
@@ -60,7 +60,7 @@ void RStarTreeSplit::SplitLeafNode(TreeType *tree,std::vector& r
tree->Children()[(tree->NumChildren())++] = copy;
assert(tree->NumChildren() == 1);
- copy->Split().SplitLeafNode(copy,relevels);
+ copy->Split().SplitLeafNode(copy, relevels);
return;
}
@@ -77,7 +77,7 @@ void RStarTreeSplit::SplitLeafNode(TreeType *tree,std::vector& r
size_t p = tree->MaxLeafSize() * 0.3; // The paper says this works the best.
if (p == 0)
{
- tree->Split().SplitLeafNode(tree,relevels);
+ tree->Split().SplitLeafNode(tree, relevels);
return;
}
@@ -270,7 +270,7 @@ void RStarTreeSplit::SplitLeafNode(TreeType *tree,std::vector& r
// just in case, we use an assert.
assert(par->NumChildren() <= par->MaxNumChildren() + 1);
if (par->NumChildren() == par->MaxNumChildren() + 1)
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
assert(treeOne->Parent()->NumChildren() <= treeOne->MaxNumChildren());
assert(treeOne->Parent()->NumChildren() >= treeOne->MinNumChildren());
@@ -306,7 +306,7 @@ bool RStarTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector
tree->NullifyData();
tree->Children()[(tree->NumChildren())++] = copy;
- copy->Split().SplitNonLeafNode(copy,relevels);
+ copy->Split().SplitNonLeafNode(copy, relevels);
return true;
}
@@ -662,7 +662,7 @@ bool RStarTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector
assert(par->NumChildren() <= par->MaxNumChildren() + 1);
if (par->NumChildren() == par->MaxNumChildren() + 1)
{
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
}
// We have to update the children of each of these new nodes so that they
diff --git a/src/mlpack/core/tree/rectangle_tree/r_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/r_tree_split_impl.hpp
index 442e49f7ed0..69bf041d5ab 100644
--- a/src/mlpack/core/tree/rectangle_tree/r_tree_split_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/r_tree_split_impl.hpp
@@ -53,7 +53,7 @@ void RTreeSplit::SplitLeafNode(TreeType *tree,std::vector& relev
tree->NullifyData();
// Because this was a leaf node, numChildren must be 0.
tree->Children()[(tree->NumChildren())++] = copy;
- copy->Split().SplitLeafNode(copy,relevels);
+ copy->Split().SplitLeafNode(copy, relevels);
return;
}
@@ -84,7 +84,7 @@ void RTreeSplit::SplitLeafNode(TreeType *tree,std::vector& relev
// just in case, we use an assert.
assert(par->NumChildren() <= par->MaxNumChildren() + 1);
if (par->NumChildren() == par->MaxNumChildren() + 1)
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
assert(treeOne->Parent()->NumChildren() <= treeOne->MaxNumChildren());
assert(treeOne->Parent()->NumChildren() >= treeOne->MinNumChildren());
@@ -116,7 +116,7 @@ bool RTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector& re
tree->NumChildren() = 0;
tree->NullifyData();
tree->Children()[(tree->NumChildren())++] = copy;
- copy->Split().SplitNonLeafNode(copy,relevels);
+ copy->Split().SplitNonLeafNode(copy, relevels);
return true;
}
@@ -149,7 +149,7 @@ bool RTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector& re
assert(par->NumChildren() <= par->MaxNumChildren() + 1);
if (par->NumChildren() == par->MaxNumChildren() + 1)
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
// We have to update the children of each of these new nodes so that they
// record the correct parent.
diff --git a/src/mlpack/core/tree/rectangle_tree/rectangle_tree_impl.hpp b/src/mlpack/core/tree/rectangle_tree/rectangle_tree_impl.hpp
index 8184f89c06b..d993a450901 100644
--- a/src/mlpack/core/tree/rectangle_tree/rectangle_tree_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/rectangle_tree_impl.hpp
@@ -667,7 +667,7 @@ void RectangleTree::
// If we are full, then we need to split (or at least try). The SplitType
// takes care of this and of moving up the tree if necessary.
- split.SplitLeafNode(this,relevels);
+ split.SplitLeafNode(this, relevels);
}
else
{
@@ -677,7 +677,7 @@ void RectangleTree::
// If we are full, then we need to split (or at least try). The SplitType
// takes care of this and of moving up the tree if necessary.
- split.SplitNonLeafNode(this,relevels);
+ split.SplitNonLeafNode(this, relevels);
}
}
diff --git a/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
index a619b725a54..0b434542001 100644
--- a/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
@@ -66,7 +66,7 @@ void XTreeSplit::SplitLeafNode(TreeType *tree,std::vector& relev
// Because this was a leaf node, numChildren must be 0.
tree->Children()[(tree->NumChildren())++] = copy;
assert(tree->NumChildren() == 1);
- copy->Split().SplitLeafNode(copy,relevels);
+ copy->Split().SplitLeafNode(copy, relevels);
return;
}
@@ -84,7 +84,7 @@ void XTreeSplit::SplitLeafNode(TreeType *tree,std::vector& relev
size_t p = tree->MaxLeafSize() * 0.3;
if (p == 0)
{
- tree->Split().SplitLeafNode(tree,relevels);
+ tree->Split().SplitLeafNode(tree, relevels);
return;
}
@@ -297,7 +297,7 @@ void XTreeSplit::SplitLeafNode(TreeType *tree,std::vector& relev
// in case, we use an assert.
assert(par->NumChildren() <= par->MaxNumChildren() + 1);
if (par->NumChildren() == par->MaxNumChildren() + 1)
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
assert(treeOne->Parent()->NumChildren() <=
treeOne->Parent()->MaxNumChildren());
@@ -336,7 +336,7 @@ bool XTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector& re
tree->NumChildren() = 0;
tree->NullifyData();
tree->Children()[(tree->NumChildren())++] = copy;
- copy->Split().SplitNonLeafNode(copy,relevels);
+ copy->Split().SplitNonLeafNode(copy, relevels);
return true;
}
@@ -830,7 +830,7 @@ bool XTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector& re
if (par->NumChildren() == par->MaxNumChildren() + 1)
{
- par->Split().SplitNonLeafNode(par,relevels);
+ par->Split().SplitNonLeafNode(par, relevels);
}
// We have to update the children of each of these new nodes so that they
diff --git a/src/mlpack/core/tree/traversal_info.hpp b/src/mlpack/core/tree/traversal_info.hpp
index 43941e2e656..4ad01843f59 100644
--- a/src/mlpack/core/tree/traversal_info.hpp
+++ b/src/mlpack/core/tree/traversal_info.hpp
@@ -9,6 +9,9 @@
#ifndef MLPACK_CORE_TREE_TRAVERSAL_INFO_HPP
#define MLPACK_CORE_TREE_TRAVERSAL_INFO_HPP
+namespace mlpack {
+namespace tree {
+
/**
* The TraversalInfo class holds traversal information which is used in
* dual-tree (and single-tree) traversals. A traversal should be updating the
@@ -82,4 +85,7 @@ class TraversalInfo
double lastBaseCase;
};
+} // namespace tree
+} // namespace mlpack
+
#endif
diff --git a/src/mlpack/methods/CMakeLists.txt b/src/mlpack/methods/CMakeLists.txt
index d0ea04ca587..5734d5c9d8a 100644
--- a/src/mlpack/methods/CMakeLists.txt
+++ b/src/mlpack/methods/CMakeLists.txt
@@ -15,6 +15,7 @@ endmacro ()
# Recurse into each method mlpack provides.
set(DIRS
+ preprocess
adaboost
amf
ann
@@ -47,6 +48,7 @@ set(DIRS
radical
range_search
rann
+ rmva
regularized_svd
softmax_regression
sparse_autoencoder
diff --git a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
index 5f197978b68..626d9ea0dcf 100644
--- a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
@@ -33,9 +33,9 @@ class LogisticFunction
template<typename eT>
static double fn(const eT x)
{
- if(x < arma::Math<eT>::log_max())
+ if(x < arma::Datum<eT>::log_max)
{
- if (x > -arma::Math<eT>::log_max())
+ if (x > -arma::Datum<eT>::log_max)
return 1.0 / (1.0 + std::exp(-x));
return 0.0;
diff --git a/src/mlpack/methods/ann/layer/constant_layer.hpp b/src/mlpack/methods/ann/layer/constant_layer.hpp
index a142a678564..67701deba93 100644
--- a/src/mlpack/methods/ann/layer/constant_layer.hpp
+++ b/src/mlpack/methods/ann/layer/constant_layer.hpp
@@ -5,8 +5,8 @@
* Definition of the ConstantLayer class, which outputs a constant value given
* any input.
*/
-#ifndef __MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
-#define __MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
+#ifndef MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
#include
diff --git a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp b/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
index a1d19e04ded..651a8a7ab72 100644
--- a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
+++ b/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
@@ -5,8 +5,8 @@
* Definition of the DropConnectLayer class, which implements a regularizer
* that randomly sets connections to zero. Preventing units from co-adapting.
*/
-#ifndef __MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
-#define __MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
+#ifndef MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
#include
diff --git a/src/mlpack/methods/ann/layer/empty_layer.hpp b/src/mlpack/methods/ann/layer/empty_layer.hpp
index 9e41a08440c..11cb6a0a243 100644
--- a/src/mlpack/methods/ann/layer/empty_layer.hpp
+++ b/src/mlpack/methods/ann/layer/empty_layer.hpp
@@ -4,8 +4,8 @@
*
* Definition of the EmptyLayer class, which is basically empty.
*/
-#ifndef __MLPACK_METHODS_ANN_LAYER_EMPTY_LAYER_HPP
-#define __MLPACK_METHODS_ANN_LAYER_EMPTY_LAYER_HPP
+#ifndef MLPACK_METHODS_ANN_LAYER_EMPTY_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_EMPTY_LAYER_HPP
namespace mlpack{
namespace ann /** Artificial Neural Network. */ {
diff --git a/src/mlpack/methods/ann/layer/glimpse_layer.hpp b/src/mlpack/methods/ann/layer/glimpse_layer.hpp
new file mode 100644
index 00000000000..228ce7d494c
--- /dev/null
+++ b/src/mlpack/methods/ann/layer/glimpse_layer.hpp
@@ -0,0 +1,479 @@
+/**
+ * @file glimpse_layer.hpp
+ * @author Marcus Edel
+ *
+ * Definition of the GlimpseLayer class, which takes an input image and a
+ * location to extract a retina-like representation of the input image at
+ * different increasing scales.
+ *
+ * For more information, see the following.
+ *
+ * @code
+ * @article{CoRR2014,
+ * author = {Volodymyr Mnih, Nicolas Heess, Alex Graves, Koray Kavukcuoglu},
+ * title = {Recurrent Models of Visual Attention},
+ * journal = {CoRR},
+ * volume = {abs/1406.6247},
+ * year = {2014},
+ * }
+ * @endcode
+ */
+#ifndef MLPACK_METHODS_ANN_LAYER_GLIMPSE_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_GLIMPSE_LAYER_HPP
+
+#include
+#include
+#include
+
+namespace mlpack {
+namespace ann /** Artificial Neural Network. */ {
+
+/**
+ * The glimpse layer returns a retina-like representation
+ * (down-scaled cropped images) of increasing scale around a given location in a
+ * given image.
+ *
+ * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
+ * arma::sp_mat or arma::cube).
+ * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
+ * arma::sp_mat or arma::cube).
+ */
+template <
+ typename InputDataType = arma::cube,
+ typename OutputDataType = arma::cube
+>
+class GlimpseLayer
+{
+ public:
+
+ /**
+ * Create the GlimpseLayer object using the specified ratio and rescale
+ * parameter.
+ *
+ * @param inSize The size of the input units.
+ * @param size The used glimpse size (height = width).
+ * @param depth The number of patches to crop per glimpse.
+ * @param scale The scaling factor used to create the increasing retina-like
+ * representation.
+ */
+ GlimpseLayer(const size_t inSize,
+ const size_t size,
+ const size_t depth = 3,
+ const size_t scale = 2) :
+ inSize(inSize),
+ size(size),
+ depth(depth),
+ scale(scale)
+ {
+ // Nothing to do here.
+ }
+
+ /**
+ * Ordinary feed forward pass of the glimpse layer.
+ *
+ * @param input Input data used for evaluating the specified function.
+ * @param output Resulting output activation.
+ */
+ template<typename eT>
+ void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
+ {
+ output = arma::Cube<eT>(size, size, depth * input.n_slices);
+
+ inputDepth = input.n_slices / inSize;
+
+ for (size_t inputIdx = 0; inputIdx < inSize; inputIdx++)
+ {
+ for (size_t depthIdx = 0, glimpseSize = size;
+ depthIdx < depth; depthIdx++, glimpseSize *= scale)
+ {
+ size_t padSize = std::floor((glimpseSize - 1) / 2);
+
+ arma::Cube<eT> inputPadded = arma::zeros<arma::Cube<eT> >(
+ input.n_rows + padSize * 2, input.n_cols + padSize * 2,
+ input.n_slices / inSize);
+
+ inputPadded.tube(padSize, padSize, padSize + input.n_rows - 1,
+ padSize + input.n_cols - 1) = input.subcube(0, 0,
+ inputIdx * inputDepth, input.n_rows - 1, input.n_cols - 1,
+ (inputIdx + 1) * inputDepth - 1);
+
+ size_t h = inputPadded.n_rows - glimpseSize;
+ size_t w = inputPadded.n_cols - glimpseSize;
+
+ size_t x = std::min(h, (size_t) std::max(0.0,
+ (location(0, inputIdx) + 1) / 2.0 * h));
+ size_t y = std::min(w, (size_t) std::max(0.0,
+ (location(1, inputIdx) + 1) / 2.0 * w));
+
+ if (depthIdx == 0)
+ {
+ for (size_t j = (inputIdx + depthIdx), paddedSlice = 0;
+ j < output.n_slices; j += (inSize * depth), paddedSlice++)
+ {
+ output.slice(j) = inputPadded.subcube(x, y,
+ paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
+ paddedSlice);
+ }
+ }
+ else
+ {
+ for (size_t j = (inputIdx + depthIdx * (depth - 1)), paddedSlice = 0;
+ j < output.n_slices; j += (inSize * depth), paddedSlice++)
+ {
+ arma::Mat<eT> poolingInput = inputPadded.subcube(x, y,
+ paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
+ paddedSlice);
+
+ if (scale == 2)
+ {
+ Pooling(glimpseSize / size, poolingInput, output.slice(j));
+ }
+ else
+ {
+ ReSampling(poolingInput, output.slice(j));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Ordinary feed backward pass of the glimpse layer.
+ *
+ * @param input The propagated input activation.
+ * @param gy The backpropagated error.
+ * @param g The calculated gradient.
+ */
+ template<typename InputType, typename ErrorType, typename eT>
+ void Backward(const InputType& input,
+ const ErrorType& gy,
+ arma::Cube<eT>& g)
+ {
+ // Generate a cube using the backpropagated error matrix.
+ arma::Cube<eT> mappedError = arma::zeros<arma::Cube<eT> >(input.n_rows,
+ input.n_cols, input.n_slices);
+
+ for (size_t s = 0, j = 0; s < mappedError.n_slices; s+= gy.n_cols, j++)
+ {
+ for (size_t i = 0; i < gy.n_cols; i++)
+ {
+ arma::Col<eT> temp = gy.col(i).subvec(
+ j * input.n_rows * input.n_cols,
+ (j + 1) * input.n_rows * input.n_cols - 1);
+
+ mappedError.slice(s + i) = arma::Mat