Skip to content

Commit

Permalink
Use Eigen instead of Stan Math
Browse files Browse the repository at this point in the history
Eigen's compile-time forward-mode differentiation (Eigen::AutoDiffScalar) is much faster here than Stan Math's runtime reverse-mode autodiff.
Revert "autodiff gaussian width parameter"

This reverts commit e9bced7.
  • Loading branch information
gf712 committed Nov 5, 2019
1 parent 5815a3d commit a35cf20
Show file tree
Hide file tree
Showing 13 changed files with 9 additions and 1,200 deletions.
Binary file removed .RData
Binary file not shown.
15 changes: 0 additions & 15 deletions .Rhistory

This file was deleted.

22 changes: 0 additions & 22 deletions Base.h

This file was deleted.

5 changes: 0 additions & 5 deletions Derived.h

This file was deleted.

4 changes: 0 additions & 4 deletions Interface.h

This file was deleted.

153 changes: 0 additions & 153 deletions src/shogun/kernel/Autodiff_benchmark.cc

This file was deleted.

17 changes: 9 additions & 8 deletions src/shogun/kernel/GaussianKernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,14 @@
* Tonmoy Saikia, Sergey Lisitsyn, Matt Aasted, Sanuj Sharma
*/

#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>
#include <shogun/lib/common.h>
#include <shogun/kernel/GaussianKernel.h>
#include <shogun/features/DotFeatures.h>
#include <shogun/distance/EuclideanDistance.h>
#include <shogun/mathematics/Math.h>
#include <stan/math/rev/scal.hpp>


using namespace shogun;

Expand Down Expand Up @@ -92,26 +94,25 @@ void CGaussianKernel::set_width(float64_t w)
SGMatrix<float64_t> CGaussianKernel::get_parameter_gradient(const TParameter* param, index_t index)
{
using std::exp;
using std::log;

require(lhs, "Left hand side features must be set!");
require(rhs, "Right hand side features must be set!");

if (!strcmp(param->m_name, "log_width"))
{
SGMatrix<float64_t> derivative=SGMatrix<float64_t>(num_lhs, num_rhs);
stan::math::var log_width = m_log_width;
auto constant_part = exp(log_width * 2.0) * 2.0;
using EigenScalar = Eigen::Matrix<float64_t, 1, 1>;
Eigen::AutoDiffScalar<EigenScalar> eigen_log_width = m_log_width;

for (int k=0; k<num_rhs; k++)
{
#pragma omp parallel for
for (int j=0; j<num_lhs; j++)
{
auto f = exp(-CShiftInvariantKernel::distance(j, k) / constant_part);
f.grad();
derivative(j, k) = log_width.adj();
stan::math::set_zero_all_adjoints();
eigen_log_width.derivatives() = EigenScalar::Unit(1,0);
auto el = CShiftInvariantKernel::distance(j, k);
Eigen::AutoDiffScalar<EigenScalar> kernel = exp(-el / (exp(eigen_log_width * 2.0) * 2.0));
derivative(j, k) = kernel.derivatives()(0);
}
}
return derivative;
Expand Down

0 comments on commit a35cf20

Please sign in to comment.