Decomposable Kernels feature now working
To Do:
1. Re-implement addition and multiplication of kernels in terms of
DecomposableKernel class
mandar2812 committed Dec 15, 2016
1 parent 3378a31 commit bbf07cf
Showing 5 changed files with 60 additions and 9 deletions.
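
Before the file-by-file diff, a minimal usage sketch of what this commit enables. DecomposableCovariance and breezeDVSplitEncoder are defined in the diff below; the kernel classes, import paths and constructor arguments are assumptions drawn from the rest of DynaML, not part of this commit:

import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.DynaMLPipe._
import io.github.mandar2812.dynaml.kernels._

// Split 4-dimensional inputs into two 2-dimensional blocks,
// evaluate one kernel per block and sum the per-block values.
// Distinct kernel classes are used because each constituent is
// keyed by the last segment of its toString.
val kernel = new DecomposableCovariance[DenseVector[Double]](
  new RBFKernel(1.5), new LaplacianKernel(2.5))(
  breezeDVSplitEncoder(2), DecomposableCovariance.:+:)

val x = DenseVector(0.1, 0.2, 0.3, 0.4)
val y = DenseVector(0.5, 0.6, 0.7, 0.8)

val kxy = kernel.evaluate(x, y)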
2 changes: 1 addition & 1 deletion build.sbt
@@ -12,7 +12,7 @@ packageDescription := "DynaML is a scala library/repl for implementing and worki
"which can be extended easily to implement advanced models for small and large scale applications.\n\n"+
"But the library can also be used as an educational/research tool for data analysis."

val mainVersion = "v1.4.1-beta.6"
val mainVersion = "v1.4.1-beta.7"

val dataDirectory = settingKey[File]("The directory holding the data files for running example scripts")

2 changes: 2 additions & 0 deletions conf/DynaMLInit.scala
@@ -35,6 +35,8 @@ import io.github.mandar2812.dynaml.examples._
import io.github.mandar2812.dynaml.models.neuralnets.TransferFunctions._
//The probability API
import io.github.mandar2812.dynaml.probability._
//Wavelet API
import io.github.mandar2812.dynaml.wavelets._
//OpenML support
import io.github.mandar2812.dynaml.openml.OpenML
//Renjin imports
@@ -24,13 +24,15 @@ import io.github.mandar2812.dynaml.evaluation.RegressionMetrics
import io.github.mandar2812.dynaml.models.ParameterizedLearner
import io.github.mandar2812.dynaml.models.gp.AbstractGPRegressionModel
import io.github.mandar2812.dynaml.optimization.{CoupledSimulatedAnnealing, GPMLOptimizer, GloballyOptWithGrad, GridSearch}
import io.github.mandar2812.dynaml.pipes.{DataPipe, ReversibleScaler, Scaler, StreamDataPipe}
import io.github.mandar2812.dynaml.pipes._
import io.github.mandar2812.dynaml.utils.{GaussianScaler, MVGaussianScaler, MinMaxScaler}
import io.github.mandar2812.dynaml.wavelets.{GroupedHaarWaveletFilter, HaarWaveletFilter, InvGroupedHaarWaveletFilter, InverseHaarWaveletFilter}
import org.apache.log4j.Logger
import org.renjin.script.RenjinScriptEngine
import org.renjin.sexp._

import scala.reflect.ClassTag

/**
* @author mandar2812 date 3/2/16.
*
@@ -557,6 +559,34 @@ object DynaMLPipe {

val invGroupedHaarWaveletFilter = (orders: Array[Int]) => InvGroupedHaarWaveletFilter(orders)


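/**
* Creates an [[Encoder]] which replicates an
* input instance n times; the inverse takes
* the head of the replicated array.
* */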
def genericReplicationEncoder[I](n: Int)(implicit tag: ClassTag[I]): Encoder[I, Array[I]] = Encoder[I, Array[I]]((v: I) => {
Array.fill[I](n)(v)
}, (vs: Array[I]) => {
vs.head
})

/**
* Creates an [[Encoder]] which can split
* [[DenseVector]] instances into uniform splits and
* put them back together.
* */
val breezeDVSplitEncoder = (n: Int) => Encoder((v: DenseVector[Double]) => {
v.toArray.grouped(n).map(DenseVector(_)).toArray
}, (vs: Array[DenseVector[Double]]) => {
DenseVector(vs.map(_.toArray).reduceLeft((a,b) => a++b))
})

/**
* Creates an [[Encoder]] which replicates a
* [[DenseVector]] instance n times.
* */
val breezeDVReplicationEncoder = (n: Int) => Encoder((v: DenseVector[Double]) => {
Array.fill(n)(v)
}, (vs: Array[DenseVector[Double]]) => {
vs.head
})

def trainParametricModel[
G, T, Q, R, S, M <: ParameterizedLearner[G, T, Q, R, S]
](regParameter: Double, step: Double = 0.05,
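
As a quick sanity check, both breeze encoders above round-trip: applying the encoder and then its inverse pipe i recovers the original vector. A sketch using only the pipes defined in this file:

import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.DynaMLPipe._

val v = DenseVector(1.0, 2.0, 3.0, 4.0)

// Split into blocks of size 2, then reassemble via the inverse pipe.
val split = breezeDVSplitEncoder(2)
assert(split.i(split(v)) == v)

// Replicate 3 times; the inverse keeps only the head copy.
val rep = breezeDVReplicationEncoder(3)
assert(rep.i(rep(v)) == v)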
@@ -1,6 +1,7 @@
package io.github.mandar2812.dynaml.kernels

import breeze.linalg.DenseMatrix
import io.github.mandar2812.dynaml.DynaMLPipe
import io.github.mandar2812.dynaml.algebra.{PartitionedPSDMatrix, PartitionedVector}
import io.github.mandar2812.dynaml.pipes.{DataPipe, Encoder}

@@ -82,25 +83,35 @@ abstract class CompositeCovariance[T]
*/
class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
implicit encoding: Encoder[S, Array[S]],
reducer: DataPipe[Array[Double], Double]) extends CompositeCovariance[S] {
reducer: DataPipe[Array[Double], Double] = DecomposableCovariance.:+:) extends CompositeCovariance[S] {

val kernelMap = kernels.map(k => (k.toString.split(".").last, k)).toMap
val kernelMap = kernels.map(k => (k.toString.split("\\.").last, k)).toMap

state = kernels.map(k => {
val id = k.toString.split("\\.").last
k.state.map(h => (id+"/"+h._1, h._2))
}).reduceLeft(_++_)

override val hyper_parameters: List[String] = kernels.map(k => {
val id = k.toString.split(".").last
val id = k.toString.split("\\.").last
k.hyper_parameters.map(h => id+"/"+h)
}).reduceLeft(_++_)

blocked_hyper_parameters = kernels.map(k => {
val id = k.toString.split(".").last
val id = k.toString.split("\\.").last
k.blocked_hyper_parameters.map(h => id+"/"+h)
}).reduceLeft(_++_)

override def repr: DecomposableCovariance[S] = this

override def setHyperParameters(h: Map[String, Double]): DecomposableCovariance.this.type = {
//Sanity Check
assert(effective_hyper_parameters.forall(h.contains),
"All hyper parameters must be contained in the arguments")
//group the hyper params by kernel id
h.toSeq.map(kv => {
val idS = kv._1.split("/")
(idS.head, (idS.last, kv._2))
(idS.head, (idS.tail.mkString("/"), kv._2))
}).groupBy(_._1).map(hypC => {
val kid = hypC._1
val hyper_params = hypC._2.map(_._2).toMap
@@ -116,4 +127,12 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
coupleAndKern._2.evaluate(u,v)
}))
}
}

object DecomposableCovariance {

val :+: = DataPipe((l: Array[Double]) => l.sum)

val :*: = DataPipe((l: Array[Double]) => l.product)

}
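
Hyper-parameters of the constituent kernels are exposed under namespaced keys of the form <kernelId>/<hyperParameter>, the id being the last segment of each kernel's toString. Since the exact ids depend on the runtime instances, a safe pattern is to discover the keys before calling setHyperParameters; a sketch, reusing the kernel value from the example at the top of this page:

// Print the namespaced keys, e.g. "<kernelId>/bandwidth".
println(kernel.hyper_parameters)

// Assign values to all non-blocked hyper-parameters via the discovered keys.
kernel.setHyperParameters(
  kernel.effective_hyper_parameters.map(h => (h, 0.5)).toMap)

The two reducers mirror the to-do in the commit message: :+: sums the per-block kernel evaluations, giving an additive covariance over the splits, while :*: multiplies them, the building blocks for re-implementing kernel addition and multiplication on top of DecomposableCovariance.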
@@ -40,13 +40,13 @@ object Scaler {
*
*
* */
trait ReversibleScaler[S] extends Scaler[S] {
trait ReversibleScaler[S] extends Scaler[S] with Encoder[S, S]{

/**
* The inverse operation of this scaling.
*
* */
val i: Scaler[S]
override val i: Scaler[S]

def *[T](that: ReversibleScaler[T]) = {

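
With ReversibleScaler now an Encoder[S, S], any reversible scaler plugs directly into the encoder machinery used above: apply is the forward transform and i its inverse. A sketch using GaussianScaler, which DynaMLPipe.scala imports; its constructor arguments (per-dimension mean and standard deviation) are an assumption here:

import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.utils.GaussianScaler

// Assumed signature: GaussianScaler(mean, sigma) over DenseVector[Double].
val sc = GaussianScaler(DenseVector(1.0, 2.0), DenseVector(0.5, 0.25))

val x = DenseVector(1.5, 2.5)
val z = sc(x)       // forward: centre and scale each dimension
val xBack = sc.i(z) // inverse: recover the original vector

The * combinator, whose body is truncated in this diff, presumably composes two reversible scalers into one acting on pairs of inputs.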
