Commit 7c67f94: Added TensorCombinationKernel

mandar2812 committed Dec 16, 2016
1 parent 3671f35 commit 7c67f94

Showing 6 changed files with 111 additions and 40 deletions.
build.sbt (6 changes: 3 additions & 3 deletions)

@@ -12,7 +12,7 @@ packageDescription := "DynaML is a scala library/repl for implementing and worki
   "which can be extended easily to implement advanced models for small and large scale applications.\n\n"+
   "But the library can also be used as an educational/research tool for data analysis."
 
-val mainVersion = "v1.4.1-beta.8"
+val mainVersion = "v1.4.1-beta.9"
 
 val dataDirectory = settingKey[File]("The directory holding the data files for running example scripts")
 
@@ -42,7 +42,7 @@ lazy val pipes = (project in file("dynaml-pipes")).settings(baseSettings:_*)
   .settings(commonSettings:_*)
   .settings(
     name := "dynaml-pipes",
-    version := "1.0"
+    version := mainVersion
   )
 
 lazy val core = (project in file("dynaml-core")).settings(baseSettings)
@@ -59,7 +59,7 @@ lazy val examples = (project in file("dynaml-examples"))
   .settings(commonSettings:_*)
   .settings(
     name := "dynaml-examples",
-    version := "1.0"
+    version := mainVersion
   ).dependsOn(pipes, core)
 
 lazy val DynaML = (project in file(".")).enablePlugins(JavaAppPackaging, BuildInfoPlugin)
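The pipes and examples subprojects previously carried a hard-coded version "1.0"; they now track mainVersion, so every artifact of a release shares one version string. A minimal sketch of the pattern (the module name is illustrative):

// sketch: one version value shared by all sbt subprojects
val mainVersion = "v1.4.1-beta.9"

lazy val someModule = (project in file("some-module"))
  .settings(
    name := "some-module",
    version := mainVersion  // tracks the release instead of a hard-coded "1.0"
  )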
dynaml-core/scripts/kernelModels.scala (4 changes: 2 additions & 2 deletions)

@@ -11,6 +11,6 @@ val otherSumK = kernel + other_kernel
 val sumK2 = new DecomposableCovariance(otherSumK, other_kernel1)(sp1)
 
 AbottPowerPlant(sumK2, new DiracKernel(0.09),
-  opt = Map("globalOpt" -> "GS", "grid" -> "1", "step" -> "0.004"),
-  num_training = 3000, num_test = 1000, deltaT = 2, column = 7)
+  opt = Map("globalOpt" -> "GS", "grid" -> "2", "step" -> "0.004"),
+  num_training = 3000, num_test = 1000, deltaT = 2, column = 8)

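In the opt map, "globalOpt" -> "GS" selects grid search over the kernel hyper-parameters; raising "grid" from "1" to "2" presumably evaluates two candidate points per hyper-parameter, spaced "step" apart, and column = 8 presumably selects a different target column of the power-plant data. A self-contained sketch of what such a grid search does, with a toy objective standing in for the model's fitness measure (all names are illustrative, not DynaML's API):

// sketch: grid search over hyper-parameters, "grid" candidate points per
// dimension spaced "step" apart above the current value
def gridSearch(
  current: Map[String, Double], grid: Int, step: Double)(
  objective: Map[String, Double] => Double): Map[String, Double] = {

  // candidate values for one hyper-parameter
  def axis(v: Double): Seq[Double] = (1 to grid).map(i => v + i*step)

  // cartesian product of all axes
  val candidates = current.foldLeft(Seq(Map.empty[String, Double])) {
    case (acc, (key, v)) => for (m <- acc; cv <- axis(v)) yield m + (key -> cv)
  }
  // keep the configuration with the lowest cost
  candidates.minBy(objective)
}

// toy usage: minimise a quadratic in two "hyper-parameters"
val best = gridSearch(Map("bandwidth" -> 1.0, "noise" -> 0.1), grid = 2, step = 0.004) {
  h => math.pow(h("bandwidth") - 1.004, 2) + math.pow(h("noise") - 0.104, 2)
}
// best is approximately Map("bandwidth" -> 1.004, "noise" -> 0.104)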
trait LocalSVMKernel:

@@ -11,10 +11,10 @@ import io.github.mandar2812.dynaml.algebra.PartitionedPSDMatrix
   * */
 trait LocalSVMKernel[Index] extends LocalScalarKernel[Index] {
 
-  def :+(otherKernel: LocalSVMKernel[Index]): CompositeCovariance[(Index, Index)] =
+  /*def :+(otherKernel: LocalSVMKernel[Index]): CompositeCovariance[(Index, Index)] =
     new KernelOps.PairOps[Index, Index].tensorAddPartLocalScKernels(this, otherKernel)
-  /*def :*(otherKernel: LocalSVMKernel[Index]): CompositeCovariance[(Index, Index)] =
+  def :*(otherKernel: LocalSVMKernel[Index]): CompositeCovariance[(Index, Index)] =
     new KernelOps.PairOps[Index, Index].tensorMultLocalScKernels(this, otherKernel)*/
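With these trait-level overloads commented out, :+ and :* on SVM kernels fall through to the generic LocalScalarKernel implementations below, which now construct a TensorCombinationKernel. A hedged usage sketch (the kernel classes are assumed for illustration; any two LocalScalarKernel instances work):

// assumed example kernels
val kx = new RBFKernel(1.5)
val kt = new RBFKernel(0.5)
val spaceTimeK = kx :* kt   // product covariance over the product space
val additiveK  = kx :+ kt   // additive covariance over the product space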
LocalScalarKernel combination operators, DecomposableCovariance and the new TensorCombinationKernel:

@@ -54,13 +54,15 @@ CovarianceFunction[Index, Double, DenseMatrix[Double]]
   def *[T <: LocalScalarKernel[Index]](otherKernel: T)(implicit ev: ClassTag[Index]): CompositeCovariance[Index] =
     new DecomposableCovariance(this, otherKernel)(
       DynaMLPipe.genericReplicationEncoder[Index](2),
-      DecomposableCovariance.:*:)
+      Reducer.:*:)
 
   def :*[T1](otherKernel: LocalScalarKernel[T1]): CompositeCovariance[(Index, T1)] =
-    new KernelOps.PairOps[Index, T1].tensorMultLocalScKernels(this, otherKernel)
+    new TensorCombinationKernel[Index, T1](this, otherKernel)
+    //new KernelOps.PairOps[Index, T1].tensorMultLocalScKernels(this, otherKernel)
 
   def :+[T1](otherKernel: LocalScalarKernel[T1]): CompositeCovariance[(Index, T1)] =
-    new KernelOps.PairOps[Index, T1].tensorAddLocalScKernels(this, otherKernel)
+    new TensorCombinationKernel[Index, T1](this, otherKernel)(Reducer.:+:)
+    //new KernelOps.PairOps[Index, T1].tensorAddLocalScKernels(this, otherKernel)
 
   override def buildKernelMatrix[S <: Seq[Index]](mappedData: S, length: Int): KernelMatrix[DenseMatrix[Double]] =
     SVMKernel.buildSVMKernelMatrix[S, Index](mappedData, length, this.evaluate)
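The distinction between the operators: * combines two kernels over the same domain (the replication encoder duplicates the input), while :* and :+ build a covariance over the product space (Index, T1); :* relies on TensorCombinationKernel's default product reducer and :+ passes Reducer.:+: explicitly. Concretely, k((x1, x2), (y1, y2)) = k1(x1, y1) * k2(x2, y2) for the product, and + for the sum. A minimal sketch of that semantics with plain functions, independent of the DynaML class hierarchy:

// sketch: tensor product / sum of two scalar kernels
type Kernel[T] = (T, T) => Double

def tensorCombine[R, S](k1: Kernel[R], k2: Kernel[S])(
  reduce: (Double, Double) => Double): Kernel[(R, S)] =
  (x, y) => reduce(k1(x._1, y._1), k2(x._2, y._2))

val rbf: Kernel[Double] = (x, y) => math.exp(-math.pow(x - y, 2)/2.0)
val linear: Kernel[Double] = (x, y) => x * y

val kProd = tensorCombine(rbf, linear)(_ * _)  // analogue of :*
val kSum  = tensorCombine(rbf, linear)(_ + _)  // analogue of :+
// kProd((0.0, 1.0), (0.0, 2.0)) == rbf(0, 0) * linear(1, 2) == 1.0 * 2.0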
@@ -89,7 +91,7 @@ abstract class CompositeCovariance[T]
   */
 class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
   implicit encoding: Encoder[S, Array[S]],
-  reducer: Reducer = DecomposableCovariance.:+:) extends CompositeCovariance[S] {
+  reducer: Reducer = Reducer.:+:) extends CompositeCovariance[S] {
 
   val kernelMap = kernels.map(k => (k.toString.split("\\.").last, k)).toMap
 
@@ -160,10 +162,59 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
   }
 }
 
-object DecomposableCovariance {
-  val :+: = SumReducer
-
-  val :*: = ProductReducer
-}
+/**
+  * Represents a kernel on a product space [[R]] &times [[S]]
+  *
+  * @param firstK The first covariance
+  * @param secondK The second covariance
+  * @param reducer An implicit parameter indicating how to combine the
+  *                kernel values; it can only be [[Reducer.:+:]] or [[Reducer.:*:]]
+  * */
+class TensorCombinationKernel[R, S](
+  firstK: LocalScalarKernel[R],
+  secondK: LocalScalarKernel[S])(implicit reducer: Reducer = Reducer.:*:)
+  extends CompositeCovariance[(R,S)] {
+
+  val fID = firstK.toString.split("\\.").last
+  val sID = secondK.toString.split("\\.").last
+
+  override val hyper_parameters: List[String] =
+    firstK.hyper_parameters.map(h => fID+"/"+h) ++ secondK.hyper_parameters.map(h => sID+"/"+h)
+
+  blocked_hyper_parameters =
+    firstK.blocked_hyper_parameters.map(h => fID+"/"+h) ++ secondK.blocked_hyper_parameters.map(h => sID+"/"+h)
+
+  state =
+    firstK.state.map(h => (fID+"/"+h._1, h._2)) ++ secondK.state.map(h => (sID+"/"+h._1, h._2))
+
+  override def evaluate(x: (R, S), y: (R, S)): Double =
+    reducer(Array(firstK.evaluate(x._1, y._1), secondK.evaluate(x._2, y._2)))
+
+  override def repr: TensorCombinationKernel[R, S] = this
+
+  override def setHyperParameters(h: Map[String, Double]): TensorCombinationKernel.this.type = {
+    //Sanity Check
+    assert(effective_hyper_parameters.forall(h.contains),
+      "All hyper parameters must be contained in the arguments")
+    //group the hyper params by kernel id
+    h.toSeq.filterNot(_._1.split("/").length == 1).map(kv => {
+      val idS = kv._1.split("/")
+      (idS.head, (idS.tail.mkString("/"), kv._2))
+    }).groupBy(_._1).map(hypC => {
+      val kid = hypC._1
+      val hyper_params = hypC._2.map(_._2).toMap
+      if(kid == fID) firstK.setHyperParameters(hyper_params) else secondK.setHyperParameters(hyper_params)
+    })
+    this
+  }
+
+  override def gradient(x: (R, S), y: (R, S)): Map[String, Double] = reducer match {
+    case SumReducer =>
+      firstK.gradient(x._1, y._1) ++ secondK.gradient(x._2, y._2)
+    case ProductReducer =>
+      firstK.gradient(x._1, y._1).map(k => (k._1, k._2*secondK.evaluate(x._2, y._2))) ++
+        secondK.gradient(x._2, y._2).map(k => (k._1, k._2*firstK.evaluate(x._1, y._1)))
+    case _: Reducer =>
+      firstK.gradient(x._1, y._1) ++ secondK.gradient(x._2, y._2)
+  }
+}
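Two details are worth spelling out. Hyper-parameters of the child kernels are namespaced as "<kernelID>/<name>" so the two sets cannot collide, and setHyperParameters undoes that prefix before delegating to each child; for the product reducer, gradient applies the product rule, d(k1*k2) = (dk1)*k2 + k1*(dk2). A sketch of the namespacing round trip (the kernel IDs and hyper-parameter names are illustrative):

// sketch: routing "<kernelID>/<hyper-parameter>" keys back to child kernels,
// mirroring the grouping logic in setHyperParameters above
val h = Map(
  "RBFKernel/bandwidth" -> 2.5,
  "FBMKernel/hurst"     -> 0.7)

val grouped = h.toSeq
  .filterNot(_._1.split("/").length == 1)        // keep only namespaced keys
  .map { kv =>
    val idS = kv._1.split("/")
    (idS.head, (idS.tail.mkString("/"), kv._2))  // (kernelID, (name, value))
  }
  .groupBy(_._1)                                  // one bucket per child kernel
  .map { case (kid, kvs) => (kid, kvs.map(_._2).toMap) }

// grouped == Map("RBFKernel" -> Map("bandwidth" -> 2.5),
//                "FBMKernel" -> Map("hurst" -> 0.7))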
object SVMKernel, kernel matrix construction:

@@ -75,10 +75,12 @@ object SVMKernel {
     eval: (T, T) => Double):
   KernelMatrix[DenseMatrix[Double]] = {
 
-    val kernelIndex = utils.combine(Seq(mappedData.zipWithIndex, mappedData.zipWithIndex))
-      .filter(s => s.head._2 >= s.last._2)
-      .map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
-      .toMap
+    val kernelIndex = optimize {
+      utils.combine(Seq(mappedData.zipWithIndex, mappedData.zipWithIndex))
+        .filter(s => s.head._2 >= s.last._2)
+        .map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
+        .toMap
+    }
 
     val kernel = DenseMatrix.tabulate[Double](length, length){
       (i, j) => if (i >= j) kernelIndex((i,j)) else kernelIndex((j,i))

@@ -92,9 +94,11 @@ object SVMKernel {
     eval: (T, T) => Double)
   : DenseMatrix[Double] = {
 
-    val kernelIndex = utils.combine(Seq(data1.zipWithIndex, data2.zipWithIndex))
-      .map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
-      .toMap
+    val kernelIndex = optimize {
+      utils.combine(Seq(data1.zipWithIndex, data2.zipWithIndex))
+        .map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
+        .toMap
+    }
 
     logger.info(" Dimensions: " + data1.length + " x " + data2.length)
     DenseMatrix.tabulate[Double](data1.length, data2.length){

@@ -123,23 +127,23 @@ object SVMKernel {
 
     logger.info("~~~~~~~~~~~~~~~~~~~~~~~")
     logger.info("Constructing Partitions")
-    optimize {
-      new PartitionedPSDMatrix(
-        utils.combine(Seq(partitionedData, partitionedData))
-          .filter(c => c.head._2 >= c.last._2)
-          .toStream.map(c => {
-
-          val partitionIndex = (c.head._2.toLong, c.last._2.toLong)
-          logger.info(":- Partition: "+partitionIndex)
-
-          val matrix =
-            if(partitionIndex._1 == partitionIndex._2)
-              buildSVMKernelMatrix(c.head._1, c.head._1.length, eval).getKernelMatrix()
-            else crossKernelMatrix(c.head._1, c.last._1, eval)
-
-          (partitionIndex, matrix)
-        }), rows, cols, num_R_blocks, num_C_blocks)
-    }
+    new PartitionedPSDMatrix(optimize {
+      utils.combine(Seq(partitionedData, partitionedData))
+        .filter(c => c.head._2 >= c.last._2)
+        .toStream.map(c => {
+
+        val partitionIndex = (c.head._2.toLong, c.last._2.toLong)
+        logger.info(":- Partition: "+partitionIndex)
+
+        val matrix =
+          if(partitionIndex._1 == partitionIndex._2)
+            buildSVMKernelMatrix(c.head._1, c.head._1.length, eval).getKernelMatrix()
+          else crossKernelMatrix(c.head._1, c.last._1, eval)
+
+        (partitionIndex, matrix)
+      })
+    }, rows, cols, num_R_blocks, num_C_blocks)
 
   }
 
 def crossPartitonedKernelMatrix[T, S <: Seq[T]](
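Two things happen in this file. Each kernel-matrix pipeline is wrapped in optimize { ... }, presumably a compile-time collection-optimization macro whose import sits outside the hunks shown; and in the partitioned case the wrapper is narrowed so the PartitionedPSDMatrix constructor itself stays outside the macro. The underlying symmetry trick (evaluate only pairs with i >= j, then mirror) can be sketched on its own:

// sketch: build a symmetric kernel (Gram) matrix by evaluating only the
// lower triangle i >= j, then mirroring, as the code above does
import breeze.linalg.DenseMatrix

def gramMatrix[T](data: Seq[T])(eval: (T, T) => Double): DenseMatrix[Double] = {
  val idx = data.zipWithIndex
  // kernel evaluated once per unordered pair
  val lower = (for {
    (xi, i) <- idx
    (xj, j) <- idx
    if i >= j
  } yield ((i, j), eval(xi, xj))).toMap

  DenseMatrix.tabulate[Double](data.length, data.length) {
    (i, j) => if (i >= j) lower((i, j)) else lower((j, i))
  }
}

// usage: a 3x3 RBF Gram matrix
val K = gramMatrix(Seq(0.0, 0.5, 1.0))((x, y) => math.exp(-math.pow(x - y, 2)))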
Reducer definitions:

@@ -13,6 +13,22 @@ object SumReducer {
   override def run(data: Array[Double]): Double = data.sum
 }
 
-object ProductReducer extends Reducer{
+object ProductReducer extends Reducer {
   override def run(data: Array[Double]): Double = data.product
 }
 
+/**
+  * Reducer companion object with
+  * default definitions
+  * */
+object Reducer {
+  /**
+    * Represents sum of values
+    * */
+  val :+: = SumReducer
+
+  /**
+    * Represents product of values
+    * */
+  val :*: = ProductReducer
+}
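Hoisting :+: and :*: into a Reducer companion object (they previously lived on DecomposableCovariance) lets kernel code name the default reducers without depending on any particular covariance class. A minimal sketch of how the pieces fit, assuming the Reducer trait is applied as a function of Array[Double] (TensorCombinationKernel calls reducer(Array(...)) above; the actual trait definition sits outside the hunk shown):

// sketch of the abstraction, with run exposed through apply
trait Reducer extends (Array[Double] => Double) {
  def run(data: Array[Double]): Double
  override def apply(data: Array[Double]): Double = run(data)
}

object SumReducer extends Reducer {
  override def run(data: Array[Double]): Double = data.sum
}
object ProductReducer extends Reducer {
  override def run(data: Array[Double]): Double = data.product
}
object Reducer {
  val :+: = SumReducer
  val :*: = ProductReducer
}

// usage with the new aliases:
// Reducer.:+:(Array(1.0, 2.0, 3.0))  -> 6.0  (sum)
// Reducer.:*:(Array(1.0, 2.0, 3.0))  -> 6.0  (product)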
