KernelOps-based optimisations
1. Resorting to kernelOps for kernel addition and multiplication
2. Using the scalaxy-streams optimize macro (sketched below)
mandar2812 committed Dec 19, 2016
1 parent 7c67f94 commit efaacc3
Showing 3 changed files with 50 additions and 36 deletions.
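
For context: scalaxy-streams is a compile-time macro library whose optimize block rewrites chained collection operations (map, filter, zip, for-comprehensions) into plain while loops, eliminating intermediate allocations. A minimal sketch of the usage pattern this commit adopts; the summation example is illustrative, not taken from the commit, and assumes the scalaxy-streams dependency is on the compile classpath:

import scalaxy.streams.optimize

object OptimizeSketch extends App {
  // The macro rewrites this map/filter/sum chain into a single while loop
  // at compile time; runtime semantics are unchanged.
  val total = optimize {
    (1 to 1000).map(x => x * x).filter(_ % 2 == 0).sum
  }
  println(total)
}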
@@ -30,18 +30,27 @@ object KernelOps extends UFunc {
       firstKern: LocalScalarKernel[Index],
       otherKernel: LocalScalarKernel[Index]): CompositeCovariance[Index] =
       new CompositeCovariance[Index] {
-        override val hyper_parameters = firstKern.hyper_parameters ++ otherKernel.hyper_parameters
+
+        val (fID, sID) = (firstKern.toString.split("\\.").last, otherKernel.toString.split("\\.").last)
+
+        override val hyper_parameters =
+          firstKern.hyper_parameters.map(h => fID+"/"+h) ++
+          otherKernel.hyper_parameters.map(h => sID+"/"+h)
 
         override def evaluate(x: Index, y: Index) = firstKern.evaluate(x,y) + otherKernel.evaluate(x,y)
 
-        state = firstKern.state ++ otherKernel.state
+        state = firstKern.state.map(h => (fID+"/"+h._1, h._2)) ++ otherKernel.state.map(h => (sID+"/"+h._1, h._2))
 
-        blocked_hyper_parameters = firstKern.blocked_hyper_parameters ++ otherKernel.blocked_hyper_parameters
+        blocked_hyper_parameters =
+          firstKern.blocked_hyper_parameters.map(h => fID+"/"+h) ++
+          otherKernel.blocked_hyper_parameters.map(h => sID+"/"+h)
 
         override def setHyperParameters(h: Map[String, Double]): this.type = {
-          firstKern.setHyperParameters(h)
-          otherKernel.setHyperParameters(h)
-          super.setHyperParameters(h)
+          firstKern.setHyperParameters(h.filter(_._1.contains(fID))
+            .map(kv => (kv._1.split("/").tail.mkString("/"), kv._2)))
+          otherKernel.setHyperParameters(h.filter(_._1.contains(sID))
+            .map(kv => (kv._1.split("/").tail.mkString("/"), kv._2)))
+          this
        }
 
         override def gradient(x: Index, y: Index): Map[String, Double] =
@@ -63,18 +72,27 @@ object KernelOps extends UFunc {
     override def apply(firstKern: LocalScalarKernel[Index],
       otherKernel: LocalScalarKernel[Index]): CompositeCovariance[Index] =
       new CompositeCovariance[Index] {
-        override val hyper_parameters = firstKern.hyper_parameters ++ otherKernel.hyper_parameters
+
+        val (fID, sID) = (firstKern.toString.split("\\.").last, otherKernel.toString.split("\\.").last)
+
+        override val hyper_parameters =
+          firstKern.hyper_parameters.map(h => fID+"/"+h) ++
+          otherKernel.hyper_parameters.map(h => sID+"/"+h)
 
         override def evaluate(x: Index, y: Index) = firstKern.evaluate(x,y) * otherKernel.evaluate(x,y)
 
-        state = firstKern.state ++ otherKernel.state
+        state = firstKern.state.map(h => (fID+"/"+h._1, h._2)) ++ otherKernel.state.map(h => (sID+"/"+h._1, h._2))
 
-        blocked_hyper_parameters = firstKern.blocked_hyper_parameters ++ otherKernel.blocked_hyper_parameters
+        blocked_hyper_parameters =
+          firstKern.blocked_hyper_parameters.map(h => fID+"/"+h) ++
+          otherKernel.blocked_hyper_parameters.map(h => sID+"/"+h)
 
         override def setHyperParameters(h: Map[String, Double]): this.type = {
-          firstKern.setHyperParameters(h)
-          otherKernel.setHyperParameters(h)
-          super.setHyperParameters(h)
+          firstKern.setHyperParameters(h.filter(_._1.contains(fID))
+            .map(kv => (kv._1.split("/").tail.mkString("/"), kv._2)))
+          otherKernel.setHyperParameters(h.filter(_._1.contains(sID))
+            .map(kv => (kv._1.split("/").tail.mkString("/"), kv._2)))
+          this
        }
 
         override def gradient(x: Index, y: Index): Map[String, Double] =
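The net effect of the changes above: each child kernel's hyper-parameters are namespaced by its class name (the last dot-separated segment of its toString), so two kernels exposing identically named parameters no longer collide, and setHyperParameters routes each prefixed key back to the correct child. A self-contained illustration of the key scheme, with hypothetical kernel and parameter names:

object ScopedKeysSketch extends App {
  // If fID = "RBFKernel" and sID = "PolynomialKernel", the composite
  // advertises prefixed keys such as:
  val scoped = Map("RBFKernel/bandwidth" -> 2.5, "PolynomialKernel/degree" -> 3.0)

  // setHyperParameters recovers a child's own key by filtering on the
  // prefix and dropping the leading "<id>/" segment, as in the diff:
  val forFirst = scoped
    .filter(_._1.contains("RBFKernel"))
    .map(kv => (kv._1.split("/").tail.mkString("/"), kv._2))

  println(forFirst) // Map(bandwidth -> 2.5)
}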
@@ -40,8 +40,8 @@ CovarianceFunction[Index, Double, DenseMatrix[Double]]
    *
    * */
   def +[T <: LocalScalarKernel[Index]](otherKernel: T)(implicit ev: ClassTag[Index]): CompositeCovariance[Index] =
-    new DecomposableCovariance(this, otherKernel)(DynaMLPipe.genericReplicationEncoder[Index](2))
-    //kernelOps.addLocalScKernels(this, otherKernel)
+    //new DecomposableCovariance(this, otherKernel)(DynaMLPipe.genericReplicationEncoder[Index](2))
+    kernelOps.addLocalScKernels(this, otherKernel)
 
 
   /**
@@ -52,9 +52,10 @@ CovarianceFunction[Index, Double, DenseMatrix[Double]]
    *
    * */
   def *[T <: LocalScalarKernel[Index]](otherKernel: T)(implicit ev: ClassTag[Index]): CompositeCovariance[Index] =
-    new DecomposableCovariance(this, otherKernel)(
+    kernelOps.multLocalScKernels(this, otherKernel)
+    /*new DecomposableCovariance(this, otherKernel)(
       DynaMLPipe.genericReplicationEncoder[Index](2),
-      Reducer.:*:)
+      Reducer.:*:)*/
 
   def :*[T1](otherKernel: LocalScalarKernel[T1]): CompositeCovariance[(Index, T1)] =
     new TensorCombinationKernel[Index, T1](this, otherKernel)
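Taken together, the three operators cover same-domain and product-domain composition. A hedged usage fragment (k1 and k2 stand for arbitrary LocalScalarKernel[Index] instances, k3 for a kernel over some other index type T1; illustrative only, not compilable on its own):

val added    = k1 + k2    // CompositeCovariance[Index] via kernelOps.addLocalScKernels
val product  = k1 * k2    // CompositeCovariance[Index] via kernelOps.multLocalScKernels
val tensored = k1 :* k3   // kernel on the product space (Index, T1)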
@@ -100,6 +101,8 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
     k.state.map(h => (id+"/"+h._1, h._2))
   }).reduceLeft(_++_)
 
+  val encodingTuple = encoding*encoding
+
   override val hyper_parameters: List[String] = kernels.map(k => {
     val id = k.toString.split("\\.").last
     k.hyper_parameters.map(h => id+"/"+h)
@@ -110,6 +113,16 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
     k.blocked_hyper_parameters.map(h => id+"/"+h)
   }).reduceLeft(_++_)
 
+  def kernelBind = DataPipe((xy: (Array[S], Array[S])) => {
+    optimize {
+      (xy._1, xy._2, kernels.map(k => k.evaluate _ ))
+        .zipped
+        .map((x, y, k) => k(x, y))
+    }
+  })
+
+  def kernelPipe = encodingTuple > kernelBind > reducer
+
   override def repr: DecomposableCovariance[S] = this
 
   override def setHyperParameters(h: Map[String, Double]): DecomposableCovariance.this.type = {
@@ -128,17 +141,7 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
     this
   }
 
-  override def evaluate(x: S, y: S): Double = {
-    val (xs, ys) = (encoding*encoding)((x,y))
-    reducer(
-      optimize {
-        xs.zip(ys).zip(kernels).map(coupleAndKern => {
-          val (u,v) = coupleAndKern._1
-          coupleAndKern._2.evaluate(u,v)
-        })
-      }
-    )
-  }
+  override def evaluate(x: S, y: S): Double = kernelPipe run (x,y)
 
   override def gradient(x: S, y: S): Map[String, Double] = reducer match {
     case SumReducer =>
@@ -153,12 +156,6 @@ class DecomposableCovariance[S](kernels: LocalScalarKernel[S]*)(
         val (u,v) = coupleAndKern._1
         coupleAndKern._2.gradient(u,v).mapValues(_ * this.evaluate(x,y)/coupleAndKern._2.evaluate(x,y))
       }).reduceLeft(_++_)
-    case _: Reducer =>
-      val (xs, ys) = (encoding*encoding)((x,y))
-      xs.zip(ys).zip(kernels).map(coupleAndKern => {
-        val (u,v) = coupleAndKern._1
-        coupleAndKern._2.gradient(u,v)
-      }).reduceLeft(_++_)
   }
 }
 
@@ -214,7 +211,5 @@ class TensorCombinationKernel[R, S](
     case ProductReducer =>
       firstK.gradient(x._1, y._1).map(k => (k._1, k._2*secondK.evaluate(x._2, y._2))) ++
       secondK.gradient(x._2, y._2).map(k => (k._1, k._2*firstK.evaluate(x._1, y._1)))
-    case _: Reducer =>
-      firstK.gradient(x._1, y._1) ++ secondK.gradient(x._2, y._2)
   }
 }
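
The evaluate rewrite in DecomposableCovariance above replaces an inline loop with a composed pipeline: encodingTuple splits the input pair into per-kernel component pairs, kernelBind evaluates each child kernel on its pair inside an optimize block, and reducer folds the resulting values into one number. A minimal stand-alone sketch of the same three stages, using toy kernels and plain function composition (andThen) in place of DynaML's DataPipe and >:

object KernelPipeSketch extends App {
  // Two toy kernels on Double inputs (illustrative, not DynaML kernels).
  val kernels: Seq[(Double, Double) => Double] =
    Seq((x, y) => x * y, (x, y) => math.exp(-math.abs(x - y)))

  // Stage 1: replicate the input pair once per child kernel.
  val encode: ((Double, Double)) => (Seq[Double], Seq[Double]) =
    xy => (Seq.fill(kernels.length)(xy._1), Seq.fill(kernels.length)(xy._2))

  // Stage 2: bind each kernel to its component pair.
  val bind: ((Seq[Double], Seq[Double])) => Seq[Double] =
    xy => (xy._1, xy._2, kernels).zipped.map((x, y, k) => k(x, y))

  // Stage 3: reduce, here by summation (mirroring the SumReducer case).
  val reduce: Seq[Double] => Double = _.sum

  val evaluate = encode andThen bind andThen reduce
  println(evaluate((1.0, 2.0))) // 1.0*2.0 + exp(-|1.0 - 2.0|)
}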
@@ -192,10 +192,11 @@ package object utils {
 
   def getTypeTag[T: ru.TypeTag](obj: T) = ru.typeTag[T]
 
-  def combine[A](xs: Traversable[Traversable[A]]): Seq[Seq[A]] =
+  def combine[A](xs: Traversable[Traversable[A]]): Seq[Seq[A]] = optimize {
     xs.foldLeft(Seq(Seq.empty[A])) {
       (x, y) => for (a <- x.view; b <- y) yield a :+ b
     }
+  }
 
   def downloadURL(url: String, saveAs: String): Unit =
     new URL(url) #> new File(saveAs) !!
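
combine builds the Cartesian product of a collection of collections, producing one output sequence per way of picking a single element from each input; the change simply wraps the fold in optimize so the nested for-comprehension compiles down to loops. A worked example of its semantics:

// combine(Seq(Seq(1, 2), Seq(3, 4)))
// == Seq(Seq(1, 3), Seq(1, 4), Seq(2, 3), Seq(2, 4))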
