Commit
BifurcateSplitTable (#1513)
* add BifurcateSplitTable

* createBifurcateSplitTable

* add buffer

* clearState
JerryYanWan committed Aug 28, 2017
1 parent caeb795 commit ca368bf
Showing 4 changed files with 183 additions and 1 deletion.
pyspark/bigdl/nn/layer.py (23 additions, 1 deletion)
@@ -1484,7 +1484,29 @@ def set_init_method(self, weight_init_method = None, bias_init_method = None):
         return self
 
 
-class Bilinear(Layer):
+class BifurcateSplitTable(Model):
+    '''
+    Creates a module that takes a Tensor as input and outputs a table
+    containing two tensors, obtained by splitting the input Tensor in half
+    along the specified dimension `dimension`.
+
+    The input to this layer is expected to be a tensor, or a batch of tensors.
+
+    :param dimension: the dimension along which to split the input tensor
+    :param bigdl_type: numeric type; only "float" and "double" are supported
+
+    >>> bifurcateSplitTable = BifurcateSplitTable(1)
+    creating: createBifurcateSplitTable
+    '''
+
+    def __init__(self,
+                 dimension,
+                 bigdl_type="float"):
+        super(BifurcateSplitTable, self).__init__(None, bigdl_type,
+                                                  dimension)
+
+
+class Bilinear(Model):
 
     '''
     a bilinear transformation with sparse inputs,
@@ -0,0 +1,106 @@
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn

import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}

import scala.reflect.ClassTag

/**
 * Creates a module that takes a Tensor as input and outputs a table
 * containing two tensors, obtained by splitting the input Tensor in half
 * along the specified dimension `dimension`.
 *
 * The input to this layer is expected to be a tensor, or a batch of tensors.
 *
 * @param dimension the dimension along which the input is split
 * @tparam T Numeric type. Only float and double are supported for now
 */

class BifurcateSplitTable[T: ClassTag](
  var dimension: Int)
  (implicit ev: TensorNumeric[T]) extends AbstractModule[Tensor[T], Table, T] {

  val left = Tensor[T]()
  val right = Tensor[T]()

  override def updateOutput(input: Tensor[T]): Table = {
    val slices = input.size(dimension)
    require(slices > 1,
      s"BifurcateSplitTable: the size of the referred dimension is ${slices}. " +
        s"It should be larger than 1.")
    val leftSlices = slices >> 1
    val rightSlices = slices - leftSlices

    val leftSlice = input.narrow(dimension, 1, leftSlices)
    val rightSlice = input.narrow(dimension, 1 + leftSlices, rightSlices)

    left.resizeAs(leftSlice).copy(leftSlice)
    right.resizeAs(rightSlice).copy(rightSlice)

    output(1) = left
    output(2) = right
    output
  }

  override def updateGradInput(input: Tensor[T], gradOutput: Table): Tensor[T] = {
    val slices = input.size(dimension)
    val leftSlices = slices >> 1
    val rightSlices = slices - leftSlices

    gradInput.resizeAs(input)

    gradInput.narrow(dimension, 1, leftSlices).copy(gradOutput(1))
    gradInput.narrow(dimension, 1 + leftSlices, rightSlices).copy(gradOutput(2))

    gradInput
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[BifurcateSplitTable[T]]

  override def clearState(): this.type = {
    super.clearState()
    left.set()
    right.set()
    this
  }

  override def toString: String = s"BifurcateSplitTable($dimension)"

  override def equals(other: Any): Boolean = other match {
    case that: BifurcateSplitTable[T] =>
      super.equals(that) &&
        (that canEqual this) &&
        dimension == that.dimension
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(super.hashCode(), dimension)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}

object BifurcateSplitTable {
  def apply[@specialized(Float, Double) T: ClassTag](
    dimension: Int)(implicit ev: TensorNumeric[T]): BifurcateSplitTable[T] = {
    new BifurcateSplitTable[T](dimension)
  }
}
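The forward pass above assigns the first `slices >> 1` slices to the first output and the rest to the second, so an odd-sized dimension splits floor/ceil rather than evenly, and `updateGradInput` stitches the two gradients back into a tensor of the input's shape. A minimal usage sketch of that behavior (not part of the diff; it assumes the Scala API defined above plus BigDL's `NumericFloat` implicit, and the object name is only illustrative):

import com.intel.analytics.bigdl.nn.BifurcateSplitTable
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Table

object BifurcateSplitTableSketch {
  def main(args: Array[String]): Unit = {
    // Split a 2x5 tensor along dimension 2: the left half gets 5 >> 1 = 2
    // columns, the right half gets the remaining 3.
    val layer = BifurcateSplitTable[Float](2)
    val input = Tensor[Float](2, 5).rand()

    val out: Table = layer.forward(input)
    val left: Tensor[Float] = out(1)
    val right: Tensor[Float] = out(2)

    println(left.size().mkString("x"))   // 2x2
    println(right.size().mkString("x"))  // 2x3
  }
}

Because `left` and `right` are filled via `resizeAs(...).copy(...)`, the outputs are independent copies and stay valid even if the input tensor is later modified in place.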
@@ -1059,6 +1059,11 @@ class PythonBigDL[T: ClassTag](implicit ev: TensorNumeric[T]) extends Serializab
      padBottom)
  }

  def createBifurcateSplitTable(dimension: Int)
  : BifurcateSplitTable[T] = {
    BifurcateSplitTable[T](dimension)
  }

  def createSplitTable(dimension: Int,
                       nInputDims: Int = -1)
  : SplitTable[T] = {
@@ -0,0 +1,49 @@
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn

import com.intel.analytics.bigdl.nn.SplitTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}

import scala.util.Random

@com.intel.analytics.bigdl.tags.Serial
class SplitTableSpec extends FlatSpec with BeforeAndAfter with Matchers {

"A BifurcateSplitTable " should "generate correct output and grad" in {
val seed = 100
Random.setSeed(seed)

val dim = 2
val module = new BifurcateSplitTable[Double](dim)
val input = Tensor[Double](3, 4).randn()
val expectedGradInput = Tensor[Double]().resizeAs(input).randn()
val gradOutput = T(expectedGradInput.narrow(dim, 1, 2), expectedGradInput.narrow(dim, 3, 2))

val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)

output.length() should be (2)
val left = output(1).asInstanceOf[Tensor[Double]]
val right = output(2).asInstanceOf[Tensor[Double]]
left should be (input.narrow(dim, 1, 2))
right should be (input.narrow(dim, 3, 2))

gradInput should be (expectedGradInput)
}
}
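The spec checks each half against `input.narrow` and the gradient against the concatenated `gradOutput`. Another way to read the same contract (a sketch, not part of the commit; it assumes BigDL's `JoinTable` layer and the `NumericDouble` implicit, with the object name only illustrative) is that joining the two outputs along the split dimension reproduces the input:

import com.intel.analytics.bigdl.nn.{BifurcateSplitTable, JoinTable}
import com.intel.analytics.bigdl.numeric.NumericDouble
import com.intel.analytics.bigdl.tensor.Tensor

object SplitJoinRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val dim = 2
    val split = BifurcateSplitTable[Double](dim)
    // nInputDims = 2 tells JoinTable the table entries are plain 2-D tensors.
    val join = JoinTable[Double](dim, 2)

    val input = Tensor[Double](3, 4).randn()
    val rebuilt = join.forward(split.forward(input))

    // The two halves concatenated along the split dimension equal the input.
    println(rebuilt == input)  // true
  }
}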
