
Merge branch 'master' of github.com:chainer/chainer into add-try-trainer-class-tutorial
mitmul committed Oct 18, 2017
2 parents bffd12f + e31cad8 commit e4b986d
Showing 7 changed files with 60 additions and 7 deletions.
6 changes: 3 additions & 3 deletions appveyor.yml
@@ -64,9 +64,9 @@ build_script:

test_script:
# Run the project tests
- "%CMD_IN_ENV% pip install nose mock hacking"
- "%CMD_IN_ENV% pip install mock hacking"
- "%CMD_IN_ENV% pip install nose pytest pytest-timeout pytest-cov"
- "flake8"
- "cd tests\\chainer_tests"
# Avoid interruption confirmation of cmd.exe
- "echo nosetests -a \"!gpu,!slow\" > tmp.bat"
- "echo python -m pytest --timeout=60 -m \"not gpu and not multi_gpu and not cudnn and not slow\" tests > tmp.bat"
- "call tmp.bat < nul"
6 changes: 3 additions & 3 deletions chainer/function.py
@@ -135,7 +135,7 @@ def backward(self, target_input_indexes, grad_outputs):
if gxs[i] is None:
g = None
else:
-  # Intentionallly not passing requires_grad=False so that
+  # Intentionally not passing requires_grad=False so that
# backprop routines can raise an error when a further backprop
# is attempted against this gradient variable.
g = variable.Variable(gxs[i])
@@ -487,7 +487,7 @@ def retain_inputs(self, indexes):
Args:
indexes (iterable of int): Indexes of input variables that the
-  function does not require for backprop.
+  function will require for backprop.
"""
self.node.retain_inputs(indexes)
@@ -513,7 +513,7 @@ def retain_outputs(self, indexes, retain_after_backward=False):
Args:
indexes (iterable of int): Indexes of input variables that the
-  function does not require for backprop.
+  function will require for backprop.
retain_after_backward (bool): This option has no effect. It is
left only for the backward compatibility.
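For context on retain_inputs(), it is typically called inside forward() so that the marked inputs can be fetched again in backward(); a minimal sketch using the equivalent FunctionNode API (class and variable names are hypothetical):

import numpy as np
import chainer

class MulRetain(chainer.FunctionNode):
    # hypothetical elementwise product that needs both inputs for backprop

    def forward(self, inputs):
        x, y = inputs
        self.retain_inputs((0, 1))  # these indexes become available in backward()
        return x * y,

    def backward(self, target_input_indexes, grad_outputs):
        x, y = self.get_retained_inputs()  # returned as Variables
        gy, = grad_outputs
        return gy * y, gy * x

x = chainer.Variable(np.array([1.0, 2.0], dtype=np.float32))
y = chainer.Variable(np.array([3.0, 4.0], dtype=np.float32))
z, = MulRetain().apply((x, y))
z.grad = np.ones_like(z.data)
z.backward()  # x.grad is [3, 4], y.grad is [1, 2]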
2 changes: 1 addition & 1 deletion chainer/functions/activation/relu.py
@@ -58,7 +58,7 @@ class ReLUGrad2(function_node.FunctionNode):
This function takes 2 variables b and c, and
computes f(b, c) = sign(b) * c with backpropagation
-  where operations are dones in elementwise manner
+  where operations are done in elementwise manner
and sign(x) = 1 when x > 0 and 0 otherwise.
As the gradient of f with respect to b is 0,
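A plain-NumPy sketch of the f(b, c) described above, for intuition only (not the library's actual implementation):

import numpy as np

def relu_grad2(b, c):
    # f(b, c) = sign(b) * c elementwise, where sign(x) = 1 if x > 0 else 0
    return (b > 0).astype(c.dtype) * c

relu_grad2(np.array([-1.0, 2.0]), np.array([5.0, 7.0]))  # -> array([0., 7.])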
2 changes: 2 additions & 0 deletions chainer/utils/__init__.py
@@ -4,6 +4,8 @@


# import class and function
+  from chainer.utils.conv import get_conv_outsize # NOQA
+  from chainer.utils.conv import get_deconv_outsize # NOQA
from chainer.utils.experimental import experimental # NOQA
from chainer.utils.walker_alias import WalkerAlias # NOQA

41 changes: 41 additions & 0 deletions chainer/utils/conv.py
@@ -5,6 +5,27 @@


def get_conv_outsize(size, k, s, p, cover_all=False, d=1):
"""Calculates output size of convolution.
This function takes the size of input feature map, kernel, stride, and
padding of one particular dimension, then calculates the output feature
map size of that dimension.
.. seealso:: :func:`~chainer.utils.get_deconv_outsize`
Args:
size (int): The size of input feature map. It usually is the length of
a side of feature map.
k (int): The size of convolution kernel.
s (int): The size of stride.
p (int): The size of padding.
cover_all (bool): Use ``cover_all`` option or not.
d (int): The size of dilation.
Returns:
int: The expected output size of the convolution operation.
"""
dk = k + (k - 1) * (d - 1)
if cover_all:
return (size + p * 2 - dk + s - 1) // s + 1
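A quick sanity check of get_conv_outsize (the default cover_all=False branch is elided above; these checks assume the standard floor formula (size + 2 * p - dk) // s + 1 for it):

from chainer.utils import get_conv_outsize

# 3x3 kernel, stride 1, pad 1 preserves a 28-pixel side:
# (28 + 2*1 - 3) // 1 + 1 = 28
assert get_conv_outsize(28, k=3, s=1, p=1) == 28

# stride 2 halves it: (28 + 2*1 - 3) // 2 + 1 = 14,
# while cover_all=True rounds up: (28 + 2*1 - 3 + 2 - 1) // 2 + 1 = 15
assert get_conv_outsize(28, k=3, s=2, p=1) == 14
assert get_conv_outsize(28, k=3, s=2, p=1, cover_all=True) == 15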
@@ -13,6 +34,26 @@ def get_conv_outsize(size, k, s, p, cover_all=False, d=1):


def get_deconv_outsize(size, k, s, p, cover_all=False):
"""Calculates output size of deconvolution.
This function takes the size of input feature map, kernel, stride, and
padding of one particular dimension, then calculates the output feature
map size of that dimension.
.. seealso:: :func:`~chainer.utils.get_conv_outsize`
Args:
size (int): The size of input feature map. It usually is the length of
a side of feature map.
k (int): The size of deconvolution kernel.
s (int): The size of stride.
p (int): The size of padding.
cover_all (bool): Use ``cover_all`` option or not.
Returns:
int: The expected output size of the deconvolution operation.
"""
if cover_all:
return s * (size - 1) + k - s + 1 - 2 * p
else:
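And a matching check that get_deconv_outsize inverts get_conv_outsize, shown here for the cover_all=True branch that is visible above:

from chainer.utils import get_conv_outsize, get_deconv_outsize

# conv:   (26 + 2*1 - 3 + 2 - 1) // 2 + 1 = 14
# deconv: 2 * (14 - 1) + 3 - 2 + 1 - 2*1 = 26
assert get_conv_outsize(26, k=3, s=2, p=1, cover_all=True) == 14
assert get_deconv_outsize(14, k=3, s=2, p=1, cover_all=True) == 26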
1 change: 1 addition & 0 deletions docs/source/reference/util.rst
@@ -4,6 +4,7 @@ Utilities
.. toctree::
:maxdepth: 2

+  util/conv
util/cuda
util/algorithm
util/reporter
9 changes: 9 additions & 0 deletions docs/source/reference/util/conv.rst
@@ -0,0 +1,9 @@
Convolution/Deconvolution utilities
-----------------------------------

.. autosummary::
:toctree: generated/
:nosignatures:

chainer.utils.get_conv_outsize
chainer.utils.get_deconv_outsize
