implement filter2D
edgarriba committed Jun 25, 2019
1 parent b0c522e commit d5f5076
Showing 6 changed files with 218 additions and 95 deletions.
5 changes: 3 additions & 2 deletions docs/source/filters.rst
@@ -5,9 +5,10 @@ kornia.filters

The functions in this section perform various image filtering operations.

.. autofunction:: filter2D
.. autofunction:: box_blur
.. autofunction:: median_blur
.. autofunction:: gaussian_blur
.. autofunction:: gaussian_blur2d
.. autofunction:: laplacian
.. autofunction:: sobel
.. autofunction:: spatial_gradient
@@ -19,7 +20,7 @@ The functions in this section perform various image filtering operations.

.. autoclass:: BoxBlur
.. autoclass:: MedianBlur
.. autoclass:: GaussianBlur
.. autoclass:: GaussianBlur2d
.. autoclass:: Laplacian
.. autoclass:: Sobel
.. autoclass:: SpatialGradient
1 change: 1 addition & 0 deletions kornia/__init__.py
@@ -44,6 +44,7 @@
spatial_gradient,
box_blur,
median_blur,
filter2D,
)
from kornia.losses import (
ssim,
2 changes: 2 additions & 0 deletions kornia/filters/__init__.py
@@ -6,6 +6,7 @@
from .sobel import Sobel, sobel
from .blur import BoxBlur, box_blur
from .median import MedianBlur, median_blur
from .filter import filter2D

__all__ = [
"get_gaussian_kernel",
@@ -18,6 +19,7 @@
"spatial_gradient",
"box_blur",
"median_blur",
"filter2D",
"GaussianBlur2d",
"Laplacian",
"SpatialGradient",
75 changes: 75 additions & 0 deletions kornia/filters/filter.py
@@ -0,0 +1,75 @@
from typing import Tuple, List

import torch
import torch.nn as nn
import torch.nn.functional as F


def compute_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
"""Computes padding tuple."""
# 4 ints: (padding_left, padding_right, padding_top, padding_bottom)
# https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad
assert len(kernel_size) == 2, kernel_size
computed = [(k - 1) // 2 for k in kernel_size]
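# e.g. a (3, 5) kernel gives computed == [1, 2] and a padding tuple of
# (2, 2, 1, 1): two columns of padding left/right and one row top/bottom,
# so the convolution output keeps the input's spatial size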
return computed[1], computed[1], computed[0], computed[0]


def filter2D(input: torch.Tensor, kernel: torch.Tensor,
border_type: str = 'reflect') -> torch.Tensor:
r"""Function that convolves a tensor with a kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains the same shape as the input.
Args:
input (torch.Tensor): the input tensor with shape of
:math:`(B, C, H, W)`.
kernel (torch.Tensor): the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(B, kH, kW)`.
border_type (str): the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
Returns:
torch.Tensor: the convolved tensor with the same size and number of
channels as the input.
"""
if not isinstance(input, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(input)))

if not isinstance(kernel, torch.Tensor):
raise TypeError("Input kernel type is not a torch.Tensor. Got {}"
.format(type(kernel)))

if not isinstance(border_type, str):
raise TypeError("Input border_type is not a string. Got {}"
.format(type(border_type)))

if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
.format(input.shape))

if not len(kernel.shape) == 3:
raise ValueError("Invalid kernel shape, we expect BxHxW. Got: {}"
.format(kernel.shape))

borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']
if border_type not in borders_list:
raise ValueError("Invalid border_type, we expect the following: {0}."
"Got: {1}".format(borders_list, border_type))

# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.to(input.device).to(input.dtype)
tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1)

# pad the input tensor
height, width = tmp_kernel.shape[-2:]
padding_shape: Tuple[int, int, int, int] = compute_padding((height, width))
input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)

# convolve the tensor with the kernel
return F.conv2d(input_pad, tmp_kernel, padding=0, stride=1, groups=c)
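
A minimal usage sketch of the new function (assuming a kornia build that includes this commit; the normalized mean kernel and the input shapes below are only illustrative):

import torch
import kornia

# normalized 3x3 mean kernel; filter2D expects the kernel as (B, kH, kW)
kernel = torch.ones(1, 3, 3) / 9.0
img = torch.rand(2, 3, 32, 32)  # (B, C, H, W)

out = kornia.filter2D(img, kernel, border_type='reflect')
assert out.shape == img.shape  # the internal padding keeps the spatial size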
150 changes: 57 additions & 93 deletions test/filters/test_filters.py
@@ -9,106 +9,70 @@
from torch.testing import assert_allclose


@pytest.mark.parametrize("window_size", [5, 11])
@pytest.mark.parametrize("sigma", [1.5, 5.0])
def test_get_gaussian_kernel(window_size, sigma):
kernel = kornia.get_gaussian_kernel(window_size, sigma)
assert kernel.shape == (window_size,)
assert kernel.sum().item() == pytest.approx(1.0)


@pytest.mark.parametrize("ksize_x", [5, 11])
@pytest.mark.parametrize("ksize_y", [3, 7])
@pytest.mark.parametrize("sigma", [1.5, 2.1])
def test_get_gaussian_kernel2d(ksize_x, ksize_y, sigma):
kernel = kornia.get_gaussian_kernel2d((ksize_x, ksize_y), (sigma, sigma))
assert kernel.shape == (ksize_x, ksize_y)
assert kernel.sum().item() == pytest.approx(1.0)


class TestGaussianBlur:
@pytest.mark.parametrize("batch_shape", [(1, 4, 8, 15), (2, 3, 11, 7)])
def test_gaussian_blur(self, batch_shape, device_type):
kernel_size = (5, 7)
sigma = (1.5, 2.1)
class TestFilter2D:
def test_smoke(self):
kernel = torch.rand(1, 3, 3)
input = torch.ones(1, 1, 7, 8)

assert kornia.filter2D(input, kernel).shape == input.shape

def test_mean_filter(self):
kernel = torch.ones(1, 3, 3)
input = torch.tensor([[[
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 5., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
]]])
expected = torch.tensor([[[
[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.],
]]])

actual = kornia.filter2D(input, kernel)
assert_allclose(actual, expected)

input = torch.rand(batch_shape).to(torch.device(device_type))
gauss = kornia.filters.GaussianBlur2d(kernel_size, sigma, "replicate")
assert gauss(input).shape == batch_shape
def test_mean_filter_2batch_2ch(self):
kernel = torch.ones(1, 3, 3)
input = torch.tensor([[[
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 5., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
]]]).expand(2, 2, -1, -1)
expected = torch.tensor([[[
[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.],
]]])

actual = kornia.filter2D(input, kernel)
assert_allclose(actual, expected)

def test_gradcheck(self):
# test parameters
batch_shape = (2, 3, 11, 7)
kernel_size = (5, 3)
sigma = (1.5, 2.1)
kernel = torch.rand(1, 3, 3)
input = torch.ones(1, 1, 7, 8)

# evaluate function gradient
input = torch.rand(batch_shape)
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(
kornia.gaussian_blur2d,
(input, kernel_size, sigma, "replicate"),
raise_exception=True,
)
kernel = utils.tensor_to_gradcheck_var(kernel) # to var
assert gradcheck(kornia.filter2D, (input, kernel),
raise_exception=True)

@pytest.mark.skip(reason="not found compute_padding()")
def test_jit(self):
@torch.jit.script
def op_script(img):

return kornia.gaussian_blur2d(img, (5, 5), (1.2, 1.2), "replicate")
op = kornia.filter2D
op = torch.jit.script(op)

batch_size, channels, height, width = 2, 3, 64, 64
img = torch.ones(batch_size, channels, height, width)
expected = kornia.filters.GaussianBlur2d(
(5, 5), (1.2, 1.2), "replicate"
)(img)
actual = op_script(img)
kernel = torch.rand(1, 3, 3)
input = torch.ones(1, 1, 7, 8)
expected = op(input, kernel)
actual = op_script(input, kernel)
assert_allclose(actual, expected)


@pytest.mark.parametrize("window_size", [5])
def test_get_laplacian_kernel(window_size):
kernel = kornia.get_laplacian_kernel(window_size)
assert kernel.shape == (window_size,)
assert kernel.sum().item() == pytest.approx(0.0)


@pytest.mark.parametrize("window_size", [7])
def test_get_laplacian_kernel2d(window_size):
kernel = kornia.get_laplacian_kernel2d(window_size)
assert kernel.shape == (window_size, window_size)
assert kernel.sum().item() == pytest.approx(0.0)
expected = torch.tensor(
[
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, -48.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
]
)
assert_allclose(expected, kernel)


class TestLaplacian:
@pytest.mark.parametrize("batch_shape", [(1, 4, 8, 15), (2, 3, 11, 7)])
def test_laplacian(self, batch_shape, device_type):
kernel_size = 5

input = torch.rand(batch_shape).to(torch.device(device_type))
laplace = kornia.filters.Laplacian(kernel_size)
assert laplace(input).shape == batch_shape

def test_gradcheck(self):
# test parameters
batch_shape = (2, 3, 11, 7)
kernel_size = 9

# evaluate function gradient
input = torch.rand(batch_shape)
input = utils.tensor_to_gradcheck_var(input)
assert gradcheck(
kornia.laplacian, (input, kernel_size), raise_exception=True
)
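
As a sanity check on the mean-filter expectations in TestFilter2D above, the same result can be reproduced with plain PyTorch ops; this is only a sketch assuming reflection padding and the unnormalized 3x3 ones kernel used in the test:

import torch
import torch.nn.functional as F

inp = torch.zeros(1, 1, 5, 5)
inp[..., 2, 2] = 5.0
kernel = torch.ones(1, 1, 3, 3)  # unnormalized, as in test_mean_filter

padded = F.pad(inp, (1, 1, 1, 1), mode='reflect')
ref = F.conv2d(padded, kernel)
# ref contains a 3x3 block of fives around the center and zeros elsewhere,
# matching the `expected` tensor in test_mean_filter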
80 changes: 80 additions & 0 deletions test/test_filter.py
@@ -0,0 +1,80 @@
from typing import Tuple

import pytest

import kornia
import kornia.testing as utils # test utils

import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck


class TestBoxBlur:
def test_shape(self):
inp = torch.zeros(1, 3, 4, 4)
blur = kornia.filters.BoxBlur((3, 3))
assert blur(inp).shape == (1, 3, 4, 4)

def test_shape_batch(self):
inp = torch.zeros(2, 6, 4, 4)
blur = kornia.filters.BoxBlur((3, 3))
assert blur(inp).shape == (2, 6, 4, 4)

def test_kernel_3x3(self):
inp = torch.tensor([[[
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2.]
]]])

kernel_size = (3, 3)
actual = kornia.filters.box_blur(inp, kernel_size)
assert_allclose(actual[0, 0, 1, 1:4], torch.tensor(1.))

def test_kernel_5x5(self):
inp = torch.tensor([[[
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2.]
]]])

kernel_size = (5, 5)
actual = kornia.filters.box_blur(inp, kernel_size)
assert_allclose(actual[0, 0, 1, 2], torch.tensor(1.))

def test_kernel_5x5_batch(self):
batch_size = 3
inp = torch.tensor([[[
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2.]
]]]).repeat(batch_size, 1, 1, 1)

kernel_size = (5, 5)
actual = kornia.filters.box_blur(inp, kernel_size)
assert_allclose(actual[0, 0, 1, 2], torch.tensor(1.))

def test_gradcheck(self):
batch_size, channels, height, width = 1, 2, 5, 4
img = torch.rand(batch_size, channels, height, width)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.filters.box_blur, (img, (3, 3),),
raise_exception=True)

def test_jit(self):
@torch.jit.script
def op_script(input: torch.Tensor,
kernel_size: Tuple[int, int]) -> torch.Tensor:
return kornia.filters.box_blur(input, kernel_size)
kernel_size = (3, 3)
img = torch.rand(2, 3, 4, 5)
actual = op_script(img, kernel_size)
expected = kornia.filters.box_blur(img, kernel_size)
assert_allclose(actual, expected)
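
A possible cross-check between the existing box blur and the new generic filter, sketched under the assumption that box_blur uses a normalized kernel (as a box blur normally does) and that only interior pixels are compared, so differences in border handling cannot matter:

import torch
import kornia

img = torch.rand(1, 3, 8, 8)
mean_kernel = torch.ones(1, 3, 3) / 9.0  # normalized box kernel

a = kornia.filter2D(img, mean_kernel)
b = kornia.filters.box_blur(img, (3, 3))

# interior pixels do not depend on the padding mode and should agree
assert torch.allclose(a[..., 1:-1, 1:-1], b[..., 1:-1, 1:-1], atol=1e-5)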
