-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
205604a
commit a528bbd
Showing
2 changed files
with
236 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
from functools import partial | ||
from string import ascii_letters | ||
|
||
import numpy as np | ||
|
||
from tricycle_v2.ops import einsum, nothing, to_tensor | ||
from tricycle_v2.tensor import Tensor | ||
from tricycle_v2.unary import udiv, umul | ||
|
||
|
||
def badd(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Elementwise sum of two tensors.

    Both inputs must share the same shape. Because d(a + b)/da and
    d(a + b)/db are both 1, the incoming gradient passes through to each
    operand unchanged — `nothing` appears to be that identity back-fn
    (defined in tricycle_v2.ops; confirm there).
    """
    assert tensor_1.shape == tensor_2.shape

    summed = to_tensor(np.add(tensor_1, tensor_2))
    summed.args = (tensor_1, tensor_2)
    summed.back_fn = (nothing, nothing)
    return summed
|
||
|
||
def bsub(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Elementwise difference: tensor_1 - tensor_2.

    Both inputs must share the same shape. Implemented as
    tensor_1 + (-1 * tensor_2) so the backward pass is supplied
    entirely by `umul` and `badd`.
    """
    assert tensor_1.shape == tensor_2.shape
    return badd(tensor_1, umul(tensor_2, -1))
|
||
|
||
def bmul(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Elementwise (Hadamard) product of two tensors.

    Both inputs must share the same shape. An einsum expression whose
    output subscripts equal both input subscripts (e.g. "ij,ij->ij")
    multiplies elementwise, so `einsum`'s existing backward pass
    provides the gradients for free.
    """
    assert tensor_1.shape == tensor_2.shape

    # One subscript letter per axis; same letters on inputs and output.
    axes = ascii_letters[: len(tensor_1.shape)]
    return einsum(f"{axes},{axes}->{axes}", tensor_1, tensor_2)
|
||
|
||
def bdiv(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Divide the elements of one tensor by another, elementwise
    (tensor_1 / tensor_2).

    The two tensors must have the same shape. Implemented as
    tensor_1 * (1 / tensor_2) so gradients flow through `udiv` and
    `bmul`.
    """
    # Fail fast on mismatched shapes, matching the explicit check every
    # other binary op (badd, bsub, bmul, bmax, bmin) performs; the
    # docstring already promised this contract.
    assert tensor_1.shape == tensor_2.shape

    return bmul(tensor_1, udiv(1, tensor_2))
|
||
|
||
def bmax(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Elementwise maximum of two same-shaped tensors.

    The forward value comes from np.maximum. On the backward pass the
    incoming gradient is masked so it flows only to the input that
    supplied each maximum; where the elements tie, the mask built from
    `<=` routes the gradient to tensor_2.
    """
    assert tensor_1.shape == tensor_2.shape

    maxed = to_tensor(np.maximum(tensor_1, tensor_2))

    # 0/1 masks selecting the winning element of each pair.
    grad_mask_1 = to_tensor((tensor_1 > tensor_2).astype(float))
    grad_mask_2 = to_tensor((tensor_1 <= tensor_2).astype(float))
    maxed.args = (tensor_1, tensor_2)
    maxed.back_fn = (partial(bmul, grad_mask_1), partial(bmul, grad_mask_2))
    return maxed
|
||
|
||
def bmin(tensor_1: Tensor, tensor_2: Tensor) -> Tensor:
    """
    Elementwise minimum of two same-shaped tensors.

    The forward value comes from np.minimum. On the backward pass the
    incoming gradient is masked so it flows only to the input that
    supplied each minimum; where the elements tie, the mask built from
    `>=` routes the gradient to tensor_2.
    """
    assert tensor_1.shape == tensor_2.shape

    minned = to_tensor(np.minimum(tensor_1, tensor_2))

    # 0/1 masks selecting the winning element of each pair.
    grad_mask_1 = to_tensor((tensor_1 < tensor_2).astype(float))
    grad_mask_2 = to_tensor((tensor_1 >= tensor_2).astype(float))
    minned.args = (tensor_1, tensor_2)
    minned.back_fn = (partial(bmul, grad_mask_1), partial(bmul, grad_mask_2))
    return minned
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,137 @@ | ||
import numpy as np | ||
|
||
from tricycle_v2.binary import badd, bdiv, bmax, bmin, bmul, bsub | ||
from tricycle_v2.ops import to_tensor | ||
|
||
|
||
def test_can_badd():
    """Forward value and unit gradients for elementwise addition."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor(np.arange(1, 13).reshape(3, 4))

    result = badd(a, b)

    assert result.shape == (3, 4)
    expected = np.array([[1, 3, 5, 7], [9, 11, 13, 15], [17, 19, 21, 23]])
    assert np.allclose(result, expected)

    result.backward()

    # d(a + b)/da == d(a + b)/db == 1 everywhere.
    ones = np.ones((3, 4))
    assert np.allclose(a.grad, ones)
    assert np.allclose(b.grad, ones)
|
||
|
||
def test_can_bsub():
    """Forward value and +1/-1 gradients for elementwise subtraction."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor(np.arange(1, 13).reshape(3, 4))

    result = bsub(a, b)

    assert result.shape == (3, 4)
    # b is everywhere a + 1, so a - b is uniformly -1.
    assert np.allclose(result, np.full((3, 4), -1))

    result.backward()

    # d(a - b)/da == 1, d(a - b)/db == -1.
    assert np.allclose(a.grad, np.ones((3, 4)))
    assert np.allclose(b.grad, np.full((3, 4), -1.0))
|
||
|
||
def test_can_bmul():
    """Forward value and cross gradients for elementwise multiplication."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor(np.arange(1, 13).reshape(3, 4))

    result = bmul(a, b)

    assert result.shape == (3, 4)
    expected = np.array([[0, 2, 6, 12], [20, 30, 42, 56], [72, 90, 110, 132]])
    assert np.allclose(result, expected)

    result.backward()

    # d(a * b)/da == b and d(a * b)/db == a.
    assert np.allclose(a.grad, np.array(b))
    assert np.allclose(b.grad, np.array(a))
|
||
|
||
def test_can_bdiv():
    """Forward value and quotient-rule gradients for elementwise division."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor(np.arange(1, 13).reshape(3, 4))

    result = bdiv(a, b)

    assert result.shape == (3, 4)
    expected = np.arange(12).reshape(3, 4) / np.arange(1, 13).reshape(3, 4)
    assert np.allclose(result, expected)

    result.backward()

    # d(a / b)/da == 1/b and d(a / b)/db == -a / b**2.
    assert np.allclose(a.grad, np.array(1 / b))
    assert np.allclose(b.grad, np.array(-a / (b**2)))
|
||
|
||
def test_can_bmax():
    """Forward value and winner-takes-gradient masks for bmax (ties -> second)."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor([[0, 0, 0, 0], [100, 100, 100, 100], [8, 9, 10, 11]])

    result = bmax(a, b)

    assert result.shape == (3, 4)
    expected = np.array([[0, 1, 2, 3], [100, 100, 100, 100], [8, 9, 10, 11]])
    assert np.allclose(result, expected)

    result.backward()

    # The gradient flows to whichever input supplied each maximum;
    # the last row is an exact tie and goes to b via the `<=` mask.
    mask_a = (a > b).astype(np.float32)
    mask_b = (a <= b).astype(np.float32)
    assert np.allclose(a.grad, mask_a)
    assert np.allclose(b.grad, mask_b)
|
||
|
||
def test_can_bmin():
    """Forward value and winner-takes-gradient masks for bmin (ties -> second)."""
    a = to_tensor(np.arange(12).reshape(3, 4))
    b = to_tensor([[0, 0, 0, 0], [100, 100, 100, 100], [8, 9, 10, 11]])

    result = bmin(a, b)

    assert result.shape == (3, 4)
    expected = np.array([[0, 0, 0, 0], [4, 5, 6, 7], [8, 9, 10, 11]])
    assert np.allclose(result, expected)

    result.backward()

    # The gradient flows to whichever input supplied each minimum;
    # the last row is an exact tie and goes to b via the `>=` mask.
    mask_a = (a < b).astype(np.float32)
    mask_b = (a >= b).astype(np.float32)
    assert np.allclose(a.grad, mask_a)
    assert np.allclose(b.grad, mask_b)