
Fix Tensor.grad setter bug (#5462)
* fix(Tensor): fix Tensor.grad setter bug

* refine error message

* fix(Tensor): fix bug

* add grad = grad * constant test

* update tensor.py to pr head

* format code

* fix(Parameter): fix Parameter construction bug

* fix bug

Co-authored-by: tea321000 <834914152@qq.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
3 people committed Jul 12, 2021
1 parent 75d3fa4 commit 6e8c8cf
Showing 3 changed files with 26 additions and 22 deletions.
oneflow/python/framework/tensor.py (15 additions, 6 deletions)
@@ -259,17 +259,26 @@ def grad(self):
     @_auto_determine
     def grad(self, new_grad):
         def check_grad(grad, new_grad):
-            assert grad.shape == new_grad.shape, "Shape of new grad is not equal"
-            assert grad.device == new_grad.device, "Device of new grad is not equal"
-            assert grad.dtype == new_grad.dtype, "Data type of new grad is not equal"
-            assert type(grad) == type(new_grad), "Type of new grad is not equal"
+            assert (
+                grad.shape == new_grad.shape
+            ), f"Shape of grads are not equal, {grad.shape} vs {new_grad.shape}"
+            assert (
+                grad.device == new_grad.device
+            ), f"Device of grads are not equal, {grad.device} vs {new_grad.device}"
+            assert (
+                grad.dtype == new_grad.dtype
+            ), f"Data type of grads are not equal, {grad.dtype} vs {new_grad.dtype}"
 
         if self._local_or_consistent_tensor is not None:
             if new_grad is None:
                 self._local_or_consistent_tensor.set_grad(None)
             else:
-                new_grad_detach = new_grad.detach()._local_or_consistent_tensor
-                check_grad(self._local_or_consistent_tensor.grad, new_grad_detach)
+                if isinstance(new_grad, Tensor):
+                    if not new_grad.is_determined:
+                        new_grad.determine()
+                    new_grad = new_grad._local_or_consistent_tensor
+                new_grad_detach = new_grad.detach()
+                check_grad(self.grad, new_grad_detach)
                 self._local_or_consistent_tensor.set_grad(new_grad_detach)
 
     @property
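What the new setter logic enables, as a minimal sketch: the old code read new_grad.detach()._local_or_consistent_tensor, which is None for a wrapper Tensor that has not been determined yet, so assigning the result of an arithmetic op to .grad could fail inside check_grad. The new code determines the incoming Tensor first, then detaches and validates it. The names and shapes below are illustrative only, and eager execution is assumed:

import numpy as np
import oneflow as flow

x = flow.Tensor(np.ones((2, 3), dtype=np.float32), requires_grad=True)
x.sum().backward()

# With the fixed setter, a fresh Tensor expression can be assigned
# directly; it is determined, unwrapped, detached, and checked for
# matching shape/device/dtype against the existing grad.
x.grad = x.grad * 2
print(x.grad.detach().numpy())  # expected: an array of twos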
oneflow/python/nn/parameter.py (2 additions, 0 deletions)
@@ -26,6 +26,8 @@ def __init__(self, data, requires_grad=True):
         # TODO: uncomment the following two lines when consistent <-> local conversion is ready
         # data.set_is_consistent(True)
         # data.set_placement(flow.placement("gpu", ["0:0"], None))
+        if not isinstance(data, Tensor):
+            data = Tensor(data)
         self._data = data
         self._data.requires_grad = requires_grad

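A hedged sketch of the Parameter construction fix: __init__ now wraps non-Tensor data in a Tensor before setting requires_grad on it, so array-like inputs no longer break the requires_grad assignment. This assumes the Tensor constructor accepts NumPy arrays, as it does in the tests below:

import numpy as np
import oneflow as flow

# Non-Tensor data is converted via Tensor(data) inside __init__
# before requires_grad is set on it.
w = flow.nn.Parameter(np.random.randn(3, 3).astype(np.float32))
print(w.requires_grad)  # True, the default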
oneflow/python/test/tensor/test_tensor.py (9 additions, 16 deletions)
@@ -763,22 +763,6 @@ def test_tensor_detach(test_case):
         test_case.assertEqual(z.is_leaf, True)
         test_case.assertEqual(z.grad_fn, None)
 
-    @unittest.skipIf(
-        not flow.unittest.env.eager_execution_enabled(),
-        "numpy doesn't work in lazy mode",
-    )
-    def test_tensor_clone(test_case):
-        shape = (2, 3, 4, 5)
-        x = flow.Tensor(
-            np.random.randn(*shape), dtype=flow.float32, requires_grad=True,
-        )
-        y = x.clone()
-        test_case.assertTrue(np.allclose(y.numpy(), x.numpy(), 1e-4, 1e-4))
-        test_case.assertEqual(y.requires_grad, True)
-        test_case.assertEqual(y.is_leaf, False)
-        # Cannot print Copy grad function
-        test_case.assertTrue(y.grad_fn != None)
-
     @unittest.skipIf(
         not flow.unittest.env.eager_execution_enabled(),
         "numpy doesn't work in lazy mode",
@@ -1132,6 +1116,15 @@ def test_tensor_grad_assignment_sum(test_case):
                 1e-5,
             )
         )
+        of_input.grad = of_input.grad * 2
+        test_case.assertTrue(
+            np.allclose(
+                of_input.grad.detach().numpy(),
+                2 * np.full(np_input.shape, rand_init + rand_scale),
+                1e-5,
+                1e-5,
+            )
+        )
 
 
 if __name__ == "__main__":
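The added test covers the grad = grad * constant pattern; the refined f-string messages in check_grad can also be observed by assigning a deliberately mismatched gradient. A sketch under the same eager-mode assumption, with illustrative names (the exact rendering of the shapes in the message may differ):

import numpy as np
import oneflow as flow

x = flow.Tensor(np.ones((2, 3), dtype=np.float32), requires_grad=True)
x.sum().backward()
try:
    x.grad = flow.Tensor(np.ones((3, 2), dtype=np.float32))
except AssertionError as e:
    print(e)  # e.g. "Shape of grads are not equal, (2, 3) vs (3, 2)"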
