Update backward formula for torch.dot and add backward definition for torch.vdot (#45074)

Summary:
Pull Request resolved: #45074

TODO: Add R -> C tests in #44744 (blocked on some JIT changes)

Test Plan: Imported from OSS

Reviewed By: gchanan

Differential Revision: D23975361

Pulled By: anjali411

fbshipit-source-id: 3512bd2962b588a198bc317673bd18cc96ac823f
anjali411 authored and facebook-github-bot committed Sep 29, 2020
1 parent 147c88e commit 18876b5
Showing 5 changed files with 15 additions and 5 deletions.
2 changes: 1 addition & 1 deletion test/test_autograd.py
@@ -4806,7 +4806,7 @@ def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
'permute', 'squeeze', 'unsqueeze', 'resize', 'resize_as', 'tril', 'triu',
'chunk', 'split', 'split_with_sizes', 'repeat', 'expand', 'zero_',
'eq_', 'ne_', 'add', '__radd__', 'sum', 'conj', 'sin', 'cos', 'mul', 'sinh',
- 'cosh', '__rmul__', 'sgn', 'abs'] + separate_complex_tests
+ 'cosh', '__rmul__', 'sgn', 'abs', 'dot', 'vdot'] + separate_complex_tests

# TODO(@anjali411): add the commented tests back after updating the formula based on tensorflow definition - @anjali411
# complex_list += ['fill_', 't', '__rdiv__', 'tanh']
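Adding 'dot' and 'vdot' to complex_list means these ops now go through the complex-input gradient checks. A minimal standalone sketch of that kind of check (not part of this commit; tensor sizes and names are arbitrary):

```python
import torch
from torch.autograd import gradcheck

# Numerically verify the dot/vdot backward formulas on complex inputs,
# roughly what the complex_list entries above exercise.
a = torch.randn(6, dtype=torch.cdouble, requires_grad=True)
b = torch.randn(6, dtype=torch.cdouble, requires_grad=True)

assert gradcheck(torch.dot, (a, b))
assert gradcheck(torch.vdot, (a, b))
```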
8 changes: 4 additions & 4 deletions tools/autograd/derivatives.yaml
@@ -404,12 +404,12 @@
self: div_tensor_self_backward(grad, at::scalar_to_tensor(other), self.scalar_type())

- name: dot(Tensor self, Tensor tensor) -> Tensor
-  self: grad * tensor
-  tensor: grad * self
+  self: handle_r_to_c(self.scalar_type(), grad * tensor.conj())
+  tensor: handle_r_to_c(tensor.scalar_type(), grad * self.conj())

- name: vdot(Tensor self, Tensor other) -> Tensor
-  self: 'not_implemented("vdot: self")'
-  other: 'not_implemented("vdot: other")'
+  self: handle_r_to_c(self.scalar_type(), grad.conj() * other)
+  other: handle_r_to_c(other.scalar_type(), grad * self)

- name: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
self: _fused_dropout_backward(grad, result1, p)
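For reference, these entries encode the usual gradients of the two products under PyTorch's complex-autograd convention; the handle_r_to_c wrapper additionally projects the result back to the reals when the corresponding input is real (see the helper added below). With g the incoming grad, a sketch of the formulas:

```latex
\[
  \operatorname{dot}(a, b) = \sum_i a_i\, b_i
  \;\Longrightarrow\;
  \nabla_a = g\,\overline{b}, \qquad \nabla_b = g\,\overline{a}
\]
\[
  \operatorname{vdot}(a, b) = \sum_i \overline{a_i}\, b_i
  \;\Longrightarrow\;
  \nabla_a = \overline{g}\, b, \qquad \nabla_b = g\, a
\]
```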
8 changes: 8 additions & 0 deletions torch/csrc/autograd/FunctionsManual.cpp
@@ -87,6 +87,14 @@ static Tensor wrapped_scalar_tensor(Scalar scalar) {
return tensor;
}

Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result) {
  if (!at::isComplexType(self_st) && gradient_result.is_complex()) {
    // R -> C
    return at::real(gradient_result);
  }
  return gradient_result;
}

Tensor restore_reduced_dims(const Tensor &output, IntArrayRef dims, bool keepdim) {
if (keepdim) {
return output;
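In rough Python terms (an illustration, not code from this commit), handle_r_to_c behaves like the helper below: a complex candidate gradient for a real-dtyped input is reduced to its real part, anything else passes through unchanged.

```python
import torch

def handle_r_to_c(input_dtype, gradient_result):
    # R -> C case: real forward input, complex candidate gradient.
    if not input_dtype.is_complex and gradient_result.is_complex():
        return gradient_result.real
    return gradient_result

g = torch.tensor([1.0 + 2.0j, 3.0 - 1.0j], dtype=torch.cdouble)
print(handle_r_to_c(torch.double, g))    # tensor([1., 3.], dtype=torch.float64)
print(handle_r_to_c(torch.cdouble, g))   # complex gradient passed through unchanged
```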
1 change: 1 addition & 0 deletions torch/csrc/autograd/FunctionsManual.h
@@ -33,6 +33,7 @@ bool any_variable_defined(variable_list& variables);
void copy_range(variable_list& out, IndexRange range, const at::Tensor & t);
void copy_range(variable_list& out, IndexRange range, at::ArrayRef<at::Tensor> t);
at::Tensor not_implemented(const char* name);
at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result);
at::Tensor maybe_multiply(const at::Tensor & t, const at::Scalar & s);
int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim);
Tensor restore_reduced_dims(const Tensor &output, IntArrayRef dims, bool keepdim);
1 change: 1 addition & 0 deletions torch/testing/_internal/common_methods_invocations.py
@@ -961,6 +961,7 @@ def method_tests():
('addr', (S, M), ((S,), (M,)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addr', (), ((S,), (M,)), 'broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('dot', (L,), ((L,),), '', (True,)),
('vdot', (L,), ((L,),),),
('mm', (S, M), ((M, S),), '', (True,)),
('bmm', (M, S, M), ((M, M, S),), '', (True,)),
('mv', (S, M), ((M,),), '', (True,)),
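Not from the diff, but a quick reminder of the semantics the two new method_tests entries exercise: vdot conjugates its first argument, dot does not.

```python
import torch

a = torch.tensor([1 + 1j, 2 - 1j], dtype=torch.cdouble)
b = torch.tensor([3 + 0j, 1 + 2j], dtype=torch.cdouble)

print(torch.dot(a, b))           # (a * b).sum()
print(torch.vdot(a, b))          # (a.conj() * b).sum()
print(torch.dot(a.conj(), b))    # matches torch.vdot(a, b)
```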
