Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[complex] Enable complex autograd tests for diag #51268

Closed
Closed
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
3 changes: 1 addition & 2 deletions tools/autograd/gen_variable_type.py
Expand Up @@ -89,8 +89,7 @@
'reflection_pad1d_backward', 'reflection_pad2d_backward',
'replication_pad1d', 'replication_pad2d', 'replication_pad3d',
'replication_pad1d_backward', 'replication_pad2d_backward', 'replication_pad3d_backward',
'masked_scatter', 'masked_select',
'index_fill',
'diag', 'masked_scatter', 'masked_select', 'index_fill'
}

# Some operators invalidate the grad_accumulator. Let's reset it.
Expand Down
32 changes: 22 additions & 10 deletions torch/testing/_internal/common_methods_invocations.py
Expand Up @@ -1095,6 +1095,22 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
)
return [SampleInput(tensor) for tensor in tensors]

def sample_inputs_diag(op_info, device, dtype, requires_grad):
    """Sample inputs for torch.diag.

    Produces one 1D vector sample (diag builds a matrix from it) plus 2D
    samples covering square (M, M), wide (3, 5) and tall (5, 3) matrices,
    each combined with several diagonal offsets.
    """
    vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))

    tensors = (
        make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),
        make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
    )

    # Diagonal offsets: main diagonal, super-diagonals (+1, +2) and a
    # sub-diagonal (-2). NOTE: the previous tuple listed (2,) twice, which
    # generated duplicate samples for every tensor; the duplicate is removed.
    args = ((), (2,), (-2,), (1,))

    samples = [SampleInput(tensor, args=arg) for tensor, arg in product(tensors, args)]

    return samples + [vec_sample]

def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad):
samples = (
Expand Down Expand Up @@ -1362,6 +1378,12 @@ def sample_inputs_masked_select(op_info, device, dtype, requires_grad):
),
assert_autodiffed=True,
safe_casts_outputs=True),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
sample_inputs_func=sample_inputs_diag,
test_inplace_grad=False),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
Expand Down Expand Up @@ -2788,16 +2810,6 @@ def method_tests():
('dist', (), ((), 4), 'scalar_4'),
('dist', (S, S, S), ((), 4), 'scalar_4_broadcast_rhs'),
('dist', (), ((S, S, S), 4), 'scalar_4_broadcast_lhs'),
('diag', (M, M), NO_ARGS, '2d'),
('diag', (3, 5), NO_ARGS, '2d_wide'),
('diag', (3, 5), (2,), '2d_wide_pos'),
('diag', (3, 5), (-2,), '2d_wide_neg'),
('diag', (5, 3), NO_ARGS, '2d_tall'),
('diag', (5, 3), (2,), '2d_tall_pos'),
('diag', (5, 3), (-2,), '2d_tall_neg'),
('diag', (M,), NO_ARGS, '1d'),
('diag', (M, M), (1,), '2d_1'),
('diag', (M, M), (2,), '2d_2'),
('diag_embed', (S, S), NO_ARGS),
('diagonal', (M, M), NO_ARGS, '2d'),
('diagonal', (3, 5), NO_ARGS, '2d_wide'),
Expand Down