New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
CUDA BFloat16 support of clamp, remainder, lshift, rshift #45247
Changes from all commits
1d0e860
3442ce3
6493d82
1683e1e
c2c9d50
de95b58
740c393
9ae9ad1
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19947,7 +19947,7 @@ def test_movedim_view(self, device): | |
_float_types2 = _float_types + [torch.bfloat16] if TEST_WITH_ROCM else _float_types | ||
|
||
_signed_types = [ | ||
torch.half, torch.float, torch.double, | ||
torch.half, torch.bfloat16, torch.float, torch.double, | ||
torch.int8, torch.short, torch.int, torch.long | ||
] | ||
|
||
|
@@ -20189,8 +20189,10 @@ def inner(self, device, dtype): | |
('chunk', 'neg_dim', _medium_2d, lambda t, d: [4, -2], 1e-5, 1e-5, 1e-5, _types, _cpu_types, False), | ||
('clamp', 'neg', _medium_2d, lambda t, d: [-1, 5], 1e-5, 1e-2, 1e-5, _signed_types, [torch.bfloat16]), | ||
('clamp', 'pos', _medium_2d, lambda t, d: [1, 5], 1e-5, 1e-2, 1e-5, _unsigned_types, [torch.bfloat16]), | ||
('clamp_min', '', _medium_2d, lambda t, d: [1], 1e-2, 1e-2, 1e-5, _types, [torch.bfloat16]), | ||
('clamp_max', '', _medium_2d, lambda t, d: [1], 1e-2, 1e-2, 1e-5, _types, [torch.bfloat16]), | ||
('clamp_min', '', _medium_2d, lambda t, d: [1], 1e-2, 1e-2, 1e-5, | ||
torch.testing.get_all_dtypes(include_complex=False, include_bool=False, include_bfloat16=True), [torch.bfloat16]), | ||
('clamp_max', '', _medium_2d, lambda t, d: [1], 1e-2, 1e-2, 1e-5, | ||
torch.testing.get_all_dtypes(include_complex=False, include_bool=False, include_bfloat16=True), [torch.bfloat16]), | ||
('clone', '', _medium_2d, lambda t, d: [], 1e-5, 1e-5, 1e-5, _types, _cpu_types, False), | ||
('contiguous', '', _medium_2d, lambda t, d: [], 1e-5, 1e-5, 1e-5, _types, _cpu_types, False), | ||
('conj', '', _small_3d, lambda t, d: [], 1e-5, 0, 1e-5, _types_no_half, [torch.bfloat16], False), | ||
|
@@ -20275,14 +20277,14 @@ def inner(self, device, dtype): | |
1e-5, 1e-5, 1e-5, _float_types_no_half), | ||
('mvlgamma', '2d_p=2', lambda t, d: _small_2d(t, d).clamp(0.6, 10), lambda t, d: [2], | ||
1e-5, 1e-5, 1e-5, _float_types_no_half), | ||
('remainder', 'value', _small_3d, lambda t, d: [3], 1e-1, 1e-5, 1e-5, _signed_types), | ||
('remainder', 'negative_value', _small_3d, lambda t, d: [-3], 1e-1, 1e-5, 1e-5, _signed_types), | ||
('remainder', 'value', _small_3d, lambda t, d: [3], 1e-1, 1e-2, 1e-5, _signed_types), | ||
('remainder', 'negative_value', _small_3d, lambda t, d: [-3], 1e-1, 1e-2, 1e-5, _signed_types), | ||
('remainder', 'tensor', _small_3d, | ||
lambda t, d: [_small_3d(t, d, has_zeros=False)], | ||
1e-1, 1e-5, 1e-5, _signed_types), | ||
1e-1, 1e-2, 1e-5, _signed_types), | ||
('remainder', 'negative_tensor', _small_3d, | ||
lambda t, d: [0 - _small_3d(t, d, has_zeros=False)], | ||
1e-1, 1e-5, 1e-5, _signed_types), | ||
1e-1, 1e-2, 1e-5, _signed_types), | ||
Comment on lines +20280 to +20287
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. None of these is testing BFloat16, because the dtype list used here (`_signed_types`) would need to include `torch.bfloat16` for these entries to cover it.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The change is here: https://github.com/pytorch/pytorch/pull/45247/files#diff-9996665f82f52030836eb8657057cfadR19601-R19604 |
||
('std', '', _small_3d, lambda t, d: [], 1e-3, 1e-5, 1e-5, _float_types, _cpu_types, False), | ||
('std', 'dim', _small_3d, lambda t, d: [1], 1e-3, 1e-5, 1e-5, _float_types, _cpu_types, False), | ||
('std', 'neg_dim', _small_3d, lambda t, d: [-1], 1e-3, 1e-5, 1e-5, _float_types, _cpu_types, False), | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
how is this being tested?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It is tested by these entries: the `_signed_types` list they use is modified in https://github.com/pytorch/pytorch/pull/45247/files#diff-9996665f82f52030836eb8657057cfadR19601-R19604 to add `torch.bfloat16`, so BFloat16 is now included.