Add op (atleast_1d and atleast_2d and atleast_3d) | feat (torchlib) #767
Changes from all commits
@@ -344,6 +344,12 @@ def _where_input_wrangler(
     "atan": core_ops.aten_atan,
     "atan2": core_ops.aten_atan2,
     "atanh": core_ops.aten_atanh,
+    "atleast_1d": core_ops.aten_atleast_1d,
+    "atleast_1d_single_tensor": core_ops.aten_atleast_1d_single_tensor,
+    "atleast_2d": core_ops.aten_atleast_2d,
+    "atleast_2d_single_tensor": core_ops.aten_atleast_2d_single_tensor,
+    "atleast_3d": core_ops.aten_atleast_3d,
+    "atleast_3d_single_tensor": core_ops.aten_atleast_3d_single_tensor,
     "baddbmm": core_ops.aten_baddbmm,
     "bmm": core_ops.aten_bmm,
     "broadcast_to": core_ops.aten_broadcast_to,
@@ -808,6 +814,21 @@ def _where_input_wrangler(
         matcher=lambda sample: len(sample.args) != 2,
         reason="arange_start_step overload takes three arguments (input, start, step)",
     ),
+    skip(
+        "atleast_1d_single_tensor",
+        matcher=lambda sample: isinstance(sample.input, (list, tuple)),
+        reason="atleast_1d_single_tensor overload takes single tensor as input",
+    ),

Review thread on the matcher line above:
Comment: So we don’t take a Sequence as input? Where is this op used? How are the inputs produced?
Reply: It accepts both tensor and list.
Reply: Oh I see there are two variants.

+    skip(
+        "atleast_2d_single_tensor",
+        matcher=lambda sample: isinstance(sample.input, (list, tuple)),
+        reason="atleast_2d_single_tensor overload takes single tensor as input",
+    ),
+    skip(
+        "atleast_3d_single_tensor",
+        matcher=lambda sample: isinstance(sample.input, (list, tuple)),
+        reason="atleast_3d_single_tensor overload takes single tensor as input",
+    ),
     skip(
         "cat",
         matcher=lambda sample: sample.input[0].equal(torch.tensor([])),
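On the Sequence question in the review thread above: these ATen ops come in two variants, one that takes a single tensor and one that takes a sequence of tensors, which is why a separate *_single_tensor test entry is registered and its list/tuple samples are skipped. A minimal illustration of the two call forms using the public torch API (this sketch is not part of the PR and does not show the torchlib functions themselves):

import torch

x = torch.tensor(0.5)            # 0-d tensor
y = torch.tensor([1.0, 2.0])     # 1-d tensor

# Single-tensor form: one tensor in, one tensor out (the *_single_tensor variant).
print(torch.atleast_1d(x))       # tensor([0.5000])

# Sequence form: a tuple/list of tensors in, a tuple of tensors out.
print(torch.atleast_1d((x, y)))  # (tensor([0.5000]), tensor([1., 2.]))

The shared OpInfo generates both kinds of samples, so the skip matchers above filter on isinstance(sample.input, (list, tuple)) to keep sequence samples away from the single-tensor variants.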
@@ -1166,6 +1187,11 @@ def _where_input_wrangler(
     ),
 )

+ops_test_common.duplicate_opinfo(OPS_DB, "atleast_1d", ("atleast_1d_single_tensor",))
+ops_test_common.duplicate_opinfo(OPS_DB, "atleast_2d", ("atleast_2d_single_tensor",))
+ops_test_common.duplicate_opinfo(OPS_DB, "atleast_3d", ("atleast_3d_single_tensor",))
+
 ops_test_common.duplicate_opinfo(OPS_DB, "full_like", ("full_like_dtype",))

 ops_test_common.duplicate_opinfo(OPS_DB, "index_put", ("index_put_bool",))
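For context on the helper used above: duplicate_opinfo registers an existing OpInfo under additional names so that each alias (here the *_single_tensor variants) can carry its own skips and dtype expectations in the tables elsewhere in this file. A rough sketch of that idea, assuming OPS_DB is a list of OpInfo objects; this is illustrative only and not the actual ops_test_common implementation:

import copy

def duplicate_opinfo_sketch(ops_db, name, aliases):
    # Clone the OpInfo registered under `name` and re-register the copy
    # under each alias, so each alias behaves like an independent op entry.
    original = next(op for op in ops_db if op.name == name)
    for alias in aliases:
        duplicated = copy.deepcopy(original)
        duplicated.name = alias
        ops_db.append(duplicated)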
@@ -1480,6 +1506,30 @@ def _where_input_wrangler(
         torch.float32,
         torch.float16,
     ),
+    "atleast_1d": (
+        torch.float32,
+        torch.float16,
+    ),
+    "atleast_1d_single_tensor": (
+        torch.float32,
+        torch.float16,
+    ),
+    "atleast_2d": (
+        torch.float32,
+        torch.float16,
+    ),
+    "atleast_2d_single_tensor": (
+        torch.float32,
+        torch.float16,
+    ),
+    "atleast_3d": (
+        torch.float32,
+        torch.float16,
+    ),
+    "atleast_3d_single_tensor": (
+        torch.float32,
+        torch.float16,
+    ),
     "baddbmm": (
         torch.float32,
         torch.float16,
Review comment: You may want to double-check the downstream usage to make sure whether you want to reshape a tensor of size N to 1x1xN or Nx1x1 ... the above reshapes it to 1xNx1 ... which may be okay, but just wondering.
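For reference on the shape question: torch documents atleast_3d as mapping a 1-D input of shape (N,) to (1, N, 1), i.e. 1xNx1, which matches the observation above. A quick illustrative check with the public torch API (not part of this PR's diff):

import torch

t = torch.arange(4)                # shape (4,)
print(torch.atleast_1d(t).shape)   # torch.Size([4])        -> stays (N,)
print(torch.atleast_2d(t).shape)   # torch.Size([1, 4])     -> 1 x N
print(torch.atleast_3d(t).shape)   # torch.Size([1, 4, 1])  -> 1 x N x 1, not 1x1xN or Nx1x1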