-
Notifications
You must be signed in to change notification settings - Fork 786
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
tensorsplit_op #7258
tensorsplit_op #7258
Changes from 2 commits
b9c5bb8
41595ed
b9f34a9
ef0770b
970edcd
b41b8b2
7bce539
a946188
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -44,6 +44,9 @@ oneflow | |
diag, | ||
diagonal, | ||
movedim, | ||
tensor_split, | ||
hsplit, | ||
vsplit, | ||
div, | ||
dot, | ||
eq, | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1754,6 +1754,129 @@ class MovedimIntFunctor { | |
} | ||
}; | ||
|
||
class TensorSplitVecFunctor { | ||
public: | ||
TensorSplitVecFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const std::vector<int32_t>& indices_or_sections, | ||
const int32_t& dim) const { | ||
int32_t ndim = input->shape()->NumAxes(); | ||
CHECK_OR_RETURN((dim>=-ndim)&&(dim<ndim))<< "Dimension out of range (expected to be in range of [" | ||
<<-ndim<<","<< ndim-1 <<"], but got "<<dim<<")"; | ||
int32_t pos_dim = dim>=0?dim:dim+ndim; | ||
|
||
std::vector<int64_t> start(ndim, 0); | ||
std::vector<int64_t> stop(ndim); | ||
std::vector<int64_t> step(ndim, 1); | ||
for(int32_t i=0; i<ndim; i++){ | ||
stop[i] = input->shape()->At(i); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. one::Tensor有个更简单的接口: reference:https://github.com/Oneflow-Inc/oneflow/blob/master/oneflow/core/framework/tensor.h#L49 |
||
} | ||
|
||
int32_t num_indices = indices_or_sections.size(); | ||
TensorTuple output(num_indices+1); | ||
for(int32_t i = 0; i < num_indices; i++){ | ||
int32_t end_idx = indices_or_sections[i]; | ||
stop[pos_dim] = end_idx; | ||
output[i] = JUST(Slice(input, start, stop, step)); | ||
start[pos_dim] = end_idx; | ||
} | ||
stop[pos_dim] = input->shape()->At(ndim-1); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
output[num_indices] = JUST(Slice(input, start, stop, step)); | ||
|
||
return output; | ||
} | ||
}; | ||
|
||
class TensorSplitIntFunctor { | ||
public: | ||
TensorSplitIntFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const int32_t& indices_or_sections, | ||
const int32_t& dim) const { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
int32_t ndim = input->shape()->NumAxes(); | ||
CHECK_OR_RETURN((dim>=-ndim)&&(dim<ndim))<< "Dimension out of range (expected to be in range of [" | ||
<<-ndim<<","<< ndim-1 <<"], but got "<<dim<<")"; | ||
CHECK_OR_RETURN(indices_or_sections > 0) <<"number of sections must be larger than 0, got ,"<< indices_or_sections <<");"; | ||
int32_t pos_dim = dim>=0?dim:dim+ndim; | ||
|
||
const auto dim_size = input->shape()->At(pos_dim); | ||
int64_t min_split_size = dim_size / indices_or_sections; | ||
int64_t num_splits_one_extra = dim_size % indices_or_sections; | ||
|
||
std::vector<int64_t> start(ndim, 0); | ||
std::vector<int64_t> stop(ndim); | ||
std::vector<int64_t> step(ndim, 1); | ||
for(int32_t i=0; i<ndim; i++){ | ||
stop[i] = input->shape()->At(i); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
} | ||
stop[pos_dim] = 0; | ||
|
||
TensorTuple output(indices_or_sections); | ||
for(int32_t i = 0; i < indices_or_sections; i++){ | ||
int64_t split_size = (i < num_splits_one_extra) ? (min_split_size + 1) : min_split_size; | ||
stop[pos_dim] += split_size; | ||
output[i] = JUST(Slice(input, start, stop, step)); | ||
start[pos_dim] += split_size; | ||
} | ||
|
||
return output; | ||
} | ||
}; | ||
|
||
class HsplitIntFunctor { | ||
public: | ||
HsplitIntFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const int32_t& indices_or_sections) const { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
int32_t ndim = input->shape()->NumAxes(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
CHECK_OR_RETURN(ndim>=1)<<"torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with "<<ndim <<" dimensions!"; | ||
CHECK_OR_RETURN(indices_or_sections>0) << "indices_or_sections must greater than 0"; | ||
int32_t dim = (ndim == 1) ? 0 : 1; | ||
CHECK_OR_RETURN(input->shape()->At(dim)% indices_or_sections == 0) << "torch.hsplit attempted to split along dimension " << dim | ||
<<", but the size of the dimension " << input->shape()->At(dim) << | ||
" is not divisible by the split_size " <<indices_or_sections<< "!"; | ||
return TensorSplitInt(input, indices_or_sections, dim); | ||
} | ||
}; | ||
|
||
class HsplitVecFunctor { | ||
public: | ||
HsplitVecFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const std::vector<int32_t>& indices_or_sections) const { | ||
int32_t ndim = input->shape()->NumAxes(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
CHECK_OR_RETURN(ndim>=1)<<"torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with "<<ndim <<" dimensions!"; | ||
int32_t dim = (ndim == 1) ? 0 : 1; | ||
return TensorSplitVec(input, indices_or_sections, dim); | ||
} | ||
}; | ||
|
||
class VsplitIntFunctor { | ||
public: | ||
VsplitIntFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const int32_t& indices_or_sections) const { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
int32_t ndim = input->shape()->NumAxes(); | ||
CHECK_OR_RETURN(ndim>=2)<<"torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with "<<ndim <<" dimensions!"; | ||
CHECK_OR_RETURN(indices_or_sections>0) << "indices_or_sections must greater than 0"; | ||
CHECK_OR_RETURN(input->shape()->At(0)% indices_or_sections == 0) << "torch.vsplit attempted to split along dimension " << 0 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. consider |
||
<<", but the size of the dimension " << input->shape()->At(0) << | ||
" is not divisible by the split_size " <<indices_or_sections<< "!"; | ||
return TensorSplitInt(input, indices_or_sections, 0); | ||
} | ||
}; | ||
|
||
class VsplitVecFunctor { | ||
public: | ||
VsplitVecFunctor() = default; | ||
Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& input, | ||
const std::vector<int32_t>& indices_or_sections) const { | ||
int32_t ndim = input->shape()->NumAxes(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. consider |
||
CHECK_OR_RETURN(ndim>=2)<<"torch.vsplit requires a tensor with at least 1 dimension, but got a tensor with "<<ndim <<" dimensions!"; | ||
return TensorSplitVec(input, indices_or_sections, 0); | ||
} | ||
}; | ||
|
||
class ErfinvFunctor { | ||
public: | ||
ErfinvFunctor() { op_ = CHECK_JUST(one::OpBuilder("erfinv").Input("x").Output("y").Build()); } | ||
|
@@ -1886,6 +2009,12 @@ ONEFLOW_FUNCTION_LIBRARY(m) { | |
m.add_functor<DotFunctor>("Dot"); | ||
m.add_functor<MovedimVecFunctor>("MovedimVec"); | ||
m.add_functor<MovedimIntFunctor>("MovedimInt"); | ||
m.add_functor<TensorSplitVecFunctor>("TensorSplitVec"); | ||
m.add_functor<TensorSplitIntFunctor>("TensorSplitInt"); | ||
m.add_functor<HsplitIntFunctor>("HsplitInt"); | ||
m.add_functor<HsplitVecFunctor>("HsplitVec"); | ||
m.add_functor<VsplitIntFunctor>("VsplitInt"); | ||
m.add_functor<VsplitVecFunctor>("VsplitVec"); | ||
m.add_functor<ErfinvFunctor>("Erfinv"); | ||
m.add_functor<ErfinvInplaceFunctor>("ErfinvInplace"); | ||
m.add_functor<CumsumFunctor>("Cumsum"); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
""" | ||
Copyright 2020 The OneFlow Authors. All rights reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
""" | ||
import unittest | ||
from oneflow.test_utils.automated_test_util import * | ||
import oneflow as flow | ||
import oneflow.unittest | ||
|
||
|
||
class TestHsplitVec(flow.unittest.TestCase):
    # Compares oneflow's hsplit against torch's via the automated test util,
    # splitting a random 4-D tensor at explicit indices (1, 2) along dim 1.
    @autotest(check_graph=False)
    def test_flow_hsplit_vec(test_case):
        device = random_device()
        x = random_pytorch_tensor(
            ndim=4,
            dim1=random(3, 6),
            dim2=random(3, 6),
            dim3=random(3, 6),
            dim4=random(3, 6),
        ).to(device)
        pieces = torch.hsplit(x, (1, 2))
        return pieces[0]
|
||
class TestHsplitInt(flow.unittest.TestCase):
    # Compares oneflow's hsplit against torch's via the automated test util,
    # splitting a random 4-D tensor into a random integer number of sections.
    @autotest(check_graph=False)
    def test_flow_hsplit_int(test_case):
        device = random_device()
        x = random_pytorch_tensor(
            ndim=4,
            dim1=random(3, 6),
            dim2=random(3, 6),
            dim3=random(3, 6),
            dim4=random(3, 6),
        ).to(device)
        num_sections = random(1, 3).to(int)
        pieces = torch.hsplit(x, num_sections)
        return pieces[0]
|
||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 参考一下这个:#7275 (comment) |
||
if __name__ == "__main__": | ||
unittest.main() |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
参考上个pr的comment:#7275 (comment)