[Fix] graph support 0-Size tensor (#6957)
* Add nn.functional.glu graph test

* add filter to modify functional autotest

* modify code

* add test example

* add test else

* add test judging condition for test_masked_fill.py, test_constant.py, test_tile.py, test_repeat.py, test_expand.py

* add test ok example

* Clear tensor name scope after graph build

* Add test case of two graphs capturing the same free eager tensor

* auto format by CI

* Dev cc clean tensor name scope (#7082)

* Clear tensor name scope after graph build

* Add test case of two graphs capturing the same free eager tensor

* auto format by CI

Co-authored-by: chengtbf <472491134@qq.com>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>

* submit successful test example

* successful test example

* submit test code

* fix a bug about relu module with 0 shape data

* fixed a bug about relu module with 0 shape data

* fix a bug about relu module with 0 shape data

* fix a bug about relu module with 0 shape data

* 0shape and 0d autotest

* fix a bug about relu module with 0 shape data

* 0shape changed to 0_size

* modify test_var.py

* modify test_eye.py

* modify test_reshape.py

* modify test_.py

* modify ReshapeFunctor

* modify some file

* Fixed graph autotest bug with reshape op test

* Fixed graph autotest bug with reshape op test

* fixed test_sub.py

* modify test_sub.py

* modify tensor_methods.cpp

* modify array_functor.cpp

* graph support 0-Size tensor

* rename 0shape to 0 size

* modified check_graph=True

* fix and refine

Co-authored-by: Zhenhua <huangzhenhua@zhejianglab.com>
Co-authored-by: tangnana925 <85614052+tangnana925@users.noreply.github.com>
Co-authored-by: tangnana <tnn_personal@163.com>
Co-authored-by: Zhenhua <1209435+hengzi@users.noreply.github.com>
Co-authored-by: chengtbf <472491134@qq.com>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: Xiaoyu Xu <xiaoyulink@gmail.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
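The "two graphs capturing the same free eager tensor" case above is roughly the following pattern; a minimal sketch, assuming the public flow.nn.Graph API (class and variable names here are illustrative):

import oneflow as flow

free = flow.randn(2, 3)  # a free eager tensor captured by both graphs

class AddFree(flow.nn.Graph):
    def __init__(self):
        super().__init__()

    def build(self, x):
        # `free` is not an input; it is captured from the enclosing scope
        return x + free

g1 = AddFree()
g2 = AddFree()
# Clearing the tensor name scope after each graph build lets a second
# graph be built over the same captured tensor without name collisions.
y1 = g1(flow.ones(2, 3))
y2 = g2(flow.ones(2, 3))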
9 people committed Dec 31, 2021
1 parent de9fc41 commit eabe79e
Showing 28 changed files with 60 additions and 60 deletions.
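What check_graph=True exercises, in rough outline: each autotest case is run eagerly and again inside an nn.Graph, and the two results are compared, which with this commit also covers tensors that have a 0-size dimension. A hedged sketch of the equivalence being checked:

import oneflow as flow

x = flow.randn(2, 0, 3)      # a 0-size tensor: x.numel() == 0

eager_y = flow.relu(x)       # eager result

class ReluGraph(flow.nn.Graph):
    def __init__(self):
        super().__init__()

    def build(self, x):
        return flow.relu(x)

graph_y = ReluGraph()(x)     # same op, run through a compiled graph
assert eager_y.shape == graph_y.shape == flow.Size([2, 0, 3])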
2 changes: 1 addition & 1 deletion oneflow/core/functional/impl/array_functor.cpp
@@ -911,7 +911,7 @@ class ReshapeFunctor {
}
Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x, const Shape& shape) const {
// if input tensor is eager local, then return tensor's view
- if (x->is_eager() && x->is_local()) { return view::Reshape(x, shape); }
+ if (x->is_local() && !(LazyMode::is_enabled())) { return view::Reshape(x, shape); }
int need_infer_axis = -1;
size_t count = 1;
for (int i = 0; i < shape.NumAxes(); ++i) {
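The functor change keeps the view fast path for local tensors only when lazy mode is off, so a reshape traced inside a graph goes through the regular reshape op, which now tolerates 0-size inputs. A small sketch of what this enables (assuming the public nn.Graph API):

import oneflow as flow

class ReshapeGraph(flow.nn.Graph):
    def __init__(self):
        super().__init__()

    def build(self, x):
        # inside build(), LazyMode is enabled, so ReshapeFunctor no
        # longer takes the eager view path for this call
        return flow.reshape(x, (-1,))

x = flow.randn(2, 0, 3)
y = ReshapeGraph()(x)
assert y.shape == flow.Size([0])  # 2 * 0 * 3 == 0 elements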
2 changes: 1 addition & 1 deletion oneflow/core/job/plan_util.cpp
@@ -163,7 +163,7 @@ void GenChunkForMultiNNGraphMemoryReuseInMultiClient(
CHECK_LE(current_chunk_offset + mem_block->mem_size(), chunk->mem_size());
CHECK_GE(current_chunk_offset, 0);
// CHECK_GT(mem_block->mem_size(), 0); NOTE(chengcheng): has mem block mem size = 0
- CHECK_GT(chunk->mem_size(), 0);
+ CHECK_GE(chunk->mem_size(), 0);
mem_block->set_chunk_id(chunk->chunk_id());
mem_block->set_chunk_offset(current_chunk_offset);
current_chunk_offset += mem_block->mem_size();
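The relaxed check reflects that a memory block backing a 0-size tensor takes 0 bytes, so a chunk made only of such blocks can legitimately have size 0. A hypothetical Python illustration of the offset bookkeeping the surrounding code performs:

# Illustrative only: mirrors the CHECK_* invariants above.
block_sizes = [256, 0, 128]    # a 0-size tensor contributes a 0-byte block
chunk_size = sum(block_sizes)  # may itself be 0 if every block is empty

offset = 0
for size in block_sizes:
    assert offset >= 0                  # CHECK_GE(current_chunk_offset, 0)
    assert offset + size <= chunk_size  # CHECK_LE(current_chunk_offset + mem_size, chunk mem_size)
    offset += size
assert chunk_size >= 0                  # was CHECK_GT(chunk->mem_size(), 0)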
2 changes: 1 addition & 1 deletion oneflow/core/operator/interface_op_util.cpp
@@ -21,7 +21,7 @@ namespace oneflow {
namespace {

void CheckShape(const Shape& shape) {
- FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GT(shape.At(i), 0); }
+ FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GE(shape.At(i), 0); }
}

Maybe<void> GetSbpSignature(const InterfaceBlobConf& blob_conf, const PbRpf<std::string>& input_bns,
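CheckShape already exempted axis 0 (the batch axis); the relaxation lets any later axis be 0, so interface blobs describing 0-size tensors pass validation. A rough Python equivalent:

def check_shape(shape):
    # axis 0 is skipped, as in FOR_RANGE(int, i, 1, shape.NumAxes())
    for d in shape[1:]:
        assert d >= 0  # was: d > 0

check_shape((8, 0, 3))  # now accepted: describes a 0-size tensor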
2 changes: 1 addition & 1 deletion python/oneflow/test/modules/test_abs.py
@@ -24,7 +24,7 @@
@flow.unittest.skip_unless_1n1d()
class TestAbsModule(flow.unittest.TestCase):
@autotest(check_graph=True)
-def test_abs_with_0shape_data(test_case):
+def test_abs_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor().to(device)
y = torch.abs(x)
24 changes: 12 additions & 12 deletions python/oneflow/test/modules/test_activation.py
@@ -39,8 +39,8 @@ def test_relu_module_with_random_data(test_case):
y = m(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_relu_module_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_relu_module_with_0_size_data(test_case):
m = torch.nn.ReLU()
m.train(random())
device = random_device()
@@ -62,8 +62,8 @@ def test_relu6_module_with_random_data(test_case):
y = m(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_relu6_module_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_relu6_module_with_0_size_data(test_case):
m = torch.nn.ReLU6()
m.train(random())
device = random_device()
@@ -85,8 +85,8 @@ def test_tanh_module_with_random_data(test_case):
y = m(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_tanh_module_with_0shapedata(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_tanh_module_with_0_size_data(test_case):
m = torch.nn.Tanh()
m.train(random())
device = random_device()
@@ -102,8 +102,8 @@ def test_flow_tanh_with_random_data(test_case):
y = torch.tanh(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_flow_tanh_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_flow_tanh_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
y = torch.tanh(x)
@@ -122,8 +122,8 @@ def test_elu_module_with_random_data(test_case):
y = m(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_elu_module_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_elu_module_with_0_size_data(test_case):
m = torch.nn.ELU(alpha=random() | nothing())
m.train(random())
device = random_device()
@@ -145,8 +145,8 @@ def test_celu_module_with_random_data(test_case):
y = m(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_celu_module_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_celu_module_with_0_size_data(test_case):
m = torch.nn.CELU(alpha=random() | nothing())
m.train(random())
device = random_device()
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_add.py
@@ -170,8 +170,8 @@ def test_add(test_case):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])

-@autotest(check_graph=False)
-def test_0shape_add(test_case):
+@autotest(check_graph=True)
+def test_0_size_add(test_case):
device = random_device()
x = random_pytorch_tensor(2, 0, 3).to(device)
y = random_pytorch_tensor(2, 1, 3).to(device)
2 changes: 1 addition & 1 deletion python/oneflow/test/modules/test_cast.py
@@ -66,7 +66,7 @@ def test_cast(test_case):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])

-def test_cast_with_0shape_data(test_case):
+def test_cast_with_0_size_data(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_cast_float2int,
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_ceil.py
@@ -32,8 +32,8 @@ def test_ceil_flow_with_random_data(test_case):
y = torch.ceil(input)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_ceil_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_ceil_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.ceil(x)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_clamp.py
@@ -154,8 +154,8 @@ def test_clip_max_none_flow_with_random_data(test_case):
)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_clamp_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_clamp_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.clamp(x, min=random().to(float), max=random().to(float))
8 changes: 4 additions & 4 deletions python/oneflow/test/modules/test_concat.py
@@ -140,16 +140,16 @@ def test_cat_with_random_data(test_case):
x = random_pytorch_tensor(ndim=2, dim0=random(), dim1=random()).to(device)
return torch.cat((x, x, x), random(0, 2).to(int))

-@autotest(n=10, auto_backward=False, check_graph=False)
-def test_concat_with_input_0shape_data(test_case):
+@autotest(n=10, auto_backward=False, check_graph=True)
+def test_concat_with_input_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 2, 4).to(device)
y = random_pytorch_tensor(4, 2, 3, random(0, 3), 4).to(device)
z = torch.cat((x, y), dim=2)
return z

-@autotest(n=10, auto_backward=False, check_graph=False)
-def test_concat_with_output_0shape_data(test_case):
+@autotest(n=10, auto_backward=False, check_graph=True)
+def test_concat_with_output_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 0, 2, 4).to(device)
y = random_pytorch_tensor(4, 2, 0, 2, 4).to(device)
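Concatenation is the interesting case for 0-size data: a 0-size operand simply contributes nothing along the cat dimension, and concatenating only empty tensors stays empty. An eager sketch of the two tests above:

import oneflow as flow

# 0-size input: y adds nothing along dim 2
x = flow.randn(2, 3, 2, 4)
y = flow.randn(2, 3, 0, 4)
assert flow.cat((x, y), dim=2).shape == flow.Size([2, 3, 2, 4])

# 0-size output: both operands are empty, and so is the result
a = flow.randn(2, 0, 2, 4)
b = flow.randn(2, 0, 2, 4)
assert flow.cat((a, b), dim=1).shape == flow.Size([2, 0, 2, 4])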
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_div.py
@@ -125,8 +125,8 @@ def test_div_against_pytorch(test_case):
device=arg[1],
)

-@autotest(auto_backward=False, check_graph=False)
-def test_0shape_div(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_0_size_div(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_eq.py
@@ -28,8 +28,8 @@

@flow.unittest.skip_unless_1n1d()
class TestEq(flow.unittest.TestCase):
-@autotest(auto_backward=False, check_graph=False)
-def test_eq_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_eq_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(3, 2, 0, 3).to(device)
y = random_pytorch_tensor(3, 2, 0, 3).to(device)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_expm1.py
@@ -58,8 +58,8 @@ def test_expm1_flow_with_random_data(test_case):
y = torch.expm1(input)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_expm1_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_expm1_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.expm1(x)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_fmod.py
@@ -55,8 +55,8 @@ def test_flow_fmod_scalar_with_random_data(test_case):
other = 3
return torch.fmod(input, other)

-@autotest(auto_backward=False, check_graph=False)
-def test_fmod_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_fmod_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.fmod(x, 2)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_greater.py
@@ -119,8 +119,8 @@ def test_tensor_greater_with_random_data(test_case):
y2 = x1 > x2
return (y1, y2)

-@autotest(auto_backward=False, check_graph=False)
-def test_greater_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_greater_with_0_size_data(test_case):
device = random_device()
x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_ne.py
@@ -101,8 +101,8 @@ def test_ne(test_case):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])

-@autotest(auto_backward=False, check_graph=False)
-def test_ne_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_ne_with_0_size_data(test_case):
device = random_device()
x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_negative.py
@@ -24,8 +24,8 @@

@flow.unittest.skip_unless_1n1d()
class TestNegativeModule(flow.unittest.TestCase):
-@autotest(auto_backward=False, check_graph=False)
-def test_ne_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_ne_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
y1 = torch.negative(x)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_reshape.py
@@ -102,8 +102,8 @@ def test_reshape_flow_with_random_data(test_case):
y = torch.reshape(x, shape=(-1,))
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_reshape_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_reshape_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 0, 3).to(device)
y = torch.reshape(
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_sign.py
@@ -56,8 +56,8 @@ def test_sign_with_random_data(test_case):
y = torch.sign(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_sign_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_sign_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device)
y = torch.sign(x)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_squeeze.py
@@ -111,8 +111,8 @@ def test_flow_squeeze_with_random_data(test_case):
y = torch.squeeze(x, random(1, 3).to(int))
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_squeeze_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_squeeze_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(3, 2, 1, 0).to(device)
y = torch.squeeze(x)
2 changes: 1 addition & 1 deletion python/oneflow/test/modules/test_sub.py
@@ -128,7 +128,7 @@ def test_sub_against_pytorch(test_case):
)

@autotest(auto_backward=False, check_graph=False)
-def test_sub_with_0shape_data(test_case):
+def test_sub_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(2, 0, 3).to(device)
y = random_pytorch_tensor(2, 1, 3).to(device)
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_sum.py
@@ -77,8 +77,8 @@ def test_sum_against_pytorch(test_case):
y = torch.sum(x)
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_sum_with_0shape_tensor(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_sum_with_0_size_tensor(test_case):
device = random_device()
x = random_pytorch_tensor(4, 4, 3, 0, 2).to(device)
y = torch.sum(x, dim=np.random.randint(0, 3))
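Reducing over a 0-size axis is well-defined: the sum of an empty slice is 0, so the result is an ordinary dense tensor. For instance:

import oneflow as flow

x = flow.randn(4, 3, 0, 2)
y = flow.sum(x, dim=2)        # reduce over the empty axis
assert y.shape == flow.Size([4, 3, 2])
assert y.sum().item() == 0.0  # empty reductions yield zeros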
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_transpose.py
@@ -103,8 +103,8 @@ def test_transpose_flow_with_random_data(test_case):
y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
return y

-@autotest(auto_backward=False, check_graph=False)
-def test_transpose_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_transpose_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device)
y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
4 changes: 2 additions & 2 deletions python/oneflow/test/modules/test_triu.py
@@ -52,8 +52,8 @@ def test_triu(test_case):
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])

-@autotest(auto_backward=False, check_graph=False)
-def test_triu_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_triu_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.triu(x)
2 changes: 1 addition & 1 deletion python/oneflow/test/modules/test_unsqueeze.py
@@ -85,7 +85,7 @@ def test_flow_unsqueeze_with_random_data(test_case):
return y

@autotest(auto_backward=False, check_graph=True)
-def test_unsqueeze_with_0shape_data(test_case):
+def test_unsqueeze_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(3, 2, 1, 0).to(device)
y = torch.unsqueeze(x, random(0, 2).to(int))
3 changes: 1 addition & 2 deletions python/oneflow/test/modules/test_var.py
@@ -23,7 +23,6 @@


class TestVar(flow.unittest.TestCase):
-@autotest(check_graph=False)
def test_flow_var_all_dim_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor().to(device)
@@ -44,7 +43,7 @@ def test_flow_var_one_dim_with_random_data(test_case):

@unittest.skip("var not support 0-shape tensor currently")
@autotest(check_graph=False)
-def test_flow_var_0d_tensor_with_random_data(test_case):
+def test_flow_var_0_size_data_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 3, 0, 4).to(device)
y = torch.var(
4 changes: 2 additions & 2 deletions python/oneflow/test/tensor/test_tensor.py
@@ -717,8 +717,8 @@ def test_flow_fmod_scalar_with_random_data(test_case):
other = 3
return input.fmod(other)

-@autotest(auto_backward=False, check_graph=False)
-def test_fmod_with_0shape_data(test_case):
+@autotest(auto_backward=False, check_graph=True)
+def test_fmod_with_0_size_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = x.fmod(2)
@@ -707,7 +707,8 @@ def new_f(test_case):
rtol=rtol,
atol=atol,
equal_nan=True,
-)
+),
+f"Check graph failed: graph result {eager_tensor_2_graph_tensor[flow_tensor].numpy()} not equals to eager result {flow_tensor.numpy()}.",
)
if verbose:
print(f"{f.__name__} test graph passed.")
