From e78e39fe54d591a4fc8859f1320415da015aac1d Mon Sep 17 00:00:00 2001 From: dorhar Date: Tue, 24 May 2022 13:53:00 +0300 Subject: [PATCH 1/2] raise on lambda layer exists --- test/utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/utils.py b/test/utils.py index 33f62977..0e088ea7 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1,5 +1,4 @@ import io -import warnings import onnx import torch @@ -9,6 +8,10 @@ from onnx2kerastl import onnx_to_keras, check_torch_keras_error +class LambdaLayerException(Exception): + pass + + def torch2keras(model: torch.nn.Module, input_variable, verbose=True, change_ordering=False): if isinstance(input_variable, (tuple, list)): input_variable = tuple(torch.FloatTensor(var) for var in input_variable) @@ -36,7 +39,7 @@ def convert_and_test(model: torch.nn.Module, error = check_torch_keras_error(model, k_model, input_variable, change_ordering=change_ordering, epsilon=epsilon, should_transform_inputs=should_transform_inputs) if is_lambda_layers_exist(k_model): - warnings.warn("Found Lambda layers") + raise LambdaLayerException("Found Lambda layers") return error From cb89e6ff000a3021026db2d0eefc74158f4861c9 Mon Sep 17 00:00:00 2001 From: dorhar Date: Tue, 24 May 2022 13:55:36 +0300 Subject: [PATCH 2/2] remove change_ordering from test and use our data format first to last with transform_inputs --- test/models/test_alexnet.py | 11 +++-------- test/models/test_deeplab.py | 6 ++---- test/models/test_densenet.py | 10 +++------- test/models/test_googlenet.py | 10 +++------- test/models/test_mbnet2.py | 11 +++-------- test/models/test_mnasnet.py | 10 +++------- test/models/test_resnet18.py | 8 ++------ test/models/test_resnext.py | 10 +++------- test/models/test_squeezenet.py | 10 +++------- test/models/test_vgg.py | 10 +++------- 10 files changed, 28 insertions(+), 68 deletions(-) diff --git a/test/models/test_alexnet.py b/test/models/test_alexnet.py index a680d12d..79f5a5b3 100644 --- 
a/test/models/test_alexnet.py +++ b/test/models/test_alexnet.py @@ -1,17 +1,12 @@ import numpy as np -import pytest -import tensorflow as tf +from torchvision.models import alexnet from test.utils import convert_and_test -from torchvision.models import alexnet -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_alexnet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_alexnet(): model = alexnet() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_deeplab.py b/test/models/test_deeplab.py index b95ac5eb..b6fbaf7a 100644 --- a/test/models/test_deeplab.py +++ b/test/models/test_deeplab.py @@ -6,12 +6,10 @@ @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('model_class', [deeplabv3_resnet50, deeplabv3_resnet101, deeplabv3_mobilenet_v3_large]) -def test_deeplab(change_ordering, model_class): +def test_deeplab(model_class): model = model_class() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 256, 256)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering, - should_transform_inputs=True) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_densenet.py b/test/models/test_densenet.py index e7770100..d3c3136f 100644 --- a/test/models/test_densenet.py +++ b/test/models/test_densenet.py @@ -1,18 +1,14 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models.densenet import densenet121 from test.utils import convert_and_test -from torchvision.models.densenet import densenet121 @pytest.mark.slow 
-@pytest.mark.parametrize('change_ordering', [True, False]) -def test_densenet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_densenet(): model = densenet121() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_googlenet.py b/test/models/test_googlenet.py index c34bc965..d3eec49f 100644 --- a/test/models/test_googlenet.py +++ b/test/models/test_googlenet.py @@ -1,18 +1,14 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models import googlenet from test.utils import convert_and_test -from torchvision.models import googlenet @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_googlenet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_googlenet(): model = googlenet() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_mbnet2.py b/test/models/test_mbnet2.py index dde732fe..ecb03e5f 100644 --- a/test/models/test_mbnet2.py +++ b/test/models/test_mbnet2.py @@ -1,17 +1,12 @@ import numpy as np -import pytest -import tensorflow as tf +from torchvision.models import mobilenet_v2 from test.utils import convert_and_test -from torchvision.models import mobilenet_v2 -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_mobilenetv2(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_mobilenetv2(): model = mobilenet_v2() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_mnasnet.py b/test/models/test_mnasnet.py index 0ce309b0..a94940f3 100644 --- a/test/models/test_mnasnet.py +++ b/test/models/test_mnasnet.py @@ -1,19 +1,15 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models import mnasnet0_5, mnasnet1_0, mnasnet0_75, mnasnet1_3 from test.utils import convert_and_test -from torchvision.models import mnasnet0_5, mnasnet1_0, mnasnet0_75, mnasnet1_3 @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) @pytest.mark.parametrize('model_class', [mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3]) -def test_mnasnet(change_ordering, model_class): - if not 
tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_mnasnet(model_class): model = model_class() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_resnet18.py b/test/models/test_resnet18.py index 7e69ffad..a4eafa18 100644 --- a/test/models/test_resnet18.py +++ b/test/models/test_resnet18.py @@ -1,18 +1,14 @@ import numpy as np import pytest -import tensorflow as tf - from torchvision.models import resnet18 from test.utils import convert_and_test -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_resnet18(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") model = resnet18() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_resnext.py b/test/models/test_resnext.py index 242ded7f..74a440bd 100644 --- a/test/models/test_resnext.py +++ b/test/models/test_resnext.py @@ -1,19 +1,15 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models import resnext50_32x4d, resnext101_32x8d from test.utils import convert_and_test -from torchvision.models import resnext50_32x4d, resnext101_32x8d @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) @pytest.mark.parametrize('model_class', [resnext50_32x4d, resnext101_32x8d]) -def test_resnext(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_resnext(model_class): model = model_class() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_squeezenet.py b/test/models/test_squeezenet.py index 25246520..3a95771e 100644 --- a/test/models/test_squeezenet.py +++ b/test/models/test_squeezenet.py @@ -1,19 +1,15 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models import squeezenet1_0, squeezenet1_1 from test.utils import convert_and_test -from torchvision.models import squeezenet1_0, squeezenet1_1 @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) @pytest.mark.parametrize('model_class', [squeezenet1_1, squeezenet1_0]) -def test_squeezenet(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_squeezenet(model_class): model = model_class() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_vgg.py b/test/models/test_vgg.py index cf0e7866..770f7fdc 100644 --- a/test/models/test_vgg.py +++ b/test/models/test_vgg.py @@ -1,19 +1,15 @@ import numpy as np import pytest -import tensorflow as tf +from torchvision.models import vgg11, vgg11_bn from test.utils import convert_and_test -from torchvision.models import vgg11, vgg11_bn @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) @pytest.mark.parametrize('model_class', [vgg11, vgg11_bn]) -def test_vgg(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") +def test_vgg(model_class): model = model_class() model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True)