Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion LICENSE
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2020 Grigory Malivenko
Copyright (c) 2020 TensorLeap

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Expand Down
3 changes: 0 additions & 3 deletions MANIFEST.in

This file was deleted.

31 changes: 31 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Tooling is invoked through Poetry so the project's virtualenv is used.
# Bug fix: PYTHONPATH was assigned but never exported, so it had no effect
# on the recipe shells; `export` makes it visible to the python processes.
export PYTHONPATH := .
POETRY_MODULE := poetry run python -m
PYTEST := $(POETRY_MODULE) pytest

# NOTE(review): some targets refer to "test"/"test/models" and others to
# "tests" — confirm which directory actually exists in the repo and unify.

.PHONY: run_tests
run_tests:  ## Run the full test suite verbosely
	$(PYTEST) test -v

.PHONY: test_models
test_models:  ## Run only the model-conversion tests
	$(PYTEST) test/models -v

.PHONY: watch
watch:  ## Re-run tests matching K=<expr> on file changes
	$(POETRY_MODULE) pytest_watch --runner "python -m pytest -v -k $(K)"

.PHONY: lint
lint:  ## Type-check the whole project with mypy
	$(POETRY_MODULE) mypy --install-types --non-interactive .

.PHONY: lint_strict_code
lint_strict_code:  ## Strict mypy on the code_loader package only
	$(POETRY_MODULE) mypy --install-types --non-interactive --strict code_loader

.PHONY: lint_tests
lint_tests:  ## Type-check the tests
	$(POETRY_MODULE) mypy --install-types --non-interactive tests

.PHONY: test_with_coverage
test_with_coverage:  ## Run tests with branch coverage (terminal + HTML reports)
	$(PYTEST) --cov=code_loader --cov-branch --no-cov-on-fail --cov-report term-missing --cov-report html -v tests/
24 changes: 24 additions & 0 deletions onnx2kerastl/activation_layers.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from tensorflow import keras
import logging

from .customonnxlayer.onnxhardsigmoid import OnnxHardSigmoid
from .utils import ensure_tf_type, ensure_numpy_type


Expand Down Expand Up @@ -176,3 +178,25 @@ def convert_prelu(node, params, layers, lambda_func, node_name, keras_name):

prelu = keras.layers.PReLU(weights=[W], shared_axes=shared_axes, name=keras_name)
layers[node_name] = prelu(input_0)


def convert_hard_sigmoid(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert ONNX HardSigmoid (y = clip(alpha * x + beta, 0, 1)) to a Keras layer.
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    if len(node.input) != 1:
        # Bug fix: the original `assert AttributeError(...)` asserted a truthy
        # exception instance, so it always passed and the error was never raised.
        raise AttributeError('More than 1 input for an activation layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    # ONNX HardSigmoid attribute defaults: alpha=0.2, beta=0.5.
    alpha = params.get("alpha", 0.2)
    beta = params.get("beta", 0.5)
    onnx_hard_sigmoid = OnnxHardSigmoid(alpha=alpha, beta=beta, name=keras_name)
    layers[node_name] = onnx_hard_sigmoid(input_0)
3 changes: 2 additions & 1 deletion onnx2kerastl/converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import collections
from onnx import numpy_helper

from .customonnxlayer import onnx_custom_objects_map
from .layers import AVAILABLE_CONVERTERS


Expand Down Expand Up @@ -290,7 +291,7 @@ def onnx_to_keras(onnx_model, input_names,
layer['config']['function'] = tuple(kerasf)

keras.backend.set_image_data_format('channels_last')
model_tf_ordering = keras.models.Model.from_config(conf)
model_tf_ordering = keras.models.Model.from_config(conf, custom_objects=onnx_custom_objects_map)

for dst_layer, src_layer, conf in zip(model_tf_ordering.layers, model.layers, conf['layers']):
W = src_layer.get_weights()
Expand Down
3 changes: 3 additions & 0 deletions onnx2kerastl/customonnxlayer/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from onnx2kerastl.customonnxlayer.onnxhardsigmoid import OnnxHardSigmoid

# Registry of custom layers produced by the converter, passed as
# `custom_objects` to keras.models.Model.from_config so serialized models
# containing these layers can be deserialized.
onnx_custom_objects_map = {"OnnxHardSigmoid": OnnxHardSigmoid}
23 changes: 23 additions & 0 deletions onnx2kerastl/customonnxlayer/onnxhardsigmoid.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
from keras.layers import Layer
import tensorflow as tf


class OnnxHardSigmoid(Layer):
    """Keras layer implementing ONNX HardSigmoid: y = clip(alpha * x + beta, 0, 1)."""

    def __init__(self, alpha: float = 0.2, beta: float = 0.5, **kwargs):
        super().__init__(**kwargs)
        # ONNX attribute defaults: alpha=0.2, beta=0.5.
        self.alpha = alpha
        self.beta = beta

    def call(self, inputs, **kwargs):
        # Affine transform followed by saturation to the [0, 1] range.
        scaled = tf.add(tf.multiply(inputs, self.alpha), self.beta)
        return tf.clip_by_value(scaled, 0., 1.)

    def get_config(self):
        # Include alpha/beta so the layer round-trips through serialization.
        base = super().get_config()
        base.update(dict(alpha=self.alpha, beta=self.beta))
        return base
3 changes: 2 additions & 1 deletion onnx2kerastl/layers.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from .convolution_layers import convert_conv, convert_convtranspose
from .activation_layers import convert_relu, convert_elu, convert_lrelu, convert_selu, \
convert_sigmoid, convert_tanh, convert_softmax, convert_prelu
convert_sigmoid, convert_tanh, convert_softmax, convert_prelu, convert_hard_sigmoid
from .operation_layers import convert_clip, convert_exp, convert_reduce_sum, convert_reduce_mean, \
convert_log, convert_pow, convert_sqrt, convert_split, convert_cast, convert_floor, convert_identity, \
convert_argmax, convert_reduce_l2, convert_reduce_max
Expand All @@ -22,6 +22,7 @@
'Elu': convert_elu,
'LeakyRelu': convert_lrelu,
'Sigmoid': convert_sigmoid,
'HardSigmoid': convert_hard_sigmoid,
'Tanh': convert_tanh,
'Selu': convert_selu,
'Clip': convert_clip,
Expand Down
17 changes: 11 additions & 6 deletions onnx2kerastl/upsampling_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,17 @@ def convert_upsample(node, params, layers, lambda_func, node_name, keras_name):
# Upsample since opset version 9 uses input[1] as 'scales' instead of attributes.
scale = np.uint8(layers[node.input[1]][-2:])

if params['mode'].decode('utf-8') != 'nearest':
logger.error('Cannot convert non-nearest upsampling.')
raise AssertionError('Cannot convert non-nearest upsampling')
interpolation_mode = params['mode'].decode('utf-8')
if interpolation_mode == 'nearest':
interpolation = "nearest"
elif interpolation_mode in ['bilinear', 'linear']:
interpolation = "bilinear"
elif interpolation_mode in "cubic":
interpolation = "bicubic"
else:
logger.error(f'Cannot convert upsampling. interpolation mode: {interpolation_mode} is not supported')
raise AssertionError(f'Cannot convert upsampling. interpolation mode: {interpolation_mode} is not supported')

upsampling = keras.layers.UpSampling2D(
size=scale, name=keras_name
)
upsampling = keras.layers.UpSampling2D(size=scale, name=keras_name, interpolation=interpolation)

layers[node_name] = upsampling(layers[node.input[0]])
43 changes: 32 additions & 11 deletions onnx2kerastl/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import numpy as np
from tensorflow import keras
from keras_data_format_converter import convert_channels_first_to_last


def is_numpy(obj):
Expand Down Expand Up @@ -47,31 +48,33 @@ def target_layer(_, inp=obj, dtype=obj.dtype.name):
return obj


def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False):
def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False,
should_transform_inputs=False):
"""
Check difference between Torch and Keras models
:param model: torch model
:param k_model: keras model
:param input_np: input data as numpy array or list of numpy array
:param epsilon: allowed difference
:param change_ordering: change ordering for keras input
:param should_transform_inputs: default False, set to True for converting channel first inputs to channel last format
:return: actual difference

"""
from torch.autograd import Variable
import torch

initial_keras_image_format = keras.backend.image_data_format()

if isinstance(input_np, np.ndarray):
input_np = [input_np.astype(np.float32)]


input_var = [Variable(torch.FloatTensor(i)) for i in input_np]
pytorch_output = model(*input_var)
if not isinstance(pytorch_output, tuple):
pytorch_output = [pytorch_output.data.numpy()]
else:
if isinstance(pytorch_output, dict):
pytorch_output = [p.data.numpy() for p in list(pytorch_output.values())]
elif isinstance(pytorch_output, (tuple, list)):
pytorch_output = [p.data.numpy() for p in pytorch_output]
else:
pytorch_output = [pytorch_output.data.numpy()]

if change_ordering:
# change image data format
Expand Down Expand Up @@ -101,13 +104,31 @@ def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_order
_koutput.append(k)
keras_output = _koutput
else:
keras.backend.set_image_data_format("channels_first")
keras_output = k_model.predict(input_np)
inputs_to_transpose = []
if should_transform_inputs:
inputs_to_transpose = [k_input.name for k_input in k_model.inputs]

_input_np = []
for i in input_np:
axes = list(range(len(i.shape)))
axes = axes[0:1] + axes[2:] + axes[1:2]
_input_np.append(np.transpose(i, axes))
input_np = _input_np

k_model = convert_channels_first_to_last(k_model, inputs_to_transpose)
keras_output = k_model(*input_np)
if not isinstance(keras_output, list):
keras_output = [keras_output]

# reset to previous image_data_format
keras.backend.set_image_data_format(initial_keras_image_format)
_koutput = []
for i, k in enumerate(keras_output):
if k.shape != pytorch_output[i].shape:
axes = list(range(len(k.shape)))
axes = axes[0:1] + axes[-1:] + axes[1:-1]
k = np.transpose(k, axes)
_koutput.append(k)
keras_output = _koutput


max_error = 0
for p, k in zip(pytorch_output, keras_output):
Expand Down
Loading