From 170faa3d8e811b5813fbd66b294a3b9a2b380ce1 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 11:33:42 +0200
Subject: [PATCH 01/16] handling constants with a custom layer (mul)

---
 onnx2kerastl/customonnxlayer/__init__.py  |  4 +-
 .../customonnxlayer/onnxconstantmul.py    | 20 +++++
 onnx2kerastl/elementwise_layers.py        | 89 +++++++++++++++----
 3 files changed, 93 insertions(+), 20 deletions(-)
 create mode 100644 onnx2kerastl/customonnxlayer/onnxconstantmul.py

diff --git a/onnx2kerastl/customonnxlayer/__init__.py b/onnx2kerastl/customonnxlayer/__init__.py
index 2247dddb..6818e488 100644
--- a/onnx2kerastl/customonnxlayer/__init__.py
+++ b/onnx2kerastl/customonnxlayer/__init__.py
@@ -1,8 +1,10 @@
+from onnx2kerastl.customonnxlayer.onnxconstantmul import ONNXMultiplyByConstantLayer
 from onnx2kerastl.customonnxlayer.onnxeinsum import OnnxEinsumLayer
 from onnx2kerastl.customonnxlayer.onnxlstm import OnnxLSTM
 
 onnx_custom_objects_map = {
-    "OnnxLSTM": OnnxLSTM
+    "OnnxLSTM": OnnxLSTM,
+    "ONNXMultiplyByConstantLayer": ONNXMultiplyByConstantLayer
 }
 
 onnx_custom_layers = {
diff --git a/onnx2kerastl/customonnxlayer/onnxconstantmul.py b/onnx2kerastl/customonnxlayer/onnxconstantmul.py
new file mode 100644
index 00000000..e33488f6
--- /dev/null
+++ b/onnx2kerastl/customonnxlayer/onnxconstantmul.py
@@ -0,0 +1,20 @@
+from keras.layers import Layer
+import tensorflow as tf
+
+class ONNXMultiplyByConstantLayer(Layer):
+    def __init__(self, constant_shape, constant_value, **kwargs):
+        super(ONNXMultiplyByConstantLayer, self).__init__(**kwargs)
+        self.constant_shape = constant_shape
+        self.constant_value = constant_value
+
+    def call(self, inputs):
+        constant_tensor = tf.fill(self.constant_shape, self.constant_value)
+        return inputs * constant_tensor
+
+    def get_config(self):
+        config = super(ONNXMultiplyByConstantLayer, self).get_config()
+        config.update({
+            'constant_shape': self.constant_shape,
+            'constant_value': self.constant_value,
+        })
+        return config
\ No newline at end of file
diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index e761503f..762407ab 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -1,6 +1,8 @@
 import numpy as np
 import keras
 import logging
+
+from onnx2kerastl.customonnxlayer.onnxconstantmul import ONNXMultiplyByConstantLayer
 from .utils import is_numpy, ensure_tf_type
 from .tfops_funcs import tf_tensor_scatter_nd_update, tf_maximum, tf_minimum, tf_cast, tf_expand_dims, tf_repeat,\
     tf_equal, tf_where, tf_round, tf_sign, tf_abs, tf_math_mod, tf_bitwise_left_shift, tf_bitwise_right_shift,\
@@ -98,39 +100,88 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
     layers[node_name] = input_0 + input_1
 
 
+# def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
+#     """
+#     Convert element-wise mul.
+#     :param node: current operation node
+#     :param params: operation attributes
+#     :param layers: available keras layers
+#     :param lambda_func: function for keras Lambda layer
+#     :param node_name: internal converter name
+#     :param keras_name: resulting layer name
+#     :return: None
+#     """
+#     logger = logging.getLogger('onnx2keras.mul')
+
+#     if len(node.input) != 2:
+#         raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
+
+#     input_0 = layers[node.input[0]]
+#     input_1 = layers[node.input[1]]
+
+#     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
+#     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
+#     try:
+#         if not input_0_is_constant and not input_1_is_constant:
+#             mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
+#             layers[node_name] = mul([input_0, input_1])
+#         else:
+#             raise ValueError('Operands are different.')
+
+#     except (IndexError, ValueError):
+#         logger.warning('Failed to use keras.layers.Multiply. Fallback to TF lambda.')
+#         layers[node_name] = input_0 * input_1
+
 def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise mul.
-    :param node: current operation node
-    :param params: operation attributes
-    :param layers: available keras layers
-    :param lambda_func: function for keras Lambda layer
-    :param node_name: internal converter name
-    :param keras_name: resulting layer name
-    :return: None
     """
     logger = logging.getLogger('onnx2keras.mul')
 
     if len(node.input) != 2:
-        raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
+        raise AttributeError('Number of inputs is not equal to 2 for element-wise layer')
 
     input_0 = layers[node.input[0]]
    input_1 = layers[node.input[1]]
 
     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
-    try:
-        if not input_0_is_constant and not input_1_is_constant:
-            mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
-            layers[node_name] = mul([input_0, input_1])
-        else:
-            raise ValueError('Operands are different.')
-
-    except (IndexError, ValueError):
-        logger.warning('Failed to use keras.layers.Multiply. Fallback to TF lambda.')
-        layers[node_name] = input_0 * input_1
-
+    if not input_0_is_constant and not input_1_is_constant:
+        # Both inputs are tensors; use Multiply layer
+        mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
+        layers[node_name] = mul([input_0, input_1])
+    else:
+        # One of the inputs is a constant
+        if input_0_is_constant:
+            # input_0 is constant
+            constant_value = input_0
+            variable_input = input_1
+        else:
+            # input_1 is constant
+            constant_value = input_1
+            variable_input = input_0
+
+        # Define the shape and value of the constant tensor
+        constant_shape = constant_value.shape
+        if np.all(constant_value == constant_value.flat[0]):
+            # Constant tensor has the same value throughout
+            const_val = float(constant_value.flat[0])
+            # Use tf.fill to create the constant tensor at runtime
+            layers[node_name] = ONNXMultiplyByConstantLayer(
+                constant_shape=constant_shape,
+                constant_value=const_val,
+                name=keras_name
+            )(variable_input)
+        else:
+            # If the constant tensor has varying values, we need to embed it
+            logger.warning('Constant tensor has varying values; embedding it into the model.')
+            constant_tensor = tf.constant(constant_value)
+            layers[node_name] = keras.layers.Lambda(
+                lambda x: x * constant_tensor,
+                name=keras_name
+            )(variable_input)
+
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise sub.
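Note: PATCH 01 registers the new layer in onnx_custom_objects_map because Keras can only deserialize a model containing a custom layer if the class is supplied at load time, and get_config() is what lets the constant's shape and value survive a save/load round trip. A minimal sketch of the load path (the "model.h5" filename here is hypothetical):

    import keras
    from onnx2kerastl.customonnxlayer import onnx_custom_objects_map

    model = keras.models.load_model("model.h5", custom_objects=onnx_custom_objects_map)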
From f263a0a3da1f0592ab0977ac2554e2d314110bee Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 12:05:20 +0200
Subject: [PATCH 02/16] modify mul to use lambda rather than custom layer

---
 onnx2kerastl/elementwise_layers.py | 72 ++++++++++++++++++------------
 1 file changed, 44 insertions(+), 28 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 762407ab..19080a33 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -147,40 +147,56 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
 
-    if not input_0_is_constant and not input_1_is_constant:
-        # Both inputs are tensors; use Multiply layer
-        mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
-        layers[node_name] = mul([input_0, input_1])
-    else:
-        # One of the inputs is a constant
-        if input_0_is_constant:
-            # input_0 is constant
+    try:
+        if not input_0_is_constant and not input_1_is_constant:
+            mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
+            layers[node_name] = mul([input_0, input_1])
+        else:
+            raise ValueError('Operands are different.')
+
+    except (IndexError, ValueError):
+        logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.')
+
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable
             constant_value = input_0
             variable_input = input_1
-        else:
-            # input_1 is constant
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = float(constant_value.flat[0])
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * constant_value,
+                    name=keras_name
+                )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant
             constant_value = input_1
             variable_input = input_0
 
-        # Define the shape and value of the constant tensor
-        constant_shape = constant_value.shape
-        if np.all(constant_value == constant_value.flat[0]):
-            # Constant tensor has the same value throughout
-            const_val = float(constant_value.flat[0])
-            # Use tf.fill to create the constant tensor at runtime
-            layers[node_name] = ONNXMultiplyByConstantLayer(
-                constant_shape=constant_shape,
-                constant_value=const_val,
-                name=keras_name
-            )(variable_input)
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = float(constant_value.flat[0])
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * constant_value,
+                    name=keras_name
+                )(variable_input)
         else:
-            # If the constant tensor has varying values, we need to embed it
-            logger.warning('Constant tensor has varying values; embedding it into the model.')
-            constant_tensor = tf.constant(constant_value)
-            layers[node_name] = keras.layers.Lambda(
-                lambda x: x * constant_tensor,
-                name=keras_name
-            )(variable_input)
+            # Both inputs are constants; compute the result now
+            layers[node_name] = input_0 * input_1
 
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
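Note: swapping the custom layer for keras.layers.Lambda trades explicit serialization for convenience. The Lambda captures the Python objects its function references (here const_val or constant_value), so nothing has to be registered in custom_objects, with the usual caveat that Lambda deserialization leans on the Python environment more than a registered custom layer does. A minimal sketch of the pattern, assuming a scalar constant:

    import keras

    const_val = 2.0  # hypothetical folded scalar
    scale = keras.layers.Lambda(lambda x: x * const_val, name="mul_by_const")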
From 72113e0490be98ea3e5747c15d43109e2106f883 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 12:24:24 +0200
Subject: [PATCH 03/16] fixed eager tensor and dtype errors

---
 onnx2kerastl/elementwise_layers.py | 108 ++++++++++++++++++++++-------
 1 file changed, 83 insertions(+), 25 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 19080a33..fe88bba4 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -147,56 +147,114 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
 
+    # Helper functions
+    def is_uniform(tensor):
+        if isinstance(tensor, EagerTensor):
+            tensor_np = tensor.numpy()
+        elif is_numpy(tensor):
+            tensor_np = tensor
+        else:
+            return False
+        return np.all(tensor_np == tensor_np.flat[0])
+
+    def get_scalar_value(tensor):
+        if isinstance(tensor, EagerTensor):
+            tensor_np = tensor.numpy()
+            return tensor_np.flat[0]
+        elif is_numpy(tensor):
+            return tensor.flat[0]
+        else:
+            raise ValueError('Cannot get scalar value from non-constant tensor')
+
     try:
         if not input_0_is_constant and not input_1_is_constant:
             mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
             layers[node_name] = mul([input_0, input_1])
         else:
+            # One or both inputs are constants
             raise ValueError('Operands are different.')
-
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.')
 
-        if input_0_is_constant and not input_1_is_constant:
-            # input_0 is constant, input_1 is variable
-            constant_value = input_0
-            variable_input = input_1
+        if input_0_is_constant and input_1_is_constant:
+            # Both inputs are constants
+            if is_uniform(input_0) and is_uniform(input_1):
+                const_val_0 = get_scalar_value(input_0)
+                const_val_1 = get_scalar_value(input_1)
+                const_val = const_val_0 * const_val_1
+
+                tensor_shape = input_0.shape  # Assuming both have the same shape
+                tensor_dtype = input_0.dtype if isinstance(input_0, EagerTensor) else input_1.dtype
+
+                # Ensure const_val is a Python scalar
+                if isinstance(const_val, np.ndarray):
+                    const_val = const_val.item()
+
+                def generate_constant_tensor(_):
+                    return tf.fill(tensor_shape, const_val, dtype=tensor_dtype)
 
-            if np.all(constant_value == constant_value.flat[0]):
-                # Constant tensor has the same value throughout
-                const_val = float(constant_value.flat[0])
                 layers[node_name] = keras.layers.Lambda(
-                    lambda x: x * const_val,
+                    generate_constant_tensor,
                     name=keras_name
-                )(variable_input)
+                )(None)
             else:
-                # Cannot avoid embedding the constant tensor
+                logger.warning('Both constants have varying values; cannot avoid embedding them.')
+                # Proceed by embedding the constant tensor (may increase model size)
+                const_value = input_0 * input_1
+
+                # Convert EagerTensor to NumPy array if necessary
+                if isinstance(const_value, EagerTensor):
+                    const_value = const_value.numpy()
+
+                def return_constant_tensor(_):
+                    return tf.constant(const_value)
+
                 layers[node_name] = keras.layers.Lambda(
-                    lambda x: x * constant_value,
+                    return_constant_tensor,
                     name=keras_name
-                )(variable_input)
+                )(None)
+        else:
+            # One input is constant, the other is variable
+            if input_0_is_constant:
+                constant_value = input_0
+                variable_input = input_1
+            else:
+                constant_value = input_1
+                variable_input = input_0
 
-        elif not input_0_is_constant and input_1_is_constant:
-            # input_0 is variable, input_1 is constant
-            constant_value = input_1
-            variable_input = input_0
+            variable_dtype = variable_input.dtype
+
+            if is_uniform(constant_value):
+                const_val = get_scalar_value(constant_value)
+                # Ensure const_val is a Python scalar
+                if isinstance(const_val, np.ndarray):
+                    const_val = const_val.item()
+
+                # Cast const_val to the variable's dtype using NumPy
+                const_val = variable_dtype.as_numpy_dtype(const_val)
 
-            if np.all(constant_value == constant_value.flat[0]):
-                # Constant tensor has the same value throughout
-                const_val = float(constant_value.flat[0])
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: x * const_val,
                     name=keras_name
                 )(variable_input)
            else:
-                # Cannot avoid embedding the constant tensor
+                logger.warning('Constant tensor has varying values; embedding it into the model.')
+
+                # Convert EagerTensor to NumPy array if necessary
+                if isinstance(constant_value, EagerTensor):
+                    constant_value = constant_value.numpy()
+
+                # Avoid capturing the NumPy array directly in the lambda function
+                def mul_with_constant(x, const=constant_value):
+                    const_tensor = tf.constant(const, dtype=variable_dtype)
+                    return x * const_tensor
+
                 layers[node_name] = keras.layers.Lambda(
-                    lambda x: x * constant_value,
+                    mul_with_constant,
                     name=keras_name
                 )(variable_input)
-        else:
-            # Both inputs are constants; compute the result now
-            layers[node_name] = input_0 * input_1
+
 
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
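Note: the tf.fill call introduced in this patch passes a dtype keyword, but tf.fill's signature is tf.fill(dims, value, name=None) — it has no dtype argument and infers the dtype from value, so this branch would raise a TypeError if it were ever traced. A sketch of one way to pin the dtype (an illustrative helper, not what the patch does):

    import tensorflow as tf

    def fill_with_dtype(shape, value, dtype):
        # cast the scalar first; tf.fill then inherits its dtype
        return tf.fill(shape, tf.cast(value, dtype))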
From f540b7a8eda2699da2a55d64a3f1396b0d1bddd8 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 12:38:03 +0200
Subject: [PATCH 04/16] added kwargs

---
 onnx2kerastl/elementwise_layers.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index fe88bba4..2790c282 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -190,7 +190,7 @@ def get_scalar_value(tensor):
                 if isinstance(const_val, np.ndarray):
                     const_val = const_val.item()
 
-                def generate_constant_tensor(_):
+                def generate_constant_tensor(inputs, **kwargs):
                     return tf.fill(tensor_shape, const_val, dtype=tensor_dtype)
 
                 layers[node_name] = keras.layers.Lambda(
@@ -206,7 +206,7 @@ def generate_constant_tensor(_):
                 if isinstance(const_value, EagerTensor):
                     const_value = const_value.numpy()
 
-                def return_constant_tensor(_):
+                def return_constant_tensor(inputs, **kwargs):
                     return tf.constant(const_value)
 
                 layers[node_name] = keras.layers.Lambda(
@@ -245,7 +245,7 @@ def return_constant_tensor(_):
                 constant_value = constant_value.numpy()
 
             # Avoid capturing the NumPy array directly in the lambda function
-            def mul_with_constant(x, const=constant_value):
+            def mul_with_constant(x, const=constant_value, **kwargs):
                 const_tensor = tf.constant(const, dtype=variable_dtype)
                 return x * const_tensor
 
@@ -254,7 +254,6 @@ def mul_with_constant(x, const=constant_value):
                 name=keras_name
             )(variable_input)
 
-
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
From 3ea820ddfaa088265d6d362a58b9cd3926e3cdd4 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 13:38:37 +0200
Subject: [PATCH 05/16] gitignore + convert eager tensor to numpy

---
 .gitignore                         |   1 +
 onnx2kerastl/elementwise_layers.py | 107 +++++++----------------------
 2 files changed, 26 insertions(+), 82 deletions(-)

diff --git a/.gitignore b/.gitignore
index 34dbe605..83ce9f90 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ __pycache__/
 *.h5
 *.npy
 *.pth
+*.log
 dist/
 *.DS_Store
 .python-version
diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 2790c282..fcb6dc59 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -147,113 +147,56 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
 
-    # Helper functions
-    def is_uniform(tensor):
-        if isinstance(tensor, EagerTensor):
-            tensor_np = tensor.numpy()
-        elif is_numpy(tensor):
-            tensor_np = tensor
-        else:
-            return False
-        return np.all(tensor_np == tensor_np.flat[0])
-
-    def get_scalar_value(tensor):
-        if isinstance(tensor, EagerTensor):
-            tensor_np = tensor.numpy()
-            return tensor_np.flat[0]
-        elif is_numpy(tensor):
-            return tensor.flat[0]
-        else:
-            raise ValueError('Cannot get scalar value from non-constant tensor')
-
     try:
         if not input_0_is_constant and not input_1_is_constant:
             mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
             layers[node_name] = mul([input_0, input_1])
         else:
             raise ValueError('Operands are different.')
+
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.')
 
-        if input_0_is_constant and input_1_is_constant:
-            # Both inputs are constants
-            if is_uniform(input_0) and is_uniform(input_1):
-                const_val_0 = get_scalar_value(input_0)
-                const_val_1 = get_scalar_value(input_1)
-                const_val = const_val_0 * const_val_1
-
-                tensor_shape = input_0.shape  # Assuming both have the same shape
-                tensor_dtype = input_0.dtype if isinstance(input_0, EagerTensor) else input_1.dtype
-
-                # Ensure const_val is a Python scalar
-                if isinstance(const_val, np.ndarray):
-                    const_val = const_val.item()
-
-                def generate_constant_tensor(inputs, **kwargs):
-                    return tf.fill(tensor_shape, const_val, dtype=tensor_dtype)
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable
+            constant_value = np.asarray(input_0)
+            variable_input = input_1
 
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = float(constant_value.flat[0])
                 layers[node_name] = keras.layers.Lambda(
-                    generate_constant_tensor,
+                    lambda x: x * const_val,
                     name=keras_name
-                )(None)
+                )(variable_input)
             else:
-                logger.warning('Both constants have varying values; cannot avoid embedding them.')
-                # Proceed by embedding the constant tensor (may increase model size)
-                const_value = input_0 * input_1
-
-                # Convert EagerTensor to NumPy array if necessary
-                if isinstance(const_value, EagerTensor):
-                    const_value = const_value.numpy()
-
-                def return_constant_tensor(inputs, **kwargs):
-                    return tf.constant(const_value)
-
+                # Cannot avoid embedding the constant tensor
                 layers[node_name] = keras.layers.Lambda(
-                    return_constant_tensor,
+                    lambda x: x * constant_value,
                     name=keras_name
-                )(None)
-        else:
-            # One input is constant, the other is variable
-            if input_0_is_constant:
-                constant_value = input_0
-                variable_input = input_1
-            else:
-                constant_value = input_1
-                variable_input = input_0
+                )(variable_input)
 
-            variable_dtype = variable_input.dtype
-
-            if is_uniform(constant_value):
-                const_val = get_scalar_value(constant_value)
-                # Ensure const_val is a Python scalar
-                if isinstance(const_val, np.ndarray):
-                    const_val = const_val.item()
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant
+            constant_value = np.asarray(input_1)
+            variable_input = input_0
 
-                # Cast const_val to the variable's dtype using NumPy
-                const_val = variable_dtype.as_numpy_dtype(const_val)
-
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = float(constant_value.flat[0])
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: x * const_val,
                     name=keras_name
                 )(variable_input)
             else:
-                logger.warning('Constant tensor has varying values; embedding it into the model.')
-
-                # Convert EagerTensor to NumPy array if necessary
-                if isinstance(constant_value, EagerTensor):
-                    constant_value = constant_value.numpy()
-
-                # Avoid capturing the NumPy array directly in the lambda function
-                def mul_with_constant(x, const=constant_value, **kwargs):
-                    const_tensor = tf.constant(const, dtype=variable_dtype)
-                    return x * const_tensor
-
+                # Cannot avoid embedding the constant tensor
                 layers[node_name] = keras.layers.Lambda(
-                    mul_with_constant,
+                    lambda x: x * constant_value,
                     name=keras_name
                 )(variable_input)
+        else:
+            # Both inputs are constants; compute the result now
+            layers[node_name] = input_0 * input_1
 
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
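Note: the strategy PATCH 05 settles on is that when the ONNX constant repeats a single value, it is folded to a Python scalar so the Lambda captures one float instead of a full array, which keeps the array out of the saved model; only non-uniform constants get embedded. A small sketch of the uniformity check used above:

    import numpy as np

    constant = np.full((1, 3, 224, 224), 0.5, dtype=np.float32)
    if np.all(constant == constant.flat[0]):
        constant = float(constant.flat[0])  # 0.5; the big array is never serialized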
From eab9c68bc5e92c5b10d43a9fc4f85bb70d1fa9b0 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 14:44:43 +0200
Subject: [PATCH 06/16] deleted custom layer

---
 onnx2kerastl/customonnxlayer/__init__.py  |  2 --
 .../customonnxlayer/onnxconstantmul.py    | 20 -------------------
 2 files changed, 22 deletions(-)
 delete mode 100644 onnx2kerastl/customonnxlayer/onnxconstantmul.py

diff --git a/onnx2kerastl/customonnxlayer/__init__.py b/onnx2kerastl/customonnxlayer/__init__.py
index 6818e488..86d6e194 100644
--- a/onnx2kerastl/customonnxlayer/__init__.py
+++ b/onnx2kerastl/customonnxlayer/__init__.py
@@ -1,10 +1,8 @@
-from onnx2kerastl.customonnxlayer.onnxconstantmul import ONNXMultiplyByConstantLayer
 from onnx2kerastl.customonnxlayer.onnxeinsum import OnnxEinsumLayer
 from onnx2kerastl.customonnxlayer.onnxlstm import OnnxLSTM
 
 onnx_custom_objects_map = {
     "OnnxLSTM": OnnxLSTM,
-    "ONNXMultiplyByConstantLayer": ONNXMultiplyByConstantLayer
 }
 
 onnx_custom_layers = {
diff --git a/onnx2kerastl/customonnxlayer/onnxconstantmul.py b/onnx2kerastl/customonnxlayer/onnxconstantmul.py
deleted file mode 100644
index e33488f6..00000000
--- a/onnx2kerastl/customonnxlayer/onnxconstantmul.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from keras.layers import Layer
-import tensorflow as tf
-
-class ONNXMultiplyByConstantLayer(Layer):
-    def __init__(self, constant_shape, constant_value, **kwargs):
-        super(ONNXMultiplyByConstantLayer, self).__init__(**kwargs)
-        self.constant_shape = constant_shape
-        self.constant_value = constant_value
-
-    def call(self, inputs):
-        constant_tensor = tf.fill(self.constant_shape, self.constant_value)
-        return inputs * constant_tensor
-
-    def get_config(self):
-        config = super(ONNXMultiplyByConstantLayer, self).get_config()
-        config.update({
-            'constant_shape': self.constant_shape,
-            'constant_value': self.constant_value,
-        })
-        return config
\ No newline at end of file
From 539ba32a56d853e71f6328d91ddcc15e9a116969 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 14:44:58 +0200
Subject: [PATCH 07/16] git ignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 83ce9f90..0c84d248 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,4 @@ test/models/custom_conversion_tests
 .vscode/
 
 gallery_models
+.DS_Store
From 185db00f4d18a6c151e5d1b4feefe52567177f40 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 14:45:14 +0200
Subject: [PATCH 08/16] added dtype conversion

---
 onnx2kerastl/elementwise_layers.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index fcb6dc59..9baaa2d5 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -2,7 +2,6 @@
 import keras
 import logging
 
-from onnx2kerastl.customonnxlayer.onnxconstantmul import ONNXMultiplyByConstantLayer
 from .utils import is_numpy, ensure_tf_type
 from .tfops_funcs import tf_tensor_scatter_nd_update, tf_maximum, tf_minimum, tf_cast, tf_expand_dims, tf_repeat,\
     tf_equal, tf_where, tf_round, tf_sign, tf_abs, tf_math_mod, tf_bitwise_left_shift, tf_bitwise_right_shift,\
@@ -159,12 +158,12 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
 
         if input_0_is_constant and not input_1_is_constant:
             # input_0 is constant, input_1 is variable
-            constant_value = np.asarray(input_0)
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
             variable_input = input_1
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
-                const_val = float(constant_value.flat[0])
+                const_val = constant_value.flat[0]
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: x * const_val,
                     name=keras_name
@@ -178,12 +177,12 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
 
         elif not input_0_is_constant and input_1_is_constant:
             # input_0 is variable, input_1 is constant
-            constant_value = np.asarray(input_1)
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
             variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
-                const_val = float(constant_value.flat[0])
+                const_val = constant_value.flat[0]
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: x * const_val,
                     name=keras_name
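Note: the cast added in PATCH 08 matters because TensorFlow binary ops do not promote dtypes: ONNX initializers often arrive as float64 NumPy data, and multiplying such a constant with a float32 Keras tensor fails. A sketch of the failure mode and the cast used here:

    import numpy as np
    import tensorflow as tf

    x = tf.ones((2, 2), dtype=tf.float32)
    const = np.full((2, 2), 2.0)                       # NumPy defaults to float64
    # x * const would raise InvalidArgumentError (float32 vs float64)
    const = np.asarray(tf.cast(const, dtype=x.dtype))  # the patch's cast
    y = x * const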
From 11ca70c5e11010fcdf139e19860d5352138de16f Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 15:07:32 +0200
Subject: [PATCH 09/16] elementwise sub with proper const handling

---
 onnx2kerastl/elementwise_layers.py | 125 ++++++++++++++++++-----------
 1 file changed, 80 insertions(+), 45 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 9baaa2d5..1686c682 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -99,38 +99,6 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
     layers[node_name] = input_0 + input_1
 
 
-# def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
-#     """
-#     Convert element-wise mul.
-#     :param node: current operation node
-#     :param params: operation attributes
-#     :param layers: available keras layers
-#     :param lambda_func: function for keras Lambda layer
-#     :param node_name: internal converter name
-#     :param keras_name: resulting layer name
-#     :return: None
-#     """
-#     logger = logging.getLogger('onnx2keras.mul')
-
-#     if len(node.input) != 2:
-#         raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
-
-#     input_0 = layers[node.input[0]]
-#     input_1 = layers[node.input[1]]
-
-#     input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
-#     input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
-#     try:
-#         if not input_0_is_constant and not input_1_is_constant:
-#             mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul")
-#             layers[node_name] = mul([input_0, input_1])
-#         else:
-#             raise ValueError('Operands are different.')
-
-#     except (IndexError, ValueError):
-#         logger.warning('Failed to use keras.layers.Multiply. Fallback to TF lambda.')
-#         layers[node_name] = input_0 * input_1
-
 def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise mul.
@@ -165,16 +165,44 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
         # Both inputs are constants; compute the result now
         layers[node_name] = input_0 * input_1
 
+# def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
+#     """
+#     Convert element-wise sub.
+#     :param node: current operation node
+#     :param params: operation attributes
+#     :param layers: available keras layers
+#     :param lambda_func: function for keras Lambda layer
+#     :param node_name: internal converter name
+#     :param keras_name: resulting layer name
+#     :return: None
+#     """
+#     logger = logging.getLogger('onnx2keras.sub')

+#     if len(node.input) != 2:
+#         raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
+
+#     input_0 = layers[node.input[0]]
+#     input_1 = layers[node.input[1]]
+#     input_0_is_np = is_numpy(input_0) or isinstance(input_0, EagerTensor)
+#     input_1_is_np = is_numpy(input_1) or isinstance(input_1, EagerTensor)
+
+#     try:
+#         if not input_0_is_np and not input_1_is_np:
+#             sub = keras.layers.Subtract(name=f"{params['cleaned_name']}_sub")
+#             layers[node_name] = sub([input_0, input_1])
+#         else:
+#             raise ValueError('Operands are different.')
+
+#     except (IndexError, ValueError):
+#         logger.warning('Failed to use keras.layers.Subtract. Fallback to TF lambda.')
+#         if input_0_is_np and not input_1_is_np:  # constant - tensor does not parse well
+#             layers[node_name] = - (input_1 - input_0)
+#         else:
+#             layers[node_name] = input_0 - input_1
+
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise sub.
-    :param node: current operation node
-    :param params: operation attributes
-    :param layers: available keras layers
-    :param lambda_func: function for keras Lambda layer
-    :param node_name: internal converter name
-    :param keras_name: resulting layer name
-    :return: None
     """
     logger = logging.getLogger('onnx2keras.sub')
 
@@ -215,24 +211,63 @@ def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_
 
     input_0 = layers[node.input[0]]
     input_1 = layers[node.input[1]]
-    input_0_is_np = is_numpy(input_0) or isinstance(input_0, EagerTensor)
-    input_1_is_np = is_numpy(input_1) or isinstance(input_1, EagerTensor)
+
+    input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
+    input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
 
     try:
-        if not input_0_is_np and not input_1_is_np:
+        if not input_0_is_constant and not input_1_is_constant:
             sub = keras.layers.Subtract(name=f"{params['cleaned_name']}_sub")
             layers[node_name] = sub([input_0, input_1])
         else:
             raise ValueError('Operands are different.')
 
     except (IndexError, ValueError):
-        logger.warning('Failed to use keras.layers.Subtract. Fallback to TF lambda.')
-        if input_0_is_np and not input_1_is_np:  # constant - tensor does not parse well
-            layers[node_name] = - (input_1 - input_0)
+        logger.warning('Failed to use keras.layers.Subtract. Fallback to Lambda layer.')
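Note (on the Sub rewrite completed below): unlike Add and Mul, Sub is not commutative, which is why this patch keeps two separate constant branches — constant - x when the constant is the first ONNX input, x - constant when it is the second. In sketch form:

    def sub_with_const(variable, const, const_is_first):
        # ONNX Sub(a, b) == a - b, so operand order must survive the conversion
        return const - variable if const_is_first else variable - const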
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable: constant - variable
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+            variable_input = input_1
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: const_val - x,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: constant_value - x,
+                    name=keras_name
+                )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant: variable - constant
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+            variable_input = input_0
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x - const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x - constant_value,
+                    name=keras_name
+                )(variable_input)
         else:
+            # Both inputs are constants; compute the result now
             layers[node_name] = input_0 - input_1
 
+
 def convert_min(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert Min layer
From fcbdb4edb8ba8bb7209f418830b174549c21698e Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 15:31:12 +0200
Subject: [PATCH 10/16] elementwise add const handling

---
 onnx2kerastl/elementwise_layers.py | 107 ++++++++++++++---------------
 1 file changed, 52 insertions(+), 55 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 1686c682..70e9c3a2 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -61,44 +61,75 @@ def target_layer(x):
 def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise add.
-    :param node: current operation node
-    :param params: operation attributes
-    :param layers: available keras layers
-    :param lambda_func: function for keras Lambda layer
-    :param node_name: internal converter name
-    :param keras_name: resulting layer name
-    :return: None
     """
     logger = logging.getLogger('onnx2keras.add')
 
     if len(node.input) != 2:
-        raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
+        raise AttributeError('Number of inputs is not equal to 2 for element-wise layer')
 
     input_0 = layers[node.input[0]]
     input_1 = layers[node.input[1]]
 
-    input_0_is_non_keras = is_numpy(input_0) or isinstance(input_0, EagerTensor)
-    input_1_is_non_keras = is_numpy(input_1) or isinstance(input_1, EagerTensor)
+    input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor)
+    input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor)
+
     try:
-        if not input_0_is_non_keras and not input_1_is_non_keras:
-            to_add = input_1
-            # We probably need to seperate two possibilities here. Currently we only deal with second option
-            # [Batch] + [Batch,1] -> [Batch,1]
-            # [Not-Batch] + [Not,Batch,1] -> [Not-batch, Not-batch]
+        if not input_0_is_constant and not input_1_is_constant:
+            # Both inputs are variables
             if len(input_0.shape) != len(input_1.shape):
-                layers[node_name] = tf_add(input_0, to_add, tf_name=f"{params['cleaned_name']}_add")
+                # Use TensorFlow add to handle shape differences
+                layers[node_name] = tf_add(input_0, input_1, tf_name=f"{params['cleaned_name']}_add")
             else:
-                layers[node_name] = keras.layers.Add(name=f"{params['cleaned_name']}_add")([input_0, to_add])
+                # Use Keras Add layer
+                layers[node_name] = keras.layers.Add(name=f"{params['cleaned_name']}_add")([input_0, input_1])
         else:
             raise ValueError('Operands are different.')
 
     except (IndexError, ValueError):
-        logger.warning('Failed to use keras.layers.Add. Fallback to TF lambda.')
-        if input_0_is_non_keras:
-            layers[node_name] = input_1 + input_0
+        logger.warning('Failed to use keras.layers.Add. Fallback to Lambda layer.')
+
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+            variable_input = input_1
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + constant_value,
+                    name=keras_name
+                )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+            variable_input = input_0
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + constant_value,
+                    name=keras_name
+                )(variable_input)
         else:
+            # Both inputs are constants; compute the result now
             layers[node_name] = input_0 + input_1
 
+
 def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise mul.
@@ -164,41 +195,7 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
         else:
             # Both inputs are constants; compute the result now
             layers[node_name] = input_0 * input_1
-
-# def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
-#     """
-#     Convert element-wise sub.
-#     :param node: current operation node
-#     :param params: operation attributes
-#     :param layers: available keras layers
-#     :param lambda_func: function for keras Lambda layer
-#     :param node_name: internal converter name
-#     :param keras_name: resulting layer name
-#     :return: None
-#     """
-#     logger = logging.getLogger('onnx2keras.sub')
-
-#     if len(node.input) != 2:
-#         raise AttributeError('Number of inputs is not equal 2 for element-wise layer')
-
-#     input_0 = layers[node.input[0]]
-#     input_1 = layers[node.input[1]]
-#     input_0_is_np = is_numpy(input_0) or isinstance(input_0, EagerTensor)
-#     input_1_is_np = is_numpy(input_1) or isinstance(input_1, EagerTensor)
-
-#     try:
-#         if not input_0_is_np and not input_1_is_np:
-#             sub = keras.layers.Subtract(name=f"{params['cleaned_name']}_sub")
-#             layers[node_name] = sub([input_0, input_1])
-#         else:
-#             raise ValueError('Operands are different.')
-
-#     except (IndexError, ValueError):
-#         logger.warning('Failed to use keras.layers.Subtract. Fallback to TF lambda.')
-#         if input_0_is_np and not input_1_is_np:  # constant - tensor does not parse well
-#             layers[node_name] = - (input_1 - input_0)
-#         else:
-#             layers[node_name] = input_0 - input_1
+
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise sub.
From caa9da928dcc809123d48bd92233f2b5db17c008 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 16:13:14 +0200
Subject: [PATCH 11/16] up version

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 2ef41f9b..2f0d0975 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "onnx2kerastl"
-version = "0.0.149"
+version = "0.0.150"
 description = ""
 authors = ["dorhar "]
 license = "MIT"
From f065566dc20babb61cda6f13ac9a1daa940430d9 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 3 Nov 2024 16:15:27 +0200
Subject: [PATCH 12/16] docstrings

---
 onnx2kerastl/elementwise_layers.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 70e9c3a2..ee29a377 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -61,6 +61,13 @@ def target_layer(x):
 def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise add.
+    :param node: current operation node
+    :param params: operation attributes
+    :param layers: available keras layers
+    :param lambda_func: function for keras Lambda layer
+    :param node_name: internal converter name
+    :param keras_name: resulting layer name
+    :return: None
     """
     logger = logging.getLogger('onnx2keras.add')
 
@@ -133,6 +140,13 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
 def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise mul.
+    :param node: current operation node
+    :param params: operation attributes
+    :param layers: available keras layers
+    :param lambda_func: function for keras Lambda layer
+    :param node_name: internal converter name
+    :param keras_name: resulting layer name
+    :return: None
     """
     logger = logging.getLogger('onnx2keras.mul')
 
@@ -200,6 +214,13 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
 def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name):
     """
     Convert element-wise sub.
+    :param node: current operation node
+    :param params: operation attributes
+    :param layers: available keras layers
+    :param lambda_func: function for keras Lambda layer
+    :param node_name: internal converter name
+    :param keras_name: resulting layer name
+    :return: None
     """
     logger = logging.getLogger('onnx2keras.sub')
From 8a3b71754f26b5c5d1c7a0c0f9fb299ed8e1041b Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Mon, 4 Nov 2024 09:23:37 +0200
Subject: [PATCH 13/16] padding

---
 onnx2kerastl/padding_layers.py | 26 +++++++++++++++-----------
 pyproject.toml                 |  2 +-
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/onnx2kerastl/padding_layers.py b/onnx2kerastl/padding_layers.py
index 72a8b165..cca51044 100644
--- a/onnx2kerastl/padding_layers.py
+++ b/onnx2kerastl/padding_layers.py
@@ -80,18 +80,22 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
         )
         layers[node_name] = padding_layer(input_0)
     elif params['mode'] == 'reflect':
+        if pads.shape[0] == 6:
+            result = tf_pad(input_0, [[pads[0], pads[3]], [pads[1], pads[4]], [pads[2], pads[5]]], mode='REFLECT',
+                            tf_name=f"{params['cleaned_name']}_reflect_pad")
+            layers[node_name] = result
+        else:
+            def target_layer(x, pads=pads):
+                if pads.shape[0] == 8:
+                    layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')
+                else:
+                    logger.warning("Caution - no test yet")
+                    layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
+                return layer
 
-        def target_layer(x, pads=pads):
-            if pads.shape[0] == 8:
-                layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')
-            else:
-                logger.warning("Caution - no test yet")
-                layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
-            return layer
-
-        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect")
-        layers[node_name] = lambda_layer(input_0)
-        lambda_func[keras_name] = target_layer
+            lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect")
+            layers[node_name] = lambda_layer(input_0)
+            lambda_func[keras_name] = target_layer
 
     elif params['mode'] == 'edge':
         def target_layer(x, pads=pads):
diff --git a/pyproject.toml b/pyproject.toml
index 2f0d0975..4c4b3c81 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "onnx2kerastl"
-version = "0.0.150"
+version = "0.0.151"
 description = ""
 authors = ["dorhar "]
 license = "MIT"
From 7a9a626d2f3abea18e575eead373d7bfb169abe5 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 10 Nov 2024 11:10:29 +0200
Subject: [PATCH 14/16] with padding fix

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 4c4b3c81..7b3e811c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "onnx2kerastl"
-version = "0.0.151"
+version = "0.0.152"
 description = ""
 authors = ["dorhar "]
 license = "MIT"
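Note: for context on the padding change, ONNX Pad stores its pads attribute as [x1_begin, x2_begin, ..., xn_begin, x1_end, ..., xn_end], while tf.pad expects one [begin, end] pair per axis — hence the [[pads[0], pads[3]], [pads[1], pads[4]], [pads[2], pads[5]]] mapping for a length-6 pads. A general form of the conversion (an illustrative helper, not part of the patch):

    def onnx_pads_to_tf(pads):
        n = len(pads) // 2
        return [[pads[i], pads[i + n]] for i in range(n)]

    onnx_pads_to_tf([1, 2, 3, 4, 5, 6])  # -> [[1, 4], [2, 5], [3, 6]]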
From 04f7e849caf856ca73b68551bf7490432bec36cc Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 10 Nov 2024 11:26:16 +0200
Subject: [PATCH 15/16] remove duplicated code

---
 onnx2kerastl/elementwise_layers.py | 89 ++++++++----------------------
 1 file changed, 22 insertions(+), 67 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index ee29a377..38e373b7 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -94,29 +94,14 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Add. Fallback to Lambda layer.')
 
-        if input_0_is_constant and not input_1_is_constant:
-            # input_0 is constant, input_1 is variable
-            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-            variable_input = input_1
-
-            if np.all(constant_value == constant_value.flat[0]):
-                # Constant tensor has the same value throughout
-                const_val = constant_value.flat[0]
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x + const_val,
-                    name=keras_name
-                )(variable_input)
+        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
+            # One input is constant and the other is variable
+            if input_0_is_constant:
+                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+                variable_input = input_1
             else:
-                # Embedding the constant tensor
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x + constant_value,
-                    name=keras_name
-                )(variable_input)
-
-        elif not input_0_is_constant and input_1_is_constant:
-            # input_0 is variable, input_1 is constant
-            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-            variable_input = input_0
+                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+                variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
@@ -169,29 +154,14 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.')
 
-        if input_0_is_constant and not input_1_is_constant:
-            # input_0 is constant, input_1 is variable
-            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-            variable_input = input_1
-
-            if np.all(constant_value == constant_value.flat[0]):
-                # Constant tensor has the same value throughout
-                const_val = constant_value.flat[0]
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x * const_val,
-                    name=keras_name
-                )(variable_input)
+        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
+            # One input is constant and the other is variable
+            if input_0_is_constant:
+                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+                variable_input = input_1
             else:
-                # Cannot avoid embedding the constant tensor
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x * constant_value,
-                    name=keras_name
-                )(variable_input)
-
-        elif not input_0_is_constant and input_1_is_constant:
-            # input_0 is variable, input_1 is constant
-            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-            variable_input = input_0
+                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+                variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
@@ -213,24 +183,9 @@ def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Subtract. Fallback to Lambda layer.')
 
-        if input_0_is_constant and not input_1_is_constant:
-            # input_0 is constant, input_1 is variable: constant - variable
-            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-            variable_input = input_1
+        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
+            # One input is constant and the other is variable
+            if input_0_is_constant:
+                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+                variable_input = input_1
+            else:
+                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+                variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
                 const_val = constant_value.flat[0]
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: const_val - x,
                     name=keras_name
                 )(variable_input)
             else:
                 # Cannot avoid embedding the constant tensor
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: constant_value - x,
                     name=keras_name
                 )(variable_input)
-
-        elif not input_0_is_constant and input_1_is_constant:
-            # input_0 is variable, input_1 is constant: variable - constant
-            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-            variable_input = input_0
-
-            if np.all(constant_value == constant_value.flat[0]):
-                # Constant tensor has the same value throughout
-                const_val = constant_value.flat[0]
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x - const_val,
-                    name=keras_name
-                )(variable_input)
-            else:
-                # Cannot avoid embedding the constant tensor
-                layers[node_name] = keras.layers.Lambda(
-                    lambda x: x - constant_value,
-                    name=keras_name
-                )(variable_input)
         else:
             # Both inputs are constants; compute the result now
             layers[node_name] = input_0 - input_1
From b3f182accb36297a0b5ab6282f41f47dfca25 Mon Sep 17 00:00:00 2001
From: ranhomri
Date: Sun, 10 Nov 2024 12:02:15 +0200
Subject: [PATCH 16/16] bugfix

---
 onnx2kerastl/elementwise_layers.py | 89 ++++++++++++++++++++++--------
 1 file changed, 67 insertions(+), 22 deletions(-)

diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py
index 38e373b7..ee29a377 100644
--- a/onnx2kerastl/elementwise_layers.py
+++ b/onnx2kerastl/elementwise_layers.py
@@ -94,14 +94,29 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Add. Fallback to Lambda layer.')
 
-        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
-            # One input is constant and the other is variable
-            if input_0_is_constant:
-                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-                variable_input = input_1
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+            variable_input = input_1
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + const_val,
+                    name=keras_name
+                )(variable_input)
             else:
-                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-                variable_input = input_0
+                # Embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x + constant_value,
+                    name=keras_name
+                )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+            variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
@@ -154,14 +169,29 @@ def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.')
 
-        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
-            # One input is constant and the other is variable
-            if input_0_is_constant:
-                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-                variable_input = input_1
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+            variable_input = input_1
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * const_val,
+                    name=keras_name
+                )(variable_input)
             else:
-                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-                variable_input = input_0
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x * constant_value,
+                    name=keras_name
+                )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+            variable_input = input_0
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
@@ -183,9 +213,24 @@ def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_
     except (IndexError, ValueError):
         logger.warning('Failed to use keras.layers.Subtract. Fallback to Lambda layer.')
 
-        if (input_0_is_constant and not input_1_is_constant) or (not input_0_is_constant and input_1_is_constant):
-            # One input is constant and the other is variable
-            if input_0_is_constant:
-                constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
-                variable_input = input_1
-            else:
-                constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
-                variable_input = input_0
+        if input_0_is_constant and not input_1_is_constant:
+            # input_0 is constant, input_1 is variable: constant - variable
+            constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype))
+            variable_input = input_1
 
             if np.all(constant_value == constant_value.flat[0]):
                 # Constant tensor has the same value throughout
                 const_val = constant_value.flat[0]
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: const_val - x,
                     name=keras_name
                 )(variable_input)
             else:
                 # Cannot avoid embedding the constant tensor
                 layers[node_name] = keras.layers.Lambda(
                     lambda x: constant_value - x,
                     name=keras_name
                 )(variable_input)
+
+        elif not input_0_is_constant and input_1_is_constant:
+            # input_0 is variable, input_1 is constant: variable - constant
+            constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype))
+            variable_input = input_0
+
+            if np.all(constant_value == constant_value.flat[0]):
+                # Constant tensor has the same value throughout
+                const_val = constant_value.flat[0]
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x - const_val,
+                    name=keras_name
+                )(variable_input)
+            else:
+                # Cannot avoid embedding the constant tensor
+                layers[node_name] = keras.layers.Lambda(
+                    lambda x: x - constant_value,
+                    name=keras_name
+                )(variable_input)
         else:
             # Both inputs are constants; compute the result now
             layers[node_name] = input_0 - input_1
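Note: the bug PATCH 16 fixes is visible by comparing it with PATCH 15. The merged Sub branch in PATCH 15 always computed constant - x, even when the constant was the second ONNX operand (where x - constant is required); Add and Mul were unaffected because they are commutative. A minimal check of the asymmetry:

    import numpy as np

    x, c = np.float32(5.0), np.float32(2.0)
    assert x - c == np.float32(3.0)    # constant is the second operand
    assert c - x == np.float32(-3.0)   # constant is the first operand; not interchangeable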