
Commit

resolve pylint errors
jignparm committed Sep 27, 2019
1 parent a8a28ee commit 48adb6d
Showing 9 changed files with 20 additions and 16 deletions.
2 changes: 1 addition & 1 deletion tests/backend_test_base.py
@@ -21,7 +21,7 @@
 from tf2onnx import optimizer
 
 
-# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
+# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test, import-outside-toplevel
 
 class Tf2OnnxBackendTestBase(unittest.TestCase):
     def setUp(self):
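
For context: import-outside-toplevel (C0415) is a pylint check, introduced around pylint 2.4, that flags import statements nested inside functions or methods. The test code defers some imports deliberately, so this commit suppresses the check rather than moving the imports. A minimal standalone sketch of the pattern being suppressed (the function below is illustrative, not from this repository):

    # pylint: disable=import-outside-toplevel
    def get_tf_version():
        """Defer the heavy import so that merely importing this module stays cheap."""
        import tensorflow as tf  # would trigger C0415 without the disable above
        return tf.__version__
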
1 change: 1 addition & 0 deletions tests/common.py
@@ -15,6 +15,7 @@
 import tensorflow as tf
 from tf2onnx import constants, logging, utils
 
+# pylint: disable=import-outside-toplevel
 __all__ = [
     "TestConfig",
     "get_test_config",
2 changes: 1 addition & 1 deletion tests/run_pretrained_models.py
@@ -30,7 +30,7 @@
 from tf2onnx import loader, logging, optimizer, utils
 from tf2onnx.tfonnx import process_tf_graph
 
-# pylint: disable=broad-except,logging-not-lazy,unused-argument,unnecessary-lambda
+# pylint: disable=broad-except,logging-not-lazy,unused-argument,unnecessary-lambda,import-outside-toplevel
 
 logger = logging.getLogger("run_pretrained")

16 changes: 9 additions & 7 deletions tf2onnx/graph.py
@@ -40,8 +40,8 @@ def __init__(self, node, graph, skip_conversion=False):
         """
         self._op = node
         self.graph = graph
-        self._input = [i for i in node.input]
-        self._output = [i for i in node.output]
+        self._input = list(node.input)
+        self._output = list(node.output)
         self._attr = {}
 
         graph.set_node_by_name(self)
@@ -306,11 +306,11 @@ def set_body_graph_as_attr(self, attr_name, graph):
 
     def update_proto(self):
         """Update protobuf from internal structure."""
-        nodes = [n for n in self._op.input]
+        nodes = list(self._op.input)
         for node in nodes:
             self._op.input.remove(node)
         self._op.input.extend(self.input)
-        nodes = [n for n in self._op.output]
+        nodes = list(self._op.output)
         for node in nodes:
             self._op.output.remove(node)
         self._op.output.extend(self.output)
@@ -325,7 +325,7 @@ def update_proto(self):
             graph_proto = sub_graph.make_graph("graph for " + self.name + " " + attr_name)
             self.set_attr(attr_name, graph_proto)
 
-        attr = [a for a in self.attr_onnx.values()]
+        attr = list(self.attr_onnx.values())
         if attr:
             self._op.attribute.extend(attr)
 
@@ -772,10 +772,12 @@ def get_shape(self, name):
         if shape:
             for i, v in enumerate(shape):
                 if v is None:
+                    # pylint: disable=unsupported-assignment-operation
                     shape[i] = -1
             # hack to allow utils.ONNX_UNKNOWN_DIMENSION to override batchsize if needed.
             # default is -1.
             if shape[0] == -1:
+                # pylint: disable=unsupported-assignment-operation
                 shape[0] = utils.ONNX_UNKNOWN_DIMENSION
             return shape
         return shape
@@ -839,7 +841,7 @@ def _get_unvisited_child(g, node, not_visited):
         label = [-1 for _ in range(n)]
         stack = []
         in_stack = dict()
-        not_visited = dict.fromkeys([i for i in range(n)])
+        not_visited = dict.fromkeys(range(n))
        label_counter = n - 1
 
        while not_visited:
@@ -884,7 +886,7 @@ def make_graph(self, doc, graph_name="tf2onnx"):
             if op.is_const():
                 const_ops.append(op)
                 continue
-            elif op.is_graph_input():
+            if op.is_graph_input():
                 if op not in self._order_sensitive_inputs:
                     order_non_sensitive_placeholders.append(op)
                 continue
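
Most of the graph.py edits replace identity comprehensions such as [i for i in node.input] with direct constructor calls, which is the rewrite suggested by pylint's unnecessary-comprehension check (R1721); the elif after continue in make_graph is flattened for the related no-else-continue check, and the two inline disables in get_shape silence a case where pylint cannot prove that shape supports item assignment. The comprehension and the constructor call build the same copy; a small standalone illustration:

    proto_inputs = ["a:0", "b:0"]

    copy_a = [i for i in proto_inputs]  # flagged by R1721
    copy_b = list(proto_inputs)         # preferred: same elements, new list, clearer intent

    assert copy_a == copy_b and copy_b is not proto_inputs
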
3 changes: 1 addition & 2 deletions tf2onnx/optimizer/transpose_optimizer.py
@@ -227,8 +227,7 @@ def _get_input_index_for_trans(self, node, trans):
         for i in node.input:
             if i == trans.output[0]:
                 break
-            else:
-                input_index += 1
+            input_index += 1
         return input_index
 
     # the assumption is: both node and trans have only 1 output
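
This hunk addresses pylint's no-else-break check (R1723): when an if branch ends in break, wrapping the rest of the loop body in else is redundant, since that code only runs when the branch was not taken. Behavior is unchanged. A standalone sketch of the same search-for-index idiom:

    def index_of(items, target):
        """Return the position of target in items, or len(items) if absent."""
        idx = 0
        for item in items:
            if item == target:
                break
            idx += 1  # no else needed: this line only runs if the branch above did not break
        return idx

    assert index_of(["x", "y", "z"], "y") == 1
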
2 changes: 1 addition & 1 deletion tf2onnx/rewriter/transpose_rewriter.py
@@ -26,7 +26,7 @@ def rewrite_transpose(g, ops):
     for match in match_results:
         output = match.get_op('output')
         shape = g.get_shape(output.input[0])
-        dims = [i for i in range(len(shape) - 1, -1, -1)]
+        dims = range(len(shape) - 1, -1, -1)
         output.set_attr("perm", dims)
         g.remove_input(output, output.input[1])
         to_delete = [n for n in match.get_nodes() if n != output]
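
Note the behavioral nuance here: dims is now a lazy range object rather than a materialized list, relying on the attribute setter to accept any iterable of ints. The value itself is the reversed-axis permutation that tf.transpose uses by default. A quick standalone check of what it produces:

    shape = [1, 3, 224, 224]  # e.g. a rank-4 NCHW tensor
    perm = list(range(len(shape) - 1, -1, -1))
    assert perm == [3, 2, 1, 0]  # full axis reversal, the TF default permutation
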
2 changes: 1 addition & 1 deletion tf2onnx/schemas.py
@@ -136,7 +136,7 @@ def build_onnx_op(node):
         copied_sub_graph = copy.deepcopy(sub_graph)
         graph_proto = copied_sub_graph.make_graph("graph for " + node.name + " " + attr_name)
         attr.append(helper.make_attribute(attr_name, graph_proto))
-    attr.extend([a for a in node.attr_onnx.values()])
+    attr.extend(node.attr_onnx.values())
     if attr:
         onnx_node.attribute.extend(attr)
     return onnx_node
4 changes: 2 additions & 2 deletions tf2onnx/tfonnx.py
@@ -233,7 +233,7 @@ def rewrite_incomplete_type_support(g, ops, impacted_ops):
         "Where": [0],  # Where's first input is bool
     }
     new_ops = []
-    org_ops = [n for n in ops]
+    org_ops = list(ops)
     for op in org_ops:
         if op.type in impacted_ops:
             cast_inserted = []
@@ -312,7 +312,7 @@ def tensorflow_onnx_mapping(g, ops_mapping):
     unmapped_op = collections.Counter()
     exceptions = []
 
-    ops = [n for n in g.get_nodes()]
+    ops = list(g.get_nodes())
     for node in ops:
         logger.debug("Process node: %s\n%s", node.name, node.summary)

4 changes: 3 additions & 1 deletion tools/save_pretrained_model.py
@@ -7,7 +7,8 @@
 import tensorflow as tf
 import numpy as np
 
-# pylint: disable=redefined-outer-name,reimported
+
+# pylint: disable=redefined-outer-name,reimported,import-outside-toplevel
 
 def save_pretrained_model(sess, outputs, feeds, out_dir, model_name="pretrained"):
     """Save pretrained model and config"""
@@ -54,6 +55,7 @@ def save_pretrained_model(sess, outputs, feeds, out_dir, model_name="pretrained"):
 
     # save graph and weights
     from tensorflow.saved_model import simple_save
+    # pylint: disable=unnecessary-comprehension
     simple_save(sess, saved_model,
                 {n: i for n, i in zip(inputs.keys(), feeds.keys())},
                 {op.name: op for op in outputs})
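
Here the comprehension is kept and the check is suppressed inline instead of rewritten. pylint's unnecessary-comprehension check also flags dict comprehensions of the form {k: v for k, v in zip(...)}, since dict(zip(...)) builds the same mapping directly; the two equivalent forms in a standalone sketch:

    names = ["input:0", "label:0"]
    feeds = ["data", "target"]

    via_comp = {n: i for n, i in zip(names, feeds)}  # what R1721 flags
    via_dict = dict(zip(names, feeds))               # the suggested rewrite

    assert via_comp == via_dict == {"input:0": "data", "label:0": "target"}
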
