
Commit

[CI] Fix lint test python version. (#225)
* [CI] Fix lint test python version.

Co-authored-by: zhangqi3 <zhangqi3@sensetime.com>
Tracin and zhangqi3 committed Jan 13, 2023
1 parent b8e3015 commit bfdb762
Showing 7 changed files with 22 additions and 34 deletions.
23 changes: 8 additions & 15 deletions .github/workflows/lint-and-test.yml
@@ -4,7 +4,7 @@ on: [push]
 
 jobs:
   Lint-and-test:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04
     strategy:
       max-parallel: 5
 
@@ -13,28 +13,21 @@ jobs:
     - name: Set up Python 3.6
       uses: actions/setup-python@v2
       with:
-        python-version: 3.6
-    - name: Add conda to system path
-      run: |
-        # $CONDA is an environment variable pointing to the root of the miniconda directory
-        echo $CONDA/bin >> $GITHUB_PATH
+        python-version: 3.7
     - name: Lint with flake8
       run: |
-        conda install flake8
+        pip install flake8
         flake8 .
-    - name: Install onnxruntime and onnxsim
-      run:
-        pip install onnxruntime onnx-simplifier
     - name: Install prettytable
       run:
         pip install prettytable
     - name: Install Protobuf
       run:
-        conda install protobuf=3.20.1
+        pip install protobuf==3.19.0
+    - name: Install onnx onnxruntime and onnxsim
+      run:
+        pip install onnx==1.7.0 onnxruntime onnx-simplifier
     - name: Install MQBench
       run: |
         python setup.py develop
     - name: Test with pytest
       run: |
-        conda install pytest
+        pip install pytest
         pytest test --junitxml=report.xml
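
For contributors who want to reproduce the lint gate locally before pushing, a minimal sketch (not part of the repository) that mirrors the updated pip-based workflow step:

# Local reproduction of the CI lint step (a sketch; assumes flake8 is
# installed, e.g. via `pip install flake8` as in the workflow above).
import subprocess
import sys

# Run flake8 over the repository root, exactly as the workflow does,
# and propagate its exit code so a failure blocks like CI would.
result = subprocess.run([sys.executable, "-m", "flake8", "."])
sys.exit(result.returncode)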
6 changes: 3 additions & 3 deletions mqbench/custom_quantizer/onnx_qnn_quantizer.py
@@ -56,9 +56,9 @@ def _qat_swap_modules(self, root: GraphModule, additional_qat_module_mapping: Di
         all_mappings = get_combined_dict(
             get_default_qat_module_mappings(), additional_qat_module_mapping)
         # There is no QLinearFC in ONNX for now.
-        del(all_mappings[torch.nn.modules.linear.Linear])
-        del(all_mappings[torch.nn.intrinsic.modules.fused.LinearReLU])
-        del(all_mappings[qnni.modules.fused.LinearBn1d])
+        del all_mappings[torch.nn.modules.linear.Linear]
+        del all_mappings[torch.nn.intrinsic.modules.fused.LinearReLU]
+        del all_mappings[qnni.modules.fused.LinearBn1d]
         root = self._convert(root, all_mappings, inplace=True)
         return root
 
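The change above is stylistic: `del` is a statement, not a function, so the parenthesized spelling only works because the parentheses act as grouping, and flake8 flags it. A tiny illustrative sketch (the mapping keys are invented for the example):

# `del` is a statement; del(x) parses as del applied to a parenthesized
# expression. flake8/pycodestyle rejects the call-like spelling.
mappings = {"Linear": "QATLinear", "LinearReLU": "QATLinearReLU"}

del (mappings["Linear"])    # legal, but reads like a function call
del mappings["LinearReLU"]  # idiomatic form used in the fix

print(mappings)  # -> {}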
2 changes: 1 addition & 1 deletion mqbench/deploy/common.py
@@ -93,7 +93,7 @@ def insert_node_purely(self, node, idx=0):
 
     def del_initializer(self, initializer_name):
         if initializer_name in self.initializer:
-            del(self.initializer[initializer_name])
+            del self.initializer[initializer_name]
 
     def optimize_model(self):
         # Delete redundant nodes.
5 changes: 0 additions & 5 deletions mqbench/deploy/deploy_stpu.py
@@ -3,7 +3,6 @@
 from collections import OrderedDict
 
 import onnx
-from onnx import numpy_helper
 
 from mqbench.deploy.common import (get_constant_inputs, prepare_data,
                                    prepare_initializer,
@@ -15,10 +14,6 @@
 
 class STPU_process(LinearQuantizer_process):
 
-    @staticmethod
-    def get_constant(node: onnx.NodeProto):
-        return numpy_helper.to_array(node.attribute[0].t).tolist()
-
     def remove_fakequantize_and_collect_params(self, onnx_path, model_name):
         model = onnx.load(onnx_path)
         graph = model.graph
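For reference, the deleted `get_constant` helper read the tensor payload out of an ONNX `Constant` node's `value` attribute. A self-contained sketch of that pattern (the node here is built inline purely for illustration):

# What the removed helper did: pull a Constant node's TensorProto
# attribute and convert it to a plain Python list via numpy_helper.
import numpy as np
from onnx import helper, numpy_helper

const_node = helper.make_node(
    "Constant", inputs=[], outputs=["c"],
    value=numpy_helper.from_array(np.array([1.0, 2.0], dtype=np.float32)),
)
values = numpy_helper.to_array(const_node.attribute[0].t).tolist()
print(values)  # -> [1.0, 2.0]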
14 changes: 7 additions & 7 deletions mqbench/fuser_method_mappings.py
@@ -47,7 +47,7 @@ def fuse_linear_bn(linear, bn):
     >>> b1 = nn.BatchNorm1d(20)
     >>> m2 = fuse_linear_bn(m1, b1)
     """
-    assert(linear.training == bn.training),\
+    assert linear.training == bn.training, \
         "Linear and BN both must be in the same mode (train or eval)."
 
     if linear.training:
@@ -59,7 +59,7 @@ def fuse_linear_bn(linear, bn):
 
 
 def fuse_deconv_bn(deconv, bn):
-    assert(deconv.training == bn.training),\
+    assert deconv.training == bn.training, \
         'DeConv and BN must be in the same mode (train or eval)'
 
     if deconv.training:
@@ -72,7 +72,7 @@ def fuse_deconv_bn(deconv, bn):
 
 
 def fuse_deconv_bn_relu(deconv, bn, relu):
-    assert(deconv.training == bn.training == relu.training),\
+    assert deconv.training == bn.training == relu.training, \
         "DeConv and BN both must be in the same mode (train or eval)."
 
     if deconv.training:
@@ -85,7 +85,7 @@ def fuse_deconv_bn_relu(deconv, bn, relu):
 
 
 def fuse_conv_freezebn(conv, bn):
-    assert(bn.training is False), "Freezebn must be eval."
+    assert bn.training is False, "Freezebn must be eval."
 
     fused_module_class_map = {
         nn.Conv2d: qnni.ConvFreezebn2d,
@@ -102,7 +102,7 @@ def fuse_conv_freezebn(conv, bn):
 
 
 def fuse_conv_freezebn_relu(conv, bn, relu):
-    assert(conv.training == relu.training and bn.training is False), "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
+    assert conv.training == relu.training and bn.training is False, "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
     fused_module : Optional[Type[nn.Sequential]] = None
     if conv.training:
         map_to_fused_module_train = {
@@ -123,7 +123,7 @@ def fuse_conv_freezebn_relu(conv, bn, relu):
 
 
 def fuse_deconv_freezebn(deconv, bn):
-    assert(bn.training is False), "Freezebn must be eval."
+    assert bn.training is False, "Freezebn must be eval."
 
     if deconv.training:
         assert bn.num_features == deconv.out_channels, 'Output channel of ConvTranspose2d must match num_features of BatchNorm2d'
@@ -135,7 +135,7 @@ def fuse_deconv_freezebn(deconv, bn):
 
 
 def fuse_deconv_freezebn_relu(deconv, bn, relu):
-    assert(deconv.training == relu.training and bn.training is False), "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
+    assert deconv.training == relu.training and bn.training is False, "Conv and relu both must be in the same mode (train or eval) and bn must be eval."
 
     if deconv.training:
         assert bn.num_features == deconv.out_channels, 'Output channel of ConvTranspose2d must match num_features of BatchNorm2d'
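These assert rewrites do more than appease the linter: if the parentheses ever grow to enclose the message as well, the assert tests a non-empty tuple, which is always truthy, so it can never fail. A small sketch of the pitfall (the module states are stand-ins for `linear.training` and `bn.training`):

# Pitfall the bare-assert style avoids: a parenthesized (condition, message)
# pair is a 2-tuple, and a non-empty tuple is always truthy.
linear_training, bn_training = True, False

# assert (linear_training == bn_training, "modes differ")
# -> CPython emits "SyntaxWarning: assertion is always true" and never raises.

try:
    assert linear_training == bn_training, \
        "Linear and BN both must be in the same mode (train or eval)."
except AssertionError as exc:
    print(exc)  # the bare form actually fires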
2 changes: 1 addition & 1 deletion mqbench/mix_precision/mix_precision.py
@@ -163,7 +163,7 @@ def hawq(model: Module, data: Tuple, criterion, type='trace'):
     elif type == 'trace':
         return hessian_comp.layer_trace()
     else:
-        raise(NotImplementedError, "{} is not supported, only trace and eigenvalues.".format(type))
+        raise NotImplementedError("{} is not supported, only trace and eigenvalues.".format(type))
 
 
 def mixprecision_bit_selection(bitwidth_list, sensetive_dict, layer_parameters_dict, model_size_constraints, latency_constraints):
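This one is a genuine bug fix, not just style: in Python 3, `raise (ExceptionType, "message")` tries to raise a tuple and itself fails with `TypeError: exceptions must derive from BaseException`, masking the intended error. A quick sketch of the fixed behavior (the dispatcher below is a simplified stand-in for `hawq`):

# Python 2 allowed `raise ExcType, "msg"`; in Python 3 the tuple form is a
# TypeError at raise time. The fix instantiates the exception directly.
def hawq_dispatch(type):
    if type not in ("trace", "eigenvalues"):
        raise NotImplementedError(
            "{} is not supported, only trace and eigenvalues.".format(type))

try:
    hawq_dispatch("hessian")
except NotImplementedError as exc:
    print(exc)  # hessian is not supported, only trace and eigenvalues.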
4 changes: 2 additions & 2 deletions requirements.txt
@@ -1,4 +1,4 @@
 torch==1.10.0
 torchvision==0.11.1
-onnx
-prettytable
+onnx==1.7.0
+prettytable
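
Pinning `onnx==1.7.0` keeps `requirements.txt` aligned with the `pip install onnx==1.7.0 onnxruntime onnx-simplifier` step added to the workflow. A throwaway sketch (not part of the repo) for checking an environment against these pins:

# Verify an installed environment against the pins in requirements.txt.
# pkg_resources ships with setuptools and works on the Python 3.7 CI image.
import pkg_resources

PINS = {"torch": "1.10.0", "torchvision": "0.11.1", "onnx": "1.7.0"}

for pkg, wanted in PINS.items():
    installed = pkg_resources.get_distribution(pkg).version
    status = "OK" if installed == wanted else "MISMATCH (want {})".format(wanted)
    print("{}=={}: {}".format(pkg, installed, status))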
