From 437432dd60cafd5ebca84bac036c1d4c2aabb3f9 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Fri, 19 Oct 2018 14:41:31 +0200
Subject: [PATCH 1/3] Travis CI: lint Python for syntax errors and undefined names

In Travis CI, add a Python linting step that runs
[flake8](http://flake8.pycqa.org) to find syntax errors and undefined names.

[flake8](http://flake8.pycqa.org) testing of https://github.com/IBM/FfDL on Python 3.7.0

$ __flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics__
```
./etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py:93:55: F821 undefined name 'bsz'
    num_batches = ceil(len(train_set.dataset) / float(bsz))
                                                      ^
./etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py:121:14: E999 SyntaxError: positional argument follows keyword argument
              ', epoch ', epoch, '. avg_loss: ',
             ^
1     E999 SyntaxError: positional argument follows keyword argument
1     F821 undefined name 'bsz'
2
```
__E901,E999,F821,F822,F823__ are the "_showstopper_" flake8 issues that can
halt the runtime with a SyntaxError, NameError, etc. Most other flake8 issues
are merely "style violations" -- useful for readability, but they do not affect
runtime safety.

* F821: undefined name `name`
* F822: undefined name `name` in `__all__`
* F823: local variable `name` referenced before assignment
* E901: SyntaxError or IndentationError
* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree
---
 .travis.yml | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index cdbf30ec..50d36f9d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,10 @@
+language: python
+python: 3.6
 sudo: required
 
-branches:
-  only:
-  - master
+#branches:
+#  only:
+#  - master
 
 services:
   - docker
@@ -11,6 +13,9 @@ env:
   - CHANGE_MINIKUBE_NONE_USER=true
 
 before_script:
+  # lint Python for syntax errors and undefined names
+  - pip install flake8
+  - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
   # install Go
   - (sudo add-apt-repository -y ppa:gophers/archive; sudo apt-get -y update; sudo apt-get -y install golang-1.9-go) > /dev/null 2>&1
   - export PATH=/usr/lib/go-1.9/bin:$PATH; export GOROOT=/usr/lib/go-1.9/

From e8cb7b4c9257817cab6653e228f0e722a27c7ba2 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Fri, 19 Oct 2018 15:30:02 +0200
Subject: [PATCH 2/3] Undefined name: bsz --> batch_size

---
 etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py b/etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py
index 5a170dd3..830b8180 100644
--- a/etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py
+++ b/etc/examples/c10d-onnx-mpi/model-files/train_dist_onnx_mpi.py
@@ -90,7 +90,7 @@ def run(rank, size, batch_size, is_gpu):
     test_set = torch.utils.data.DataLoader(
         test_set, batch_size=batch_size, shuffle=True, pin_memory=True)
 
-    num_batches = ceil(len(train_set.dataset) / float(bsz))
+    num_batches = ceil(len(train_set.dataset) / float(batch_size))
     # To train model
     model.train()
     for epoch in range(100):

From 88854263d01a7a3616c66cd1d455d8b624437c09 Mon Sep 17 00:00:00 2001
From: cclauss
Date: Fri, 19 Oct 2018 15:33:51 +0200
Subject: [PATCH 3/3] Fix syntax error: print() does not accept a 'rank=' keyword argument

---
 .travis.yml                                                | 6 +++---
 etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 50d36f9d..6d788546 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,9 +2,9 @@ language: python
 python: 3.6
 sudo: required
 
-#branches:
-#  only:
-#  - master
+branches:
+  only:
+  - master
 
 services:
   - docker

diff --git a/etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py b/etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py
index cc0859a5..b75a1645 100644
--- a/etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py
+++ b/etc/examples/c10d-dist-onnx/model-files/train_dist_onnx.py
@@ -117,7 +117,7 @@ def run(rank, size, batch_size, is_gpu):
             if not (size == 1):
                 average_gradients(model)
             optimizer.step()
-        print('Process ', rank=dist.get_rank(),
+        print('Process ', dist.get_rank(),
               ', epoch ', epoch, '. avg_loss: ',
               epoch_loss / len(train_set))
 
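For context on the two failure classes the new flake8 gate reports, here is a
minimal, hypothetical sketch (not code from the FfDL repo) that reproduces the
F821 and E999 patterns and shows the corrected forms applied in patches 2 and 3:

```python
from math import ceil


def num_batches(dataset, batch_size):
    # F821: the original code referenced 'bsz', a name never defined in this
    # scope, which would raise NameError at runtime:
    #     return ceil(len(dataset) / float(bsz))
    # Patch 2 uses the function's actual parameter instead:
    return ceil(len(dataset) / float(batch_size))


def report(rank, epoch, avg_loss):
    # E999: print() was called with positional arguments after a keyword
    # argument, which is a SyntaxError and stops the file from compiling:
    #     print('Process ', rank=rank,
    #           ', epoch ', epoch, '. avg_loss: ', avg_loss)
    # Patch 3 passes the rank positionally:
    print('Process ', rank, ', epoch ', epoch, '. avg_loss: ', avg_loss)


if __name__ == '__main__':
    print(num_batches(range(1000), 32))      # 32 batches
    report(rank=0, epoch=1, avg_loss=0.25)
```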