
Use Flake8
lvapeab committed Mar 24, 2021
1 parent 90813b0 commit 5684189
Showing 10 changed files with 23 additions and 38 deletions.
9 changes: 9 additions & 0 deletions .flake8
@@ -0,0 +1,9 @@
+[flake8]
+ignore = E501,E402,E731,E211,F403,F405
+exclude =
+    ./venv
+    ./.idea
+    ./notebooks
+    *conf*.py
+    src/*
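
Note: the ignore list drops the line-length (E501), import-position (E402), lambda-assignment (E731), and whitespace-before-bracket (E211) checks, plus the star-import warnings (F403/F405) that lines like `from keras_wrapper.extra.isles_utils import *` would otherwise trigger. To sanity-check a file against the same configuration from Python, flake8's documented legacy API can be used (a sketch; the file name is illustrative):

    # Sketch: run flake8 programmatically with the same ignore list.
    from flake8.api import legacy as flake8

    style_guide = flake8.get_style_guide(ignore=['E501', 'E402', 'E731', 'E211', 'F403', 'F405'])
    report = style_guide.check_files(['data_engine/prepare_data.py'])
    print(report.total_errors)  # 0 when the file is clean under this config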

4 changes: 2 additions & 2 deletions .travis.yml
@@ -33,7 +33,7 @@ install:
   - travis_wait 30 pip install .

   # Install packages to run tests.
-  - pip install flaky pytest pytest-cache pytest-cov pytest-forked pytest-pep8 pytest-xdist pyux
+  - pip install flaky pytest pytest-cache pytest-cov pytest-forked pytest-xdist pyux flake8


# command to run tests
@@ -43,7 +43,7 @@ script:
  - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
  - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)"
  - if [[ "$TEST_MODE" == "PEP8" ]]; then
-      PYTHONPATH=$PWD:$PYTHONPATH python -m pytest --pep8 -m pep8 -n0;
+      PYTHONPATH=$PWD:$PYTHONPATH python -m flake8 --config=./.flake8 .;
    elif [[ "$TEST_MODE" == "GENERAL" ]]; then
      PYTHONPATH=$PWD:$PYTHONPATH pytest -s tests/test_load_params.py tests/utils/ tests/data_engine/ 2>&1 ;
    elif [[ "$TEST_MODE" == "ENCODINGS" ]]; then
7 changes: 4 additions & 3 deletions data_engine/prepare_data.py
@@ -226,9 +226,9 @@ def build_dataset(params):

     else:
         # We can easily recover it with a single line
-        ds = loadDataset(os.path.join(params['DATASET_STORE_PATH'],
-                                      'Dataset_' + params['DATASET_NAME'] +
-                                      '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl'))
+        ds = loadDataset(
+            os.path.join(params['DATASET_STORE_PATH'],
+                         'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl'))

     # Prepare references
     prepare_references(ds,
@@ -304,5 +304,6 @@ def prepare_references(ds, repeat, n=1, set_names=None):

     logger.info('Samples reduced to ' + str(new_len) + ' in ' + s + ' set.')

+
 # Backwards compatibility:
 keep_n_captions = prepare_references
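
Note: the reformatted call (here and in nmt_keras/training.py below) simply joins the dataset filename onto one line, which is fine now that E501 is ignored. With hypothetical config values, the constructed path resolves like this (a sketch; the real values come from config.py):

    import os

    params = {'DATASET_STORE_PATH': 'datasets', 'DATASET_NAME': 'EuTrans',
              'SRC_LAN': 'es', 'TRG_LAN': 'en'}  # hypothetical values
    print(os.path.join(params['DATASET_STORE_PATH'],
                       'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl'))
    # -> datasets/Dataset_EuTrans_esen.pkl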
9 changes: 2 additions & 7 deletions demo-web/sample_server.py
@@ -3,10 +3,6 @@

 from __future__ import print_function

-try:
-    import itertools.imap as map
-except ImportError:
-    pass
 import argparse
 import ast
 import logging
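
Note: the deleted block was a Python 2 shim for a lazy map. Under Python 3, map is already an iterator, and as written the import could never succeed anyway (itertools.imap is a function, not a module, so `import itertools.imap as map` raises ImportError even on Python 2 — hence the no-op try/except). A quick illustration of why no shim is needed:

    # Python 3's built-in map is lazy; nothing is computed until consumed.
    squares = map(lambda x: x * x, range(5))
    print(list(squares))  # [0, 1, 4, 9, 16]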
@@ -22,9 +18,9 @@
 from keras_wrapper.cnn_model import loadModel, updateModel
 from keras_wrapper.dataset import loadDataset
 from keras_wrapper.extra.isles_utils import *
-from keras_wrapper.extra.read_write import pkl2dict, list2file
+from keras_wrapper.extra.read_write import pkl2dict
 from keras_wrapper.online_trainer import OnlineTrainer
-from keras_wrapper.utils import decode_predictions_beam_search, flatten_list_of_lists
+from keras_wrapper.utils import decode_predictions_beam_search
 from nmt_keras.model_zoo import TranslationModel
 # from online_models import build_online_models
 from utils.utils import update_parameters
@@ -547,7 +543,6 @@ def main():
     # Get word2index and index2word dictionaries
     index2word_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['idx2words']
     word2index_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['words2idx']
-    index2word_x = dataset.vocabulary[parameters['INPUTS_IDS_DATASET'][0]]['idx2words']
     word2index_x = dataset.vocabulary[parameters['INPUTS_IDS_DATASET'][0]]['words2idx']

     excluded_words = None
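
Note: the deletions in this file are exactly what pyflakes reports as F401 ('imported but unused': list2file, flatten_list_of_lists) and F841 ('local variable assigned but never used': index2word_x), and neither code is in the new ignore list. A minimal reproduction of both warnings (a sketch):

    import os  # F401: 'os' imported but unused


    def lookup(vocab):
        unused = vocab['idx2words']  # F841: assigned but never used
        return vocab['words2idx']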
4 changes: 2 additions & 2 deletions nmt_keras/model_zoo.py
@@ -444,10 +444,10 @@ def AttentionRNNEncoderDecoder(self, params):
         * Possibly deep encoder/decoder
         See:
         * `Neural Machine Translation by Jointly Learning to Align and Translate`_.
-        * `Nematus\: a Toolkit for Neural Machine Translation`_.
+        * `Nematus: a Toolkit for Neural Machine Translation`_.
         .. _Neural Machine Translation by Jointly Learning to Align and Translate: https://arxiv.org/abs/1409.0473
-        .. _Nematus\: a Toolkit for Neural Machine Translation: https://arxiv.org/abs/1703.04357
+        .. _Nematus: a Toolkit for Neural Machine Translation: https://arxiv.org/abs/1703.04357
         :param int params: Dictionary of hyper-params (see config.py)
         :return: None
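
Note: this docstring change is not just cosmetic. Inside a regular (non-raw) string literal, `\:` is an invalid escape sequence, which pycodestyle reports as W605 — a code that is not on the ignore list — so presumably that is what the edit silences:

    bad = 'Nematus\: a Toolkit'   # W605: invalid escape sequence '\:'
    good = 'Nematus: a Toolkit'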
4 changes: 1 addition & 3 deletions nmt_keras/training.py
@@ -42,9 +42,7 @@ def train_model(params, load_dataset=None):
         dataset = loadDataset(
             os.path.join(
                 params['DATASET_STORE_PATH'],
-                'Dataset_' +
-                params['DATASET_NAME'] + '_' +
-                params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
+                'Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl')
         )

         epoch_offset = 0 if dataset.len_train == 0 else int(
19 changes: 0 additions & 19 deletions pytest.ini

This file was deleted.

1 change: 1 addition & 0 deletions tests/NMT_architectures/unidir_deep_GRU_ConditionalLSTM.py
@@ -67,5 +67,6 @@ def test_NMT_Unidir_deep_GRU_ConditionalLSTM():
     print("Done")
     clean_dirs(params)

+
 if __name__ == '__main__':
     pytest.main([__file__])
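
Note: the single added line here (and in tests/data_engine/test_prepare_data.py below) is a blank line: pycodestyle's E305 expects two blank lines between a top-level function and the code that follows it, and E305 stays enabled under the new config. A sketch of the rule:

    def test_something():
        pass


    if __name__ == '__main__':  # two blank lines above satisfy E305
        pass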
1 change: 1 addition & 0 deletions tests/data_engine/test_prepare_data.py
@@ -94,5 +94,6 @@ def test_keep_n_captions():
     assert len(eval('ds.X_' + split + str([params['INPUTS_IDS_DATASET'][0]]))) == len_split
     assert len(eval('ds.Y_' + split + str([params['OUTPUTS_IDS_DATASET'][0]]))) == len_split

+
 if __name__ == '__main__':
     pytest.main([__file__])
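
Note (an aside on these assertions): `str([params['INPUTS_IDS_DATASET'][0]])` renders a literal index such as `['source_text']`, so the eval resolves to something like `ds.X_train['source_text']`. An eval-free equivalent would be (a sketch with hypothetical names):

    split = 'train'
    input_id = params['INPUTS_IDS_DATASET'][0]  # e.g. 'source_text'
    assert len(getattr(ds, 'X_' + split)[input_id]) == len_split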
3 changes: 1 addition & 2 deletions tests/utils/test_process_word_vectors.py
@@ -11,8 +11,7 @@ def test_text_word2vec2npy():
     vectors_name = 'wiki.fiu_vro.vec'
     path = os.path.dirname(inspect.getfile(inspect.currentframe()))
     if not os.path.exists(path + '/' + vectors_name):
-        call(["wget https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/" + vectors_name + " -O " +
-              path + "/" + vectors_name],
+        call(["wget https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/" + vectors_name + " -O " + path + "/" + vectors_name],
             shell=True)
     txtvec2npy(path + '/' + vectors_name, './', vectors_name[:-4])
     vectors = np.load('./' + vectors_name[:-4] + '.npy', allow_pickle=True).item()
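
Note: the change only joins the wget command onto a single line (long lines are fine with E501 ignored); the command is still passed as one string with shell=True. For reference, an argument-list form that avoids the shell would look like this (a sketch):

    from subprocess import call

    url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/' + vectors_name
    call(['wget', url, '-O', path + '/' + vectors_name])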
