
fix python 2 tests
thomwolf committed Jul 9, 2019
1 parent b197869 commit c079d7d
Showing 8 changed files with 39 additions and 37 deletions.
23 changes: 10 additions & 13 deletions pytorch_transformers/tests/tokenization_bert_test.py
@@ -24,7 +24,7 @@
                                                     _is_control, _is_punctuation,
                                                     _is_whitespace, VOCAB_FILES_NAMES)

-from .tokenization_tests_commons import create_and_check_tokenizer_commons
+from .tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory

class TokenizationTest(unittest.TestCase):

@@ -33,21 +33,18 @@ def test_full_tokenizer(self):
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
            "##ing", ",", "low", "lowest",
        ]
-        vocab_directory = "/tmp/"
-        vocab_file = os.path.join(vocab_directory, VOCAB_FILES_NAMES['vocab_file'])
-        with open(vocab_file, "w", encoding='utf-8') as vocab_writer:
-            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
-            vocab_file = vocab_writer.name
+        with TemporaryDirectory() as tmpdirname:
+            vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
+            with open(vocab_file, "w", encoding='utf-8') as vocab_writer:
+                vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

-        create_and_check_tokenizer_commons(self, BertTokenizer, pretrained_model_name_or_path=vocab_directory)
+            create_and_check_tokenizer_commons(self, BertTokenizer, tmpdirname)

-        tokenizer = BertTokenizer(vocab_file)
+            tokenizer = BertTokenizer(vocab_file)

-        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
-        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
-        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
-
-        os.remove(vocab_file)
+            tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
+            self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
+            self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
5 changes: 2 additions & 3 deletions pytorch_transformers/tests/tokenization_gpt2_test.py
@@ -17,11 +17,10 @@
import os
import unittest
import json
-import tempfile

from pytorch_transformers.tokenization_gpt2 import GPT2Tokenizer, VOCAB_FILES_NAMES

-from .tokenization_tests_commons import create_and_check_tokenizer_commons
+from .tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory

class GPT2TokenizationTest(unittest.TestCase):

@@ -34,7 +33,7 @@ def test_full_tokenizer(self):
        merges = ["#version: 0.2", "l o", "lo w", "e r", ""]
        special_tokens_map = {"unk_token": "<unk>"}

-        with tempfile.TemporaryDirectory() as tmpdirname:
+        with TemporaryDirectory() as tmpdirname:
            vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
            merges_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['merges_file'])
            with open(vocab_file, "w") as fp:
5 changes: 2 additions & 3 deletions pytorch_transformers/tests/tokenization_openai_test.py
@@ -17,11 +17,10 @@
import os
import unittest
import json
-import tempfile

from pytorch_transformers.tokenization_openai import OpenAIGPTTokenizer, VOCAB_FILES_NAMES

-from.tokenization_tests_commons import create_and_check_tokenizer_commons
+from .tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory


class OpenAIGPTTokenizationTest(unittest.TestCase):
@@ -35,7 +34,7 @@ def test_full_tokenizer(self):
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

-        with tempfile.TemporaryDirectory() as tmpdirname:
+        with TemporaryDirectory() as tmpdirname:
            vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
            merges_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['merges_file'])
            with open(vocab_file, "w") as fp:
17 changes: 12 additions & 5 deletions pytorch_transformers/tests/tokenization_tests_commons.py
@@ -14,26 +14,33 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import sys
from io import open
import tempfile

-if sys.version_info[0] == 3:
-    unicode = str
+import shutil

if sys.version_info[0] == 2:
    import cPickle as pickle
+
+    class TemporaryDirectory(object):
+        """Context manager for tempfile.mkdtemp() so it's usable with "with" statement."""
+        def __enter__(self):
+            self.name = tempfile.mkdtemp()
+            return self.name
+        def __exit__(self, exc_type, exc_value, traceback):
+            shutil.rmtree(self.name)
else:
    import pickle
+    TemporaryDirectory = tempfile.TemporaryDirectory
+    unicode = str


def create_and_check_save_and_load_tokenizer(tester, tokenizer_class, *inputs, **kwargs):
    tokenizer = tokenizer_class.from_pretrained(*inputs, **kwargs)

    before_tokens = tokenizer.encode(u"He is very happy, UNwant\u00E9d,running")

-    with tempfile.TemporaryDirectory() as tmpdirname:
+    with TemporaryDirectory() as tmpdirname:
        tokenizer.save_pretrained(tmpdirname)
        tokenizer = tokenizer.from_pretrained(tmpdirname)

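For readers unfamiliar with the Python 2 limitation this commit works around: tempfile.TemporaryDirectory only exists on Python 3, so the tests now import the small backport defined above from tokenization_tests_commons. The following standalone sketch (illustration only, not repository code) shows the same pattern in isolation:

import sys
import shutil
import tempfile

if sys.version_info[0] == 2:
    # Python 2: wrap mkdtemp()/rmtree() in a context manager with the same interface.
    class TemporaryDirectory(object):
        def __enter__(self):
            self.name = tempfile.mkdtemp()
            return self.name
        def __exit__(self, exc_type, exc_value, traceback):
            shutil.rmtree(self.name)
else:
    # Python 3: the standard library already provides it.
    TemporaryDirectory = tempfile.TemporaryDirectory

with TemporaryDirectory() as tmpdirname:
    # The directory exists inside the block and is removed on exit, on both interpreters.
    print(tmpdirname)
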
5 changes: 2 additions & 3 deletions pytorch_transformers/tests/tokenization_transfo_xl_test.py
@@ -17,11 +17,10 @@
import os
import unittest
from io import open
-import tempfile

from pytorch_transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES

-from.tokenization_tests_commons import create_and_check_tokenizer_commons
+from.tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory

class TransfoXLTokenizationTest(unittest.TestCase):

@@ -30,7 +29,7 @@ def test_full_tokenizer(self):
            "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un",
            "running", ",", "low", "l",
        ]
-        with tempfile.TemporaryDirectory() as tmpdirname:
+        with TemporaryDirectory() as tmpdirname:
            vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
            with open(vocab_file, "w", encoding='utf-8') as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
5 changes: 2 additions & 3 deletions pytorch_transformers/tests/tokenization_xlm_test.py
@@ -17,11 +17,10 @@
import os
import unittest
import json
-import tempfile

from pytorch_transformers.tokenization_xlm import XLMTokenizer, VOCAB_FILES_NAMES

-from .tokenization_tests_commons import create_and_check_tokenizer_commons
+from .tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory

class XLMTokenizationTest(unittest.TestCase):

@@ -34,7 +33,7 @@ def test_full_tokenizer(self):
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

-        with tempfile.TemporaryDirectory() as tmpdirname:
+        with TemporaryDirectory() as tmpdirname:
            vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
            merges_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['merges_file'])
            with open(vocab_file, "w") as fp:
7 changes: 3 additions & 4 deletions pytorch_transformers/tests/tokenization_xlnet_test.py
@@ -16,11 +16,10 @@

import os
import unittest
-import tempfile

-from pytorch_transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE, VOCAB_FILES_NAMES)
+from pytorch_transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)

-from.tokenization_tests_commons import create_and_check_tokenizer_commons
+from .tokenization_tests_commons import create_and_check_tokenizer_commons, TemporaryDirectory

SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'fixtures/test_sentencepiece.model')
@@ -30,7 +29,7 @@ class XLNetTokenizationTest(unittest.TestCase):
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

-        with tempfile.TemporaryDirectory() as tmpdirname:
+        with TemporaryDirectory() as tmpdirname:
            tokenizer.save_pretrained(tmpdirname)

            create_and_check_tokenizer_commons(self, XLNetTokenizer, tmpdirname)
9 changes: 6 additions & 3 deletions pytorch_transformers/tokenization_utils.py
@@ -231,8 +231,7 @@ def _from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs

        # Add supplementary tokens.
        if added_tokens_file is not None:
-            added_tokens = json.load(open(added_tokens_file, encoding="utf-8"))
-            added_tok_encoder = dict((tok, len(tokenizer) + i) for i, tok in enumerate(added_tokens))
+            added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
            added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)
@@ -256,7 +255,11 @@ def save_pretrained(self, save_directory):
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))

        with open(added_tokens_file, 'w', encoding='utf-8') as f:
-            f.write(json.dumps(self.added_tokens_decoder, ensure_ascii=False))
+            if self.added_tokens_encoder:
+                out_str = json.dumps(self.added_tokens_decoder, ensure_ascii=False)
+            else:
+                out_str = u"{}"
+            f.write(out_str)

        vocab_files = self.save_vocabulary(save_directory)
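A hedged sketch of the load-side change in _from_pretrained above (illustration only; the file name, tokens, and ids are made up): previously added_tokens.json was treated as a bare list of tokens and ids were re-derived as len(tokenizer) + i, whereas after this commit the file is read as a ready-made token-to-id dictionary.

import json

# Hypothetical serialized contents of added_tokens.json, for illustration only.
old_style = '["<new_tok_1>", "<new_tok_2>"]'                  # bare token list
new_style = '{"<new_tok_1>": 30522, "<new_tok_2>": 30523}'    # token -> id mapping

base_vocab_size = 30522  # stand-in for len(tokenizer)

# Old behaviour: reconstruct ids from each token's position in the list.
added_tokens = json.loads(old_style)
added_tok_encoder = dict((tok, base_vocab_size + i) for i, tok in enumerate(added_tokens))

# New behaviour: the mapping is stored directly and loaded as-is.
added_tok_encoder_new = json.loads(new_style)
added_tok_decoder_new = {v: k for k, v in added_tok_encoder_new.items()}

assert added_tok_encoder == added_tok_encoder_new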
