[BIG] name change
thomwolf committed Jul 5, 2019
1 parent 9113b50 commit 0bab55d
Showing 75 changed files with 284 additions and 234 deletions.
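The rename changes only the package root that downstream code imports; the public classes keep their names and APIs. A minimal migration sketch (the checkpoint name is illustrative, not taken from this diff):

# Before this commit:
#   from pytorch_pretrained_bert import BertModel, BertTokenizer
# After this commit, the same classes live under the new package name:
from pytorch_transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')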
8 changes: 4 additions & 4 deletions .circleci/config.yml
@@ -1,7 +1,7 @@
 version: 2
 jobs:
   build_py3:
-    working_directory: ~/pytorch-pretrained-BERT
+    working_directory: ~/pytorch-transformers
     docker:
       - image: circleci/python:3.5
     steps:
@@ -10,11 +10,11 @@ jobs:
       - run: sudo pip install pytest codecov pytest-cov
       - run: sudo pip install spacy ftfy==4.4.3
       - run: sudo python -m spacy download en
-      - run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov
+      - run: python -m pytest -sv ./pytorch_transformers/tests/ --cov
       - run: codecov
     parallelism: 4
   build_py2:
-    working_directory: ~/pytorch-pretrained-BERT
+    working_directory: ~/pytorch-transformers
     docker:
       - image: circleci/python:2.7
     steps:
@@ -23,7 +23,7 @@ jobs:
       - run: sudo pip install pytest codecov pytest-cov
       - run: sudo pip install spacy ftfy==4.4.3
       - run: sudo python -m spacy download en
-      - run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov
+      - run: python -m pytest -sv ./pytorch_transformers/tests/ --cov
       - run: codecov
     parallelism: 4
 workflows:
2 changes: 1 addition & 1 deletion .coveragerc
@@ -1,5 +1,5 @@
 [run]
-source=pytorch_pretrained_bert
+source=pytorch_transformers
 [report]
 exclude_lines =
     pragma: no cover
120 changes: 60 additions & 60 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docker/Dockerfile
@@ -2,6 +2,6 @@ FROM pytorch/pytorch:latest

 RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext

-RUN pip install pytorch-pretrained-bert
+RUN pip install pytorch_transformers

 WORKDIR /workspace
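One detail worth noting about the Dockerfile hunk above (a general pip fact, not something this commit establishes): pip normalizes distribution names under PEP 503, so the underscore and hyphen spellings resolve to the same PyPI package, while the Python import name always uses the underscore.

# Equivalent on the pip side, per PEP 503 name normalization:
#   pip install pytorch_transformers
#   pip install pytorch-transformers
# The import name is fixed, though:
import pytorch_transformers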
2 changes: 1 addition & 1 deletion examples/bertology.py
@@ -12,7 +12,7 @@
 from torch.utils.data.distributed import DistributedSampler
 from torch.nn import CrossEntropyLoss, MSELoss

-from pytorch_pretrained_bert import BertForSequenceClassification, BertTokenizer
+from pytorch_transformers import BertForSequenceClassification, BertTokenizer

 from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics
2 changes: 1 addition & 1 deletion examples/generation_xlnet.py
@@ -1,6 +1,6 @@
 import torch
 from torch.nn import functional as F
-from pytorch_pretrained_bert import XLNetModel, XLNetLMHeadModel, XLNetTokenizer
+from pytorch_transformers import XLNetModel, XLNetLMHeadModel, XLNetTokenizer

 import logging
 logging.basicConfig(level=logging.INFO)
8 changes: 4 additions & 4 deletions examples/lm_finetuning/finetune_on_pregenerated.py
@@ -13,10 +13,10 @@
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_bert import BertForPreTraining
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_bert import BertForPreTraining
+from pytorch_transformers.tokenization_bert import BertTokenizer
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

 InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")

2 changes: 1 addition & 1 deletion examples/lm_finetuning/pregenerate_training_data.py
@@ -5,7 +5,7 @@
 import shelve

 from random import random, randrange, randint, shuffle, choice
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
+from pytorch_transformers.tokenization_bert import BertTokenizer
 import numpy as np
 import json
 import collections
8 changes: 4 additions & 4 deletions examples/lm_finetuning/simple_lm_finetuning.py
@@ -29,10 +29,10 @@
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm, trange

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_bert import BertForPreTraining
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_bert import BertForPreTraining
+from pytorch_transformers.tokenization_bert import BertTokenizer
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                     datefmt='%m/%d/%Y %H:%M:%S',
8 changes: 4 additions & 4 deletions examples/run_bert_classifier.py
@@ -34,10 +34,10 @@

 from tensorboardX import SummaryWriter

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_bert import BertForSequenceClassification
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_bert import BertForSequenceClassification
+from pytorch_transformers.tokenization_bert import BertTokenizer
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

 from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics

4 changes: 2 additions & 2 deletions examples/run_bert_extract_features.py
@@ -28,8 +28,8 @@
 from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler

-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
-from pytorch_pretrained_bert.modeling_bert import BertModel
+from pytorch_transformers.tokenization_bert import BertTokenizer
+from pytorch_transformers.modeling_bert import BertModel

 logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                     datefmt = '%m/%d/%Y %H:%M:%S',
8 changes: 4 additions & 4 deletions examples/run_bert_squad.py
@@ -33,10 +33,10 @@

 from tensorboardX import SummaryWriter

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_bert import BertForQuestionAnswering
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_bert import BertForQuestionAnswering
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers.tokenization_bert import BertTokenizer

 from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions

8 changes: 4 additions & 4 deletions examples/run_bert_swag.py
@@ -32,10 +32,10 @@
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm, trange

-from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_bert import BertForMultipleChoice, BertConfig
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
-from pytorch_pretrained_bert.tokenization_bert import BertTokenizer
+from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_bert import BertForMultipleChoice, BertConfig
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers.tokenization_bert import BertTokenizer

 logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                     datefmt = '%m/%d/%Y %H:%M:%S',
2 changes: 1 addition & 1 deletion examples/run_gpt2.py
@@ -8,7 +8,7 @@
 import torch.nn.functional as F
 import numpy as np

-from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer
+from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer

 logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                     datefmt = '%m/%d/%Y %H:%M:%S',
2 changes: 1 addition & 1 deletion examples/run_openai_gpt.py
@@ -39,7 +39,7 @@
 from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                               TensorDataset)

-from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
+from pytorch_transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
                                   OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)

 ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
2 changes: 1 addition & 1 deletion examples/run_transfo_xl.py
@@ -28,7 +28,7 @@

 import torch

-from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer
+from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer

 logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                     datefmt = '%m/%d/%Y %H:%M:%S',
8 changes: 4 additions & 4 deletions examples/run_xlnet_classifier.py
@@ -34,10 +34,10 @@

 from tensorboardX import SummaryWriter

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_xlnet import XLNetForSequenceClassification
-from pytorch_pretrained_bert.tokenization_xlnet import XLNetTokenizer
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_xlnet import XLNetForSequenceClassification
+from pytorch_transformers.tokenization_xlnet import XLNetTokenizer
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

 from utils_glue import processors, output_modes, convert_examples_to_features, compute_metrics

8 changes: 4 additions & 4 deletions examples/run_xlnet_squad.py
@@ -33,10 +33,10 @@

 from tensorboardX import SummaryWriter

-from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
-from pytorch_pretrained_bert.modeling_xlnet import BertForQuestionAnswering
-from pytorch_pretrained_bert.tokenization_xlnet import XLNetTokenizer
-from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
+from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
+from pytorch_transformers.modeling_xlnet import BertForQuestionAnswering
+from pytorch_transformers.tokenization_xlnet import XLNetTokenizer
+from pytorch_transformers.optimization import BertAdam, WarmupLinearSchedule

 from utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions

50 changes: 50 additions & 0 deletions examples/tests/examples_tests.py
@@ -0,0 +1,50 @@
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

from pytorch_transformers import PretrainedConfig, PreTrainedModel
from pytorch_transformers.modeling_bert import BertModel, BertConfig, PRETRAINED_MODEL_ARCHIVE_MAP, PRETRAINED_CONFIG_ARCHIVE_MAP


class ModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained(self):
        for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)

            model = BertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)

            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_attentions, True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)


if __name__ == "__main__":
    unittest.main()
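The new test above exercises the output_attentions and output_hidden_states flags on the renamed package. A minimal sketch of how calling code might consume those extra outputs; the checkpoint name is illustrative and the output ordering follows pytorch-transformers' convention as I understand it, so treat the shapes in the comments as indicative:

import torch
from pytorch_transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased',
                                  output_attentions=True,
                                  output_hidden_states=True)
model.eval()

tokens = tokenizer.tokenize("Hello, world!")
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])

with torch.no_grad():
    outputs = model(input_ids)

# With both flags enabled the output tuple grows:
#   outputs[0] -> last hidden state, (batch, seq_len, hidden_size)
#   outputs[1] -> pooled output, (batch, hidden_size)
#   outputs[2] -> hidden states, one tensor per layer (embeddings included)
#   outputs[3] -> attention weights, one tensor per layer
last_hidden, pooled = outputs[0], outputs[1]
hidden_states, attentions = outputs[2], outputs[3]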
2 changes: 1 addition & 1 deletion examples/utils_squad.py
@@ -24,7 +24,7 @@
 import collections
 from io import open

-from pytorch_pretrained_bert.tokenization_bert import BasicTokenizer, whitespace_tokenize
+from pytorch_transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize

 logger = logging.getLogger(__name__)

