Revert "Model templates encoder only (huggingface#8509)"
This reverts commit aa2012f.
fabiocapsouza authored Nov 15, 2020
1 parent 265d0bd commit e3cbd19
Showing 29 changed files with 1,990 additions and 3,328 deletions.
18 changes: 0 additions & 18 deletions .github/workflows/self-push.yml
@@ -4,12 +4,10 @@ on:
push:
branches:
- master
- model-templates
paths:
- "src/**"
- "tests/**"
- ".github/**"
- "templates/**"
# pull_request:
repository_dispatch:

@@ -57,14 +55,6 @@ jobs:
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
- name: Create model files
run: |
source .env/bin/activate
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/encoder-bert-tokenizer.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/pt-encoder-bert-tokenizer.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/standalone.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/tf-encoder-bert-tokenizer.json --path=templates/cookiecutter
- name: Run all non-slow tests on GPU
env:
OMP_NUM_THREADS: 1
@@ -126,14 +116,6 @@ jobs:
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
- name: Create model files
run: |
source .env/bin/activate
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/encoder-bert-tokenizer.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/pt-encoder-bert-tokenizer.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/standalone.json --path=templates/cookiecutter
transformers-cli add-new-model --testing --testing_file=templates/cookiecutter/tests/tf-encoder-bert-tokenizer.json --path=templates/cookiecutter
- name: Run all non-slow tests on GPU
env:
OMP_NUM_THREADS: 1
5 changes: 2 additions & 3 deletions setup.py
@@ -98,21 +98,20 @@

extras["tokenizers"] = ["tokenizers==0.9.2"]
extras["onnxruntime"] = ["onnxruntime>=1.4.0", "onnxruntime-tools>=1.4.2"]
extras["modelcreation"] = ["cookiecutter==1.7.2"]

extras["serving"] = ["pydantic", "uvicorn", "fastapi", "starlette"]

extras["sentencepiece"] = ["sentencepiece==0.1.91"]
extras["retrieval"] = ["faiss-cpu", "datasets"]
extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil"] + extras["retrieval"] + extras["modelcreation"]
extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil"] + extras["retrieval"]
# sphinx-rtd-theme==0.5.0 introduced big changes in the style.
extras["docs"] = ["recommonmark", "sphinx==3.2.1", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"]
extras["quality"] = ["black >= 20.8b1", "isort >= 5.5.4", "flake8 >= 3.8.3"]


extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]

extras["dev"] = extras["all"] + extras["testing"] + extras["quality"] + extras["ja"] + extras["docs"] + extras["sklearn"] + extras["modelcreation"]
extras["dev"] = extras["all"] + extras["testing"] + extras["quality"] + extras["ja"] + extras["docs"] + extras["sklearn"]


setup(
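With the revert applied, cookiecutter is no longer declared anywhere in setup.py. A minimal sketch of how these extras compose after the change (illustrative only, not the full setup.py; the package lists are copied from the diff above):

from setuptools import setup, find_packages

extras = {}
extras["retrieval"] = ["faiss-cpu", "datasets"]
# "modelcreation" (cookiecutter==1.7.2) is gone, so "testing" no longer pulls it in
extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil"] + extras["retrieval"]
extras["quality"] = ["black >= 20.8b1", "isort >= 5.5.4", "flake8 >= 3.8.3"]
# "dev" likewise drops extras["modelcreation"] from the extras it sums up
# (the real "dev" also folds in "all", "ja", "docs" and "sklearn")
extras["dev"] = extras["testing"] + extras["quality"]

setup(
    name="transformers",
    extras_require=extras,  # users opt in with e.g. `pip install transformers[dev]`
    packages=find_packages("src"),
    package_dir={"": "src"},
)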
192 changes: 0 additions & 192 deletions src/transformers/commands/add_new_model.py

This file was deleted.

2 changes: 0 additions & 2 deletions src/transformers/commands/transformers_cli.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
from argparse import ArgumentParser

from transformers.commands.add_new_model import AddNewModelCommand
from transformers.commands.convert import ConvertCommand
from transformers.commands.download import DownloadCommand
from transformers.commands.env import EnvironmentCommand
@@ -21,7 +20,6 @@ def main():
RunCommand.register_subcommand(commands_parser)
ServeCommand.register_subcommand(commands_parser)
UserCommands.register_subcommand(commands_parser)
AddNewModelCommand.register_subcommand(commands_parser)

# Let's go
args = parser.parse_args()
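Dropping both the import and the AddNewModelCommand.register_subcommand(...) call is what fully detaches the command from transformers-cli. For context, a simplified sketch of the subcommand pattern the remaining commands follow; the class and argument names below are illustrative, not the deleted add_new_model.py:

from transformers.commands import BaseTransformersCLICommand


class ExampleCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # attach a sub-parser and point its `func` default at a factory, so
        # `transformers-cli example --name foo` builds this command; main() then
        # calls args.func(args) and .run() on the returned instance
        sub = parser.add_parser("example", help="Illustrative subcommand")
        sub.add_argument("--name", type=str, required=True)
        sub.set_defaults(func=lambda args: ExampleCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Running example command for {self.name}")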
3 changes: 0 additions & 3 deletions src/transformers/configuration_auto.py
@@ -59,7 +59,6 @@
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = dict(
(key, value)
for pretrained_map in [
# Add archive maps here
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_CONFIG_ARCHIVE_MAP,
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -96,7 +95,6 @@

CONFIG_MAPPING = OrderedDict(
[
# Add configs here
("retribert", RetriBertConfig),
("t5", T5Config),
("mobilebert", MobileBertConfig),
@@ -138,7 +136,6 @@

MODEL_NAMES_MAPPING = OrderedDict(
[
# Add full (and cased) model names here
("retribert", "RetriBERT"),
("t5", "T5"),
("mobilebert", "MobileBERT"),
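The deleted comments were not documentation: they were anchor markers that the (now removed) cookiecutter templates searched for when splicing a new model's entries into these tables. A hedged sketch of how CONFIG_MAPPING itself is consumed, roughly what AutoConfig does once it knows a model_type string (simplified; the real code also handles fetching config.json and values are config classes, shown here as plain names to keep the sketch self-contained):

from collections import OrderedDict

# keys are `model_type` strings, values stand in for the config classes
CONFIG_MAPPING = OrderedDict(
    [
        ("retribert", "RetriBertConfig"),
        ("t5", "T5Config"),
        ("mobilebert", "MobileBertConfig"),
    ]
)


def config_for_model_type(model_type):
    # without the "# Add configs here" marker, generated templates no longer have
    # a fixed place to insert their ("model_type", Config) pair
    if model_type not in CONFIG_MAPPING:
        raise ValueError(
            f"Unrecognized model type {model_type}; should be one of {list(CONFIG_MAPPING)}"
        )
    return CONFIG_MAPPING[model_type]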
12 changes: 0 additions & 12 deletions src/transformers/modeling_auto.py
@@ -226,14 +226,11 @@
from .utils import logging


# Add modeling imports here

logger = logging.get_logger(__name__)


MODEL_MAPPING = OrderedDict(
[
# Base model mapping
(RetriBertConfig, RetriBertModel),
(T5Config, T5Model),
(DistilBertConfig, DistilBertModel),
@@ -269,7 +266,6 @@

MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
[
# Model for pre-training mapping
(LayoutLMConfig, LayoutLMForMaskedLM),
(RetriBertConfig, RetriBertModel),
(T5Config, T5ForConditionalGeneration),
@@ -299,7 +295,6 @@

MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
[
# Model with LM heads mapping
(LayoutLMConfig, LayoutLMForMaskedLM),
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
@@ -330,7 +325,6 @@

MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
[
# Model for Causal LM mapping
(CamembertConfig, CamembertForCausalLM),
(XLMRobertaConfig, XLMRobertaForCausalLM),
(RobertaConfig, RobertaForCausalLM),
@@ -353,7 +347,6 @@

MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
[
# Model for Masked LM mapping
(LayoutLMConfig, LayoutLMForMaskedLM),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
@@ -375,7 +368,6 @@

MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
(T5Config, T5ForConditionalGeneration),
(PegasusConfig, PegasusForConditionalGeneration),
(MarianConfig, MarianMTModel),
@@ -391,7 +383,6 @@

MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
# Model for Sequence Classification mapping
(DistilBertConfig, DistilBertForSequenceClassification),
(AlbertConfig, AlbertForSequenceClassification),
(CamembertConfig, CamembertForSequenceClassification),
@@ -416,7 +407,6 @@

MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
# Model for Question Answering mapping
(DistilBertConfig, DistilBertForQuestionAnswering),
(AlbertConfig, AlbertForQuestionAnswering),
(CamembertConfig, CamembertForQuestionAnswering),
@@ -439,7 +429,6 @@

MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[
# Model for Token Classification mapping
(LayoutLMConfig, LayoutLMForTokenClassification),
(DistilBertConfig, DistilBertForTokenClassification),
(CamembertConfig, CamembertForTokenClassification),
@@ -461,7 +450,6 @@

MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
[
# Model for Multiple Choice mapping
(CamembertConfig, CamembertForMultipleChoice),
(ElectraConfig, ElectraForMultipleChoice),
(XLMRobertaConfig, XLMRobertaForMultipleChoice),
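The same marker comments come out of every MODEL_*_MAPPING table in this file. For reference, a simplified sketch of how these OrderedDicts are used at runtime, roughly the lookup AutoModel.from_config performs (hedged; the real method in modeling_auto.py covers more edge cases):

def model_from_config(config, mapping):
    # walk the OrderedDict in order and instantiate the first model class whose
    # config class matches; ordering matters because some configs subclass others
    for config_class, model_class in mapping.items():
        if isinstance(config, config_class):
            return model_class(config)
    raise ValueError(
        f"Unrecognized configuration class {config.__class__} for this mapping; "
        f"should be one of {', '.join(c.__name__ for c in mapping)}"
    )

# usage (sketch): model = model_from_config(BertConfig(), MODEL_MAPPING)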
