Patches full import failure when sentencepiece is not installed (#10752)
* Patches full import failure when sentencepiece is not installed

* Dummies :)
LysandreJik committed Mar 16, 2021
1 parent c5d6a28 commit f213d23
Showing 2 changed files with 26 additions and 4 deletions.
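
Before this patch, the sentencepiece-backed tokenizers (M2M100Tokenizer, DebertaV2Tokenizer) were listed unconditionally in the top-level _import_structure, so import transformers failed outright whenever sentencepiece was missing. The patch registers them only behind the availability guard and serves dummy placeholders otherwise. A minimal sketch of that pattern, using a toy module layout and a locally defined availability helper rather than the real transformers internals:

# Sketch of the guarded-registration pattern (toy layout, not the actual file).
import importlib.util

def is_sentencepiece_available() -> bool:
    # True only if the optional dependency can actually be imported.
    return importlib.util.find_spec("sentencepiece") is not None

# Symbols that never need sentencepiece are always registered.
_import_structure = {
    "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
    "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
}

if is_sentencepiece_available():
    # Tokenizers that require sentencepiece are only added when it is installed,
    # so merely importing the package never touches the missing dependency.
    _import_structure["models.m2m_100"].append("M2M100Tokenizer")
    _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
else:
    # Otherwise the same names are served by dummy classes that raise a
    # descriptive error the moment they are actually used.
    _import_structure["utils.dummy_sentencepiece_objects"] = [
        "M2M100Tokenizer",
        "DebertaV2Tokenizer",
    ]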
src/transformers/__init__.py: 12 changes (8 additions, 4 deletions)
@@ -134,7 +134,7 @@
         "Wav2Vec2FeatureExtractor",
         "Wav2Vec2Processor",
     ],
-    "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100Tokenizer"],
+    "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
     "models.speech_to_text": [
         "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
         "Speech2TextConfig",
@@ -171,7 +171,7 @@
     "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"],
     "models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
     "models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"],
-    "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config", "DebertaV2Tokenizer"],
+    "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
     "models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"],
     "models.dpr": [
         "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP",
@@ -274,6 +274,8 @@
     _import_structure["models.barthez"].append("BarthezTokenizer")
     _import_structure["models.bert_generation"].append("BertGenerationTokenizer")
     _import_structure["models.camembert"].append("CamembertTokenizer")
+    _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
+    _import_structure["models.m2m_100"].append("M2M100Tokenizer")
     _import_structure["models.marian"].append("MarianTokenizer")
     _import_structure["models.mbart"].append("MBartTokenizer")
     _import_structure["models.mbart"].append("MBart50Tokenizer")
@@ -1361,7 +1363,7 @@
     from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
     from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
     from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer
-    from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config, DebertaV2Tokenizer
+    from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config
    from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer
     from .models.dpr import (
         DPR_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -1383,7 +1385,7 @@
     from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer
     from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
     from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
-    from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100Tokenizer
+    from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config
     from .models.marian import MarianConfig
     from .models.mbart import MBartConfig
     from .models.mmbt import MMBTConfig
@@ -1480,6 +1482,8 @@
         from .models.barthez import BarthezTokenizer
         from .models.bert_generation import BertGenerationTokenizer
         from .models.camembert import CamembertTokenizer
+        from .models.deberta_v2 import DebertaV2Tokenizer
+        from .models.m2m_100 import M2M100Tokenizer
         from .models.marian import MarianTokenizer
         from .models.mbart import MBart50Tokenizer, MBartTokenizer
         from .models.mt5 import MT5Tokenizer
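
With both halves of __init__.py guarded, a bare import of transformers should now succeed even without sentencepiece; the tokenizer names still resolve, just to placeholders. A quick check one could run in an environment without sentencepiece (the placeholder resolution is the intent of this commit; the exact objects printed are an assumption about this version of the library):

# Run where sentencepiece is NOT installed: the top-level import no longer
# fails, and the guarded tokenizers resolve to dummy classes instead of
# raising at import time.
import transformers

print(transformers.__version__)

# Attribute lookup succeeds either way; with sentencepiece missing these are
# dummy placeholders rather than the real tokenizer classes.
print(transformers.M2M100Tokenizer)
print(transformers.DebertaV2Tokenizer)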
src/transformers/utils/dummy_sentencepiece_objects.py: 18 changes (18 additions, 0 deletions)
@@ -38,6 +38,24 @@ def from_pretrained(self, *args, **kwargs):
         requires_sentencepiece(self)
 
 
+class DebertaV2Tokenizer:
+    def __init__(self, *args, **kwargs):
+        requires_sentencepiece(self)
+
+    @classmethod
+    def from_pretrained(self, *args, **kwargs):
+        requires_sentencepiece(self)
+
+
+class M2M100Tokenizer:
+    def __init__(self, *args, **kwargs):
+        requires_sentencepiece(self)
+
+    @classmethod
+    def from_pretrained(self, *args, **kwargs):
+        requires_sentencepiece(self)
+
+
 class MarianTokenizer:
     def __init__(self, *args, **kwargs):
         requires_sentencepiece(self)
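
The dummy classes defer the failure from import time to use time: requires_sentencepiece, a helper in the library's file utilities, raises an informative error when the placeholder is constructed or its from_pretrained is called. A self-contained sketch of that behavior, with the helper and its error message simplified for illustration rather than copied from the library:

# Simplified stand-in for the dummy-object mechanism. The requires_sentencepiece
# below mimics the real helper for the case where sentencepiece is missing; the
# actual helper lives in transformers and its message differs.
def requires_sentencepiece(obj):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(
        f"{name} requires the SentencePiece library, which was not found in your "
        "environment. Install it with pip install sentencepiece and retry."
    )

class M2M100Tokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_sentencepiece(cls)

# Importing the placeholder is harmless; using it raises a clear error.
try:
    M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
except ImportError as err:
    print(err)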