diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index 6318e1351f235..65a5d3228a7c1 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -43,7 +43,6 @@
 from . import dependency_versions_check
 from .file_utils import (
     _LazyModule,
-    is_detectron2_available,
     is_flax_available,
     is_sentencepiece_available,
     is_speech_available,
@@ -99,7 +98,6 @@
         "cached_path",
         "is_apex_available",
         "is_datasets_available",
-        "is_detectron2_available",
         "is_faiss_available",
         "is_flax_available",
         "is_psutil_available",
@@ -442,25 +440,6 @@
         name for name in dir(dummy_timm_objects) if not name.startswith("_")
     ]
 
-# Detectron2-backed objects
-if is_detectron2_available():
-    _import_structure["models.layoutlmv2"].extend(
-        [
-            "LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
-            "LayoutLMv2ForQuestionAnswering",
-            "LayoutLMv2ForSequenceClassification",
-            "LayoutLMv2ForTokenClassification",
-            "LayoutLMv2Model",
-            "LayoutLMv2PreTrainedModel",
-        ]
-    )
-else:
-    from .utils import dummy_detectron2_objects
-
-    _import_structure["utils.dummy_detectron2_objects"] = [
-        name for name in dir(dummy_detectron2_objects) if not name.startswith("_")
-    ]
-
 # PyTorch-backed objects
 if is_torch_available():
     _import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"]
@@ -850,6 +829,16 @@
             "LayoutLMPreTrainedModel",
         ]
     )
+    _import_structure["models.layoutlmv2"].extend(
+        [
+            "LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
+            "LayoutLMv2ForQuestionAnswering",
+            "LayoutLMv2ForSequenceClassification",
+            "LayoutLMv2ForTokenClassification",
+            "LayoutLMv2Model",
+            "LayoutLMv2PreTrainedModel",
+        ]
+    )
     _import_structure["models.led"].extend(
         [
             "LED_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1750,7 +1739,6 @@
         cached_path,
         is_apex_available,
         is_datasets_available,
-        is_detectron2_available,
         is_faiss_available,
         is_flax_available,
         is_psutil_available,
@@ -2387,6 +2375,14 @@
             LayoutLMModel,
             LayoutLMPreTrainedModel,
         )
+        from .models.layoutlmv2 import (
+            LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
+            LayoutLMv2ForQuestionAnswering,
+            LayoutLMv2ForSequenceClassification,
+            LayoutLMv2ForTokenClassification,
+            LayoutLMv2Model,
+            LayoutLMv2PreTrainedModel,
+        )
         from .models.led import (
             LED_PRETRAINED_MODEL_ARCHIVE_LIST,
             LEDForConditionalGeneration,
diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py
index db07ae7184b79..0fdf5bd3a6f13 100644
--- a/src/transformers/utils/dummy_pt_objects.py
+++ b/src/transformers/utils/dummy_pt_objects.py
@@ -1960,6 +1960,54 @@ def from_pretrained(cls, *args, **kwargs):
         requires_backends(cls, ["torch"])
 
 
+LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
+
+
+class LayoutLMv2ForQuestionAnswering:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class LayoutLMv2ForSequenceClassification:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class LayoutLMv2ForTokenClassification:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class LayoutLMv2Model:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class LayoutLMv2PreTrainedModel:
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 LED_PRETRAINED_MODEL_ARCHIVE_LIST = None
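
Note: the dummy classes added to dummy_pt_objects.py above follow the library's backend-guard pattern, so that when torch is not installed the LayoutLMv2 names still import, but raise a descriptive error the moment they are used. Below is a minimal, self-contained sketch of that pattern with a simplified stand-in for requires_backends (not the actual transformers helper):

import importlib.util


def requires_backends(obj, backends):
    # Simplified stand-in: raise if any required backend is not importable.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class LayoutLMv2Model:
    # Placeholder exported when torch is unavailable; the real model is exposed otherwise.
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

With this in place, calling LayoutLMv2Model() or LayoutLMv2Model.from_pretrained(...) in an environment without torch raises an ImportError naming the missing backend, rather than breaking the top-level transformers import.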