This repository has been archived by the owner on Dec 16, 2022. It is now read-only.
Make tests work again (#4865)
* New import paths

* Duplicate entries

* Dataset readers can't be lazy anymore
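The "new import paths" above are the transformers 4.x reorganization, where per-model code moved from flat `transformers.modeling_*` / `transformers.configuration_*` modules into `transformers.models.<name>.*`. A minimal sketch of the pattern the diffs below follow; the try/except fallback is only an illustrative compatibility shim, not something this commit adds:

# transformers >= 4.0 nests per-model code under transformers.models.<model_name>.
try:
    from transformers.models.bert.modeling_bert import BertSelfAttention   # 4.x layout
except ImportError:  # pre-4.0 fallback, shown for illustration only
    from transformers.modeling_bert import BertSelfAttention               # 3.x layout

# Auto classes are re-exported at the package top level.
from transformers import AutoModel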
dirkgr committed Dec 15, 2020
1 parent d16a5c7 commit 87e3536
Showing 12 changed files with 27 additions and 57 deletions.
10 changes: 0 additions & 10 deletions allennlp/data/dataset_readers/sharded_dataset_reader.py
@@ -30,23 +30,13 @@ class ShardedDatasetReader(DatasetReader):
     Registered as a `DatasetReader` with name "sharded".

     This class accepts all additional parameters of any `DatasetReader` class via `**kwargs`.
     We give priority to the values set in the constructor for the instance of this class.
-    Optionally, we will automatically inherit attributes from the `base_reader` when required.

     # Parameters

     base_reader : `DatasetReader`
         Reader with a read method that accepts a single file.
     """

     def __init__(self, base_reader: DatasetReader, **kwargs) -> None:
-        # ShardedDatasetReader is a wrapper for the original base_reader so some of the parameters like 'lazy'
-        # can be safely inherited. However, ShardedDatasetReader is a class instance of a DatasetReader as well.
-        # So we give priority to the parameters for the current instance stored in 'kwargs'.
-        # If not present, we check the ones in the base reader
-        kwargs["lazy"] = kwargs.get("lazy", base_reader.lazy)
-
         super().__init__(
             manual_distributed_sharding=True, manual_multi_process_sharding=True, **kwargs
         )
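With `lazy` removed from the `DatasetReader` API, the wrapper has nothing left to inherit from `base_reader`. A rough usage sketch under that assumption (the shard path below is made up for illustration):

from allennlp.data.dataset_readers import SequenceTaggingDatasetReader, ShardedDatasetReader

base_reader = SequenceTaggingDatasetReader()            # note: no lazy=... argument anymore
reader = ShardedDatasetReader(base_reader=base_reader)  # kwargs set here still take priority
instances = list(reader.read("data/shards/*.tsv"))      # each shard is read by base_reader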
5 changes: 2 additions & 3 deletions allennlp/models/vision_text_model.py
@@ -5,6 +5,7 @@
 from overrides import overrides
 import numpy as np
 import torch
+from transformers import AutoModel

 from allennlp.data.fields.text_field import TextFieldTensors
 from allennlp.data.vocabulary import Vocabulary
@@ -16,8 +17,6 @@
     TransformerPooler,
 )

-from transformers.modeling_auto import AutoModel
-
 logger = logging.getLogger(__name__)

@@ -107,7 +106,7 @@ def from_huggingface_model_name(
         if hasattr(transformer.config, "embedding_size"):
             config = transformer.config

-            from transformers.modeling_albert import AlbertModel
+            from transformers.models.albert.modeling_albert import AlbertModel

             if isinstance(transformer, AlbertModel):
                 linear_transform = deepcopy(transformer.encoder.embedding_hidden_mapping_in)
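For context, the top-level `AutoModel` import above is the standard transformers 4.x entry point used by `from_huggingface_model_name`. A hedged sketch (the checkpoint name is arbitrary):

from transformers import AutoModel

transformer = AutoModel.from_pretrained("albert-base-v2")   # any HF checkpoint name could go here
# ALBERT configs carry `embedding_size`, which is what triggers the AlbertModel branch above.
print(hasattr(transformer.config, "embedding_size"))        # True for ALBERT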
2 changes: 1 addition & 1 deletion allennlp/modules/elmo.py
@@ -13,7 +13,7 @@
 from allennlp.common.file_utils import cached_path
 from allennlp.common.util import lazy_groups_of
 from allennlp.data.instance import Instance
-from allennlp.data.tokenizers.token import Token
+from allennlp.data.tokenizers.token_class import Token
 from allennlp.data.vocabulary import Vocabulary
 from allennlp.data.batch import Batch
 from allennlp.data.fields import TextField
2 changes: 1 addition & 1 deletion allennlp/modules/transformer/activation_layer.py
@@ -5,7 +5,7 @@

 from allennlp.modules.transformer.transformer_module import TransformerModule

-from transformers.modeling_bert import ACT2FN
+from transformers.models.bert.modeling_bert import ACT2FN


 class ActivationLayer(TransformerModule, FromParams):
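`ACT2FN` is the transformers lookup table from activation names to callables, presumably what lets `ActivationLayer` accept an activation given as a string. A small sketch; the choice of "gelu" is just an example:

import torch
from transformers.models.bert.modeling_bert import ACT2FN

gelu = ACT2FN["gelu"]             # resolve an activation callable by name
out = gelu(torch.randn(2, 4))     # apply it like any function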
1 change: 0 additions & 1 deletion allennlp/nn/activations.py
@@ -99,5 +99,4 @@ def _get_name(self):
     "softsign": (torch.nn.Softsign, None),
     "tanhshrink": (torch.nn.Tanhshrink, None),
     "selu": (torch.nn.SELU, None),
-    "gelu": (torch.nn.GELU, None),
 }
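The dict this entry comes out of is AllenNLP's `Activation` registry, and the "Duplicate entries" bullet refers to "gelu" being listed twice, so dropping one copy should leave the name registered. A hedged lookup example under that assumption:

import torch
from allennlp.nn import Activation

gelu = Activation.by_name("gelu")()   # assumes "gelu" is still registered by the surviving entry
out = gelu(torch.randn(2, 4))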
17 changes: 0 additions & 17 deletions tests/data/dataset_readers/sharded_dataset_reader_test.py
@@ -73,20 +73,3 @@ def test_sharded_read_glob(self):

     def test_sharded_read_archive(self):
         self.read_and_check_instances(str(self.archive_filename))
-
-    def test_attributes_inheritance(self):
-        # current reader has lazy set to true
-        base_reader = SequenceTaggingDatasetReader(lazy=True)
-        reader = ShardedDatasetReader(base_reader=base_reader)
-
-        assert (
-            reader.lazy
-        ), "The ShardedDatasetReader didn't inherit the 'lazy' attribute from base_reader"
-
-    def test_set_attributes_main(self):
-        base_reader = SequenceTaggingDatasetReader(lazy=True)
-        reader = ShardedDatasetReader(base_reader=base_reader, lazy=False)
-
-        assert (
-            not reader.lazy
-        ), "The ShardedDatasetReader inherited the 'lazy' attribute from base_reader. It should be False"
2 changes: 1 addition & 1 deletion tests/models/vilbert_vqa_test.py
@@ -1,4 +1,4 @@
-from transformers.modeling_auto import AutoModel
+from transformers import AutoModel

 from allennlp.common.testing import ModelTestCase
 from allennlp.data import Vocabulary
2 changes: 1 addition & 1 deletion tests/models/visual_entailment_test.py
@@ -1,4 +1,4 @@
-from transformers.modeling_auto import AutoModel
+from transformers import AutoModel

 from allennlp.common.testing import ModelTestCase
 from allennlp.data import Vocabulary
16 changes: 8 additions & 8 deletions tests/modules/transformer/self_attention_test.py
@@ -9,14 +9,14 @@
 from allennlp.modules.transformer import SelfAttention
 from allennlp.common.testing import AllenNlpTestCase

-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertSelfAttention
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaSelfAttention
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraSelfAttention
-from transformers.configuration_distilbert import DistilBertConfig
-from transformers.modeling_distilbert import MultiHeadSelfAttention
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertSelfAttention
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaSelfAttention
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraSelfAttention
+from transformers.models.distilbert.configuration_distilbert import DistilBertConfig
+from transformers.models.distilbert.modeling_distilbert import MultiHeadSelfAttention

 PARAMS_DICT = {
     "hidden_size": 6,
3 changes: 1 addition & 2 deletions tests/modules/transformer/toolkit_test.py
@@ -1,7 +1,6 @@
 import torch
 from overrides import overrides

-from transformers.modeling_albert import AlbertEmbeddings
+from transformers.models.albert.modeling_albert import AlbertEmbeddings

 from allennlp.common import cached_transformers
 from allennlp.common.testing import assert_equal_parameters
12 changes: 6 additions & 6 deletions tests/modules/transformer/transformer_block_test.py
@@ -9,12 +9,12 @@
 from allennlp.modules.transformer import TransformerBlock
 from allennlp.common.testing import AllenNlpTestCase

-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertEncoder
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaEncoder
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraEncoder
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertEncoder
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaEncoder
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraEncoder

 PARAMS_DICT = {
     "num_hidden_layers": 3,
12 changes: 6 additions & 6 deletions tests/modules/transformer/transformer_layer_test.py
@@ -8,12 +8,12 @@
 from allennlp.modules.transformer import AttentionLayer, TransformerLayer
 from allennlp.common.testing import AllenNlpTestCase

-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertAttention, BertLayer
-from transformers.configuration_roberta import RobertaConfig
-from transformers.modeling_roberta import RobertaAttention, RobertaLayer
-from transformers.configuration_electra import ElectraConfig
-from transformers.modeling_electra import ElectraAttention, ElectraLayer
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.models.bert.modeling_bert import BertAttention, BertLayer
+from transformers.models.roberta.configuration_roberta import RobertaConfig
+from transformers.models.roberta.modeling_roberta import RobertaAttention, RobertaLayer
+from transformers.models.electra.configuration_electra import ElectraConfig
+from transformers.models.electra.modeling_electra import ElectraAttention, ElectraLayer

 ATTENTION_PARAMS_DICT = {
     "hidden_size": 6,
