Class-based examples/tests #241

Merged: 25 commits, merged on May 16, 2022
8 changes: 2 additions & 6 deletions .github/workflows/ci_testing.yml
@@ -13,13 +13,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-20.04, macOS-10.15, windows-2019]
-        python-version: [3.6, 3.8]
+        os: [ubuntu-20.04, macOS-10.15]
+        python-version: [3.8]
         requires: ['minimal', 'latest']
-      exclude:
-        # excludes windows minimal test as HF hanging
-        - os: windows-2019
-          requires: 'minimal'

     # Timeout: https://stackoverflow.com/a/59076067/4521646
     timeout-minutes: 35
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -56,7 +56,7 @@ repos:
       require_serial: false

   - repo: https://github.com/psf/black
-    rev: 21.12b0
+    rev: 22.3.0
     hooks:
       - id: black
         name: Format code
13 changes: 0 additions & 13 deletions examples/README.md

This file was deleted.

70 changes: 0 additions & 70 deletions examples/custom_language_modeling/dataset.py

This file was deleted.

22 changes: 0 additions & 22 deletions examples/custom_language_modeling/model.py

This file was deleted.

34 changes: 0 additions & 34 deletions examples/custom_translation/dataset.py

This file was deleted.

8 changes: 0 additions & 8 deletions examples/custom_translation/model.py

This file was deleted.

23 changes: 23 additions & 0 deletions examples/language_modeling.py
@@ -0,0 +1,23 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.language_modeling import (
    LanguageModelingDataConfig,
    LanguageModelingDataModule,
    LanguageModelingTransformer,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="gpt2")
    model = LanguageModelingTransformer(pretrained_model_name_or_path="gpt2")
    dm = LanguageModelingDataModule(
        cfg=LanguageModelingDataConfig(
            batch_size=1,
            dataset_name="wikitext",
            dataset_config_name="wikitext-2-raw-v1",
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
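
A side note on quick verification (an editorial addition, not part of the diff): since these scripts double as runnable tests, PyTorch Lightning's fast_dev_run flag is a cheap way to exercise the full pipeline, pushing a single batch through training and validation. A minimal sketch under the same imports as the example above:

# Hypothetical smoke test, not in this PR: fast_dev_run runs one batch
# through fit and validation, so dataset download and tokenization are
# still exercised while the run finishes in seconds.
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model, dm)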
24 changes: 24 additions & 0 deletions examples/masked_language_modeling.py
@@ -0,0 +1,24 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.masked_language_modeling import (
    MaskedLanguageModelingDataConfig,
    MaskedLanguageModelingDataModule,
    MaskedLanguageModelingTransformer,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
    model = MaskedLanguageModelingTransformer(pretrained_model_name_or_path="bert-base-uncased")
    dm = MaskedLanguageModelingDataModule(
        cfg=MaskedLanguageModelingDataConfig(
            batch_size=1,
            dataset_name="wikitext",
            dataset_config_name="wikitext-2-raw-v1",
            max_length=512,
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
24 changes: 24 additions & 0 deletions examples/multiple_choice.py
@@ -0,0 +1,24 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.multiple_choice import (
    MultipleChoiceDataConfig,
    MultipleChoiceTransformer,
    SwagMultipleChoiceDataModule,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
    model = MultipleChoiceTransformer(pretrained_model_name_or_path="bert-base-uncased")
    dm = SwagMultipleChoiceDataModule(
        cfg=MultipleChoiceDataConfig(
            batch_size=1,
            dataset_name="swag",
            dataset_config_name="regular",
            padding=False,
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
29 changes: 29 additions & 0 deletions examples/question_answering_squad.py
@@ -0,0 +1,29 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.question_answering import (
    QuestionAnsweringDataConfig,
    QuestionAnsweringTransformer,
    SquadDataModule,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
    model = QuestionAnsweringTransformer(pretrained_model_name_or_path="bert-base-uncased")
    dm = SquadDataModule(
        cfg=QuestionAnsweringDataConfig(
            batch_size=1,
            dataset_name="squad",
            dataset_config_name="plain_text",
            max_length=384,
            version_2_with_negative=False,
            null_score_diff_threshold=0.0,
            doc_stride=128,
            n_best_size=20,
            max_answer_length=30,
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
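
A possible follow-up once training finishes (a sketch, not part of the diff): the standard LightningModule checkpoint API can persist and restore the fine-tuned task transformer, assuming the module saves its hyperparameters on construction:

# Hypothetical usage, not in this PR: persist the fine-tuned weights and
# restore them with the stock Lightning checkpoint API.
trainer.save_checkpoint("question_answering.ckpt")
restored = QuestionAnsweringTransformer.load_from_checkpoint("question_answering.ckpt")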
33 changes: 33 additions & 0 deletions examples/summarization.py
@@ -0,0 +1,33 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.summarization import (
    SummarizationConfig,
    SummarizationDataConfig,
    SummarizationTransformer,
    XsumSummarizationDataModule,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="t5-base")
    model = SummarizationTransformer(
        pretrained_model_name_or_path="t5-base",
        cfg=SummarizationConfig(
            use_stemmer=True,
            val_target_max_length=142,
            num_beams=None,
            compute_generate_metrics=True,
        ),
    )
    dm = XsumSummarizationDataModule(
        cfg=SummarizationDataConfig(
            batch_size=1,
            dataset_name="xsum",
            max_source_length=128,
            max_target_length=128,
        ),
        tokenizer=tokenizer,
    )
    trainer = pl.Trainer(accelerator="auto", devices=1, max_epochs=1)

    trainer.fit(model, dm)
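
To actually produce summaries after fine-tuning, the wrapped Hugging Face model can be driven directly. A sketch, assuming the underlying transformers model is exposed as model.model (that attribute name is an assumption, not something this diff confirms); note that T5 expects a task prefix:

# Hypothetical inference, not in this PR; `model.model` is assumed to be the
# wrapped Hugging Face T5 model, which expects a "summarize: " prefix.
article = "A long news article to be summarized ..."
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True)
summary_ids = model.model.generate(**inputs, max_length=142, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))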
25 changes: 25 additions & 0 deletions examples/text_classification.py
@@ -0,0 +1,25 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.text_classification import (
    TextClassificationDataConfig,
    TextClassificationDataModule,
    TextClassificationTransformer,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
    dm = TextClassificationDataModule(
        cfg=TextClassificationDataConfig(
            batch_size=1,
            dataset_name="glue",
            dataset_config_name="sst2",
            max_length=512,
        ),
        tokenizer=tokenizer,
    )
    dm.setup("fit")
    model = TextClassificationTransformer(pretrained_model_name_or_path="bert-base-uncased", num_labels=dm.num_classes)
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
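
One design note on this example: dm.setup("fit") runs before the model is constructed because the classification head needs num_labels, and the number of classes comes from the dataset rather than the checkpoint. After training, the validation loop can be re-run to report metrics; a minimal sketch:

# Hypothetical, not in this PR: run the validation loop and report metrics.
trainer.validate(model, datamodule=dm)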
26 changes: 26 additions & 0 deletions examples/token_classification.py
@@ -0,0 +1,26 @@
import pytorch_lightning as pl
from transformers import AutoTokenizer

from lightning_transformers.task.nlp.token_classification import (
    TokenClassificationDataConfig,
    TokenClassificationDataModule,
    TokenClassificationTransformer,
)

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
    dm = TokenClassificationDataModule(
        cfg=TokenClassificationDataConfig(
            batch_size=1,
            task_name="ner",
            dataset_name="conll2003",
            preprocessing_num_workers=1,
            label_all_tokens=False,
            revision="master",
        ),
        tokenizer=tokenizer,
    )
    model = TokenClassificationTransformer(pretrained_model_name_or_path="bert-base-uncased", labels=dm.labels)
    trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)

    trainer.fit(model, dm)
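
Most of these examples leave the Trainer at accelerator="auto", devices="auto", so scaling out is purely a Trainer-level change. A sketch of a multi-GPU variant (an illustration, not part of the diff), assuming a machine with two GPUs and the PyTorch Lightning 1.6 strategy API:

# Hypothetical variant, not in this PR: the same script runs distributed
# data parallel by changing only the Trainer arguments.
trainer = pl.Trainer(accelerator="gpu", devices=2, strategy="ddp", max_epochs=1)
trainer.fit(model, dm)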