Update some examples for AllenNLP 1.0.0 #5

Merged · 2 commits · Jul 15, 2020

Changes from all commits
45 changes: 35 additions & 10 deletions examples/generation/lm.ipynb
@@ -6,7 +6,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "!pip install allennlp\n",
+  "!pip install allennlp==1.0.0\n",
+  "!pip install allennlp-models==1.0.0\n",
   "!git clone https://github.com/mhagiwara/realworldnlp.git\n",
   "%cd realworldnlp"
  ]
@@ -24,9 +25,10 @@
  "import torch.optim as optim\n",
  "from allennlp.common.file_utils import cached_path\n",
  "from allennlp.common.util import START_SYMBOL, END_SYMBOL\n",
+ "from allennlp.data import DataLoader, AllennlpDataset\n",
+ "from allennlp.data.samplers import BucketBatchSampler\n",
  "from allennlp.data.fields import TextField\n",
  "from allennlp.data.instance import Instance\n",
- "from allennlp.data.iterators import BasicIterator\n",
  "from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\n",
  "from allennlp.data.tokenizers import Token, CharacterTokenizer\n",
  "from allennlp.data.vocabulary import Vocabulary, DEFAULT_PADDING_TOKEN\n",
@@ -35,7 +37,7 @@
  "from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder\n",
  "from allennlp.modules.token_embedders import Embedding\n",
  "from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\n",
- "from allennlp.training.trainer import Trainer"
+ "from allennlp.training.trainer import GradientDescentTrainer"
  ]
 },
 {
@@ -120,6 +122,27 @@
  "        embeddings = self.embedder(input_tokens)\n",
  "        rnn_hidden = self.rnn(embeddings, mask)\n",
  "        out_logits = self.hidden2out(rnn_hidden)\n",
+ "\n",
+ "        \"\"\"\n",
+ "        THIS IS LIKELY NOT HOW I SHOULD FIX THIS, BUT IT WAS THE BEST\n",
+ "        I COULD DO TO GET THIS WORKING\n",
+ "\n",
+ "        At this stage, `output_tokens` looks like this (modulo the specific token indices):\n",
+ "\n",
+ "        {'tokens': {'tokens': tensor([[16, 45,  5, ...,  0,  0,  0],\n",
+ "                                      [51, 56, 48, ...,  0,  0,  0],\n",
+ "                                      [44, 54,  2, ...,  0,  0,  0],\n",
+ "                                      ...,\n",
+ "                                      [14, 54,  7, ...,  0,  0,  0],\n",
+ "                                      [10, 48, 22, ...,  0,  0,  0],\n",
+ "                                      [51, 36, 56, ..., 58,  0,  0]])}}\n",
+ "\n",
+ "        which suggests it is being double-indexed somehow.\n",
+ "\n",
+ "        Thus, calling output_tokens = output_tokens[\"tokens\"] to unnest `tokens`\n",
+ "        resolves this, if not ideally.\n",
+ "        \"\"\"\n",
+ "        output_tokens = output_tokens[\"tokens\"]\n",
Contributor Author commented:

I couldn't quite figure out the correct way to wrap the data loaders from AllenNLP to avoid this nesting of the output_tokens.
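A possible alternative (an untested sketch, not part of this PR): in AllenNLP 1.0 a batched text field is a TextFieldTensors value, i.e. a Dict[str, Dict[str, torch.Tensor]] keyed first by indexer name and then by the tensor name each indexer produces, which is where the double nesting comes from. If memory serves, 1.0 also ships a helper, allennlp.nn.util.get_token_ids_from_text_field_tensors, that unwraps exactly this structure; worth double-checking against the 1.0 docs before relying on it.

    # Sketch only: assumes allennlp==1.0.0 and that this helper exists there.
    from allennlp.nn import util

    def unwrap_token_ids(output_tokens):
        # With a single SingleIdTokenIndexer registered under the name "tokens",
        # `output_tokens` arrives as {"tokens": {"tokens": LongTensor}}.
        # The helper digs the token-id tensor out of the nested dict, so the
        # manual output_tokens["tokens"] unnesting would no longer be needed.
        return util.get_token_ids_from_text_field_tensors(output_tokens)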

" loss = sequence_cross_entropy_with_logits(out_logits, output_tokens['tokens'], mask)\n",
"\n",
" return {'loss': loss}\n",
@@ -219,8 +242,9 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "iterator = BasicIterator(batch_size=BATCH_SIZE)\n",
- "iterator.index_with(vocab)\n",
+ "dataset = AllennlpDataset(instances, vocab)\n",
+ "data_loader = DataLoader(dataset,\n",
+ "                         batch_size=BATCH_SIZE)\n",
  "\n",
  "optimizer = optim.Adam(model.parameters(), lr=5.e-3)"
  ]
@@ -231,11 +255,12 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "trainer = Trainer(model=model,\n",
- "                  optimizer=optimizer,\n",
- "                  iterator=iterator,\n",
- "                  train_dataset=instances,\n",
- "                  num_epochs=10)\n",
+ "trainer = GradientDescentTrainer(\n",
+ "    model=model,\n",
+ "    optimizer=optimizer,\n",
+ "    data_loader=data_loader,\n",
+ "    num_epochs=10)\n",
+ "\n",
  "trainer.train()"
  ]
 },
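Consolidated, the lm.ipynb changes above amount to the usual 1.0 migration pattern; roughly (a sketch assuming allennlp==1.0.0, with instances, vocab, model, and BATCH_SIZE as defined earlier in the notebook):

    import torch.optim as optim
    from allennlp.data import AllennlpDataset, DataLoader
    from allennlp.training.trainer import GradientDescentTrainer

    # AllennlpDataset(instances, vocab) replaces iterator.index_with(vocab) ...
    dataset = AllennlpDataset(instances, vocab)
    # ... and DataLoader replaces BasicIterator.
    data_loader = DataLoader(dataset, batch_size=BATCH_SIZE)

    # GradientDescentTrainer takes a data_loader in place of the old
    # iterator= / train_dataset= pair that Trainer used.
    trainer = GradientDescentTrainer(
        model=model,
        optimizer=optim.Adam(model.parameters(), lr=5.e-3),
        data_loader=data_loader,
        num_epochs=10)
    trainer.train()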
64 changes: 39 additions & 25 deletions examples/ner/ner.ipynb
@@ -6,7 +6,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "!pip install allennlp\n",
+  "!pip install allennlp==1.0.0\n",
+  "!pip install allennlp-models==1.0.0\n",
   "!git clone https://github.com/mhagiwara/realworldnlp.git\n",
   "%cd realworldnlp"
  ]
@@ -24,10 +25,10 @@
  "import torch\n",
  "import torch.optim as optim\n",
  "from allennlp.common.file_utils import cached_path\n",
- "from allennlp.data import DatasetReader\n",
+ "from allennlp.data import DataLoader\n",
  "from allennlp.data.fields import TextField, SequenceLabelField\n",
  "from allennlp.data.instance import Instance\n",
- "from allennlp.data.iterators import BucketIterator\n",
+ "from allennlp.data.samplers import BucketBatchSampler\n",
  "from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\n",
  "from allennlp.data.tokenizers.token import Token\n",
  "from allennlp.data.vocabulary import Vocabulary\n",
@@ -37,7 +38,8 @@
  "from allennlp.modules.token_embedders import Embedding\n",
  "from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\n",
  "from allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure\n",
- "from allennlp.training.trainer import Trainer\n",
+ "from allennlp.training.trainer import GradientDescentTrainer\n",
+ "from allennlp.data.dataset_readers.dataset_reader import DatasetReader, AllennlpDataset\n",
  "from overrides import overrides"
  ]
 },
@@ -151,13 +153,11 @@
 {
  "cell_type": "code",
  "execution_count": null,
- "metadata": {
-  "scrolled": true
- },
+ "metadata": {},
  "outputs": [],
  "source": [
   "reader = NERDatasetReader()\n",
-  "dataset = list(reader.read('https://s3.amazonaws.com/realworldnlpbook/data/entity-annotated-corpus/ner_dataset.csv'))"
+  "dataset = reader.read('https://s3.amazonaws.com/realworldnlpbook/data/entity-annotated-corpus/ner_dataset.csv')"
  ]
 },
 {
@@ -183,8 +183,10 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "train_dataset = [inst for i, inst in enumerate(dataset) if i % 10 != 0]\n",
- "dev_dataset = [inst for i, inst in enumerate(dataset) if i % 10 == 0]"
+ "# Cast back to AllennlpDataset after splitting into train/dev\n",
+ "\n",
+ "train_dataset = AllennlpDataset([inst for i, inst in enumerate(dataset) if i % 10 != 0])\n",
+ "dev_dataset = AllennlpDataset([inst for i, inst in enumerate(dataset) if i % 10 == 0])"
  ]
 },
 {
@@ -232,7 +234,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "optimizer = optim.Adam(model.parameters())"
+ "train_dataset.index_with(vocab)\n",
+ "dev_dataset.index_with(vocab)"
  ]
 },
 {
@@ -241,8 +244,16 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "iterator = BucketIterator(batch_size=16, sorting_keys=[(\"tokens\", \"num_tokens\")])\n",
- "iterator.index_with(vocab)"
+ "train_data_loader = DataLoader(train_dataset,\n",
+ "                               batch_sampler=BucketBatchSampler(\n",
+ "                                   train_dataset,\n",
+ "                                   batch_size=16,\n",
+ "                                   sorting_keys=[\"tokens\"]))\n",
+ "dev_data_loader = DataLoader(dev_dataset,\n",
+ "                             batch_sampler=BucketBatchSampler(\n",
+ "                                 dev_dataset,\n",
+ "                                 batch_size=16,\n",
+ "                                 sorting_keys=[\"tokens\"]))"
  ]
 },
 {
@@ -251,14 +262,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "trainer = Trainer(model=model,\n",
- "                  optimizer=optimizer,\n",
- "                  iterator=iterator,\n",
- "                  train_dataset=train_dataset,\n",
- "                  validation_dataset=dev_dataset,\n",
- "                  patience=10,\n",
- "                  num_epochs=10)\n",
- "trainer.train()"
+ "optimizer = optim.Adam(model.parameters())"
  ]
 },
 {
@@ -267,17 +271,27 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "tokens = ['Apple', 'is', 'looking', 'to', 'buy', 'U.K.', 'startup', 'for', '$1', 'billion', '.']\n",
- "labels = predict(tokens, model)\n",
- "print(' '.join('{}/{}'.format(token, label) for token, label in zip(tokens, labels)))"
+ "trainer = GradientDescentTrainer(\n",
+ "    model=model,\n",
+ "    optimizer=optimizer,\n",
+ "    data_loader=train_data_loader,\n",
+ "    validation_data_loader=dev_data_loader,\n",
+ "    patience=10,\n",
+ "    num_epochs=20)\n",
+ "\n",
+ "trainer.train()"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": null,
  "metadata": {},
  "outputs": [],
- "source": []
+ "source": [
+  "tokens = ['Apple', 'is', 'looking', 'to', 'buy', 'U.K.', 'startup', 'for', '$1', 'billion', '.']\n",
+  "labels = predict(tokens, model)\n",
+  "print(' '.join('{}/{}'.format(token, label) for token, label in zip(tokens, labels)))"
+ ]
 }
 ],
 "metadata": {
81 changes: 46 additions & 35 deletions examples/pos/pos_tagger.ipynb
@@ -6,7 +6,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
-  "!pip install allennlp\n",
+  "!pip install allennlp==1.0.0\n",
+  "!pip install allennlp-models==1.0.0\n",
   "!git clone https://github.com/mhagiwara/realworldnlp.git\n",
   "%cd realworldnlp"
  ]
@@ -22,16 +23,19 @@
  "import numpy as np\n",
  "import torch\n",
  "import torch.optim as optim\n",
- "from allennlp.data.dataset_readers import UniversalDependenciesDatasetReader\n",
- "from allennlp.data.iterators import BucketIterator\n",
+ "\n",
+ "\n",
+ "from allennlp.data import DataLoader\n",
+ "from allennlp.data.samplers import BucketBatchSampler\n",
  "from allennlp.data.vocabulary import Vocabulary\n",
  "from allennlp.models import Model\n",
  "from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper\n",
  "from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder\n",
  "from allennlp.modules.token_embedders import Embedding\n",
  "from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\n",
  "from allennlp.training.metrics import CategoricalAccuracy\n",
- "from allennlp.training.trainer import Trainer\n",
+ "from allennlp.training.trainer import GradientDescentTrainer\n",
+ "from allennlp_models.structured_prediction.dataset_readers.universal_dependencies import UniversalDependenciesDatasetReader\n",
  "\n",
  "from realworldnlp.predictors import UniversalPOSPredictor"
 ]
@@ -113,6 +117,16 @@
  "vocab = Vocabulary.from_instances(train_dataset + dev_dataset)"
  ]
 },
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "train_dataset.index_with(vocab)\n",
+  "dev_dataset.index_with(vocab)"
+ ]
+},
 {
  "cell_type": "code",
  "execution_count": null,
@@ -158,7 +172,24 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "iterator = BucketIterator(batch_size=16, sorting_keys=[(\"words\", \"num_tokens\")])"
+ "# iterator = BucketIterator(batch_size=16, sorting_keys=[(\"words\", \"num_tokens\")])\n",
+ "train_data_loader = DataLoader(\n",
+ "    train_dataset,\n",
+ "    batch_sampler=BucketBatchSampler(\n",
+ "        train_dataset,\n",
+ "        batch_size=32,\n",
+ "        sorting_keys=[\"words\"]\n",
+ "    )\n",
+ ")\n",
+ "\n",
+ "dev_data_loader = DataLoader(\n",
+ "    dev_dataset,\n",
+ "    batch_sampler=BucketBatchSampler(\n",
+ "        dev_dataset,\n",
+ "        batch_size=32,\n",
+ "        sorting_keys=[\"words\"]\n",
+ "    )\n",
+ ")"
  ]
 },
 {
@@ -167,7 +198,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "iterator.index_with(vocab)"
+ "# iterator.index_with(vocab)"
  ]
 },
 {
@@ -176,13 +207,13 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "trainer = Trainer(model=model,\n",
- "                  optimizer=optimizer,\n",
- "                  iterator=iterator,\n",
- "                  train_dataset=train_dataset,\n",
- "                  validation_dataset=dev_dataset,\n",
- "                  patience=10,\n",
- "                  num_epochs=10)\n",
+ "trainer = GradientDescentTrainer(\n",
+ "    model=model,\n",
+ "    optimizer=optimizer,\n",
+ "    data_loader=train_data_loader,\n",
+ "    validation_data_loader=dev_data_loader,\n",
+ "    patience=10,\n",
+ "    num_epochs=10)\n",
  "trainer.train()"
  ]
 },
@@ -199,34 +230,14 @@
  "\n",
  "[vocab.get_token_from_index(tag_id, 'pos') for tag_id in tag_ids]"
  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": []
- }
+ }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
-  "codemirror_mode": {
-   "name": "ipython",
-   "version": 3
-  },
-  "file_extension": ".py",
-  "mimetype": "text/x-python",
-  "name": "python",
-  "nbconvert_exporter": "python",
-  "pygments_lexer": "ipython3",
-  "version": "3.7.2"
+  "pygments_lexer": "ipython3"
  }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
}