# cnn_dm.py
from pathlib import Path
from typing import Dict, Optional, List
import logging
import os
import glob
import hashlib

import ftfy
from overrides import overrides

from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer

logger = logging.getLogger(__name__)


@DatasetReader.register("cnn_dm")
class CNNDailyMailDatasetReader(DatasetReader):
    """
    Reads the CNN/DailyMail dataset for text summarization.

    The output of `read` is a list of `Instance` s with the fields:

        source_tokens : `TextField` and
        target_tokens : `TextField`

    # Parameters

    source_tokenizer : `Tokenizer`, optional
        Tokenizer to use to split the input sequences into words or other kinds of tokens. Defaults
        to `SpacyTokenizer()`.
    target_tokenizer : `Tokenizer`, optional
        Tokenizer to use to split the output sequences (during training) into words or other kinds
        of tokens. Defaults to `source_tokenizer`.
    source_token_indexers : `Dict[str, TokenIndexer]`, optional
        Indexers used to define input (source side) token representations. Defaults to
        `{"tokens": SingleIdTokenIndexer()}`.
    target_token_indexers : `Dict[str, TokenIndexer]`, optional
        Indexers used to define output (target side) token representations. Defaults to
        `source_token_indexers`.
    source_max_tokens : `int`, optional
        Maximum number of tokens in the source sequence; longer sequences are truncated.
    target_max_tokens : `int`, optional
        Maximum number of tokens in the target sequence; longer sequences are truncated.
    """

    def __init__(
        self,
        source_tokenizer: Tokenizer = None,
        target_tokenizer: Tokenizer = None,
        source_token_indexers: Dict[str, TokenIndexer] = None,
        target_token_indexers: Dict[str, TokenIndexer] = None,
        source_max_tokens: Optional[int] = None,
        target_max_tokens: Optional[int] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self._source_tokenizer = source_tokenizer or SpacyTokenizer()
        self._target_tokenizer = target_tokenizer or self._source_tokenizer
        self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
        self._target_token_indexers = target_token_indexers or self._source_token_indexers
        self._source_max_tokens = source_max_tokens
        self._target_max_tokens = target_max_tokens
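
    # Because the class is registered as "cnn_dm" above, it can also be constructed
    # from an AllenNLP experiment config. An illustrative fragment (the key values
    # below are example choices, not required settings):
    #
    #     "dataset_reader": {
    #         "type": "cnn_dm",
    #         "source_max_tokens": 400,
    #         "target_max_tokens": 100
    #     }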

    @staticmethod
    def _hashhex(url: bytes) -> str:
        """Returns a hexadecimal-formatted SHA-1 hash of the given bytes."""
        h = hashlib.sha1()
        h.update(url)
        return h.hexdigest()
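
    # Story files are named after this digest, so a URL maps to its story file as
    # (the digest below is illustrative, not a real hash):
    #   _hashhex(b"http://www.cnn.com/example") -> "3f2a..." -> 3f2a....story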

    @staticmethod
    def _sanitize_story_line(line):
        line = ftfy.fix_encoding(line)

        sentence_endings = [".", "!", "?", "...", "'", "`", '"', ")", "\u2019", "\u201d"]

        # CNN stories always start with "(CNN)"
        if line.startswith("(CNN)"):
            line = line[len("(CNN)") :]

        # Highlights are essentially bullet points and don't have proper sentence
        # endings. The emptiness check guards against lines that become empty after
        # the "(CNN)" prefix is stripped.
        if line and line[-1] not in sentence_endings:
            line += "."

        return line
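
    # For example (illustrative input): "(CNN)Officials announced the deal Friday"
    # is sanitized to "Officials announced the deal Friday." -- the "(CNN)" prefix
    # is stripped and a terminating period is appended.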

    @staticmethod
    def _read_story(story_path: str):
        article: List[str] = []
        summary: List[str] = []
        highlight = False

        with open(story_path, "r") as f:
            for line in f:
                line = line.strip()
                if line == "":
                    continue

                # Everything after the first "@highlight" marker belongs to the summary.
                if line == "@highlight":
                    highlight = True
                    continue

                line = CNNDailyMailDatasetReader._sanitize_story_line(line)
                (summary if highlight else article).append(line)

        return " ".join(article), " ".join(summary)

    @staticmethod
    def _strip_extension(filename: str) -> str:
        return os.path.splitext(filename)[0]

    @overrides
    def _read(self, file_path: str):
        # Reset exceeded counts
        self._source_max_exceeded = 0
        self._target_max_exceeded = 0

        url_file_path = cached_path(file_path, extract_archive=True)

        # The extracted `cnn_stories/` and `dm_stories/` directories are expected
        # to sit one level above the directory containing the URL list.
        data_dir = os.path.join(os.path.dirname(url_file_path), "..")
        cnn_stories_path = os.path.join(data_dir, "cnn_stories")
        dm_stories_path = os.path.join(data_dir, "dm_stories")
        cnn_stories = {Path(s).stem for s in glob.glob(os.path.join(cnn_stories_path, "*.story"))}
        dm_stories = {Path(s).stem for s in glob.glob(os.path.join(dm_stories_path, "*.story"))}

        with open(url_file_path, "r") as url_file:
            for url in url_file:
                url = url.strip()

                url_hash = self._hashhex(url.encode("utf-8"))

                if url_hash in cnn_stories:
                    story_base_path = cnn_stories_path
                elif url_hash in dm_stories:
                    story_base_path = dm_stories_path
                else:
                    raise ConfigurationError(
                        "Story with url '%s' and hash '%s' not found" % (url, url_hash)
                    )

                story_path = os.path.join(story_base_path, url_hash) + ".story"
                article, summary = self._read_story(story_path)

                # Skip degenerate examples: an empty article or summary, or an
                # article shorter than its own summary.
                if len(article) == 0 or len(summary) == 0 or len(article) < len(summary):
                    continue

                yield self.text_to_instance(article, summary)

    @overrides
    def text_to_instance(
        self, source_sequence: str, target_sequence: str = None
    ) -> Instance:  # type: ignore
        tokenized_source = self._source_tokenizer.tokenize(source_sequence)
        if self._source_max_tokens is not None and len(tokenized_source) > self._source_max_tokens:
            tokenized_source = tokenized_source[: self._source_max_tokens]

        source_field = TextField(tokenized_source, self._source_token_indexers)

        if target_sequence is not None:
            tokenized_target = self._target_tokenizer.tokenize(target_sequence)
            if (
                self._target_max_tokens is not None
                and len(tokenized_target) > self._target_max_tokens
            ):
                tokenized_target = tokenized_target[: self._target_max_tokens]
            target_field = TextField(tokenized_target, self._target_token_indexers)
            return Instance({"source_tokens": source_field, "target_tokens": target_field})
        else:
            return Instance({"source_tokens": source_field})
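

if __name__ == "__main__":
    # Minimal usage sketch; the path and max-token values below are hypothetical
    # example choices. As resolved in `_read`, the URL list's grandparent directory
    # must contain the extracted `cnn_stories/` and `dm_stories/` folders of
    # `.story` files.
    reader = CNNDailyMailDatasetReader(source_max_tokens=400, target_max_tokens=100)
    for instance in reader.read("data/url_lists/all_val.txt"):
        print(instance.fields["source_tokens"])
        break  # inspect just the first instance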