preprocess.py (forked from kevinkwl/AoAReader)
# -*- coding: utf-8 -*-
# Adapted from https://github.com/nschuc/alternating-reader-tf/blob/master/load_data.py,
# with some modifications.
import json
import os
import itertools
from functools import reduce
from sys import argv

import torch
from nltk.tokenize import word_tokenize

import aoareader.Constants
from aoareader.Dict import Dict as Vocabulary

# Default locations; the split filenames are overridden from argv when this
# module is run as a script (see the __main__ block at the bottom).
data_path = 'data/CBTest/data'

data_filenames = {
    'train': 'train.txt',
    'valid': 'dev.txt',
    'test': 'test.txt'
}

vocab_file = os.path.join(data_path, 'vocab.json')
dict_file = os.path.join(data_path, 'dict.pt')

def tokenize(sentence):
    # Lowercase every token and drop whitespace-only tokens.
    return [s.strip().lower() for s in word_tokenize(sentence) if s.strip()]
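
# For illustration (exact tokenization depends on the installed NLTK models):
#   tokenize("Where is Mr. Cropper ?")  ->  ['where', 'is', 'mr.', 'cropper', '?']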

def parse_stories(lines, with_answer=True):
    stories = []
    story = []
    for line in lines:
        line = line.strip()
        if not line:
            # A blank line separates stories: start accumulating a new one.
            story = []
        else:
            # Drop the line number that prefixes every CBT line.
            try:
                _, line = line.split(' ', 1)
            except ValueError:
                print('*' * 10 + line)
            if line:
                if '\t' in line:  # query line
                    answer = ''
                    if with_answer:
                        q, answer, _, candidates = line.split('\t')
                        answer = answer.lower()
                    else:
                        q, _, candidates = line.split('\t')
                    q = tokenize(q)
                    # Use only the first 10 candidates.
                    candidates = [cand.lower() for cand in candidates.split('|')[:10]]
                    stories.append((story, q, answer, candidates))
                else:
                    story.append(tokenize(line))
    return stories
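
# Shape of the raw CBT data this parser assumes (the leading numbers are part
# of the file itself; stories are separated by a blank line):
#   1 <context sentence>
#   ...
#   20 <context sentence>
#   21 <query with XXXXX blank>\t<answer>\t\t<cand1>|<cand2>|...
# Lines 1-20 accumulate into `story`; line 21 produces one
# (story, query, answer, candidates) tuple.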

def get_stories(story_lines, with_answer=True):
    stories = parse_stories(story_lines, with_answer=with_answer)
    # Flatten each story's list of tokenized sentences into one token sequence.
    flatten = lambda story: reduce(lambda x, y: x + y, story)
    stories = [(flatten(story), q, a, candidates) for story, q, a, candidates in stories]
    return stories

def vectorize_stories(stories, vocab):
    X = []
    Q = []
    C = []
    A = []
    for s, q, a, c in stories:
        X.append(vocab.convert2idx(s))
        Q.append(vocab.convert2idx(q))
        C.append(vocab.convert2idx(c))
        A.append(vocab.getIdx(a))
    A = torch.LongTensor(A)
    return X, Q, A, C
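
# Return-value sketch: X, Q and C stay as Python lists because documents,
# queries and candidate sets vary in length; only A is packed into a single
# LongTensor. This assumes Dict.convert2idx returns one index tensor per
# sequence, as in OpenNMT-style vocabularies; see aoareader/Dict.py for the
# exact type.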

def build_dict(stories):
    if os.path.isfile(vocab_file):
        # Reuse a previously saved vocabulary.
        with open(vocab_file, 'rt') as vf:
            word2idx = json.load(vf)
    else:
        vocab = sorted(set(itertools.chain(*(story + q + [answer] + candidates
                                             for story, q, answer, candidates in stories))))
        vocab_size = len(vocab) + 2  # plus pad and unk
        print('Vocab size:', vocab_size)
        word2idx = dict((w, i + 2) for i, w in enumerate(vocab))
        word2idx[aoareader.Constants.UNK_WORD] = 1
        word2idx[aoareader.Constants.PAD_WORD] = 0
        with open(vocab_file, 'w') as vf:
            json.dump(word2idx, vf)
    return Vocabulary(word2idx)
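
# Sketch of the resulting mapping (indices 0 and 1 are reserved; the actual
# pad/unk strings come from aoareader.Constants, so the names below are
# placeholders):
#   {PAD_WORD: 0, UNK_WORD: 1, 'a': 2, 'aaron': 3, ...}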

def main():
    print('Preparing datasets ...')
    train_filename = os.path.join(data_path, data_filenames['train'])
    valid_filename = os.path.join(data_path, data_filenames['valid'])
    test_filename = os.path.join(data_path, data_filenames['test'])
    with open(train_filename, 'rt') as tf, open(valid_filename, 'rt') as vf, open(test_filename, 'rt') as tef:
        tlines = tf.readlines()
        vlines = vf.readlines()
        telines = tef.readlines()

    train_stories, valid_stories, test_stories = [get_stories(story_lines)
                                                  for story_lines in [tlines, vlines, telines]]

    print('Building dictionary ...')
    vocab_dict = build_dict(train_stories + valid_stories + test_stories)

    print('Vectorizing training, validation and test data ...')
    train = {}
    valid = {}
    test = {}
    train_data, valid_data, test_data = [vectorize_stories(stories, vocab_dict)
                                         for stories in [train_stories, valid_stories, test_stories]]
    # The 'querys' key spelling is kept for compatibility with the rest of the repo.
    train['documents'], train['querys'], train['answers'], train['candidates'] = train_data
    valid['documents'], valid['querys'], valid['answers'], valid['candidates'] = valid_data
    test['documents'], test['querys'], test['answers'], test['candidates'] = test_data

    print("Saving data to '" + data_path + "' ...")
    torch.save(vocab_dict, dict_file)
    torch.save(train, train_filename + '.pt')
    torch.save(valid, valid_filename + '.pt')
    torch.save(test, test_filename + '.pt')

if __name__ == '__main__':
    # The three split filenames come from the command line; the vocab and dict
    # files are named after the training split.
    data_filenames = {
        'train': argv[1],
        'valid': argv[2],
        'test': argv[3]
    }
    vocab_file = os.path.join(data_path, data_filenames['train'] + 'vocab.json')
    dict_file = os.path.join(data_path, data_filenames['train'] + 'dict.pt')
    main()
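
# Usage sketch (the filenames below are examples, not fixed names; pass any
# three CBT-format files located under data/CBTest/data):
#   python preprocess.py cbtest_NE_train.txt cbtest_NE_valid_2000ex.txt cbtest_NE_test_2500ex.txt
# Outputs, all written under data_path: '<train>vocab.json', '<train>dict.pt',
# and one '.pt' file per split, named '<input filename>.pt'.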