/
base_extractors.py
59 lines (47 loc) · 1.57 KB
/
base_extractors.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
#!/usr/bin/env python3
# X : list of lists of instances, each instance is a list of feature reprs
# y : list of lists of labels
def word2features(sent, i):
    """Build the feature-string list for the token at position *i* of *sent*.

    *sent* is a list of string tokens.  The returned list contains a bias
    feature, surface features of the token itself (lowercased form, last
    2/3 characters, case/digit flags), and the same lowercase/case flags
    for the immediate left and right neighbours.  'BOS' / 'EOS' replace
    the neighbour features at the sentence boundaries.
    """
    token = sent[i]
    feats = [
        'bias',
        f'word.lower={token.lower()}',
        f'word[-3:]={token[-3:]}',
        f'word[-2:]={token[-2:]}',
        f'word.isupper={token.isupper()}',
        f'word.istitle={token.istitle()}',
        f'word.isdigit={token.isdigit()}',
    ]
    # Left context (or beginning-of-sentence marker).
    if i == 0:
        feats.append('BOS')
    else:
        prev = sent[i - 1]
        feats += [
            f'-1word.lower={prev.lower()}',
            f'-1word.istitle={prev.istitle()}',
            f'-1word.isupper={prev.isupper()}',
        ]
    # Right context (or end-of-sentence marker).
    if i == len(sent) - 1:
        feats.append('EOS')
    else:
        nxt = sent[i + 1]
        feats += [
            f'+1word.lower={nxt.lower()}',
            f'+1word.istitle={nxt.istitle()}',
            f'+1word.isupper={nxt.isupper()}',
        ]
    return feats
# takes a list of token/label pairs; returns a list of [feature]/label pairs
def featurise(sentence, brown_cluster=None):
    """Convert a tokenised sentence into per-token feature lists.

    Parameters
    ----------
    sentence : list of string tokens.
    brown_cluster : optional dict mapping a token to its Brown-cluster
        bit-string; every prefix of the bit-string is emitted as a
        feature 'p<len>b<prefix>'.  Defaults to no cluster features.

    Returns a list (one entry per token) of feature-string lists, each
    combining the Brown-cluster prefix features with the per-word
    features from `word2features`.
    """
    # Avoid the mutable-default-argument pitfall: a shared {} default
    # would be the same object across every call.
    if brown_cluster is None:
        brown_cluster = {}
    sentence_repr = []
    for i, word in enumerate(sentence):
        features = []
        # Brown-cluster features: one feature per prefix length of the
        # token's cluster bit-string (single lookup via .get).
        cluster = brown_cluster.get(word)
        if cluster is not None:
            for j in range(1, len(cluster) + 1):
                features.append(f'p{j}b{cluster[:j]}')
        # Per-word surface/context features.
        features += word2features(sentence, i)
        sentence_repr.append(features)
    return sentence_repr