-
Notifications
You must be signed in to change notification settings - Fork 1
/
test_sick.py
146 lines (137 loc) · 5.09 KB
/
test_sick.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
def load_data_SICK(loc='data/SICK/', rescale=True, contradiction_as_0=False):
    """
    Load the SICK semantic-relatedness dataset.

    Parameters
    ----------
    loc : str
        Directory containing SICK_train.txt, SICK_trial.txt and
        SICK_test_annotated.txt (must end with a path separator).
    rescale : bool
        If True, linearly map the 1..5 relatedness scores onto 0..1.
    contradiction_as_0 : bool
        If True, force CONTRADICTION pairs to the minimum relatedness
        score (1.0 raw, i.e. 0.0 after rescaling).

    Returns
    -------
    list
        [trainA, trainB, trainS], [devA, devB, devS], [testA, testB, testS]
        where A/B are sentence lists and S is the float score list, with
        the header row removed.
    """
    def _read_split(filename):
        # Parse one tab-separated SICK file: columns are
        # pair_ID, sentence_A, sentence_B, relatedness_score, entailment_judgment.
        a, b, s = [], [], []
        # Text mode (not 'rb') so splitting on the str '\t' also works on
        # Python 3; identical behavior on Python 2 (POSIX).
        with open(loc + filename, 'r') as f:
            f.readline()  # skip the header row (no-op on an empty file)
            for line in f:
                fields = line.strip().split('\t')
                a.append(fields[1])
                b.append(fields[2])
                if contradiction_as_0 and fields[4] == 'CONTRADICTION':
                    # Treat contradictions as minimally related.
                    s.append('1.0')
                else:
                    s.append(fields[3])
        return a, b, s

    trainA, trainB, trainS = _read_split('SICK_train.txt')
    devA, devB, devS = _read_split('SICK_trial.txt')
    testA, testB, testS = _read_split('SICK_test_annotated.txt')

    if rescale:
        # Map raw scores from [1, 5] to [0, 1].
        def _convert(s):
            return (float(s) - 1.0) / 4.0
    else:
        _convert = float
    trainS = [_convert(s) for s in trainS]
    devS = [_convert(s) for s in devS]
    testS = [_convert(s) for s in testS]
    return [trainA, trainB, trainS], [devA, devB, devS], [testA, testB, testS]
def clip_data(data, N):
    """Truncate a [sentA, sentB, scores] split in place to its first N items."""
    for i in range(3):
        data[i] = data[i][:N]
###################################################################################################
# print "Training BOW+LR model on SICK"
# from encoders.classifier import Classifier;
# c = Classifier(['bow']);
# train, dev, test = load_data_SICK();
# print "Untrained performance: "
# c.test(test)
# # ************ SUMMARY ***********
# # Test data size: 4927
# # Test Pearson: 0.694481553078
# # Test Spearman: 0.577979770056
# # Test MSE: 0.0298842735276
# # ********************************
# print "Training..."
# c.classifier = c.train(train, dev)
# print "Trained performance: "
# c.test(test)
# # ************ SUMMARY *********** WITH 1 LINEAR LAYER
# # Test data size: 4927
# # Test Pearson: 0.738651127214
# # Test Spearman: 0.627919944578
# # Test MSE: 0.0186701774748
# # ********************************
# # ************ SUMMARY *********** WITH 1 SIGMOID LAYER
# # Test data size: 4927
# # Test Pearson: 0.746720520786
# # Test Spearman: 0.625170110855
# # Test MSE: 0.0181161723794
# # ********************************
# # ************ SUMMARY *********** WITH 2 SIGMOID LAYERS
# # Test data size: 4927
# # Test Pearson: 0.768042370046
# # Test Spearman: 0.66275561178
# # Test MSE: 0.0168185726496
# # ********************************
###################################################################################################
print "Training infersent based model on SICK"
from encoders.classifier import Classifier;
c = Classifier(['infersent']);
train, dev, test = load_data_SICK(contradiction_as_0=True);
# N = 200
# clip_data(train, N)
# clip_data(dev, N/10)
# clip_data(test, N)
print "Untrained performance: "
c.test(test)
# # ************ SUMMARY ***********
# # Test data size: 2000
# # Test Pearson: 0.705948727038
# # Test Spearman: 0.638738528351
# # Test MSE: 0.0255961994753
# # ********************************
# # ************ SUMMARY *********** with contradiction_as_0 option set
# # Test data size: 4927
# # Test Pearson: 0.464855799532
# # Test Spearman: 0.481058474327
# # Test MSE: 0.154645949212
# # ********************************
print "Training..."
c.classifier = c.train(train, dev)
c.classifier.save('infersent-sick.h5');
print "Trained performance: "
c.test(test)
# ************ SUMMARY *********** saved as infersent-sick.h5
# Test data size: 4927
# Test Pearson: 0.872362947586
# Test Spearman: 0.823647106453
# Test MSE: 0.0154452371796
# ********************************
# ************ SUMMARY *********** with contradiction_as_0 option; saved as infersent-sick_rel_contra.h5
# Test data size: 4927
# Test Pearson: 0.85439653603
# Test Spearman: 0.849536383428
# Test MSE: 0.0304554283957
# ********************************
###################################################################################################
# print "Training bow + feature based model on SICK"
# from encoders.classifier import Classifier;
# c = Classifier(['bow', 'feature_based']);
# train, dev, test = load_data_SICK();
# print "Training..."
# c.classifier = c.train(train, dev)
# c.classifier.save('pretrained/classifiers/bow_fb_scaled-sick.h5')
# print "Trained performance: "
# c.test(test)
# # ************ SUMMARY *********** 2 layers; scaled to 0...1; saved as bow_fb_scaled-sick.h5
# # Test data size: 4927
# # Test Pearson: 0.775201503039
# # Test Spearman: 0.685546582637
# # Test MSE: 0.0261244947089
# # ********************************