#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File : character_2gram_embedding_for_AA.py
# Description : Character 2-gram embedding for authorship attribution.
# Author : Nils Schaetti <nils.schaetti@unine.ch>
# Date : 01.02.2017 17:59:05
# Location : Nyon, Switzerland
#
# This file is part of the Reservoir Computing NLP Project.
# The Reservoir Computing NLP Project is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import nsNLP
import numpy as np
import torch.utils.data
from torch.autograd import Variable
from echotorch import datasets
from echotorch.transforms import text
from modules import CNNCharacterEmbedding, CNNDeepFeatureSelector, CNNFeatureSelector
from torch import optim
import torch.nn as nn
import echotorch.nn as etnn
import echotorch.utils
import os
# Settings
n_epoch = 1
embedding_dim = 10
n_authors = 15
use_cuda = True
voc_size = 1595
# Character 2-gram transform
transform = text.Character2Gram()
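# NOTE: Character2Gram is assumed here to map each document to a sequence of
# character 2-gram indices (one integer per overlapping pair of characters);
# voc_size above is then presumably the number of distinct 2-grams in the corpus.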
# Reuters C50 dataset
reutersloader = torch.utils.data.DataLoader(
    datasets.ReutersC50Dataset(download=True, n_authors=n_authors, transform=transform),
    batch_size=1,
    shuffle=False
)
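# NOTE: batch_size is 1, presumably because documents have different lengths;
# the singleton batch dimension is squeezed out before each forward pass below.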
# Model
model = CNNCharacterEmbedding(voc_size=voc_size, embedding_dim=embedding_dim)
if use_cuda:
    model.cuda()
# end if
# Optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Loss function
# loss_function = nn.NLLLoss()
loss_function = nn.CrossEntropyLoss()
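# NOTE: CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model is expected
# to return raw (unnormalised) class scores here; the commented-out NLLLoss would
# instead require log-probabilities as input.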
# Set fold and training mode
reutersloader.dataset.set_fold(0)
# Epoch
for epoch in range(n_epoch):
    # Total losses
    training_loss = 0.0
    test_loss = 0.0

    # Set training mode
    reutersloader.dataset.set_train(True)

    # Get training data for this fold
    for i, data in enumerate(reutersloader):
        # Inputs and labels
        inputs, labels, _ = data

        # Outputs : repeat the author label for each 2-gram in the sequence
        outputs = torch.LongTensor(inputs.size(1)).fill_(labels[0])

        # Shape
        inputs = inputs.squeeze(0)

        # To variable
        inputs, outputs = Variable(inputs), Variable(outputs)
        if use_cuda:
            inputs, outputs = inputs.cuda(), outputs.cuda()
        # end if

        # Zero grad
        model.zero_grad()

        # Compute output
        log_probs = model(inputs)

        # Loss
        loss = loss_function(log_probs, outputs)

        # Backward and step
        loss.backward()
        optimizer.step()

        # Add
        training_loss += loss.data[0]
    # end for
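    # NOTE: the accuracy computed below is per character 2-gram (each position in
    # the sequence is classified independently), not per document.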
    # Set test mode
    reutersloader.dataset.set_train(False)

    # Counters
    total = 0.0
    success = 0.0

    # For each test sample
    for i, data in enumerate(reutersloader):
        # Inputs and labels
        inputs, labels, _ = data

        # Outputs
        outputs = torch.LongTensor(inputs.size(1)).fill_(labels[0])

        # Shape
        inputs = inputs.squeeze(0)

        # To variable
        inputs, outputs = Variable(inputs), Variable(outputs)
        if use_cuda:
            inputs, outputs = inputs.cuda(), outputs.cuda()
        # end if

        # Forward
        model_outputs = model(inputs)
        loss = loss_function(model_outputs, outputs)

        # Take the max as predicted
        _, predicted = torch.max(model_outputs.data, 1)

        # Add to correctly classified 2-grams
        success += (predicted == outputs.data).sum()
        total += predicted.size(0)

        # Add loss
        test_loss += loss.data[0]
    # end for

    # Print and save loss
    print(u"Epoch {}, training loss {}, test loss {}, accuracy {}".format(epoch, training_loss, test_loss,
                                                                          success / total * 100.0))
# end for