# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN_Att_new'
        self.train_path = dataset + '/data/train.txt'                                # training set
        self.dev_path = dataset + '/data/dev.txt'                                    # validation set
        self.test_path = dataset + '/data/test.txt'                                  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]              # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                                # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None                                       # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # device

        self.dropout = 0.3                                         # dropout rate
        self.require_improvement = 1000                            # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)                    # number of classes
        self.n_vocab = 0                                           # vocabulary size, set at runtime
        self.num_epochs = 20                                       # number of epochs
        self.batch_size = 128                                      # mini-batch size
        self.pad_size = 256                                        # sequence length (shorter sentences padded, longer truncated)
        self.learning_rate = 1e-3                                  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300      # embedding dimension; follows the pre-trained vectors if provided
        self.hidden_size = 128                                     # LSTM hidden size
        self.num_layers = 2                                        # number of LSTM layers
        self.hidden_size2 = 64                                     # hidden size of the first fully connected layer
'''Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification'''
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.batch_size = config.batch_size
        self.pad_size = config.pad_size
        self.lstm1 = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                             bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh1 = nn.Tanh()
        # self.u = nn.Parameter(torch.Tensor(config.hidden_size * 2, config.hidden_size * 2))
        self.w1 = nn.Parameter(torch.zeros(config.hidden_size * 2))
        # one-sided Gaussian decay kernel, normalized to sum to 1,
        # used to smooth the attention weights along the sequence dimension
        kernel_size = 7
        self.kernel_size = kernel_size
        sigma = 1.0
        kernel = np.exp(-np.arange(kernel_size)**2 / (2*sigma**2))
        kernel = kernel / kernel.sum()
        self.kernel = torch.from_numpy(kernel).float().unsqueeze(0).unsqueeze(0).to(config.device)
        #self.lstm2 = nn.LSTM(config.embed + config.hidden_size*2, config.hidden_size, config.num_layers,
        #                     bidirectional=True, batch_first=True, dropout=config.dropout)
        self.lstm2 = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                             bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh2 = nn.Tanh()
        self.w2 = nn.Parameter(torch.zeros(config.hidden_size * 2))
        self.tanh3 = nn.Tanh()
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size2)
        self.fc = nn.Linear(config.hidden_size2, config.num_classes)
    def forward(self, x):
        x, _ = x
        emb = self.embedding(x)   # [batch_size, seq_len, embed]
        H, _ = self.lstm1(emb)    # [batch_size, seq_len, hidden_size * 2]
        M = self.tanh1(H)         # [batch_size, seq_len, hidden_size * 2]
        # M = torch.tanh(torch.matmul(H, self.u))
        alpha = F.softmax(torch.matmul(M, self.w1), dim=1).unsqueeze(-1)       # [batch_size, seq_len, 1]
        alpha = torch.reshape(alpha, (alpha.shape[0], 1, -1))                  # [batch_size, 1, seq_len]
        alpha = F.conv1d(alpha, self.kernel, padding=(self.kernel_size-1)//2)  # Gaussian smoothing of the attention weights
        alpha = alpha.squeeze(1).unsqueeze(-1)                                 # [batch_size, seq_len, 1]
        out = H * alpha           # [batch_size, seq_len, hidden_size * 2]
        emb_new = emb * alpha     # [batch_size, seq_len, embed]
        #emb_new = torch.concat((emb_new, out), 2)  # [batch_size, seq_len, embed + hidden_size * 2]
        H_new, _ = self.lstm2(emb_new)              # [batch_size, seq_len, hidden_size * 2]
        M_new = self.tanh2(H_new)  # attention over the second LSTM's hidden states; [batch_size, seq_len, hidden_size * 2]
        alpha_new = F.softmax(torch.matmul(M_new, self.w2), dim=1).unsqueeze(-1)  # [batch_size, seq_len, 1]
        out_new = H_new * alpha_new  # [batch_size, seq_len, hidden_size * 2]
        #out = (out + out_new) / 2
        out = torch.sum(out_new, 1)  # [batch_size, hidden_size * 2]
        out = F.relu(out)
        out = self.fc1(out)          # [batch_size, hidden_size2]
        out = self.fc(out)           # [batch_size, num_classes]
        return out, alpha
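

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): it builds a
    # stand-in config with SimpleNamespace so the forward pass can be
    # exercised without the dataset files that Config.__init__ reads.
    # Every value below is an illustrative assumption, not the repository's
    # actual training setup.
    from types import SimpleNamespace

    demo_config = SimpleNamespace(
        embedding_pretrained=None,   # fall back to a randomly initialised nn.Embedding
        n_vocab=5000,                # hypothetical vocabulary size
        embed=300,
        hidden_size=128,
        num_layers=2,
        hidden_size2=64,
        num_classes=10,
        dropout=0.3,
        batch_size=4,
        pad_size=256,
        device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    )
    model = Model(demo_config).to(demo_config.device)

    # The surrounding training code is assumed to pass (token_ids, seq_len)
    # tuples, which matches the `x, _ = x` unpacking at the top of Model.forward.
    token_ids = torch.randint(0, demo_config.n_vocab - 1,
                              (demo_config.batch_size, demo_config.pad_size),
                              device=demo_config.device)
    seq_len = torch.full((demo_config.batch_size,), demo_config.pad_size,
                         device=demo_config.device)

    logits, attention = model((token_ids, seq_len))
    print(logits.shape)     # torch.Size([4, 10])     -> [batch_size, num_classes]
    print(attention.shape)  # torch.Size([4, 256, 1]) -> smoothed attention weights per token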