base_cnn.py
import tensorflow as tf
import numpy as np
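
# NOTE: this file is written against the TensorFlow 1.x graph API
# (tf.placeholder / tf.Session); under TensorFlow 2.x the same calls are
# available via tf.compat.v1 with eager execution disabled.
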
class BaseCNN(object):
    def __init__(self, args):
        # placeholders: token ids, tag ids, one-hot labels, dropout keep prob
        # (input_x_tag is not used by the base model itself)
        self.input_x_text = tf.placeholder(tf.int32, [None, args.seq_length], name="input_x_text")
        self.input_x_tag = tf.placeholder(tf.int32, [None, args.seq_length], name="input_x_tag")
        self.input_y = tf.placeholder(tf.float32, [None, args.num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        # keep track of L2 regularization loss
        l2_loss = tf.constant(0.0)
        # word embedding layer
        with tf.name_scope("word-embedding"):
            # build the embedding matrix; args.vocab maps word -> (index, vector)
            emb_dim = len(next(iter(args.vocab.values()))[1])
            emb_mat = np.random.rand(args.vocab_size, emb_dim)
            for word, (idx, emb_vec) in args.vocab.items():
                emb_mat[idx] = emb_vec
            # word embedding variable initialized from the pre-built matrix
            W = tf.Variable(tf.convert_to_tensor(emb_mat, dtype=tf.float32), name="W")
            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x_text)
            # add a channel dimension for conv2d: [batch, seq_length, emb_dim, 1]
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
        # convolution + max-pooling layer, one branch per filter size
        pooled_outputs = []
        for i, filter_size in enumerate(args.filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # convolution over windows of filter_size words
                filter_shape = [filter_size, emb_dim, 1, args.num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[args.num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # activation
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # max-pooling over the whole sequence: [batch, 1, 1, num_filters]
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, args.seq_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="pool")
                pooled_outputs.append(pooled)
        # combine the pooled features from all filter sizes
        num_filters_total = args.num_filters * len(args.filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # dropout layer
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
        # output layer (a single fully-connected layer)
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal([num_filters_total, args.num_classes], stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[args.num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name="logits")
            self.predictions = tf.argmax(self.logits, 1, name="predictions")
        # calculate accuracy
        with tf.name_scope("accuracy"):
            self.targets = tf.argmax(self.input_y, 1)
            correct_predictions = tf.equal(self.predictions, self.targets)
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
        # calculate loss (cross-entropy plus scaled L2 penalty)
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
            self.loss = tf.reduce_mean(losses) + args.l2_reg_lambda * l2_loss
        # train and update
        with tf.name_scope("update"):
            optimizer = tf.train.AdamOptimizer(args.learning_rate)
            self.grads_and_vars = optimizer.compute_gradients(self.loss)
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            self.train_op = optimizer.apply_gradients(self.grads_and_vars, global_step=self.global_step)
        # L2-norm clipping of the output weights (max-norm regularization);
        # run self.weight_clipping_op after each training step
        self.weight_clipping_op = []
        self.vars_for_clipping = []
        for var in tf.trainable_variables():
            if var.name.startswith("output/W"):
                # keep the L2 loss of each clipped variable for monitoring
                self.vars_for_clipping.append(tf.nn.l2_loss(var))
                updated_var = tf.clip_by_norm(var, args.l2_limit)
                self.weight_clipping_op.append(tf.assign(var, updated_var))
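

# A minimal usage sketch, not part of the original file: the hyper-parameter
# values and the toy vocabulary below are assumptions chosen for illustration.
if __name__ == "__main__":
    from argparse import Namespace

    # hypothetical 50-dimensional embeddings for a three-word vocabulary
    vocab = {w: (i, np.random.rand(50)) for i, w in enumerate(["<pad>", "hello", "world"])}
    args = Namespace(
        seq_length=20, num_classes=2, vocab=vocab, vocab_size=len(vocab),
        filter_sizes=[3, 4, 5], num_filters=8, l2_reg_lambda=0.1,
        learning_rate=1e-3, l2_limit=3.0)

    model = BaseCNN(args)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # one training step on a random batch of four examples
        x = np.random.randint(0, args.vocab_size, size=(4, args.seq_length))
        y = np.eye(args.num_classes)[np.random.randint(0, args.num_classes, size=4)]
        _, step, loss = sess.run(
            [model.train_op, model.global_step, model.loss],
            feed_dict={model.input_x_text: x, model.input_y: y,
                       model.dropout_keep_prob: 0.5})
        # apply max-norm clipping to the output weights after the update
        sess.run(model.weight_clipping_op)
        print("step %d, loss %.4f" % (step, loss))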