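"""Thin layer helpers written against the TensorFlow 1.x graph API
(tf.variable_scope / tf.get_variable with AUTO_REUSE).

Note: on TensorFlow 2.x these helpers would need
`import tensorflow.compat.v1 as tf` plus `tf.disable_v2_behavior()`.
"""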
import tensorflow as tf


def conv1d(inputs, filters, k_size, stride, padding, scope_name="conv", _weights=None):
    """1-D convolution. If `_weights` is given, it is expected to be a
    (kernel, biases) pair of arrays used to initialize the variables."""
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        in_channels = inputs.shape[-1]
        if _weights is None:
            kernel = tf.get_variable(
                "kernel",
                [k_size, in_channels, filters],
                initializer=tf.truncated_normal_initializer(),
            )
            biases = tf.get_variable(
                "biases", [filters], initializer=tf.random_normal_initializer()
            )
        else:
            # Restore pretrained parameters; shapes come from the constants.
            kernel = tf.get_variable("kernel", initializer=tf.constant(_weights[0]))
            biases = tf.get_variable("biases", initializer=tf.constant(_weights[1]))
        conv = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding)
        output = tf.add(conv, biases, name=scope.name)
    return output


def relu(inputs, scope_name="relu"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _relu = tf.nn.relu(inputs, name=scope.name)
    return _relu


def elu(inputs, scope_name="elu"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _elu = tf.nn.elu(inputs, name=scope.name)
    return _elu


def leaky_relu(inputs, scope_name="leaky_relu"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _leaky_relu = tf.nn.leaky_relu(inputs, name=scope.name)
    return _leaky_relu


def softplus(inputs, scope_name="softplus"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _softplus = tf.nn.softplus(inputs, name=scope.name)
    return _softplus


def softsign(inputs, scope_name="softsign"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _softsign = tf.nn.softsign(inputs, name=scope.name)
    return _softsign


def sigmoid(inputs, scope_name="sigmoid"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _sigmoid = tf.nn.sigmoid(inputs, name=scope.name)
    return _sigmoid


def tanh(inputs, scope_name="tanh"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        _tanh = tf.nn.tanh(inputs, name=scope.name)
    return _tanh


def one_maxpool(inputs, padding="VALID", scope_name="one-pool1d"):
    """Global (1-max) pooling: a single max over the whole time dimension."""
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        height, _ = inputs.shape[-2:]  # window spans the full sequence length
        pool = tf.nn.pool(
            input=inputs,
            window_shape=[height],
            pooling_type="MAX",
            padding=padding,
            strides=[1],
            name=scope.name,
        )
    return pool


def maxpool1d(inputs, k_size, stride=None, padding="VALID", scope_name="pool1d"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        if stride is None:
            stride = k_size  # default to non-overlapping windows
        pool = tf.nn.pool(
            input=inputs,
            window_shape=[k_size],
            pooling_type="MAX",
            padding=padding,
            strides=[stride],
            name=scope.name,
        )
    return pool


def flatten(inputs, scope_name="flatten"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        feature_dim = inputs.shape[1] * inputs.shape[2]
        flat = tf.reshape(inputs, shape=[-1, feature_dim], name=scope.name)
    return flat


def concatenate(inputs, scope_name="concat"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        concat = tf.concat(inputs, axis=1, name=scope.name)
    return concat


def fully_connected(inputs, out_dim, scope_name="fc", _weights=None):
    """Dense layer. If `_weights` is given, it is expected to be a
    (weights, biases) pair of arrays used to initialize the variables."""
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        if _weights is None:
            in_dim = inputs.shape[-1]
            w = tf.get_variable(
                "weights",
                [in_dim, out_dim],
                initializer=tf.truncated_normal_initializer(),
            )
            b = tf.get_variable(
                "biases", [out_dim], initializer=tf.constant_initializer(0.0)
            )
        else:
            # Restore pretrained parameters; shapes come from the constants.
            w = tf.get_variable("weights", initializer=tf.constant(_weights[0]))
            b = tf.get_variable("biases", initializer=tf.constant(_weights[1]))
        out = tf.add(tf.matmul(inputs, w), b, name=scope.name)
    return out


def dropout(inputs, rate, scope_name="dropout"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        # `rate` is the probability of dropping a unit, so keep_prob = 1 - rate.
        out = tf.nn.dropout(inputs, keep_prob=1 - rate, name=scope.name)
    return out


def l2_norm(inputs, alpha, scope_name="l2_norm"):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        # tf.norm without an axis reduces over all elements, so the whole
        # tensor is scaled by alpha / ||inputs||.
        norm = alpha * tf.divide(
            inputs, tf.norm(inputs, ord="euclidean"), name=scope.name
        )
    return norm
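

# --- Usage sketch (illustrative, not part of the original module) -----------
# A minimal example of composing these helpers into a small 1-D conv net;
# the input shape and all hyperparameters below are assumptions.
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, [None, 128, 16], name="x")  # (batch, steps, channels)
    h = conv1d(x, filters=32, k_size=5, stride=1, padding="SAME", scope_name="conv1")
    h = relu(h, scope_name="relu1")
    h = maxpool1d(h, k_size=2, scope_name="pool1")  # -> (batch, 64, 32)
    h = flatten(h, scope_name="flat")               # -> (batch, 2048)
    logits = fully_connected(h, out_dim=10, scope_name="fc1")
    print(logits)  # Tensor of shape (?, 10)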