-
Notifications
You must be signed in to change notification settings - Fork 0
/
custom_operations.py
66 lines (58 loc) · 3.33 KB
/
custom_operations.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import tensorflow as tf
import numpy as np
def feedback_alignment_fc(input, weights, initializer=tf.initializers.he_normal(), name="fa_fc"):
    """Fully connected layer whose backward pass uses feedback alignment.

    Forward: y = x @ weights. Backward: the input gradient is computed with a
    fixed random feedback matrix instead of weights^T (feedback alignment);
    the weight gradient is the usual x^T @ dy.

    Args:
        input: input tensor; assumed 2-D [batch, in_dim] -- TODO confirm with callers.
        weights: weight variable of shape [in_dim, out_dim].
        initializer: initializer for the fixed random feedback matrix.
        name: name scope wrapping the forward op.

    Returns:
        The matmul output tensor with the custom gradient attached.
    """
    # Fixed, non-trainable feedback matrix shaped like weights^T so that
    # dy @ random yields a gradient with the input's shape.
    # FIX: reversed() returns a one-shot iterator; pass a concrete list as the
    # shape argument instead of relying on TensorShape consuming the iterator.
    # NOTE(review): the variable name "random" will collide if this function is
    # called twice in the same variable scope; callers presumably wrap each
    # layer in its own tf.variable_scope -- verify.
    random = tf.get_variable("random",
                             shape=list(reversed(weights.get_shape().as_list())),
                             initializer=initializer, use_resource=True, trainable=False)

    @tf.custom_gradient
    def func(x):
        # The `variables` default tells tf.custom_gradient which variables this
        # gradient function accounts for.
        def grad(dy, variables=[weights]):
            dx = tf.matmul(dy, random)           # feedback-alignment input gradient
            dw = tf.matmul(tf.transpose(x), dy)  # standard weight gradient
            return dx, [dw]
        return tf.matmul(x, weights), grad

    with tf.name_scope(name):
        return func(input)
def feedback_alignment_conv(input, weights, strides, padding, use_cudnn_on_gpu=True, data_format='NHWC',
                            dilations=[1, 1, 1, 1], initializer=tf.initializers.he_normal(),
                            name="fa_conv"):
    """2-D convolution whose backward pass uses feedback alignment.

    Forward: standard conv2d with `weights`. Backward: the input gradient is
    computed by transposed convolution with a fixed random filter instead of
    `weights`; the filter gradient is the standard conv2d filter gradient.

    Args:
        input: input tensor (NHWC or NCHW per `data_format`).
        weights: conv filter variable, shape [h, w, in_ch, out_ch].
        strides, padding, use_cudnn_on_gpu, data_format, dilations: forwarded
            to tf.nn.conv2d and its backprop ops unchanged.
        initializer: initializer for the fixed random feedback filter.
        name: name scope wrapping the forward op.

    Returns:
        The conv2d output tensor with the custom gradient attached.
    """
    # Fixed, non-trainable feedback filter with the same shape as `weights`.
    # NOTE(review): the variable name "random" collides if two layers share a
    # variable scope; callers presumably scope each layer -- verify.
    random = tf.get_variable("random", shape=weights.get_shape().as_list(),
                             initializer=initializer, use_resource=True, trainable=False)

    @tf.custom_gradient
    def func(x):
        # `variables` default registers `weights` with tf.custom_gradient.
        def grad(dy, variables=[weights]):
            # Input gradient via the fixed random filter (feedback alignment).
            dx = tf.nn.conv2d_backprop_input(tf.shape(x), random, dy, strides, padding, use_cudnn_on_gpu,
                                             data_format, dilations)
            # Standard filter gradient.
            dw = tf.nn.conv2d_backprop_filter(x, weights.get_shape(), dy, strides, padding, use_cudnn_on_gpu,
                                              data_format, dilations)
            return dx, [dw]
        # FIX: compute the forward pass from the decorated argument `x`, not the
        # closed-over `input`; tf.custom_gradient requires the output to be a
        # function of `x` (the original only worked because func(input) happened
        # to pass the same tensor).
        return tf.nn.conv2d(x, weights, strides, padding, use_cudnn_on_gpu, data_format, dilations), grad

    with tf.name_scope(name):
        return func(input)
def direct_feedback_alignment_fc(input, weights, output_dim, error_container, initializer=tf.initializers.he_normal(),
                                 name="dfa_fc"):
    """Fully connected layer trained with direct feedback alignment (DFA).

    Forward: x @ weights. Backward: the input gradient is the network's output
    error (read from `error_container[0]` at gradient time) projected through a
    fixed random matrix, bypassing the incoming gradient `dy`; the weight
    gradient is the usual x^T @ dy.

    Args:
        input: input tensor; assumed 2-D [batch, in_dim] -- TODO confirm with callers.
        weights: weight variable; weights.shape[0] is the layer's input width.
        output_dim: width of the network output the error is projected from.
        error_container: one-element mutable container holding the output-layer
            error tensor; read lazily inside the gradient function.
        initializer: initializer for the fixed random projection.
        name: name scope wrapping the forward op.

    Returns:
        The matmul output tensor with the custom gradient attached.
    """
    # Fixed, non-trainable projection from the network output error down to
    # this layer's input width.
    feedback_shape = [output_dim, weights.shape[0]]
    random = tf.get_variable("random", shape=feedback_shape, initializer=initializer,
                             use_resource=True, trainable=False)

    @tf.custom_gradient
    def _forward(x):
        # `variables` default registers `weights` with tf.custom_gradient.
        def _grad(dy, variables=[weights]):
            # DFA: ignore dy for the input path; project the global error instead.
            dx = tf.matmul(error_container[0], random, name='matmul_grad_x')
            dw = tf.matmul(tf.transpose(x), dy, name='matmul_grad_w')
            return dx, [dw]
        y = tf.matmul(x, weights, name='matmul_forward_x')
        return y, _grad

    with tf.name_scope(name):
        return _forward(input)
def direct_feedback_alignment_conv(input, weights, output_dim, error_container, strides, padding,
                                   use_cudnn_on_gpu=True, data_format='NHWC', dilations=[1, 1, 1, 1],
                                   initializer=tf.initializers.he_normal(), name="dfa_conv"):
    """2-D convolution trained with direct feedback alignment (DFA).

    Forward: standard conv2d with `weights`. Backward: the input gradient is the
    network's output error (from `error_container[0]`) projected through a fixed
    random matrix and reshaped to the input's shape, bypassing `dy`; the filter
    gradient is the standard conv2d filter gradient.

    Args:
        input: input tensor; its non-batch dimensions must be statically known
            (np.prod over input.shape[1:]) -- NOTE(review): fails for unknown
            static shapes; confirm callers always provide fully-defined shapes.
        weights: conv filter variable, shape [h, w, in_ch, out_ch].
        output_dim: width of the network output the error is projected from.
        error_container: one-element mutable container holding the output-layer
            error tensor; read lazily inside the gradient function.
        strides, padding, use_cudnn_on_gpu, data_format, dilations: forwarded
            to tf.nn.conv2d and its filter-gradient op unchanged.
        initializer: initializer for the fixed random projection.
        name: name scope wrapping the forward op.

    Returns:
        The conv2d output tensor with the custom gradient attached.
    """
    input_shape = tf.shape(input)                 # dynamic shape, for the reshape in grad
    input_flat_shape = np.prod(input.shape[1:])   # static flattened size of one example
    # Fixed, non-trainable projection from the output error to the flattened input.
    random = tf.get_variable("random", shape=[output_dim, input_flat_shape],
                             initializer=initializer, use_resource=True, trainable=False)

    @tf.custom_gradient
    def func(x):
        # `variables` default registers `weights` with tf.custom_gradient.
        def grad(dy, variables=[weights]):
            # DFA: project the global error and reshape it back to the input shape.
            dx = tf.reshape(tf.matmul(error_container[0], random), input_shape)
            # Standard filter gradient.
            dw = tf.nn.conv2d_backprop_filter(x, weights.get_shape(), dy, strides, padding, use_cudnn_on_gpu,
                                              data_format, dilations)
            return dx, [dw]
        # FIX: compute the forward pass from the decorated argument `x`, not the
        # closed-over `input`, so the custom gradient is wired to the function's
        # own input (the original only worked because func(input) passed the
        # same tensor).
        return tf.nn.conv2d(x, weights, strides, padding, use_cudnn_on_gpu, data_format, dilations), grad

    with tf.name_scope(name):
        return func(input)