# forked from ganguli-lab/grid-pattern-formation
# model.py
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Input, Dense, SimpleRNN
from tensorflow.keras.models import Model
class RNN(Model):
    '''Vanilla RNN that path-integrates velocity into a place-cell code.

    A linear encoder maps the initial place-cell activation to the RNN's
    initial hidden state; the recurrent layer integrates 2D velocities;
    a linear decoder reads out predicted place-cell activations.
    '''

    def __init__(self, options, place_cells):
        super().__init__()
        self.Ng = options.Ng
        self.Np = options.Np
        self.sequence_length = options.sequence_length
        self.weight_decay = options.weight_decay
        self.place_cells = place_cells

        # Linear map from initial place-cell code p0 to the initial hidden state.
        self.encoder = Dense(self.Ng, name='encoder', use_bias=False)
        self.RNN = SimpleRNN(
            self.Ng,
            return_sequences=True,
            activation=tf.keras.layers.Activation(options.activation),
            recurrent_initializer='glorot_uniform',
            name='RNN',
            use_bias=False,
        )
        # Linear read-out weights
        self.decoder = Dense(self.Np, name='decoder', use_bias=False)
        # Loss function
        self.loss_fun = tf.nn.softmax_cross_entropy_with_logits

    def g(self, inputs):
        '''
        Compute grid cell activations.
        Args:
            inputs: Tuple (v, p0); v is a batch of 2d velocity inputs with shape
                [batch_size, sequence_length, 2], p0 the initial place-cell code.
        Returns:
            g: Batch of grid cell activations with shape [batch_size, sequence_length, Ng].
        '''
        velocity, initial_place_code = inputs
        initial_state = self.encoder(initial_place_code)
        return self.RNN(velocity, initial_state=initial_state)

    def call(self, inputs):
        '''
        Predict place cell code.
        Args:
            inputs: Tuple (v, p0) as in `g`.
        Returns:
            place_preds: Predicted place cell activations with shape
                [batch_size, sequence_length, Np].
        '''
        return self.decoder(self.g(inputs))

    def compute_loss(self, inputs, pc_outputs, pos):
        '''
        Compute avg. loss and decoding error.
        Args:
            inputs: Tuple (v, p0) as in `g`.
            pc_outputs: Ground truth place cell activations with shape
                [batch_size, sequence_length, Np].
            pos: Ground truth 2d position with shape [batch_size, sequence_length, 2].
        Returns:
            loss: Avg. loss for this training batch.
            err: Avg. decoded position error in cm.
        '''
        predictions = self.decoder(self.g(inputs))
        loss = tf.reduce_mean(self.loss_fun(pc_outputs, predictions))

        # L2 penalty on the recurrent kernel only. With use_bias=False the
        # SimpleRNN's weights are [kernel, recurrent_kernel], so index 1 is
        # the recurrent kernel.
        loss += self.weight_decay * tf.reduce_sum(self.RNN.weights[1] ** 2)

        # Decode position as the location of the most active place cells and
        # measure mean Euclidean distance to ground truth.
        decoded_pos = self.place_cells.get_nearest_cell_pos(predictions)
        squared_dist = tf.reduce_sum((pos - decoded_pos) ** 2, axis=-1)
        err = tf.reduce_mean(tf.sqrt(squared_dist))
        return loss, err
class LSTM(Model):
    '''LSTM path integrator that predicts a place-cell code from velocities.

    Two linear encoders map the initial place-cell activation p0 to the
    LSTM's initial (hidden, cell) state; velocities are linearly embedded
    by `M` before recurrence; a dense layer produces grid-cell activations
    `g`, which a linear decoder maps to place-cell predictions.
    '''

    def __init__(self, options, place_cells):
        super(LSTM, self).__init__()
        self.Ng = options.Ng
        self.Np = options.Np
        self.sequence_length = options.sequence_length
        self.weight_decay = options.weight_decay
        self.place_cells = place_cells

        self.encoder1 = Dense(self.Ng, name='encoder1')
        # BUG FIX: this layer was named 'encoder1' as well, duplicating the
        # name above — duplicate layer names break save/load-by-name and make
        # checkpoints ambiguous. Renamed to 'encoder2'.
        self.encoder2 = Dense(self.Ng, name='encoder2')
        self.M = Dense(self.Ng, name='M')
        self.RNN = tf.keras.layers.LSTM(self.Ng, return_sequences=True,
                                        activation=options.activation,
                                        recurrent_initializer='glorot_uniform')
        self.dense = Dense(self.Ng, name='dense', activation=options.activation)
        self.decoder = Dense(self.Np, name='decoder')
        # Loss function
        self.loss_fun = tf.nn.softmax_cross_entropy_with_logits

    def g(self, inputs):
        '''
        Compute grid cell activations.
        Args:
            inputs: Tuple (v, p0); v is a batch of 2d velocity inputs with shape
                [batch_size, sequence_length, 2], p0 the initial place-cell code.
        Returns:
            g: Batch of grid cell activations with shape
                [batch_size, sequence_length, Ng].
        '''
        v, p0 = inputs
        # Initial (hidden, cell) state of the LSTM, both derived from p0.
        l0 = self.encoder1(p0)
        m0 = self.encoder2(p0)
        init_state = (l0, m0)
        # Linearly embed the raw 2d velocities before recurrence.
        Mv = self.M(v)
        rnn = self.RNN(Mv, initial_state=init_state)
        g = self.dense(rnn)
        return g

    def call(self, inputs):
        '''
        Predict place cell code.
        Args:
            inputs: Tuple (v, p0) as in `g`.
        Returns:
            place_preds: Predicted place cell activations with shape
                [batch_size, sequence_length, Np].
        '''
        place_preds = self.decoder(self.g(inputs))
        return place_preds

    def compute_loss(self, inputs, pc_outputs, pos):
        '''
        Compute avg. loss and decoding error.
        Args:
            inputs: Tuple (v, p0) as in `g`.
            pc_outputs: Ground truth place cell activations with shape
                [batch_size, sequence_length, Np].
            pos: Ground truth 2d position with shape [batch_size, sequence_length, 2].
        Returns:
            loss: Avg. loss for this training batch.
            err: Avg. decoded position error in cm.
        '''
        g = self.g(inputs)
        preds = self.decoder(g)
        loss = tf.reduce_mean(self.loss_fun(pc_outputs, preds))

        # Weight regularization: L2 penalty on the recurrent kernel. The
        # LSTM layer's weights are [kernel, recurrent_kernel, bias], so
        # index 1 is the recurrent kernel.
        loss += self.weight_decay * tf.reduce_sum(self.RNN.weights[1]**2)

        # Compute decoding error: distance between the position of the most
        # active place cells and the ground-truth position.
        pred_pos = self.place_cells.get_nearest_cell_pos(preds)
        err = tf.reduce_mean(tf.sqrt(tf.reduce_sum((pos - pred_pos)**2, axis=-1)))
        return loss, err