Added validation split, compute validation loss in training #15

Merged 1 commit on Feb 23, 2017
train.py (7 changes: 5 additions & 2 deletions)
```diff
@@ -54,20 +54,23 @@ def train(args):
         for e in range(args.num_epochs):
             sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
             data_loader.reset_batch_pointer()
+            v_x, v_y = data_loader.validation_data()
+            valid_feed = {model.input_data: v_x, model.target_data: v_y, model.initial_state: model.initial_state.eval()}
             state = model.initial_state.eval()
             for b in range(data_loader.num_batches):
                 start = time.time()
                 x, y = data_loader.next_batch()
                 feed = {model.input_data: x, model.target_data: y, model.initial_state: state}
                 train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
+                valid_loss, = sess.run([model.cost], valid_feed)
                 end = time.time()
                 print(
-                    "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
+                    "{}/{} (epoch {}), train_loss = {:.3f}, valid_loss = {:.3f}, time/batch = {:.3f}" \
                     .format(
                         e * data_loader.num_batches + b,
                         args.num_epochs * data_loader.num_batches,
                         e,
-                        train_loss, end - start))
+                        train_loss, valid_loss, end - start))
                 if (e * data_loader.num_batches + b) % args.save_every == 0 and ((e * data_loader.num_batches + b) > 0):
                     checkpoint_path = os.path.join('save', 'model.ckpt')
                     saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
```
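Two details of this hunk are worth spelling out. The validation batch is built once per epoch, and it is scored by fetching only `model.cost`: because `model.train_op` is absent from the fetch list, evaluating the held-out data never updates the weights (the trailing comma in `valid_loss, = ...` simply unpacks the one-element result list). Below is a minimal, self-contained sketch of that fetch-list pattern in the TensorFlow 1.x style this repo uses; the toy linear model and every name in it are illustrative, not from the PR:

```python
import tensorflow as tf  # TensorFlow 1.x APIs, as used by this repo in 2017

# Toy graph: one weight, squared-error cost, SGD update (all illustrative).
x = tf.placeholder(tf.float32, [None])
y = tf.placeholder(tf.float32, [None])
w = tf.Variable(0.0)
cost = tf.reduce_mean(tf.square(w * x - y))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(cost)

train_feed = {x: [1., 2., 3.], y: [2., 4., 6.]}
valid_feed = {x: [4., 5.], y: [8., 10.]}  # held-out data, built once

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(50):
        # Fetching train_op applies the gradient update.
        train_loss, _ = sess.run([cost, train_op], train_feed)
        # Fetching only cost evaluates the loss; w is untouched.
        valid_loss, = sess.run([cost], valid_feed)
    print("train_loss = {:.3f}, valid_loss = {:.3f}".format(train_loss, valid_loss))
```

Note also that `valid_feed` pins `model.initial_state` to its freshly evaluated initial value, so every validation pass starts from the same state rather than from the running training state.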
utils.py (25 changes: 22 additions & 3 deletions)
```diff
@@ -258,21 +258,40 @@ def load_preprocessed(self, data_file):
 
         # goes thru the list, and only keeps the text entries that have more than seq_length points
         self.data = []
+        self.valid_data = []
         counter = 0
 
+        # every 1 in 20 (5%) will be used for validation data
+        cur_data_counter = 0
         for data in self.raw_data:
             if len(data) > (self.seq_length+2):
                 # removes large gaps from the data
                 data = np.minimum(data, self.limit)
                 data = np.maximum(data, -self.limit)
                 data = np.array(data, dtype=np.float32)
                 data[:, 0:2] /= self.scale_factor
-                self.data.append(data)
-                counter += int(len(data) / (self.seq_length+2))  # number of equiv batches this datapoint is worth
+
+                cur_data_counter = cur_data_counter + 1
+                if cur_data_counter % 20 == 0:
+                    self.valid_data.append(data)
+                else:
+                    self.data.append(data)
+                    counter += int(len(data) / (self.seq_length+2))  # number of equiv batches this datapoint is worth
+
+        print("train data: {}, valid data: {}".format(len(self.data), len(self.valid_data)))
         # minus 1, since we want the ydata to be a shifted version of x data
         self.num_batches = int(counter / self.batch_size)
 
+    def validation_data(self):
+        # returns validation data
+        x_batch = []
+        y_batch = []
+        for i in range(self.batch_size):
+            data = self.valid_data[i % len(self.valid_data)]
+            idx = 0
+            x_batch.append(np.copy(data[idx:idx+self.seq_length]))
+            y_batch.append(np.copy(data[idx+1:idx+self.seq_length+1]))
+        return x_batch, y_batch
+
     def next_batch(self):
         # returns a randomised, seq_length sized portion of the training data
         x_batch = []
```
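The split itself is deterministic rather than random: among the sequences long enough to use, every 20th one is held out, and `counter` (hence `num_batches`) is accumulated only over training sequences, so the held-out data never inflates the number of batches per epoch. A standalone sketch of that selection logic, using a dummy `raw_data` and illustrative names rather than the loader's real state:

```python
# Dummy sequences standing in for the loader's preprocessed stroke data.
raw_data = [list(range(n)) for n in range(1, 200)]
seq_length = 5

train_set, valid_set = [], []
counter = 0
cur_data_counter = 0
for data in raw_data:
    if len(data) > seq_length + 2:          # long enough to yield a training window
        cur_data_counter += 1
        if cur_data_counter % 20 == 0:      # every 1 in 20 (5%) held out
            valid_set.append(data)
        else:
            train_set.append(data)
            counter += len(data) // (seq_length + 2)  # equivalent batches

print("train data: {}, valid data: {}".format(len(train_set), len(valid_set)))
```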
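`validation_data()` always slices from `idx = 0`, so every call returns the same fixed window of each held-out sequence, and the modulo index recycles sequences when there are fewer than `batch_size` of them. A small usage sketch with dummy arrays; the 3-column shape mimics the stroke format implied by `data[:, 0:2]` above:

```python
import numpy as np

batch_size, seq_length = 4, 3
# Two dummy held-out sequences of 6 points each.
valid_data = [np.arange(18, dtype=np.float32).reshape(6, 3) + k for k in range(2)]

x_batch, y_batch = [], []
for i in range(batch_size):
    data = valid_data[i % len(valid_data)]            # recycle when batch_size > len(valid_data)
    x_batch.append(np.copy(data[0:seq_length]))       # idx is always 0: a fixed window
    y_batch.append(np.copy(data[1:seq_length + 1]))   # targets are x shifted one step

print(len(x_batch), x_batch[0].shape)  # -> 4 (3, 3)
```

Because the batch never changes, the `valid_loss` printed each step is directly comparable across steps; the trade-off is that it only ever samples the first `seq_length` points of each held-out sequence.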