Added epoch 0 to characterize the untrained structure
ianwilliamson committed Mar 5, 2019
1 parent f136b3b commit 60c70c1
Showing 2 changed files with 11 additions and 10 deletions.
study/inference.py: 5 changes (2 additions, 3 deletions)
@@ -118,9 +118,8 @@
 axs[1].plot(epochs, history["acc_test"], "o-", label="Test")
 axs[1].set_xlabel("Number of training epochs")
 axs[1].set_ylabel("Accuracy")
-axs[1].set_xticks(epochs)
-axs[1].set_ylim([0.5, 1.0])
-axs[1].legend(fontsize="smaller")
+axs[1].set_ylim(top=1.01)
+axs[0].legend()
 if args.save:
     fig.savefig(os.path.splitext(args.model)[0] + '_hist.png', dpi=300)
 else:
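
For context, a minimal self-contained sketch of the figure these lines configure is below. The epoch range and history values are illustrative assumptions standing in for the training history that study/inference.py actually loads, and the top panel is assumed to show the training loss.

import matplotlib.pyplot as plt

epochs = list(range(0, 6))                                   # assumed epoch range
history = {"loss_train": [1.1, 0.8, 0.6, 0.45, 0.35, 0.3],   # assumed values
           "acc_test":   [0.33, 0.5, 0.62, 0.71, 0.78, 0.83]}  # key from the diff

fig, axs = plt.subplots(2, 1, sharex=True)
axs[0].plot(epochs, history["loss_train"], "o-", label="Train")
axs[1].plot(epochs, history["acc_test"], "o-", label="Test")
axs[1].set_xlabel("Number of training epochs")
axs[1].set_ylabel("Accuracy")
axs[1].set_ylim(top=1.01)  # headroom so markers at 1.0 are not clipped by the axis
axs[0].legend()            # the legend now sits on the top (loss) panel
plt.show()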
wavetorch/wave.py: 16 changes (9 additions, 7 deletions)
@@ -13,25 +13,27 @@ def train(model, optimizer, criterion, train_dl, test_dl, N_epochs, batch_size):
"acc_test": []}

t_start = time.time()
for epoch in range(1, N_epochs + 1):
for epoch in range(0, N_epochs + 1):
t_epoch = time.time()
print('Epoch: %2d/%2d' % (epoch, N_epochs))

num = 1
for xb, yb in train_dl:
# Needed to define this for LBFGS.
# Technically, Adam doesn't require this but we can be flexible this way
def closure():
optimizer.zero_grad()
loss = criterion(model(xb), yb.argmax(dim=1))
loss.backward()
return loss

# Track loss
loss = optimizer.step(closure)
history["loss_iter"].append(loss.item())
if epoch == 0: # Don't take a step and just characterize the starting structure
print(" ... No optimizer step is taken on epoch 0")
with torch.no_grad():
loss = criterion(model(xb), yb.argmax(dim=1))
else:
loss = optimizer.step(closure)
model.clip_to_design_region()

model.clip_to_design_region()
history["loss_iter"].append(loss.item())

print(" ... Training batch %2d/%2d | loss = %.3e" % (num, len(train_dl), history["loss_iter"][-1]))
num += 1
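
The pattern introduced above can be exercised outside wavetorch. Below is a minimal sketch, assuming a toy linear classifier in place of the wave model and a single random one-hot batch in place of train_dl (clip_to_design_region is specific to the wavetorch model and omitted here). It shows why the closure exists, since LBFGS re-evaluates the loss internally during a step, and how epoch 0 only records the loss of the untrained starting structure under torch.no_grad().

import torch
import torch.nn as nn

model = nn.Linear(4, 3)                        # toy stand-in for the wave model
optimizer = torch.optim.LBFGS(model.parameters())
criterion = nn.CrossEntropyLoss()
xb = torch.randn(8, 4)                         # one assumed batch of inputs
yb = torch.eye(3)[torch.randint(0, 3, (8,))]   # one-hot labels, as in the diff

loss_iter = []
N_epochs = 2
for epoch in range(0, N_epochs + 1):
    def closure():
        # LBFGS calls this closure repeatedly per step; Adam also accepts one
        optimizer.zero_grad()
        loss = criterion(model(xb), yb.argmax(dim=1))
        loss.backward()
        return loss

    if epoch == 0:
        # Epoch 0: evaluate only, so the history starts with the untrained structure
        with torch.no_grad():
            loss = criterion(model(xb), yb.argmax(dim=1))
    else:
        loss = optimizer.step(closure)
    loss_iter.append(loss.item())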