forked from chaitjo/learning-paradigms-for-tsp
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlog_utils.py
44 lines (31 loc) · 1.79 KB
/
log_utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
def log_values(cost, grad_norms, epoch, batch_id, step,
               log_likelihood, reinforce_loss, bl_loss, tb_logger, opts):
    """Report REINFORCE training metrics for one batch.

    Prints a one-line summary to stdout and, unless ``opts.no_tensorboard``
    is set, records scalar values on ``tb_logger``. When a critic baseline
    is active (``opts.baseline == 'critic'``), its loss and gradient norms
    are logged as well.

    :param cost: per-instance tour costs (tensor); the batch mean is logged
    :param grad_norms: pair of (raw norms, clipped norms); index 0 is the
        actor, index 1 the critic
    :param epoch: current epoch number (printed only)
    :param batch_id: index of the batch within the epoch (printed only)
    :param step: global step used as the tensorboard x-axis
    :param log_likelihood: per-instance log-likelihoods; logged as mean NLL
    :param reinforce_loss: actor (REINFORCE) loss scalar
    :param bl_loss: critic baseline loss scalar (used only for critic baseline)
    :param tb_logger: logger exposing ``log_value(name, value, step)``
    :param opts: run options (``no_tensorboard``, ``baseline``)
    """
    mean_cost = cost.mean().item()
    raw_norms, clipped_norms = grad_norms

    # Console summary for this batch
    print('epoch: {}, train_batch_id: {}, avg_cost: {}'.format(epoch, batch_id, mean_cost))
    print('grad_norm: {}, clipped: {}'.format(raw_norms[0], clipped_norms[0]))

    if opts.no_tensorboard:
        return

    # Actor-side metrics
    tb_logger.log_value('avg_cost', mean_cost, step)
    tb_logger.log_value('actor_loss', reinforce_loss.item(), step)
    tb_logger.log_value('nll', -log_likelihood.mean().item(), step)
    tb_logger.log_value('grad_norm', raw_norms[0], step)
    tb_logger.log_value('grad_norm_clipped', clipped_norms[0], step)

    # Critic-side metrics, only meaningful when a critic baseline is trained
    if opts.baseline == 'critic':
        tb_logger.log_value('critic_loss', bl_loss.item(), step)
        tb_logger.log_value('critic_grad_norm', raw_norms[1], step)
        tb_logger.log_value('critic_grad_norm_clipped', clipped_norms[1], step)
def log_values_sl(cost, grad_norms, epoch, batch_id, step,
                  loss, tb_logger, opts):
    """Report supervised-learning training metrics for one batch.

    Prints a one-line summary to stdout and, unless ``opts.no_tensorboard``
    is set, records scalar values on ``tb_logger``.

    :param cost: per-instance tour costs (tensor); the batch mean is logged
    :param grad_norms: pair of (raw norms, clipped norms); index 0 is used
    :param epoch: current epoch number (printed only)
    :param batch_id: index of the batch within the epoch (printed only)
    :param step: global step used as the tensorboard x-axis
    :param loss: supervised (NLL) loss scalar
    :param tb_logger: logger exposing ``log_value(name, value, step)``
    :param opts: run options (``no_tensorboard``)
    """
    mean_cost = cost.mean().item()
    raw_norms, clipped_norms = grad_norms

    # Console summary for this batch
    print('epoch: {}, train_batch_id: {}, loss: {}, avg_cost: {}'.format(epoch, batch_id, loss, mean_cost))
    print('grad_norm: {}, clipped: {}'.format(raw_norms[0], clipped_norms[0]))

    if not opts.no_tensorboard:
        # Emit each scalar under its tensorboard tag
        for tag, value in (('avg_cost', mean_cost),
                           ('nll_loss', loss.item()),
                           ('grad_norm', raw_norms[0]),
                           ('grad_norm_clipped', clipped_norms[0])):
            tb_logger.log_value(tag, value, step)