Trainer: simplify constructor, remove batch_size arg
The trainer iterates over the dataset in batch units, and it can get the
batch size from each batch, so we don't need to pass in the batch_size
arg.
JianyuZhan committed Sep 19, 2017
1 parent 82cc679 commit 1f9ff80
Showing 2 changed files with 5 additions and 9 deletions.
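
As a side note, here is a minimal sketch (not code from this repository) of what the commit message describes: reading the batch size off each batch instead of storing it on the Trainer. The helper name get_batch_size and the (seq_len, batch) target layout are assumptions; torchtext-style Batch objects do carry a batch_size attribute.

def get_batch_size(batch):
    """Hypothetical helper: number of examples in this batch."""
    if hasattr(batch, "batch_size"):       # torchtext.data.Batch exposes this
        return batch.batch_size
    return batch.tgt.size(1)               # assumed (seq_len, batch) layout

# Inside the training loop the per-iteration size is then simply:
# for batch in train_iter:
#     cur_batch_size = get_batch_size(batch)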
12 changes: 4 additions & 8 deletions onmt/Trainer.py
@@ -5,8 +5,7 @@

 class Trainer(object):
     def __init__(self, model, train_iter, valid_iter,
-                 train_loss, valid_loss, optim,
-                 batch_size, gpuid,
+                 train_loss, valid_loss, optim, gpuid,
                  truncated_decoder, max_generator_batches):
         # Basic attributes.
         self.model = model
@@ -15,7 +14,6 @@ def __init__(self, model, train_iter, valid_iter,
         self.train_loss = train_loss
         self.valid_loss = valid_loss
         self.optim = optim
-        self.batch_size = batch_size
         self.gpuid = gpuid
         self.truncated_decoder = truncated_decoder
         self.max_generator_batches = max_generator_batches
@@ -43,17 +41,15 @@ def train(self, epoch, report_func=None):
                 else target_size

             for j in range(0, target_size-1, trunc_size):
-                # (1) Create truncated target.
+                # 1. Create truncated target.
                 tgt = tgt[j: j + trunc_size]

-                # (2) F-prop all but generator.
-
-                # Main training loop
+                # 2. F-prop all but generator.
                 self.model.zero_grad()
                 outputs, attn, dec_state = \
                     self.model(src, tgt, src_lengths, dec_state)

-                # (2) F-prop/B-prob generator in shards for memory
+                # 3. F-prop/B-prob generator in shards for memory
                 #    efficiency.
                 batch_stats = onmt.Statistics()
                 # make_loss_batch doesn't really need to be a method of
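
The comment added above ("F-prop/B-prob generator in shards for memory efficiency") refers to running the generator and loss over time-slices of the decoder output, so the full vocabulary softmax is never materialised for the whole sequence at once. Below is a rough sketch of that idea, not the project's actual loss code; the names (generator, criterion, shard_size) and the (seq_len, batch, hidden) output shape are assumptions.

import torch

def sharded_generator_backward(outputs, targets, generator, criterion, shard_size):
    """Compute the generator loss shard-by-shard along the time axis (sketch)."""
    shard_slices, shard_grads, total_loss = [], [], 0.0
    for start in range(0, outputs.size(0), shard_size):
        # Detach the slice so the backward below stops at the shard boundary.
        out_shard = outputs[start:start + shard_size].detach().requires_grad_(True)
        tgt_shard = targets[start:start + shard_size]
        scores = generator(out_shard.view(-1, out_shard.size(-1)))
        loss = criterion(scores, tgt_shard.contiguous().view(-1))
        loss.backward()                          # only fills out_shard.grad
        shard_slices.append(outputs[start:start + shard_size])
        shard_grads.append(out_shard.grad)
        total_loss += loss.item()
    # Single backward through the encoder/decoder with the accumulated shard grads.
    torch.autograd.backward(shard_slices, shard_grads)
    return total_loss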
2 changes: 1 addition & 1 deletion train.py
@@ -137,7 +137,7 @@ def train_model(model, train_data, valid_data, fields, optim, opt):

     trainer = onmt.Trainer(model, train_iter, valid_iter,
                            train_loss, valid_loss, optim,
-                           opt.batch_size, opt.gpuid,
+                           opt.gpuid,
                            opt.truncated_decoder,
                            opt.max_generator_batches)

