Skip to content

Commit

Permalink
Workaround for train_set batching scheme during inference time
Browse files Browse the repository at this point in the history
  • Loading branch information
varisd committed Jan 9, 2019
1 parent b384686 commit 299c1bc
Showing 1 changed file with 4 additions and 1 deletion.
5 changes: 4 additions & 1 deletion neuralmonkey/learning_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from termcolor import colored

from neuralmonkey.logging import log, log_print, warn
from neuralmonkey.dataset import Dataset
from neuralmonkey.dataset import Dataset, BatchingScheme
from neuralmonkey.tf_manager import TensorFlowManager
from neuralmonkey.runners.base_runner import (
BaseRunner, ExecutionResult, GraphExecutor, OutputSeries)
Expand Down Expand Up @@ -85,6 +85,9 @@ def training_loop(cfg: Namespace) -> None:
trainer_result = cfg.tf_manager.execute(
batch, feedables, cfg.trainers, train=True,
summaries=True)
# workaround: we need to use validation batching scheme
# during evaluation
batch.batching = BatchingScheme(batch_size=cfg.batch_size)
train_results, train_outputs, f_batch = run_on_dataset(
cfg.tf_manager, cfg.runners, cfg.dataset_runner, batch,
cfg.postprocess, write_out=False)
Expand Down

0 comments on commit 299c1bc

Please sign in to comment.