
Commit

fix pep8
Signed-off-by: Chip Nguyen <huyenntkvn@gmail.com>
chiphuyen committed Oct 11, 2019
1 parent d0bf2d5 commit af201fd
Showing 2 changed files with 4 additions and 18 deletions.
21 changes: 4 additions & 17 deletions examples/nlp/transformer_lm.py
@@ -76,19 +76,6 @@
 # data layers, encoder, decoder, output log_softmax, beam_search_translator
 # and loss function
-
-# train_dataset = nemo_nlp.LanguageModelingDataset(
-#     tokenizer,
-#     dataset=f"{args.data_dir}/{args.train_dataset}",
-#     max_seq_length=args.max_seq_length,
-#     batch_step=args.max_seq_length)
-
-# eval_dataset = nemo_nlp.LanguageModelingDataset(
-#     tokenizer,
-#     dataset=f"{args.data_dir}/{args.eval_dataset}",
-#     max_seq_length=args.max_seq_length,
-#     batch_step=args.predict_last_k)
-
 encoder = nemo_nlp.TransformerEncoderNM(
     d_model=args.d_model,
     d_inner=args.d_inner,

@@ -135,10 +122,10 @@ def create_pipeline(dataset,
                              args.max_seq_length,
                              batch_step=args.max_seq_length,
                              batch_size=args.batch_size)
-eval_loss = create_pipeline(f"{args.data_dir}/{args.train_dataset}",
-                            args.max_seq_length,
-                            batch_step=args.predict_last_k,
-                            batch_size=args.eval_batch_size)
+eval_loss = create_pipeline(f"{args.data_dir}/{args.eval_dataset}",
+                            args.max_seq_length,
+                            batch_step=args.predict_last_k,
+                            batch_size=args.eval_batch_size)

 # callback which prints training loss once in a while
 train_callback = nemo.core.SimpleLossLoggerCallback(
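Note that the second hunk is more than a style cleanup: eval_loss was built from args.train_dataset, so evaluation previously ran over the training split. For reference, the corrected wiring reads as below; the comments on batch_step are my own reading of these arguments (a sliding-window language-model evaluation), not taken from the source.

train_loss = create_pipeline(f"{args.data_dir}/{args.train_dataset}",
                             args.max_seq_length,
                             # advance by a full window: non-overlapping training chunks
                             batch_step=args.max_seq_length,
                             batch_size=args.batch_size)
eval_loss = create_pipeline(f"{args.data_dir}/{args.eval_dataset}",  # was train_dataset
                            args.max_seq_length,
                            # slide by predict_last_k so each window scores only its
                            # last k tokens, with the rest of the window as context
                            batch_step=args.predict_last_k,
                            batch_size=args.eval_batch_size)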
1 change: 0 additions & 1 deletion nemo/nemo/backends/pytorch/common/parts.py
@@ -122,7 +122,6 @@ def __init__(self,
     @property
     def last_linear_layer(self):
         return getattr(self, f'layer{self.layers-1}')
-
 
     def forward(self, hidden_states):
         output_states = hidden_states[:]
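This hunk is the fix the commit message names: PEP 8 surrounds method definitions inside a class with a single blank line, reserving two blank lines for top-level functions and classes. A generic illustration of the convention (the names here are hypothetical, not from the repository):

class Example:
    def first(self):
        return 1

    def second(self):  # one blank line between methods inside a class
        return 2


def top_level():  # two blank lines around top-level definitions
    return 3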
