Skip to content

Commit

Permalink
Fix arguments
Browse files — browse the repository at this point in the history
  • Branch information:
yuhaozhang committed Oct 5, 2018
1 parent 42e7b22 commit ecdae5a
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 11 deletions.
2 changes: 1 addition & 1 deletion eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
parser.add_argument('--dataset', default='test', help="Data split to use for evaluation: dev or test.")
parser.add_argument('--batch_size', type=int, default=100, help="Batch size for evaluation.")
parser.add_argument('--gold', default='', help="Optional: a file where to write gold summarizations. Default to not write.")
parser.add_argument('--out', default='', help="Optional: a file where to write predictions.i Default to not write.")
parser.add_argument('--out', default='', help="Optional: a file where to write predictions. Default to not write.")
parser.add_argument('--use_bleu', action='store_true', help="Use BLEU instead of ROUGE metrics for scoring.")

parser.add_argument('--seed', type=int, default=1234)
Expand Down
14 changes: 5 additions & 9 deletions model/copy_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ def __init__(self, opt, emb_matrix=None):
self.opt = opt
self.emb_matrix = emb_matrix
self.use_bg = opt.get('background', False)
self.attn_bg = opt.get('attn_background', False)

print("Building Seq2Seq with Copy model ...")
self.enc_hidden_dim = self.hidden_dim // 2
Expand All @@ -56,8 +55,7 @@ def __init__(self, opt, emb_matrix=None):
self.bg_encoder = nn.LSTM(self.emb_dim, self.enc_hidden_dim, 1, \
bidirectional=True, batch_first=True, dropout=0) # when nlayer=1, rnn dropout does not apply
self.bg_drop = nn.Dropout(self.dropout)
if self.attn_bg:
self.bg_attn_layer = BasicAttention(self.hidden_dim)
self.bg_attn_layer = BasicAttention(self.hidden_dim)

self.SOS_tensor = torch.LongTensor([constant.SOS_ID])
self.SOS_tensor = self.SOS_tensor.cuda() if self.use_cuda else self.SOS_tensor
Expand Down Expand Up @@ -205,9 +203,8 @@ def forward(self, src, tgt_in, bg):
bg_lens = bg_mask.eq(0).long().sum(1)
bg_out, bg_hidden = self.encode(bg_inputs, bg_lens, encoder=self.bg_encoder, sort=True)
bg_h = bg_hidden[0]
if self.attn_bg:
# use attentional representation
_, bg_h, _ = self.bg_attn_layer(enc_hidden[0], bg_out, mask=bg_mask)
# use attentional representation
_, bg_h, _ = self.bg_attn_layer(enc_hidden[0], bg_out, mask=bg_mask)
bg_h = self.bg_drop(bg_h)
else:
bg_h = None
Expand Down Expand Up @@ -249,9 +246,8 @@ def predict(self, src, bg, beam_size=5):
bg_lens = bg_mask.eq(0).long().sum(1)
bg_out, bg_hidden = self.encode(bg_inputs, bg_lens, encoder=self.bg_encoder, sort=True)
bg_h = bg_hidden[0]
if self.attn_bg:
# use attentional representation
_, bg_h, _ = self.bg_attn_layer(enc_hidden[0], bg_out, mask=bg_mask)
# use attentional representation
_, bg_h, _ = self.bg_attn_layer(enc_hidden[0], bg_out, mask=bg_mask)
bg_h = self.bg_drop(bg_h)
else:
bg_h = None
Expand Down
1 change: 0 additions & 1 deletion train.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@
parser.add_argument('--cov_loss_epoch', type=int, default=0, help='Add coverage loss starting from this epoch.')

parser.add_argument('--background', action='store_true', help='Use background information for decoder.')
parser.add_argument('--attn_background', action='store_true', help='Use attentional background encoder.')
parser.add_argument('--concat_background', action='store_true', help='Simply concat background to findings.')

parser.add_argument('--use_bleu', action='store_true', help='Use BLEU as the metric. By default use ROUGE.')
Expand Down

0 comments on commit ecdae5a

Please sign in to comment.