sample.py
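"""Sample virtual documents from a trained DecoderRNN.

For each batch drawn from the data loader, this script recovers the topic
keywords encoded in the binary keyword array, decodes a sentence with the
trained decoder, and prints both the keywords and the generated output.
"""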
import argparse
import torch
import torch.nn as nn
import numpy as np
import os
import pickle
from data_loader import get_loader
from build_vocab import Vocabulary
from model import EncoderCNN, DecoderRNN
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
import pandas as pd
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main(args):
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build data loader
    data_loader = get_loader('', args.caption_path, vocab,
                             args.dictionary, args.batch_size,
                             shuffle=True, num_workers=args.num_workers)

    # Build the decoder (the CNN encoder is unused when sampling from keyword arrays)
    # encoder = EncoderCNN(args.embed_size).to(device)
    # Note: error_bad_lines was removed in pandas 2.0; on_bad_lines='skip' is its replacement
    dictionary = pd.read_csv(args.dictionary, header=0, encoding='unicode_escape',
                             on_bad_lines='skip')
    dictionary = list(dictionary['keys'])
    decoder = DecoderRNN(len(dictionary), args.hidden_size, len(vocab),
                         args.num_layers).to(device)
    decoder.load_state_dict(torch.load(args.model_path, map_location=device))
    decoder.eval()
    # Sample from the trained decoder
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (array, captions, lengths) in enumerate(data_loader):
            # Recover the keyword strings from the binary keyword array
            keywords = []
            arr = array.numpy()[0]
            for j in range(len(arr)):
                if arr[j] == 1:
                    keywords.append(dictionary[j])
            keywords = ' '.join(keywords)

            # Set mini-batch dataset
            array = array.to(device)
            # captions = captions.to(device)
            # targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]  # unused when sampling

            # Forward pass: decode a sentence from the keyword array
            # features = encoder(images)
            outputs = decoder.sample(array)
            sampled_ids = outputs[0].cpu().numpy()  # (1, max_seq_length) -> (max_seq_length,)

            # Convert word ids to words, stopping at the <end> token
            sampled_caption = []
            for word_id in sampled_ids:
                word = vocab.idx2word[word_id]
                sampled_caption.append(word)
                if word == '<end>':
                    break
            sentence = ' '.join(sampled_caption)

            # Print the topic keywords and the generated caption
            print('Topic Keywords: {}\nOutput Virtual Document: {}\n'.format(keywords, sentence))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='models/', help='path to the trained decoder checkpoint')
    parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')
    parser.add_argument('--dictionary', type=str, default='data/dict.csv', help='path to dictionary file')
    parser.add_argument('--caption_path', type=str, default='data/annotations/captions_train2014.json', help='path for train annotation json file')
    parser.add_argument('--log_step', type=int, default=10, help='step size for printing log info')

    # Model parameters
    parser.add_argument('--embed_size', type=int, default=256, help='dimension of word embedding vectors')
    parser.add_argument('--hidden_size', type=int, default=512, help='dimension of lstm hidden states')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in lstm')

    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    args = parser.parse_args()
    print(args)
    main(args)
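
# Example invocation (a sketch: 'models/decoder.ckpt' is a hypothetical filename --
# pass whichever checkpoint your training script actually produced):
#
#   python sample.py --model_path models/decoder.ckpt \
#       --vocab_path data/vocab.pkl \
#       --dictionary data/dict.csv \
#       --caption_path data/annotations/captions_train2014.json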