forked from stanford-oval/genie-parser
-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_server.py
executable file
·80 lines (67 loc) · 2.65 KB
/
run_server.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
#!/usr/bin/python3
#
# Copyright 2017 Giovanni Campagna <gcampagn@cs.stanford.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 15, 2017
@author: gcampagn
'''
import os
import sys
import numpy as np
import tensorflow as tf
import tornado.ioloop
import configparser
from concurrent.futures import ThreadPoolExecutor
from models import Config, create_model
from server.application import Application, LanguageContext
from server.tokenizer import Tokenizer, TokenizerService
from server.config import ServerConfig
def load_language(app, tokenizer_service, tag, model_dir):
    """Load one language model from *model_dir* and register it on *app*.

    Builds the TF graph on CPU, restores the checkpoint named 'best', wraps
    everything in a LanguageContext and registers it under *tag* (e.g. 'en').

    Args:
        app: the server Application to register the language on.
        tokenizer_service: shared TokenizerService passed to each Tokenizer.
        tag: language tag string; also selects './default.<tag>.conf'.
        model_dir: directory holding 'model.conf' and the 'best' checkpoint.
    """
    # Later files override earlier ones: defaults < per-language < per-model.
    config = Config.load(['./default.conf', './default.' + tag + '.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    # Each language gets its own TF1 graph and session so models stay isolated.
    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        # Force everything to run on CPU, we run on single inputs so there is not much point
        # on going through the GPU
        with tf.device('/cpu:0'):
            model.build()
            # Saver must be created inside the graph scope so it captures
            # this graph's variables (not the default graph's).
            loader = tf.train.Saver()

    with session.as_default():
        # Restore the checkpoint prefix '<model_dir>/best' into this session.
        loader.restore(session, os.path.join(model_dir, 'best'))
    tokenizer = Tokenizer(tokenizer_service, tag)
    app.add_language(tag, LanguageContext(tag, tokenizer, session, config, model))
    print('Loaded language ' + tag)
def run():
    """Entry point: load the models named on the command line and serve forever.

    Usage: python3 run_server.py <<Language:Model Dir>> [...]
    Each argument is a 'tag:directory' pair; every pair is loaded via
    load_language(), then the Tornado IO loop runs until interrupted.
    Exits with status 1 if no language/model pairs are given.
    """
    if len(sys.argv) < 2:
        print("** Usage: python3 " + sys.argv[0] + " <<Language:Model Dir>>")
        sys.exit(1)

    # Fixed seed so any sampling in the models is reproducible across restarts.
    np.random.seed(42)
    config = ServerConfig.load(('./server.conf',))

    # BUG FIX: the original tested sys.version_info[2] (the *micro* version),
    # so e.g. Python 3.6.2 took the wrong branch. thread_name_prefix was
    # added to ThreadPoolExecutor in Python 3.6, so compare (major, minor).
    if sys.version_info >= (3, 6):
        thread_pool = ThreadPoolExecutor(thread_name_prefix='query-thread-')
    else:
        thread_pool = ThreadPoolExecutor()

    app = Application(config, thread_pool)
    app.listen(config.port)

    tokenizer_service = TokenizerService()
    tokenizer_service.run()

    # Arguments look like 'en:/path/to/model'; split each into (tag, dir).
    for language, model_directory in map(lambda x: x.split(':'), sys.argv[1:]):
        load_language(app, tokenizer_service, language, model_directory)
    sys.stdout.flush()

    # Blocks here serving requests until the process is stopped.
    tornado.ioloop.IOLoop.current().start()
# Script entry point: only start the server when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    run()