__author__ = 'tanel'
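
# Worker for kaldigstserver: connects to the master server over a websocket,
# receives audio for one recognition request at a time, feeds it to the Kaldi
# GStreamer decoder pipeline and sends partial/full results back to the master
# as JSON messages, optionally piping transcripts through external
# post-processor subprocesses.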
import logging
import logging.config
import time
import thread
import threading
import os
import argparse
from subprocess import Popen, PIPE
from gi.repository import GObject
import yaml
import json
import sys
import locale
import codecs
import zlib
import base64
import tornado.gen
import tornado.process
import tornado.ioloop
import tornado.locks
from ws4py.client.threadedclient import WebSocketClient
import ws4py.messaging
from decoder import DecoderPipeline
from decoder2 import DecoderPipeline2
import common
logger = logging.getLogger(__name__)
CONNECT_TIMEOUT = 5
SILENCE_TIMEOUT = 5
USE_NNET2 = False


class ServerWebsocket(WebSocketClient):
    STATE_CREATED = 0
    STATE_CONNECTED = 1
    STATE_INITIALIZED = 2
    STATE_PROCESSING = 3
    STATE_EOS_RECEIVED = 7
    STATE_CANCELLING = 8
    STATE_FINISHED = 100

    def __init__(self, uri, decoder_pipeline, post_processor, full_post_processor=None):
        self.uri = uri
        self.decoder_pipeline = decoder_pipeline
        self.post_processor = post_processor
        self.full_post_processor = full_post_processor
        WebSocketClient.__init__(self, url=uri, heartbeat_freq=10)
        self.pipeline_initialized = False
        self.partial_transcript = ""
        if USE_NNET2:
            self.decoder_pipeline.set_result_handler(self._on_result)
            self.decoder_pipeline.set_full_result_handler(self._on_full_result)
            self.decoder_pipeline.set_error_handler(self._on_error)
        else:
            self.decoder_pipeline.set_word_handler(self._on_word)
            self.decoder_pipeline.set_error_handler(self._on_error)
            self.decoder_pipeline.set_eos_handler(self._on_eos)
        self.state = self.STATE_CREATED
        self.last_decoder_message = time.time()
        self.request_id = "<undefined>"
        self.timeout_decoder = 5
        self.num_segments = 0
        self.last_partial_result = ""
        self.post_processor_lock = tornado.locks.Lock()
        self.processing_condition = tornado.locks.Condition()
        self.num_processing_threads = 0

    def opened(self):
        logger.info("Opened websocket connection to server")
        self.state = self.STATE_CONNECTED
        self.last_partial_result = ""

    def guard_timeout(self):
        """Watchdog thread: if the decoder produces no hypothesis updates for
        more than SILENCE_TIMEOUT seconds, cancel the request, report
        STATUS_NO_SPEECH to the master and close the connection."""
        global SILENCE_TIMEOUT
        while self.state in [self.STATE_EOS_RECEIVED, self.STATE_CONNECTED, self.STATE_INITIALIZED, self.STATE_PROCESSING]:
            if time.time() - self.last_decoder_message > SILENCE_TIMEOUT:
                logger.warning("%s: More than %d seconds from last decoder hypothesis update, cancelling" % (self.request_id, SILENCE_TIMEOUT))
                self.finish_request()
                event = dict(status=common.STATUS_NO_SPEECH)
                try:
                    self.send(json.dumps(event))
                except:
                    logger.warning("%s: Failed to send error event to master" % (self.request_id))
                self.close()
                return
            logger.debug("%s: Checking that decoder hasn't been silent for more than %d seconds" % (self.request_id, SILENCE_TIMEOUT))
            time.sleep(1)

    def received_message(self, m):
        logger.debug("%s: Got message from server of type %s" % (self.request_id, str(type(m))))
        if self.state == self.__class__.STATE_CONNECTED:
            props = json.loads(str(m))
            content_type = props['content_type']
            self.request_id = props['id']
            self.num_segments = 0
            self.decoder_pipeline.init_request(self.request_id, content_type)
            self.last_decoder_message = time.time()
            thread.start_new_thread(self.guard_timeout, ())
            logger.info("%s: Started timeout guard" % self.request_id)
            logger.info("%s: Initialized request" % self.request_id)
            self.state = self.STATE_INITIALIZED
        elif m.data == "EOS":
            if self.state != self.STATE_CANCELLING and self.state != self.STATE_EOS_RECEIVED and self.state != self.STATE_FINISHED:
                self.decoder_pipeline.end_request()
                self.state = self.STATE_EOS_RECEIVED
            else:
                logger.info("%s: Ignoring EOS, worker already in state %d" % (self.request_id, self.state))
        else:
            if self.state != self.STATE_CANCELLING and self.state != self.STATE_EOS_RECEIVED and self.state != self.STATE_FINISHED:
                if isinstance(m, ws4py.messaging.BinaryMessage):
                    self.decoder_pipeline.process_data(m.data)
                    self.state = self.STATE_PROCESSING
                elif isinstance(m, ws4py.messaging.TextMessage):
                    props = json.loads(str(m))
                    if 'adaptation_state' in props:
                        as_props = props['adaptation_state']
                        if as_props.get('type', "") == "string+gzip+base64":
                            adaptation_state = zlib.decompress(base64.b64decode(as_props.get('value', '')))
                            logger.info("%s: Setting adaptation state to user-provided value" % (self.request_id))
                            self.decoder_pipeline.set_adaptation_state(adaptation_state)
                        else:
                            logger.warning("%s: Cannot handle adaptation state type %s" % (self.request_id, as_props.get('type', "")))
                    else:
                        logger.warning("%s: Got JSON message but don't know what to do with it" % (self.request_id))
            else:
                logger.info("%s: Ignoring data, worker already in state %d" % (self.request_id, self.state))

    def finish_request(self):
        if self.state == self.STATE_CONNECTED:
            # connection closed when we are not doing anything
            self.decoder_pipeline.finish_request()
            self.state = self.STATE_FINISHED
            return
        if self.state == self.STATE_INITIALIZED:
            # connection closed when request initialized but with no data sent
            self.decoder_pipeline.finish_request()
            self.state = self.STATE_FINISHED
            return
        if self.state != self.STATE_FINISHED:
            logger.info("%s: Master disconnected before decoder reached EOS?" % self.request_id)
            self.state = self.STATE_CANCELLING
            self.decoder_pipeline.cancel()
            counter = 0
            while self.state == self.STATE_CANCELLING:
                counter += 1
                if counter > 30:
                    # lost hope that the decoder will ever finish, likely it has hung
                    # FIXME: this might introduce new bugs
                    logger.info("%s: Giving up waiting after %d tries" % (self.request_id, counter))
                    self.state = self.STATE_FINISHED
                else:
                    logger.info("%s: Waiting for EOS from decoder" % self.request_id)
                    time.sleep(1)
            self.decoder_pipeline.finish_request()
            logger.info("%s: Finished waiting for EOS" % self.request_id)

    def closed(self, code, reason=None):
        logger.debug("%s: Websocket closed() called" % self.request_id)
        self.finish_request()
        logger.debug("%s: Websocket closed() finished" % self.request_id)

    @tornado.gen.coroutine
    def _increment_num_processing(self, delta):
        self.num_processing_threads += delta
        self.processing_condition.notify()

    @tornado.gen.coroutine
    def _on_result(self, result, final):
        try:
            self._increment_num_processing(1)
            if final:
                # final results are handled by _on_full_result()
                return
            self.last_decoder_message = time.time()
            if self.last_partial_result == result:
                return
            self.last_partial_result = result
            logger.info("%s: Postprocessing (final=%s) result.." % (self.request_id, final))
            processed_transcripts = yield self.post_process([result], blocking=False)
            if processed_transcripts:
                logger.info("%s: Postprocessing done." % self.request_id)
                event = dict(status=common.STATUS_SUCCESS,
                             segment=self.num_segments,
                             result=dict(hypotheses=[dict(transcript=processed_transcripts[0])], final=final))
                try:
                    self.send(json.dumps(event))
                except:
                    e = sys.exc_info()[1]
                    logger.warning("Failed to send event to master: %s" % e)
        finally:
            self._increment_num_processing(-1)

    @tornado.gen.coroutine
    def _on_full_result(self, full_result_json):
        try:
            self._increment_num_processing(1)
            self.last_decoder_message = time.time()
            full_result = json.loads(full_result_json)
            full_result['segment'] = self.num_segments
            full_result['id'] = self.request_id
            if full_result.get("status", -1) == common.STATUS_SUCCESS:
                logger.debug(u"%s: Before postprocessing: %s" % (self.request_id, repr(full_result).decode("unicode-escape")))
                full_result = yield self.post_process_full(full_result)
                logger.info("%s: Postprocessing done." % self.request_id)
                logger.debug(u"%s: After postprocessing: %s" % (self.request_id, repr(full_result).decode("unicode-escape")))
                try:
                    self.send(json.dumps(full_result))
                except:
                    e = sys.exc_info()[1]
                    logger.warning("Failed to send event to master: %s" % e)
                if full_result.get("result", {}).get("final", True):
                    self.num_segments += 1
                    self.last_partial_result = ""
            else:
                logger.info("%s: Result status is %d, forwarding the result to the server anyway" % (self.request_id, full_result.get("status", -1)))
                try:
                    self.send(json.dumps(full_result))
                except:
                    e = sys.exc_info()[1]
                    logger.warning("Failed to send event to master: %s" % e)
        finally:
            self._increment_num_processing(-1)

    @tornado.gen.coroutine
    def _on_word(self, word):
        try:
            self._increment_num_processing(1)
            self.last_decoder_message = time.time()
            if word != "<#s>":
                if len(self.partial_transcript) > 0:
                    self.partial_transcript += " "
                self.partial_transcript += word
                logger.debug("%s: Postprocessing partial result.." % self.request_id)
                processed_transcript = (yield self.post_process([self.partial_transcript], blocking=False))[0]
                if processed_transcript:
                    logger.debug("%s: Postprocessing done." % self.request_id)
                    event = dict(status=common.STATUS_SUCCESS,
                                 segment=self.num_segments,
                                 result=dict(hypotheses=[dict(transcript=processed_transcript)], final=False))
                    self.send(json.dumps(event))
            else:
                logger.info("%s: Postprocessing final result.." % self.request_id)
                processed_transcript = (yield self.post_process([self.partial_transcript], blocking=True))[0]
                logger.info("%s: Postprocessing done." % self.request_id)
                event = dict(status=common.STATUS_SUCCESS,
                             segment=self.num_segments,
                             result=dict(hypotheses=[dict(transcript=processed_transcript)], final=True))
                self.send(json.dumps(event))
                self.partial_transcript = ""
                self.num_segments += 1
        finally:
            self._increment_num_processing(-1)

    @tornado.gen.coroutine
    def _on_eos(self, data=None):
        self.last_decoder_message = time.time()
        # Make sure we won't close the connection before the
        # post-processing has finished
        while self.num_processing_threads > 0:
            logging.debug("Waiting until processing threads finish (%d)" % self.num_processing_threads)
            yield self.processing_condition.wait()
        self.state = self.STATE_FINISHED
        self.send_adaptation_state()
        self.close()

    def _on_error(self, error):
        self.state = self.STATE_FINISHED
        event = dict(status=common.STATUS_NOT_ALLOWED, message=error)
        try:
            self.send(json.dumps(event))
        except:
            e = sys.exc_info()[1]
            logger.warning("Failed to send event to master: %s" % e)
        self.close()

    def send_adaptation_state(self):
        if hasattr(self.decoder_pipeline, 'get_adaptation_state'):
            logger.info("%s: Sending adaptation state to client..." % (self.request_id))
            adaptation_state = self.decoder_pipeline.get_adaptation_state()
            event = dict(status=common.STATUS_SUCCESS,
                         adaptation_state=dict(id=self.request_id,
                                               value=base64.b64encode(zlib.compress(adaptation_state)),
                                               type="string+gzip+base64",
                                               time=time.strftime("%Y-%m-%dT%H:%M:%S")))
            try:
                self.send(json.dumps(event))
            except:
                e = sys.exc_info()[1]
                logger.warning("Failed to send event to master: " + str(e))
        else:
            logger.info("%s: Adaptation state not supported by the decoder, not sending it." % (self.request_id))

    @tornado.gen.coroutine
    def post_process(self, texts, blocking=False):
        """Run each text through the external post-processor, one line per text.
        With blocking=False the call returns None if the post-processor is
        already busy; without a configured post-processor the texts are
        returned unchanged."""
        if self.post_processor:
            logging.debug("%s: Waiting for postprocessor lock" % self.request_id)
            if blocking:
                timeout = None
            else:
                timeout = 0.0
            try:
                with (yield self.post_processor_lock.acquire(timeout)):
                    result = []
                    for text in texts:
                        self.post_processor.stdin.write("%s\n" % text.encode("utf-8"))
                        self.post_processor.stdin.flush()
                        logging.debug("%s: Starting postprocessing: %s" % (self.request_id, text))
                        text = yield self.post_processor.stdout.read_until('\n')
                        text = text.decode("utf-8")
                        logging.debug("%s: Postprocessing returned: %s" % (self.request_id, text))
                        text = text.strip()
                        text = text.replace("\\n", "\n")
                        result.append(text)
                    raise tornado.gen.Return(result)
            except tornado.gen.TimeoutError:
                logging.debug("%s: Skipping postprocessing since post-processor already in use" % (self.request_id))
                raise tornado.gen.Return(None)
        else:
            raise tornado.gen.Return(texts)

    @tornado.gen.coroutine
    def post_process_full(self, full_result):
        if self.full_post_processor:
            self.full_post_processor.stdin.write("%s\n\n" % json.dumps(full_result))
            self.full_post_processor.stdin.flush()
            lines = []
            while True:
                l = self.full_post_processor.stdout.readline()
                if not l:
                    break  # EOF
                if l.strip() == "":
                    break
                lines.append(l)
            full_result = json.loads("".join(lines))
        elif self.post_processor:
            transcripts = []
            for hyp in full_result.get("result", {}).get("hypotheses", []):
                transcripts.append(hyp["transcript"])
            processed_transcripts = yield self.post_process(transcripts, blocking=True)
            for (i, hyp) in enumerate(full_result.get("result", {}).get("hypotheses", [])):
                hyp["original-transcript"] = hyp["transcript"]
                hyp["transcript"] = processed_transcripts[i]
        raise tornado.gen.Return(full_result)


def main_loop(uri, decoder_pipeline, post_processor, full_post_processor=None):
    while True:
        ws = ServerWebsocket(uri, decoder_pipeline, post_processor, full_post_processor=full_post_processor)
        try:
            logger.info("Opening websocket connection to master server")
            ws.connect()
            ws.run_forever()
        except Exception:
            logger.error("Couldn't connect to server, waiting for %d seconds", CONNECT_TIMEOUT)
            time.sleep(CONNECT_TIMEOUT)
        # fixes a race condition
        time.sleep(1)
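

# Example invocation (the config file name is illustrative; point -c at your
# own decoder configuration):
#   python worker.py -u ws://localhost:8888/worker/ws/speech -c sample_worker.yaml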
def main():
    logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
    logging.debug('Starting up worker')
    parser = argparse.ArgumentParser(description='Worker for kaldigstserver')
    parser.add_argument('-u', '--uri', default="ws://localhost:8888/worker/ws/speech", dest="uri", help="Server<-->worker websocket URI")
    parser.add_argument('-f', '--fork', default=1, dest="fork", type=int)
    parser.add_argument('-c', '--conf', dest="conf", help="YAML file with decoder configuration")
    args = parser.parse_args()

    if args.fork > 1:
        logging.info("Forking into %d processes" % args.fork)
        tornado.process.fork_processes(args.fork)

    conf = {}
    if args.conf:
        with open(args.conf) as f:
            conf = yaml.safe_load(f)

    if "logging" in conf:
        logging.config.dictConfig(conf["logging"])

    # fork off the post-processors before we load the model into memory
    tornado.process.Subprocess.initialize()

    post_processor = None
    if "post-processor" in conf:
        STREAM = tornado.process.Subprocess.STREAM
        post_processor = tornado.process.Subprocess(conf["post-processor"], shell=True, stdin=PIPE, stdout=STREAM)

    full_post_processor = None
    if "full-post-processor" in conf:
        full_post_processor = Popen(conf["full-post-processor"], shell=True, stdin=PIPE, stdout=PIPE)

    global USE_NNET2
    USE_NNET2 = conf.get("use-nnet2", False)

    global SILENCE_TIMEOUT
    SILENCE_TIMEOUT = conf.get("silence-timeout", 5)

    if USE_NNET2:
        decoder_pipeline = DecoderPipeline2(conf)
    else:
        decoder_pipeline = DecoderPipeline(conf)

    loop = GObject.MainLoop()
    thread.start_new_thread(loop.run, ())
    thread.start_new_thread(main_loop, (args.uri, decoder_pipeline, post_processor, full_post_processor))
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()