import grpc
from concurrent import futures
from google.protobuf import json_format
from flask import jsonify, Flask, send_from_directory, request
from flask_cors import CORS
import numpy as np
import logging
from tornado.tcpserver import TCPServer
from tornado.iostream import StreamClosedError
from tornado import gen
import tornado.ioloop
import struct
import traceback
import os
from seldon_core.proto import prediction_pb2, prediction_pb2_grpc
from seldon_core.microservice import (
    extract_message, sanity_check_request, rest_datadef_to_array,
    array_to_rest_datadef, grpc_datadef_to_array, array_to_grpc_datadef,
    SeldonMicroserviceException, get_custom_tags, get_data_from_json,
    get_data_from_proto, get_meta_from_json, get_meta_from_proto,
    ANNOTATION_GRPC_MAX_MSG_SIZE)
from seldon_core.metrics import get_custom_metrics
from seldon_core.seldon_flatbuffers import (
    SeldonRPCToNumpyArray, NumpyArrayToSeldonRPC, CreateErrorMsg)

logger = logging.getLogger(__name__)

# ---------------------------
# Interaction with user model
# ---------------------------


def predict(user_model, features, feature_names, **kwargs):
    # Pass extra kwargs (e.g. meta) only if the user model's predict
    # signature accepts them; otherwise fall back to the two-argument form.
    try:
        return user_model.predict(features, feature_names, **kwargs)
    except TypeError:
        return user_model.predict(features, feature_names)


def send_feedback(user_model, features, feature_names, reward, truth):
    if hasattr(user_model, "send_feedback"):
        user_model.send_feedback(features, feature_names, reward, truth)


def get_class_names(user_model, n_targets):
    if hasattr(user_model, "class_names"):
        return user_model.class_names
    else:
        return ["t:{}".format(i) for i in range(n_targets)]
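

# Illustrative sketch, not part of the Seldon API: the duck-typed interface
# the helpers above expect from a user model. Only predict is required;
# class_names and send_feedback are optional. ExampleModel and its behaviour
# are hypothetical.
class ExampleModel(object):
    class_names = ["mean"]

    def predict(self, features, feature_names, meta=None):
        # meta is accepted but unused here; return one column per class name.
        return np.asarray(features).mean(axis=1, keepdims=True)

    def send_feedback(self, features, feature_names, reward, truth):
        logger.debug("Feedback: reward=%s truth=%s", reward, truth)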

# ----------------------------
# REST
# ----------------------------


def get_rest_microservice(user_model, debug=False):
    app = Flask(__name__, static_url_path='')
    CORS(app)

    @app.errorhandler(SeldonMicroserviceException)
    def handle_invalid_usage(error):
        response = jsonify(error.to_dict())
        logger.error("%s", error.to_dict())
        response.status_code = 400
        return response

    @app.route("/seldon.json", methods=["GET"])
    def openAPI():
        return send_from_directory('', "seldon.json")

    @app.route("/predict", methods=["GET", "POST"])
    def Predict():
        request = extract_message()
        logger.debug("Request: %s", request)
        sanity_check_request(request)
        if hasattr(user_model, "predict_rest"):
            return jsonify(user_model.predict_rest(request))
        else:
            features = get_data_from_json(request)
            names = request.get("data", {}).get("names")
            meta = get_meta_from_json(request)
            predictions = predict(user_model, features, names, meta=meta)
            logger.debug("Predictions: %s", predictions)
            # If predictions is a numpy array, or the request used the default
            # data field, return a structured datadef; otherwise treat the
            # result as binary data.
            if isinstance(predictions, np.ndarray) or "data" in request:
                predictions = np.array(predictions)
                if len(predictions.shape) > 1:
                    class_names = get_class_names(user_model, predictions.shape[1])
                else:
                    class_names = []
                data = array_to_rest_datadef(
                    predictions, class_names, request.get("data", {}))
                response = {"data": data, "meta": {}}
            else:
                response = {"binData": predictions, "meta": {}}
            tags = get_custom_tags(user_model)
            if tags:
                response["meta"]["tags"] = tags
            metrics = get_custom_metrics(user_model)
            if metrics:
                response["meta"]["metrics"] = metrics
            return jsonify(response)

    @app.route("/send-feedback", methods=["GET", "POST"])
    def SendFeedback():
        feedback = extract_message()
        logger.debug("Feedback received: %s", feedback)
        if hasattr(user_model, "send_feedback_rest"):
            return jsonify(user_model.send_feedback_rest(feedback))
        else:
            datadef_request = feedback.get("request", {}).get("data", {})
            features = rest_datadef_to_array(datadef_request)
            datadef_truth = feedback.get("truth", {}).get("data", {})
            truth = rest_datadef_to_array(datadef_truth)
            reward = feedback.get("reward")
            send_feedback(user_model, features,
                          datadef_request.get("names"), reward, truth)
            return jsonify({})

    return app
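

# Illustrative usage sketch: running the REST microservice standalone with the
# hypothetical ExampleModel above. Host and port are assumptions, not part of
# this module's API.
def _example_run_rest():
    app = get_rest_microservice(ExampleModel())
    app.run(host="0.0.0.0", port=5000)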

# ----------------------------
# GRPC
# ----------------------------


class SeldonModelGRPC(object):
    def __init__(self, user_model):
        self.user_model = user_model

    def Predict(self, request, context):
        if hasattr(self.user_model, "predict_grpc"):
            return self.user_model.predict_grpc(request)
        else:
            features = get_data_from_proto(request)
            meta = get_meta_from_proto(request)
            datadef = request.data
            data_type = request.WhichOneof("data_oneof")
            predictions = predict(self.user_model, features, datadef.names, meta=meta)

            # Construct meta data
            meta = prediction_pb2.Meta()
            metaJson = {}
            tags = get_custom_tags(self.user_model)
            if tags:
                metaJson["tags"] = tags
            metrics = get_custom_metrics(self.user_model)
            if metrics:
                metaJson["metrics"] = metrics
            json_format.ParseDict(metaJson, meta)

            if isinstance(predictions, np.ndarray) or data_type == "data":
                predictions = np.array(predictions)
                if len(predictions.shape) > 1:
                    class_names = get_class_names(
                        self.user_model, predictions.shape[1])
                else:
                    class_names = []
                if data_type == "data":
                    default_data_type = request.data.WhichOneof("data_oneof")
                else:
                    default_data_type = "tensor"
                data = array_to_grpc_datadef(
                    predictions, class_names, default_data_type)
                return prediction_pb2.SeldonMessage(data=data, meta=meta)
            else:
                return prediction_pb2.SeldonMessage(binData=predictions, meta=meta)

    def SendFeedback(self, feedback, context):
        if hasattr(self.user_model, "send_feedback_grpc"):
            self.user_model.send_feedback_grpc(feedback)
        else:
            datadef_request = feedback.request.data
            features = grpc_datadef_to_array(datadef_request)
            truth = grpc_datadef_to_array(feedback.truth)
            reward = feedback.reward
            # send_feedback expects (reward, truth) in that order.
            send_feedback(self.user_model, features,
                          datadef_request.names, reward, truth)
        return prediction_pb2.SeldonMessage()


def get_grpc_server(user_model, debug=False, annotations={}, trace_interceptor=None):
    seldon_model = SeldonModelGRPC(user_model)
    options = []
    if ANNOTATION_GRPC_MAX_MSG_SIZE in annotations:
        max_msg = int(annotations[ANNOTATION_GRPC_MAX_MSG_SIZE])
        logger.info(
            "Setting grpc max message and receive length to %d", max_msg)
        options.append(('grpc.max_message_length', max_msg))
        options.append(('grpc.max_receive_message_length', max_msg))

    server = grpc.server(futures.ThreadPoolExecutor(
        max_workers=10), options=options)

    if trace_interceptor:
        from grpc_opentracing.grpcext import intercept_server
        server = intercept_server(server, trace_interceptor)

    prediction_pb2_grpc.add_ModelServicer_to_server(seldon_model, server)
    return server
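

# Illustrative usage sketch: one way to bind and start the gRPC server.
# ExampleModel (defined above for illustration) and the port are assumptions;
# the sleep loop is the classic grpc-python serving pattern, since the server
# runs on daemon threads.
def _example_run_grpc():
    import time
    server = get_grpc_server(ExampleModel())
    server.add_insecure_port("[::]:5001")
    server.start()
    try:
        while True:
            time.sleep(86400)
    except KeyboardInterrupt:
        server.stop(0)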


# ----------------------------
# Flatbuffers (experimental)
# ----------------------------


class SeldonFlatbuffersServer(TCPServer):
    def __init__(self, user_model):
        super(SeldonFlatbuffersServer, self).__init__()
        self.user_model = user_model

    @gen.coroutine
    def handle_stream(self, stream, address):
        while True:
            try:
                # Each message is length-prefixed by a 4-byte little-endian int.
                data = yield stream.read_bytes(4)
                obj = struct.unpack('<i', data)
                len_msg = obj[0]
                data = yield stream.read_bytes(len_msg)
                try:
                    features, names = SeldonRPCToNumpyArray(data)
                    predictions = np.array(
                        predict(self.user_model, features, names))
                    if len(predictions.shape) > 1:
                        class_names = get_class_names(
                            self.user_model, predictions.shape[1])
                    else:
                        class_names = []
                    outData = NumpyArrayToSeldonRPC(predictions, class_names)
                    yield stream.write(outData)
                except StreamClosedError:
                    logger.exception(
                        "Stream closed during processing: %s", address)
                    break
                except Exception:
                    tb = traceback.format_exc()
                    logger.exception(
                        "Caught exception during processing: %s %s", address, tb)
                    outData = CreateErrorMsg(tb)
                    yield stream.write(outData)
                    stream.close()
                    break
            except StreamClosedError:
                logger.exception(
                    "Stream closed during input stream read: %s", address)
                break


def run_flatbuffers_server(user_model, port, debug=False):
    server = SeldonFlatbuffersServer(user_model)
    server.listen(port)
    logger.info("Tornado Server listening on port %s", port)
    tornado.ioloop.IOLoop.current().start()
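

# Illustrative usage sketch: starting the experimental flatbuffers server with
# the hypothetical ExampleModel; the port is arbitrary. This call blocks in the
# Tornado IOLoop until interrupted.
def _example_run_flatbuffers():
    run_flatbuffers_server(ExampleModel(), 9000)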