/
topology_graph.py
417 lines (367 loc) · 18.9 KB
/
topology_graph.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
import asyncio
import copy
import re
from collections import defaultdict
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import grpc.aio
from jina import __default_endpoint__
from jina.excepts import InternalNetworkError
from jina.serve.networking import GrpcConnectionPool
from jina.serve.runtimes.helper import _parse_specific_params
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
from jina.types.request.data import DataRequest
class TopologyGraph:
    """
    :class TopologyGraph is a class that describes a computational graph of nodes, where each node represents
        a Deployment that needs to be sent requests in the order respecting the path traversal.

    :param graph_representation: A dictionary describing the topology of the Deployments. 2 special nodes are expected, the name `start-gateway` and `end-gateway` to
        determine the nodes that receive the very first request and the ones whose response needs to be sent back to the client. All the nodes with no outgoing nodes
        will be considered to be floating, and they will be "flagged" so that the user can ignore their tasks and not await them.
    :param graph_conditions: A dictionary describing which Executors have special conditions to be fulfilled by the `Documents` to be sent to them.
    :param deployments_disable_reduce: list of deployment names for which reducing of requests arriving from multiple predecessors is disabled (reduce is enabled by default).
    :param timeout_send: timeout in seconds used when sending requests to the deployments.
    :param retries: number of retries per gRPC call; -1 means use the default retry policy.
    """

    class _ReqReplyNode:
        """A node of the topology: wraps a single Deployment and knows how to
        await its predecessors, filter/reduce the accumulated requests and
        forward them to the deployment through the connection pool."""

        def __init__(
            self,
            name: str,
            number_of_parts: int = 1,
            floating: bool = False,
            filter_condition: Optional[dict] = None,
            reduce: bool = True,
            timeout_send: Optional[float] = None,
            retries: Optional[int] = -1,
        ):
            self.name = name
            self.outgoing_nodes = []
            # how many upstream requests must arrive before this node sends
            self.number_of_parts = number_of_parts
            # floating nodes have no outgoing edges; the gateway does not await them
            self.floating = floating
            self.parts_to_send = []
            self.start_time = None
            self.end_time = None
            self.status = None
            self._filter_condition = filter_condition
            self._reduce = reduce
            self._timeout_send = timeout_send
            self._retries = retries
            # parameters returned by the Executor under DataRequestHandler._KEY_RESULT
            self.result_in_params_returned = None

        @property
        def leaf(self):
            """Whether this node has no outgoing nodes."""
            return len(self.outgoing_nodes) == 0

        def _update_requests_with_filter_condition(self, need_copy):
            # Restrict every pending request to the Documents that match the
            # filter condition; deep-copy first when the request objects are shared.
            for i in range(len(self.parts_to_send)):
                req = (
                    self.parts_to_send[i]
                    if not need_copy
                    else copy.deepcopy(self.parts_to_send[i])
                )
                filtered_docs = req.docs.find(self._filter_condition)
                req.data.docs = filtered_docs
                self.parts_to_send[i] = req

        def _update_request_by_params(
            self, deployment_name: str, request_input_parameters: Dict
        ):
            # Narrow the request parameters to the ones addressed to this deployment.
            specific_parameters = _parse_specific_params(
                request_input_parameters, deployment_name
            )
            for i in range(len(self.parts_to_send)):
                self.parts_to_send[i].parameters = specific_parameters

        def _handle_internalnetworkerror(self, err):
            # Enrich the gRPC error details with deployment/address context
            # before re-raising, so the client gets an actionable message.
            err_code = err.code()
            if err_code == grpc.StatusCode.UNAVAILABLE:
                err._details = (
                    err.details()
                    + f' |Gateway: Communication error with deployment {self.name} at address(es) {err.dest_addr}. '
                    f'Head or worker(s) may be down.'
                )
                raise err
            elif err_code == grpc.StatusCode.DEADLINE_EXCEEDED:
                err._details = (
                    err.details()
                    + f'|Gateway: Connection with deployment {self.name} at address(es) {err.dest_addr} could be established, but timed out.'
                    f' You can increase the allowed time by setting `timeout_send` in your Flow YAML `with` block or Flow `__init__()` method.'
                )
                raise err
            else:
                # bare raise re-raises the exception active in the caller's except block
                raise

        def get_endpoints(self, connection_pool: GrpcConnectionPool) -> asyncio.Task:
            """Discover the endpoints exposed by this node's deployment.

            :param connection_pool: the connection pool used to reach the deployment
            :return: a task resolving to the deployment's endpoint discovery response
            """
            return connection_pool.send_discover_endpoint(
                self.name, retries=self._retries
            )

        async def _wait_previous_and_send(
            self,
            request: Optional[DataRequest],
            previous_task: Optional[asyncio.Task],
            connection_pool: GrpcConnectionPool,
            endpoint: Optional[str],
            executor_endpoint_mapping: Optional[Dict] = None,
            target_executor_pattern: Optional[str] = None,
            request_input_parameters: Optional[Dict] = None,
            copy_request_at_send: bool = False,
        ):
            # NOTE: default changed from a shared mutable `{}` to None
            # (mutable-default pitfall); behavior is identical.
            if request_input_parameters is None:
                request_input_parameters = {}
            # Check my condition and send request with the condition
            metadata = {}
            if previous_task is not None:
                result = await previous_task
                request, metadata = result[0], result[1]
            if metadata and 'is-error' in metadata:
                # an upstream node already failed: propagate without sending
                return request, metadata
            elif request is not None:
                request.parameters = _parse_specific_params(
                    request.parameters, self.name
                )
                if copy_request_at_send:
                    self.parts_to_send.append(copy.deepcopy(request))
                else:
                    self.parts_to_send.append(request)
                # this is a specific needs
                if len(self.parts_to_send) == self.number_of_parts:
                    self.start_time = datetime.utcnow()
                    self._update_request_by_params(
                        self.name, request_input_parameters
                    )
                    if self._filter_condition is not None:
                        self._update_requests_with_filter_condition(
                            need_copy=not copy_request_at_send
                        )
                    if self._reduce and len(self.parts_to_send) > 1:
                        self.parts_to_send = [
                            DataRequestHandler.reduce_requests(self.parts_to_send)
                        ]

                    # avoid sending to executor which does not bind to this endpoint
                    if endpoint is not None and executor_endpoint_mapping is not None:
                        if (
                            endpoint not in executor_endpoint_mapping[self.name]
                            and __default_endpoint__
                            not in executor_endpoint_mapping[self.name]
                        ):
                            return request, metadata

                    if target_executor_pattern is not None and not re.match(
                        target_executor_pattern, self.name
                    ):
                        return request, metadata
                    # otherwise, send to executor and get response
                    try:
                        resp, metadata = await connection_pool.send_requests_once(
                            requests=self.parts_to_send,
                            deployment=self.name,
                            head=True,
                            endpoint=endpoint,
                            timeout=self._timeout_send,
                            retries=self._retries,
                        )
                        if DataRequestHandler._KEY_RESULT in resp.parameters:
                            # Accumulate results from each Node and then add them to the original
                            self.result_in_params_returned = resp.parameters[
                                DataRequestHandler._KEY_RESULT
                            ]
                        request.parameters = request_input_parameters
                        resp.parameters = request_input_parameters
                        self.parts_to_send.clear()
                    except InternalNetworkError as err:
                        # always raises (with enriched details)
                        self._handle_internalnetworkerror(err)

                    self.end_time = datetime.utcnow()
                    if metadata and 'is-error' in metadata:
                        self.status = resp.header.status
                    return resp, metadata

            return None, {}

        def get_leaf_tasks(
            self,
            connection_pool: GrpcConnectionPool,
            request_to_send: Optional[DataRequest],
            previous_task: Optional[asyncio.Task],
            endpoint: Optional[str] = None,
            executor_endpoint_mapping: Optional[Dict] = None,
            target_executor_pattern: Optional[str] = None,
            request_input_parameters: Optional[Dict] = None,
            request_input_has_specific_params: bool = False,
            copy_request_at_send: bool = False,
        ) -> List[Tuple[bool, asyncio.Task]]:
            """
            Gets all the tasks corresponding from all the subgraphs born from this node

            :param connection_pool: The connection_pool need to actually send the requests
            :param request_to_send: Optional request to be sent when the node is an origin of a graph
            :param previous_task: Optional task coming from the predecessor of the Node
            :param endpoint: Optional string defining the endpoint of this request
            :param executor_endpoint_mapping: Optional map that maps the name of a Deployment with the endpoints that it binds to so that they can be skipped if needed
            :param target_executor_pattern: Optional regex pattern for the target executor to decide whether or not the Executor should receive the request
            :param request_input_parameters: The parameters coming from the Request as they arrive to the gateway
            :param request_input_has_specific_params: Parameter added for optimization. If this is False, there is no need to copy at all the request
            :param copy_request_at_send: Copy the request before actually calling the `ConnectionPool` sending

            .. note:
                deployment1 -> outgoing_nodes: deployment2
                deployment2 -> outgoing_nodes: deployment4
                deployment3 -> outgoing_nodes: deployment4
                deployment4 -> outgoing_nodes: deployment6
                deployment5 -> outgoing_nodes: deployment6
                deployment6 -> outgoing_nodes: []

                |-> deployment1 -> deployment2 -->
                |                                 | -> deployment4 --->
                |-> deployment3 ----------------->                    | -> deployment6
                |-> deployment5 ------------------------------------->

                Let's imagine a graph from this. Node corresponding to Deployment6 will receive 2 calls from deployment4 and deployment5.
                The task returned by `deployment6` will backpropagated to the caller of deployment1.get_leaf_tasks, deployment3.get_leaf_tasks and deployment5.get_leaf_tasks.

                When the caller of these methods await them, they will fire the logic of sending requests and responses from and to every deployment

            :return: Return a list of tuples, where tasks corresponding to the leafs of all the subgraphs born from this node are in each tuple.
                These tasks will be based on awaiting for the task from previous_node and sending a request to the corresponding node. The other member of the pair
                is a flag indicating if the task is to be awaited by the gateway or not.
            """
            # avoid a shared mutable default; semantics identical to `{}`
            if request_input_parameters is None:
                request_input_parameters = {}
            wait_previous_and_send_task = asyncio.create_task(
                self._wait_previous_and_send(
                    request=request_to_send,
                    previous_task=previous_task,
                    connection_pool=connection_pool,
                    endpoint=endpoint,
                    executor_endpoint_mapping=executor_endpoint_mapping,
                    target_executor_pattern=target_executor_pattern,
                    request_input_parameters=request_input_parameters,
                    copy_request_at_send=copy_request_at_send,
                )
            )
            if self.leaf:  # I am like a leaf
                return [
                    (not self.floating, wait_previous_and_send_task)
                ]  # I am the last in the chain
            hanging_tasks_tuples = []
            num_outgoing_nodes = len(self.outgoing_nodes)
            for outgoing_node in self.outgoing_nodes:
                t = outgoing_node.get_leaf_tasks(
                    connection_pool=connection_pool,
                    request_to_send=None,
                    previous_task=wait_previous_and_send_task,
                    endpoint=endpoint,
                    executor_endpoint_mapping=executor_endpoint_mapping,
                    target_executor_pattern=target_executor_pattern,
                    request_input_parameters=request_input_parameters,
                    request_input_has_specific_params=request_input_has_specific_params,
                    # only copy when the request can diverge per branch
                    copy_request_at_send=num_outgoing_nodes > 1
                    and request_input_has_specific_params,
                )
                # We are interested in the last one, that will be the task that awaits all the previous
                hanging_tasks_tuples.extend(t)
            return hanging_tasks_tuples

        def add_route(self, request: 'DataRequest'):
            """
            Add routes to the DataRequest based on the state of request processing

            :param request: the request to add the routes to
            :return: modified request with added routes
            """

            def _find_route(request):
                for r in request.routes:
                    if r.executor == self.name:
                        return r
                return None

            r = _find_route(request)
            # only record a route if this node actually processed the request
            if r is None and self.start_time:
                r = request.routes.add()
                r.executor = self.name
                r.start_time.FromDatetime(self.start_time)
                if self.end_time:
                    r.end_time.FromDatetime(self.end_time)
                if self.status:
                    r.status.CopyFrom(self.status)
            for outgoing_node in self.outgoing_nodes:
                request = outgoing_node.add_route(request=request)
            return request

    class _EndGatewayNode(_ReqReplyNode):
        """
        Dummy node to be added before the gateway. This is to solve a problem we had when implementing `floating Executors`.
        If we do not add this at the end, this structure does not work:

        GATEWAY -> EXEC1 -> FLOATING
                -> GATEWAY
        """

        def get_endpoints(self, *args, **kwargs) -> asyncio.Task:
            """Return a task resolving to the default endpoints, without any network call.

            :param args: ignored, kept for signature compatibility
            :param kwargs: ignored, kept for signature compatibility
            :return: a task resolving to ``(default_endpoints_proto, None)``
            """

            async def task_wrapper():
                from jina.serve.networking import default_endpoints_proto

                return default_endpoints_proto, None

            return asyncio.create_task(task_wrapper())

        def get_leaf_tasks(
            self, previous_task: Optional[asyncio.Task], *args, **kwargs
        ) -> List[Tuple[bool, asyncio.Task]]:
            """Terminate the chain: the previous task is always awaited by the gateway.

            :param previous_task: the task coming from the predecessor of this node
            :param args: ignored, kept for signature compatibility
            :param kwargs: ignored, kept for signature compatibility
            :return: a single-element list with ``(True, previous_task)``
            """
            return [(True, previous_task)]

    def __init__(
        self,
        graph_representation: Dict,
        graph_conditions: Optional[Dict] = None,
        deployments_disable_reduce: Optional[List[str]] = None,
        timeout_send: Optional[float] = 1.0,
        retries: Optional[int] = -1,
        *args,
        **kwargs,
    ):
        # avoid shared mutable defaults; semantics identical to `{}` / `[]`
        graph_conditions = graph_conditions or {}
        deployments_disable_reduce = deployments_disable_reduce or []

        num_parts_per_node = defaultdict(int)
        if 'start-gateway' in graph_representation:
            origin_node_names = graph_representation['start-gateway']
        else:
            origin_node_names = set()
        floating_deployment_set = set()
        node_set = set()
        for node_name, outgoing_node_names in graph_representation.items():
            if node_name not in {'start-gateway', 'end-gateway'}:
                node_set.add(node_name)
                # a node with no outgoing edges is floating: not awaited by the gateway
                if len(outgoing_node_names) == 0:
                    floating_deployment_set.add(node_name)
            for out_node_name in outgoing_node_names:
                if out_node_name not in {'start-gateway', 'end-gateway'}:
                    node_set.add(out_node_name)
                    # count incoming edges: the node waits for this many parts
                    num_parts_per_node[out_node_name] += 1

        nodes = {}
        for node_name in node_set:
            condition = graph_conditions.get(node_name, None)
            nodes[node_name] = self._ReqReplyNode(
                name=node_name,
                number_of_parts=num_parts_per_node[node_name]
                if num_parts_per_node[node_name] > 0
                else 1,
                floating=node_name in floating_deployment_set,
                filter_condition=condition,
                reduce=node_name not in deployments_disable_reduce,
                timeout_send=timeout_send,
                retries=retries,
            )

        for node_name, outgoing_node_names in graph_representation.items():
            if node_name not in ['start-gateway', 'end-gateway']:
                for out_node_name in outgoing_node_names:
                    if out_node_name not in ['start-gateway', 'end-gateway']:
                        nodes[node_name].outgoing_nodes.append(nodes[out_node_name])
                    if out_node_name == 'end-gateway':
                        nodes[node_name].outgoing_nodes.append(
                            self._EndGatewayNode(name='__end_gateway__', floating=False)
                        )

        self._origin_nodes = [nodes[node_name] for node_name in origin_node_names]
        self.has_filter_conditions = bool(graph_conditions)

    def add_routes(self, request: 'DataRequest'):
        """
        Add routes to the DataRequest based on the state of request processing

        :param request: the request to add the routes to
        :return: modified request with added routes
        """
        for node in self._origin_nodes:
            request = node.add_route(request=request)
        return request

    @property
    def origin_nodes(self):
        """
        The list of origin nodes, the one that depend only on the gateway, so all the subgraphs will be born from them and they will
        send to their deployments the request as received by the client.

        :return: A list of nodes
        """
        return self._origin_nodes

    @property
    def all_nodes(self):
        """
        The set of all the nodes inside this Graph

        :return: A list of nodes
        """

        def _get_all_nodes(node, accum, accum_names):
            # depth-first walk, de-duplicating by node name
            if node.name not in accum_names:
                accum.append(node)
                accum_names.append(node.name)
            for n in node.outgoing_nodes:
                _get_all_nodes(n, accum, accum_names)
            return accum, accum_names

        nodes = []
        node_names = []
        for origin_node in self.origin_nodes:
            subtree_nodes, subtree_node_names = _get_all_nodes(origin_node, [], [])
            for st_node, st_node_name in zip(subtree_nodes, subtree_node_names):
                if st_node_name not in node_names:
                    nodes.append(st_node)
                    node_names.append(st_node_name)
        return nodes

    def collect_all_results(self):
        """Collect all the results from every node into a single dictionary so that gateway can collect them

        :return: A dictionary of the results
        """
        res = {}
        for node in self.all_nodes:
            if node.result_in_params_returned:
                res.update(node.result_in_params_returned)
        return res