#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Coordinator between client and engines"""
import argparse
import atexit
import datetime
import functools
import json
import logging
import os
import pickle
import queue
import random
import re
import signal
import string
import sys
import threading
import traceback
import urllib.parse
import urllib.request
import zipfile
from concurrent import futures
from io import BytesIO
import grpc
from packaging import version
from gscoordinator.io_utils import StdStreamWrapper
# capture system stdout
sys.stdout = StdStreamWrapper(sys.stdout)
sys.stderr = StdStreamWrapper(sys.stderr)
from graphscope.client.utils import GRPCUtils
from graphscope.framework import utils
from graphscope.framework.dag_utils import create_graph
from graphscope.framework.dag_utils import create_loader
from graphscope.framework.errors import AnalyticalEngineInternalError
from graphscope.framework.graph_utils import normalize_parameter_edges
from graphscope.framework.graph_utils import normalize_parameter_vertices
from graphscope.framework.loader import Loader
from graphscope.framework.utils import PipeMerger
from graphscope.framework.utils import get_tempdir
from graphscope.framework.utils import normalize_data_type_str
from graphscope.proto import attr_value_pb2
from graphscope.proto import coordinator_service_pb2_grpc
from graphscope.proto import engine_service_pb2_grpc
from graphscope.proto import error_codes_pb2
from graphscope.proto import graph_def_pb2
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
from gscoordinator.cluster import KubernetesClusterLauncher
from gscoordinator.dag_manager import DAGManager
from gscoordinator.dag_manager import GSEngine
from gscoordinator.dag_manager import split_op_result
from gscoordinator.launcher import LocalLauncher
from gscoordinator.object_manager import GraphMeta
from gscoordinator.object_manager import GremlinResultSet
from gscoordinator.object_manager import InteractiveQueryManager
from gscoordinator.object_manager import LearningInstanceManager
from gscoordinator.object_manager import LibMeta
from gscoordinator.object_manager import ObjectManager
from gscoordinator.utils import ANALYTICAL_ENGINE_JAVA_INIT_CLASS_PATH
from gscoordinator.utils import ANALYTICAL_ENGINE_JAVA_JVM_OPTS
from gscoordinator.utils import GRAPHSCOPE_HOME
from gscoordinator.utils import RESOURCE_DIR_NAME
from gscoordinator.utils import WORKSPACE
from gscoordinator.utils import check_gremlin_server_ready
from gscoordinator.utils import compile_app
from gscoordinator.utils import compile_graph_frame
from gscoordinator.utils import create_single_op_dag
from gscoordinator.utils import dump_string
from gscoordinator.utils import get_app_sha256
from gscoordinator.utils import get_graph_sha256
from gscoordinator.utils import get_lib_path
from gscoordinator.utils import op_pre_process
from gscoordinator.utils import str2bool
from gscoordinator.utils import to_maxgraph_schema
from gscoordinator.version import __version__
# endpoint of a pre-launched analytical engine
GS_DEBUG_ENDPOINT = os.environ.get("GS_DEBUG_ENDPOINT", "")
# 2 GB - 1 (INT32_MAX), the largest message length gRPC will accept
GS_GRPC_MAX_MESSAGE_LENGTH = 2 * 1024 * 1024 * 1024 - 1
logger = logging.getLogger("graphscope")
def catch_unknown_errors(response_on_error=None, using_yield=False):
"""A catcher that catches all (unknown) exceptions in gRPC handlers to ensure
the client not think the coordinator services is crashed.
"""
def catch_exceptions(handler):
@functools.wraps(handler)
def handler_execution(self, request, context):
try:
if using_yield:
for result in handler(self, request, context):
yield result
else:
yield handler(self, request, context)
except Exception as exc:
error_message = repr(exc)
error_traceback = traceback.format_exc()
context.set_code(error_codes_pb2.COORDINATOR_INTERNAL_ERROR)
context.set_details(
                    'Error occurred in handler: "%s", with traceback: ' % error_message
+ error_traceback
)
if response_on_error is not None:
yield response_on_error
return handler_execution
return catch_exceptions
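# A minimal usage sketch of catch_unknown_errors (this mirrors the *Wrapped
# pattern used throughout CoordinatorServiceServicer below): the wrapped
# handler always becomes a generator, so unary RPC methods iterate it and
# return the first item. `Foo`/`_Foo` are hypothetical names for illustration.
#
#     FooWrapped = catch_unknown_errors(message_pb2.FooResponse())(_Foo)
#
#     def Foo(self, request, context):
#         for result in self.FooWrapped(request, context):
#             return result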
class CoordinatorServiceServicer(
coordinator_service_pb2_grpc.CoordinatorServiceServicer
):
"""Provides methods that implement functionality of master service server.
Holding:
1. process: the grape-engine process.
2. session_id: the handle for a particular session to engine
3. vineyard_ipc_socket: returned by grape-engine
4. vineyard_rpc_socket: returned by grape-engine
5. engine_endpoint: the endpoint of grape-engine
6. engine_servicer: grpc connection to grape-engine
"""
def __init__(self, launcher, dangling_timeout_seconds, log_level="INFO"):
self._launcher = launcher
self._request = None
self._object_manager = ObjectManager()
self._grpc_utils = GRPCUtils()
self._dangling_detecting_timer = None
self._config_logging(log_level)
# only one connection is allowed at the same time
# generate session id when a client connection is established
self._session_id = None
# launch engines
if len(GS_DEBUG_ENDPOINT) > 0:
logger.info(
"Coordinator will connect to engine with endpoint: " + GS_DEBUG_ENDPOINT
)
self._launcher._analytical_engine_endpoint = GS_DEBUG_ENDPOINT
else:
if not self._launcher.start():
raise RuntimeError("Coordinator Launching failed.")
self._launcher_type = self._launcher.type()
self._instance_id = self._launcher.instance_id
# string of a list of hosts, comma separated
self._engine_hosts = self._launcher.hosts
self._k8s_namespace = ""
if self._launcher_type == types_pb2.K8S:
self._k8s_namespace = self._launcher.get_namespace()
# analytical engine
self._analytical_engine_stub = self._create_grpc_stub()
self._analytical_engine_config = None
self._analytical_engine_endpoint = None
self._builtin_workspace = os.path.join(WORKSPACE, "builtin")
        # The UDF app workspace is bound to a specific session when a client connects.
        self._udf_app_workspace = None
        # The java class path should contain:
        #   1) the java runtime path
        #   2) added resources; the most recently added resource is placed first.
self._java_class_path = ANALYTICAL_ENGINE_JAVA_INIT_CLASS_PATH
logger.info("Java initial class path set to: {}".format(self._java_class_path))
self._jvm_opts = ANALYTICAL_ENGINE_JAVA_JVM_OPTS
# control log fetching
self._streaming_logs = True
self._pipe_merged = PipeMerger(sys.stdout, sys.stderr)
# dangling check
self._dangling_timeout_seconds = dangling_timeout_seconds
if self._dangling_timeout_seconds >= 0:
self._dangling_detecting_timer = threading.Timer(
interval=self._dangling_timeout_seconds,
function=self._cleanup,
args=(
True,
True,
),
)
self._dangling_detecting_timer.start()
# a lock that protects the coordinator
self._lock = threading.Lock()
atexit.register(self._cleanup)
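    # Note: _cleanup may be triggered from several places (the dangling-timeout
    # timer above, atexit, __del__, and CloseSession), so it is expected to be
    # safe to call more than once.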
def __del__(self):
self._cleanup()
def _generate_session_id(self):
return "session_" + "".join(
[random.choice(string.ascii_lowercase) for _ in range(8)]
)
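    # Example: _generate_session_id() returns e.g. "session_kqzfxtlm"
    # (8 random lowercase letters; the value shown is illustrative only).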
def _config_logging(self, log_level):
"""Set log level basic on config.
Args:
log_level (str): Log level of stdout handler
"""
logging.basicConfig(level=logging.CRITICAL)
if log_level:
log_level = log_level.upper()
logger = logging.getLogger("graphscope")
logger.setLevel(log_level)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(log_level)
stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
formatter = logging.Formatter(
"%(asctime)s [%(levelname)s][%(module)s:%(lineno)d]: %(message)s"
)
stdout_handler.setFormatter(formatter)
stderr_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
logger.addHandler(stderr_handler)
def ConnectSession(self, request, context):
for result in self.ConnectSessionWrapped(request, context):
return result
def _ConnectSession(self, request, context):
# A session is already connected.
if self._request:
if getattr(request, "reconnect", False):
return message_pb2.ConnectSessionResponse(
session_id=self._session_id,
cluster_type=self._launcher.type(),
num_workers=self._launcher.num_workers,
engine_config=json.dumps(self._analytical_engine_config),
pod_name_list=self._engine_hosts.split(","),
namespace=self._k8s_namespace,
)
            # Connection failed: at most one connection is allowed at a time.
context.set_code(grpc.StatusCode.ALREADY_EXISTS)
context.set_details(
"Cannot setup more than one connection at the same time."
)
return message_pb2.ConnectSessionResponse()
# Connect to serving coordinator.
self._key_to_op = {}
# dict of op_def_pb2.OpResult
self._op_result_pool = {}
self._request = request
try:
self._analytical_engine_config = self._get_engine_config()
except grpc.RpcError as e:
logger.error(
"Get engine config failed, code: %s, details: %s",
e.code().name,
e.details(),
)
context.set_code(e.code())
context.set_details(e.details())
return message_pb2.ConnectSessionResponse()
# Generate session id
self._session_id = self._generate_session_id()
self._udf_app_workspace = os.path.join(
WORKSPACE, self._instance_id, self._session_id
)
self._resource_dir = os.path.join(
WORKSPACE, self._instance_id, self._session_id, RESOURCE_DIR_NAME
)
self._launcher.set_session_workspace(self._session_id)
# Session connected, fetch logs via gRPC.
self._streaming_logs = True
sys.stdout.drop(False)
# check version compatibility from client
sv = version.parse(__version__)
cv = version.parse(self._request.version)
if sv.major != cv.major or sv.minor != cv.minor:
error_msg = f"Version between client and server is inconsistent: {self._request.version} vs {__version__}"
logger.warning(error_msg)
context.set_code(error_codes_pb2.CONNECTION_ERROR)
context.set_details(error_msg)
return message_pb2.ConnectSessionResponse()
return message_pb2.ConnectSessionResponse(
session_id=self._session_id,
cluster_type=self._launcher.type(),
num_workers=self._launcher.num_workers,
engine_config=json.dumps(self._analytical_engine_config),
pod_name_list=self._engine_hosts.split(","),
namespace=self._k8s_namespace,
)
ConnectSessionWrapped = catch_unknown_errors(message_pb2.ConnectSessionResponse())(
_ConnectSession
)
def HeartBeat(self, request, context):
for result in self.HeartBeatWrapped(request, context):
return result
def _HeartBeat(self, request, context):
if self._request and self._request.dangling_timeout_seconds >= 0:
# Reset dangling detect timer
if self._dangling_detecting_timer:
self._dangling_detecting_timer.cancel()
self._dangling_detecting_timer = threading.Timer(
interval=self._request.dangling_timeout_seconds,
function=self._cleanup,
args=(
self._request.cleanup_instance,
True,
),
)
self._dangling_detecting_timer.start()
# analytical engine
request = message_pb2.HeartBeatRequest()
return self._analytical_engine_stub.HeartBeat(request)
HeartBeatWrapped = catch_unknown_errors(message_pb2.HeartBeatResponse())(_HeartBeat)
def run_on_analytical_engine( # noqa: C901
self,
dag_def: op_def_pb2.DagDef,
dag_bodies,
loader_op_bodies: dict,
):
def _generate_runstep_request(session_id, dag_def, dag_bodies):
runstep_requests = []
# head
runstep_requests.append(
message_pb2.RunStepRequest(
head=message_pb2.RunStepRequestHead(
session_id=session_id, dag_def=dag_def
)
)
)
runstep_requests.extend(dag_bodies)
for item in runstep_requests:
yield item
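        # The request stream sent to the analytical engine mirrors the
        # client-facing RunStep protocol: one head message carrying the
        # dag_def, followed by body chunks, so that no single gRPC message
        # exceeds GS_GRPC_MAX_MESSAGE_LENGTH.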
        # preprocess each op before running it on the analytical engine
for op in dag_def.op:
self._key_to_op[op.key] = op
op_pre_process(
op,
self._op_result_pool,
self._key_to_op,
engine_hosts=self._engine_hosts,
engine_config=self._analytical_engine_config,
engine_java_class_path=self._java_class_path, # may be needed in CREATE_GRAPH or RUN_APP
engine_jvm_opts=self._jvm_opts,
)
# Handle op that depends on loader (data source)
if op.op == types_pb2.CREATE_GRAPH or op.op == types_pb2.ADD_LABELS:
for key_of_parent_op in op.parents:
parent_op = self._key_to_op[key_of_parent_op]
if parent_op.op == types_pb2.DATA_SOURCE:
# handle bodies of loader op
if parent_op.key in loader_op_bodies:
dag_bodies.extend(loader_op_bodies[parent_op.key])
# Compile app or not.
if op.op == types_pb2.BIND_APP:
op, app_sig, app_lib_path = self._maybe_compile_app(op)
            # Compile graph or not.
            # Arrow property graphs and projected graphs need to be compiled.
            # If the engine crashed, we will get a SocketClosed gRPC exception;
            # in that case, we should notify the client that the engine is dead.
if (
(
op.op == types_pb2.CREATE_GRAPH
and op.attr[types_pb2.GRAPH_TYPE].graph_type
== graph_def_pb2.ARROW_PROPERTY
)
or op.op == types_pb2.TRANSFORM_GRAPH
or op.op == types_pb2.PROJECT_TO_SIMPLE
or op.op == types_pb2.ADD_LABELS
):
op = self._maybe_register_graph(op, self._session_id)
# generate runstep requests, and run on analytical engine
requests = _generate_runstep_request(self._session_id, dag_def, dag_bodies)
# response
response_head = None
response_bodies = []
try:
responses = self._analytical_engine_stub.RunStep(requests)
for response in responses:
if response.HasField("head"):
response_head = response
else:
response_bodies.append(response)
except grpc.RpcError as e:
logger.error(
"Engine RunStep failed, code: %s, details: %s",
e.code().name,
e.details(),
)
if e.code() == grpc.StatusCode.INTERNAL:
                # TODO: separate the stacktrace from normal error messages;
                # it is too verbose.
if len(e.details()) > 3072: # 3k bytes
msg = f"{e.details()[:3072]} ... [truncated]"
else:
msg = e.details()
raise AnalyticalEngineInternalError(msg)
else:
raise
# handle result from response stream
if response_head is None:
raise AnalyticalEngineInternalError(
"Missing head from the response stream."
)
for op_result in response_head.head.results:
            # record the result in the coordinator; it doesn't contain large data
self._op_result_pool[op_result.key] = op_result
# get the op corresponding to the result
op = self._key_to_op[op_result.key]
# register graph and dump graph schema
if op.op in (
types_pb2.CREATE_GRAPH,
types_pb2.PROJECT_GRAPH,
types_pb2.ADD_LABELS,
types_pb2.ADD_COLUMN,
):
schema_path = os.path.join(
get_tempdir(), op_result.graph_def.key + ".json"
)
vy_info = graph_def_pb2.VineyardInfoPb()
op_result.graph_def.extension.Unpack(vy_info)
self._object_manager.put(
op_result.graph_def.key,
GraphMeta(
op_result.graph_def.key,
vy_info.vineyard_id,
op_result.graph_def,
schema_path,
),
)
if op_result.graph_def.graph_type == graph_def_pb2.ARROW_PROPERTY:
dump_string(
to_maxgraph_schema(vy_info.property_schema_json),
schema_path,
)
vy_info.schema_path = schema_path
op_result.graph_def.extension.Pack(vy_info)
# register app
elif op.op == types_pb2.BIND_APP:
self._object_manager.put(
app_sig,
LibMeta(op_result.result.decode("utf-8"), "app", app_lib_path),
)
# unregister graph
elif op.op == types_pb2.UNLOAD_GRAPH:
self._object_manager.pop(op.attr[types_pb2.GRAPH_NAME].s.decode())
# unregister app
elif op.op == types_pb2.UNLOAD_APP:
self._object_manager.pop(op.attr[types_pb2.APP_NAME].s.decode())
return response_head, response_bodies
def run_on_interactive_engine(self, dag_def: op_def_pb2.DagDef):
response_head = message_pb2.RunStepResponse(
head=message_pb2.RunStepResponseHead()
)
response_bodies = []
for op in dag_def.op:
self._key_to_op[op.key] = op
op_pre_process(
op,
self._op_result_pool,
self._key_to_op,
engine_hosts=self._engine_hosts,
engine_config=self._analytical_engine_config,
)
if op.op == types_pb2.CREATE_INTERACTIVE_QUERY:
op_result = self._create_interactive_instance(op)
elif op.op == types_pb2.GREMLIN_QUERY:
op_result = self._execute_gremlin_query(op)
elif op.op == types_pb2.FETCH_GREMLIN_RESULT:
op_result = self._fetch_gremlin_result(op)
elif op.op == types_pb2.CLOSE_INTERACTIVE_QUERY:
op_result = self._close_interactive_instance(op)
elif op.op == types_pb2.SUBGRAPH:
op_result = self._gremlin_to_subgraph(op)
else:
raise RuntimeError("Unsupport op type: " + str(op.op))
            split_results = split_op_result(op_result)
            response_head.head.results.append(op_result)
            for i, chunk in enumerate(split_results):
                has_next = i + 1 < len(split_results)
                response_bodies.append(
                    message_pb2.RunStepResponse(
                        body=message_pb2.RunStepResponseBody(
                            chunk=chunk, has_next=has_next
                        )
                    )
                )
# record op result
self._op_result_pool[op.key] = op_result
return response_head, response_bodies
def run_on_learning_engine(self, dag_def: op_def_pb2.DagDef):
response_head = message_pb2.RunStepResponse(
head=message_pb2.RunStepResponseHead()
)
response_bodies = []
for op in dag_def.op:
self._key_to_op[op.key] = op
op_pre_process(
op,
self._op_result_pool,
self._key_to_op,
engine_hosts=self._engine_hosts,
engine_config=self._analytical_engine_config,
)
if op.op == types_pb2.CREATE_LEARNING_INSTANCE:
op_result = self._create_learning_instance(op)
elif op.op == types_pb2.CLOSE_LEARNING_INSTANCE:
op_result = self._close_learning_instance(op)
else:
raise RuntimeError("Unsupport op type: " + str(op.op))
response_head.head.results.append(op_result)
self._op_result_pool[op.key] = op_result
return response_head, response_bodies
def run_on_coordinator(
self,
dag_def: op_def_pb2.DagDef,
dag_bodies,
loader_op_bodies: dict,
):
response_head = message_pb2.RunStepResponse(
head=message_pb2.RunStepResponseHead()
)
response_bodies = []
for op in dag_def.op:
self._key_to_op[op.key] = op
op_pre_process(
op,
self._op_result_pool,
self._key_to_op,
engine_hosts=self._engine_hosts,
engine_config=self._analytical_engine_config,
)
if op.op == types_pb2.DATA_SOURCE:
op_result = self._process_data_source(op, dag_bodies, loader_op_bodies)
elif op.op == types_pb2.DATA_SINK:
op_result = self._process_data_sink(op)
else:
raise RuntimeError("Unsupport op type: " + str(op.op))
response_head.head.results.append(op_result)
self._op_result_pool[op.key] = op_result
return response_head, response_bodies
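    # RunStep splits the incoming DAG by target engine (via DAGManager) and
    # dispatches each sub-DAG to one of the run_on_* methods above; see _RunStep.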
def RunStep(self, request_iterator, context):
with self._lock:
for response in self.RunStepWrapped(request_iterator, context):
yield response
def _RunStep(self, request_iterator, context):
# split dag
dag_manager = DAGManager(request_iterator)
loader_op_bodies = {}
# response list for stream
responses = []
# head
responses.append(
message_pb2.RunStepResponse(head=message_pb2.RunStepResponseHead())
)
while not dag_manager.empty():
run_dag_on, dag, dag_bodies = dag_manager.next_dag()
error_code = error_codes_pb2.COORDINATOR_INTERNAL_ERROR
head = None
bodies = None
try:
# run on analytical engine
if run_dag_on == GSEngine.analytical_engine:
# need dag_bodies to load graph from pandas/numpy
error_code = error_codes_pb2.ANALYTICAL_ENGINE_INTERNAL_ERROR
head, bodies = self.run_on_analytical_engine(
dag, dag_bodies, loader_op_bodies
)
# run on interactive engine
elif run_dag_on == GSEngine.interactive_engine:
error_code = error_codes_pb2.INTERACTIVE_ENGINE_INTERNAL_ERROR
head, bodies = self.run_on_interactive_engine(dag)
# run on learning engine
elif run_dag_on == GSEngine.learning_engine:
error_code = error_codes_pb2.LEARNING_ENGINE_INTERNAL_ERROR
head, bodies = self.run_on_learning_engine(dag)
# run on coordinator
elif run_dag_on == GSEngine.coordinator:
error_code = error_codes_pb2.COORDINATOR_INTERNAL_ERROR
head, bodies = self.run_on_coordinator(
dag, dag_bodies, loader_op_bodies
)
# merge the responses
responses[0].head.results.extend(head.head.results)
responses.extend(bodies)
            except grpc.RpcError as exc:
                # Not raised by graphscope; the socket may have been closed, etc.
                context.set_code(exc.code())
                context.set_details(exc.details())
                for response in responses:
                    yield response
                return
            except Exception as exc:
                response_head = responses[0]
                response_head.head.code = error_code
                response_head.head.error_msg = (
                    "Error occurred during preprocessing, the traceback is: {0}".format(
                        traceback.format_exc()
                    )
                )
                response_head.head.full_exception = pickle.dumps(exc)
                for response in responses:
                    yield response
                return
for response in responses:
yield response
RunStepWrapped = catch_unknown_errors(
message_pb2.RunStepResponse(head=message_pb2.RunStepResponseHead()), True
)(_RunStep)
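    # Library resolution order used by _maybe_compile_app/_maybe_register_graph:
    #   1) GRAPHSCOPE_HOME/precompiled/builtin  -- precompiled, shipped libraries
    #   2) the builtin (or UDF) workspace       -- compiled earlier in this deployment
    #   3) compile from source, then distribute to the engine hosts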
def _maybe_compile_app(self, op):
app_sig = get_app_sha256(op.attr, self._java_class_path)
# try to get compiled file from GRAPHSCOPE_HOME/precompiled
space = os.path.join(GRAPHSCOPE_HOME, "precompiled", "builtin")
app_lib_path = get_lib_path(os.path.join(space, app_sig), app_sig)
if not os.path.isfile(app_lib_path):
space = self._builtin_workspace
if (types_pb2.GAR in op.attr) or (
op.attr[types_pb2.APP_ALGO].s.decode("utf-8").startswith("giraph:")
):
space = self._udf_app_workspace
# try to get compiled file from workspace
app_lib_path = get_lib_path(os.path.join(space, app_sig), app_sig)
if not os.path.isfile(app_lib_path):
# compile and distribute
compiled_path = self._compile_lib_and_distribute(
compile_app, app_sig, op
)
if app_lib_path != compiled_path:
raise RuntimeError(
f"Computed application library path not equal to compiled path, {app_lib_path} versus {compiled_path}"
)
op.attr[types_pb2.APP_LIBRARY_PATH].CopyFrom(
attr_value_pb2.AttrValue(s=app_lib_path.encode("utf-8"))
)
return op, app_sig, app_lib_path
def _maybe_register_graph(self, op, session_id):
graph_sig = get_graph_sha256(op.attr)
# try to get compiled file from GRAPHSCOPE_HOME/precompiled
space = os.path.join(GRAPHSCOPE_HOME, "precompiled", "builtin")
graph_lib_path = get_lib_path(os.path.join(space, graph_sig), graph_sig)
if not os.path.isfile(graph_lib_path):
space = self._builtin_workspace
# try to get compiled file from workspace
graph_lib_path = get_lib_path(os.path.join(space, graph_sig), graph_sig)
if not os.path.isfile(graph_lib_path):
# compile and distribute
compiled_path = self._compile_lib_and_distribute(
compile_graph_frame, graph_sig, op
)
if graph_lib_path != compiled_path:
raise RuntimeError(
f"Computed graph library path not equal to compiled path, {graph_lib_path} versus {compiled_path}"
)
if graph_sig not in self._object_manager:
# register graph
op_def = op_def_pb2.OpDef(op=types_pb2.REGISTER_GRAPH_TYPE)
op_def.attr[types_pb2.GRAPH_LIBRARY_PATH].CopyFrom(
attr_value_pb2.AttrValue(s=graph_lib_path.encode("utf-8"))
)
op_def.attr[types_pb2.TYPE_SIGNATURE].CopyFrom(
attr_value_pb2.AttrValue(s=graph_sig.encode("utf-8"))
)
op_def.attr[types_pb2.GRAPH_TYPE].CopyFrom(
attr_value_pb2.AttrValue(
graph_type=op.attr[types_pb2.GRAPH_TYPE].graph_type
)
)
dag_def = op_def_pb2.DagDef()
dag_def.op.extend([op_def])
try:
response_head, _ = self.run_on_analytical_engine(dag_def, [], {})
except grpc.RpcError as e:
logger.error(
"Register graph failed, code: %s, details: %s",
e.code().name,
e.details(),
)
if e.code() == grpc.StatusCode.INTERNAL:
raise AnalyticalEngineInternalError(e.details())
else:
raise
self._object_manager.put(
graph_sig,
LibMeta(
response_head.head.results[0].result,
"graph_frame",
graph_lib_path,
),
)
op.attr[types_pb2.TYPE_SIGNATURE].CopyFrom(
attr_value_pb2.AttrValue(s=graph_sig.encode("utf-8"))
)
return op
def FetchLogs(self, request, context):
while self._streaming_logs:
try:
info_message, error_message = self._pipe_merged.poll(timeout=2)
except queue.Empty:
info_message, error_message = "", ""
except Exception as e:
info_message, error_message = "WARNING: failed to read log: %s" % e, ""
if info_message or error_message:
if self._streaming_logs:
yield message_pb2.FetchLogsResponse(
info_message=info_message, error_message=error_message
)
def AddLib(self, request, context):
for result in self.AddLibWrapped(request, context):
return result
def _AddLib(self, request, context):
        if request.session_id != self._session_id:
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            context.set_details(
                f"Session handle not matched, {request.session_id} versus {self._session_id}"
            )
            return message_pb2.AddLibResponse()
os.makedirs(self._resource_dir, exist_ok=True)
gar = request.gar
fp = BytesIO(gar)
filename = None
with zipfile.ZipFile(fp, "r") as zip_ref:
zip_ref.extractall(self._resource_dir)
            logger.info(
                "Coordinator received an AddLib request containing files {}".format(
                    zip_ref.namelist()
                )
            )
            if len(zip_ref.namelist()) != 1:
                raise RuntimeError("Expected exactly one resource in a gar archive")
filename = zip_ref.namelist()[0]
full_filename = os.path.join(self._resource_dir, filename)
self._launcher.distribute_file(full_filename)
logger.info("Successfully distributed {}".format(full_filename))
if full_filename.endswith(".jar"):
logger.info("adding lib to java class path since it ends with .jar")
self._java_class_path = full_filename + ":" + self._java_class_path
logger.info("current java class path: {}".format(self._java_class_path))
return message_pb2.AddLibResponse()
AddLibWrapped = catch_unknown_errors(message_pb2.AddLibResponse())(_AddLib)
def CloseSession(self, request, context):
for result in self.CloseSessionWrapped(request, context):
return result
def _CloseSession(self, request, context):
"""
Disconnect session, note that it doesn't clean up any resources.
"""
        if request.session_id != self._session_id:
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            context.set_details(
                f"Session handle not matched, {request.session_id} versus {self._session_id}"
            )
            return message_pb2.CloseSessionResponse()
self._cleanup(
cleanup_instance=self._request.cleanup_instance, is_dangling=False
)
self._request = None
# Session closed, stop streaming logs
sys.stdout.drop(True)
self._streaming_logs = False
return message_pb2.CloseSessionResponse()
CloseSessionWrapped = catch_unknown_errors(message_pb2.CloseSessionResponse())(
_CloseSession
)
def _create_interactive_instance(self, op: op_def_pb2.OpDef):
def _match_frontend_endpoint(pattern, lines):
for line in lines.split("\n"):
rlt = re.findall(pattern, line)
if rlt:
return rlt[0].strip()
return ""
# vineyard object id of graph
object_id = op.attr[types_pb2.VINEYARD_ID].i
# maxgraph endpoint pattern
MAXGRAPH_FRONTEND_PATTERN = re.compile("(?<=MAXGRAPH_FRONTEND_ENDPOINT:).*$")
MAXGRAPH_FRONTEND_EXTERNAL_PATTERN = re.compile(
"(?<=MAXGRAPH_FRONTEND_EXTERNAL_ENDPOINT:).*$"
)
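        # The patterns above are lookbehind assertions: they capture whatever
        # follows "MAXGRAPH_FRONTEND_ENDPOINT:" (resp. the EXTERNAL variant)
        # on a line of the launcher output; _match_frontend_endpoint scans the
        # output line by line and returns the first match.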
# maxgraph endpoint
maxgraph_endpoint = None
# maxgraph external endpoint, for client and gremlin function test
maxgraph_external_endpoint = None
# create instance
proc = self._launcher.create_interactive_instance(op.attr)
try:
            # 60 seconds is usually enough (see also GH#1024); use 120 to be safe.
            # stderr has already been merged into stdout.
outs, errs = proc.communicate(timeout=120)
return_code = proc.poll()
if return_code == 0:
# match maxgraph endpoint and check for ready
maxgraph_endpoint = _match_frontend_endpoint(
MAXGRAPH_FRONTEND_PATTERN, outs
)
if check_gremlin_server_ready(maxgraph_endpoint):
logger.info(
"build maxgraph frontend %s for graph %ld",
maxgraph_endpoint,
object_id,
)
maxgraph_external_endpoint = _match_frontend_endpoint(
MAXGRAPH_FRONTEND_EXTERNAL_PATTERN, outs
)
self._object_manager.put(
op.key,
InteractiveQueryManager(op.key, maxgraph_endpoint, object_id),
)
return op_def_pb2.OpResult(
code=error_codes_pb2.OK,
key=op.key,
result=maxgraph_external_endpoint.encode("utf-8")
if maxgraph_external_endpoint
else maxgraph_endpoint.encode("utf-8"),
extra_info=str(object_id).encode("utf-8"),
)
raise RuntimeError("Error code: {0}, message {1}".format(return_code, outs))
except Exception as e:
proc.kill()
self._launcher.close_interactive_instance(object_id)
raise RuntimeError("Create interactive instance failed.") from e
def _execute_gremlin_query(self, op: op_def_pb2.OpDef):
message = op.attr[types_pb2.GIE_GREMLIN_QUERY_MESSAGE].s.decode()
request_options = None
if types_pb2.GIE_GREMLIN_REQUEST_OPTIONS in op.attr:
request_options = json.loads(
op.attr[types_pb2.GIE_GREMLIN_REQUEST_OPTIONS].s.decode()
)
key_of_parent_op = op.parents[0]
gremlin_client = self._object_manager.get(key_of_parent_op)
try:
rlt = gremlin_client.submit(message, request_options=request_options)
except Exception as e:
raise RuntimeError("Gremlin query failed.") from e
self._object_manager.put(op.key, GremlinResultSet(op.key, rlt))
return op_def_pb2.OpResult(code=error_codes_pb2.OK, key=op.key)
def _fetch_gremlin_result(self, op: op_def_pb2.OpDef):
fetch_result_type = op.attr[types_pb2.GIE_GREMLIN_FETCH_RESULT_TYPE].s.decode()
key_of_parent_op = op.parents[0]
result_set = self._object_manager.get(key_of_parent_op).result_set
try:
            if fetch_result_type == "one":
                rlt = result_set.one()
            elif fetch_result_type == "all":
                rlt = result_set.all().result()
            else:
                raise RuntimeError("Unknown fetch result type: " + fetch_result_type)
except Exception as e:
raise RuntimeError("Fetch gremlin result failed") from e
return op_def_pb2.OpResult(
code=error_codes_pb2.OK,
key=op.key,
has_large_result=True,
result=pickle.dumps(rlt),
)
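    # Note on _fetch_gremlin_result: the result is pickled and flagged
    # has_large_result=True, so that run_on_interactive_engine can split it
    # into body chunks (split_op_result) before streaming it back to the client.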
def _process_data_sink(self, op: op_def_pb2.OpDef):
import vineyard
import vineyard.io
storage_options = json.loads(op.attr[types_pb2.STORAGE_OPTIONS].s.decode())
fd = op.attr[types_pb2.FD].s.decode()
df = op.attr[types_pb2.VINEYARD_ID].s.decode()
engine_config = self._analytical_engine_config
vineyard_endpoint = engine_config["vineyard_rpc_endpoint"]
vineyard_ipc_socket = engine_config["vineyard_socket"]
deployment, hosts = self._launcher.get_vineyard_stream_info()
dfstream = vineyard.io.open(
"vineyard://" + str(df),
mode="r",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
deployment=deployment,
hosts=hosts,
)
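        # Together with the read stream opened above, the write below forms a
        # pipe: the dataframe stored in vineyard is streamed out to the
        # destination descriptor `fd`.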
vineyard.io.open(
fd,
dfstream,
mode="w",
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options=storage_options,
deployment=deployment,
hosts=hosts,
)
return op_def_pb2.OpResult(code=error_codes_pb2.OK, key=op.key)
def _process_data_source(
self, op: op_def_pb2.OpDef, dag_bodies, loader_op_bodies: dict
):
def _spawn_vineyard_io_stream(source, storage_options, read_options):
import vineyard
import vineyard.io
engine_config = self._analytical_engine_config
vineyard_endpoint = engine_config["vineyard_rpc_endpoint"]
vineyard_ipc_socket = engine_config["vineyard_socket"]
deployment, hosts = self._launcher.get_vineyard_stream_info()
num_workers = self._launcher.num_workers
stream_id = repr(
vineyard.io.open(
source,
mode="r",
vineyard_endpoint=vineyard_endpoint,
vineyard_ipc_socket=vineyard_ipc_socket,
hosts=hosts,
num_workers=num_workers,
deployment=deployment,
read_options=read_options,
storage_options=storage_options,
)
)
return "vineyard", stream_id
def _process_loader_func(loader):
            # loader is of type attr_value_pb2.Chunk
protocol = loader.attr[types_pb2.PROTOCOL].s.decode()
if protocol in ("hdfs", "hive", "oss", "s3"):
source = loader.attr[types_pb2.SOURCE].s.decode()
storage_options = json.loads(
loader.attr[types_pb2.STORAGE_OPTIONS].s.decode()
)
read_options = json.loads(
loader.attr[types_pb2.READ_OPTIONS].s.decode()
)
new_protocol, new_source = _spawn_vineyard_io_stream(
source, storage_options, read_options
)
loader.attr[types_pb2.PROTOCOL].CopyFrom(utils.s_to_attr(new_protocol))