You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
{{ message }}
This repository has been archived by the owner on May 8, 2024. It is now read-only.
在我笔记本上运行 demo 的时候, 会在 text search example 卡住, Ctrl+C 后的 exception 为:
^Clogging
Traceback (most recent call last):
File "demo.py", line 16, in
print 'logging', log.count()
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in count
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/context.py", line 263, in runJob
for it in self.scheduler.runJob(rdd, func, partitions, allowLocal):
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 292, in runJob
submitStage(finalStage)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 254, in submitStage
submitMissingTasks(stage)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 290, in submitMissingTasks
self.submitTasks(tasks)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 397, in submitTasks
_, reason, result, update = run_task(task, self.nextAttempId())
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 376, in run_task
result = task.run(aid)
File "/home/xzl/hg/spark/dpark/dpark/task.py", line 52, in run
return self.func(self.rdd.iterator(self.split))
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/util.py", line 111, in _
for r in result:
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 205, in getOrCompute
cachedVal = self.cache.get(key)
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 57, in get
locs = self.tracker.getCacheUri(rdd_id, index)
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 195, in getCacheUri
return self.client.call(GetValueMessage('cache:%s-%s' % (rdd_id, index)))
File "/home/xzl/hg/spark/dpark/dpark/tracker.py", line 116, in call
return sock.recv_pyobj()
File "/home/xzl/.virtualenvs/dpark/local/lib/python2.7/site-packages/zmq/sugar/socket.py", line 436, in recv_pyobj
s = self.recv(flags)
File "zmq/backend/cython/socket.pyx", line 674, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6971)
File "zmq/backend/cython/socket.pyx", line 708, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6763)
File "zmq/backend/cython/socket.pyx", line 145, in zmq.backend.cython.socket._recv_copy (zmq/backend/cython/socket.c:1931)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/socket.c:7222)
KeyboardInterrupt
而且得按两次 Ctrl+C
在台式机上没啥问题。。。
是 zmq 的问题么? 看 traceback 好像是 sock.recv_pyobj() 没有收到 trackerServer 的消息而挂起了。。。
The text was updated successfully, but these errors were encountered:
在我笔记本上运行 demo 的时候, 会在 text search example 卡住, Ctrl+C 后的 exception 为:
^Clogging
Traceback (most recent call last):
File "demo.py", line 16, in
print 'logging', log.count()
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in count
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/context.py", line 263, in runJob
for it in self.scheduler.runJob(rdd, func, partitions, allowLocal):
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 292, in runJob
submitStage(finalStage)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 254, in submitStage
submitMissingTasks(stage)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 290, in submitMissingTasks
self.submitTasks(tasks)
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 397, in submitTasks
_, reason, result, update = run_task(task, self.nextAttempId())
File "/home/xzl/hg/spark/dpark/dpark/schedule.py", line 376, in run_task
result = task.run(aid)
File "/home/xzl/hg/spark/dpark/dpark/task.py", line 52, in run
return self.func(self.rdd.iterator(self.split))
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/rdd.py", line 368, in
return sum(self.ctx.runJob(self, lambda x: sum(1 for i in x)))
File "/home/xzl/hg/spark/dpark/dpark/util.py", line 111, in _
for r in result:
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 205, in getOrCompute
cachedVal = self.cache.get(key)
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 57, in get
locs = self.tracker.getCacheUri(rdd_id, index)
File "/home/xzl/hg/spark/dpark/dpark/cache.py", line 195, in getCacheUri
return self.client.call(GetValueMessage('cache:%s-%s' % (rdd_id, index)))
File "/home/xzl/hg/spark/dpark/dpark/tracker.py", line 116, in call
return sock.recv_pyobj()
File "/home/xzl/.virtualenvs/dpark/local/lib/python2.7/site-packages/zmq/sugar/socket.py", line 436, in recv_pyobj
s = self.recv(flags)
File "zmq/backend/cython/socket.pyx", line 674, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6971)
File "zmq/backend/cython/socket.pyx", line 708, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6763)
File "zmq/backend/cython/socket.pyx", line 145, in zmq.backend.cython.socket._recv_copy (zmq/backend/cython/socket.c:1931)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/socket.c:7222)
KeyboardInterrupt
而且得按两次 Ctrl+C
在台式机上没啥问题。。。
是 zmq 的问题么? 看 traceback 好像是 sock.recv_pyobj() 没有收到 trackerServer 的消息而挂起了。。。
The text was updated successfully, but these errors were encountered: