This repository has been archived by the owner on Nov 15, 2021. It is now read-only.

Commit

Merge a247ee6 into ecdf79f
localhuman committed Jul 14, 2018
2 parents ecdf79f + a247ee6 commit f231896
Showing 5 changed files with 131 additions and 81 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.rst
@@ -3,7 +3,12 @@ Changelog

All notable changes to this project are documented in this file.

[0.7.3] 2018-07-
[0.7.4-dev] in progress
-----------------------
- Update NodeLeader peer monitoring system
- Add ability to configure size of requests for blocks as well as block processing queue size

[0.7.3] 2018-07-12
-----------------------
- Updated package requirements, removed ``pycrypto`` from all dependencies to fix install error(s) `#485 <https://github.com/CityOfZion/neo-python/issues/485>`_
- Adds option to enter arguments for smart contract in an 'interactive' mode, which allows for much better parsing of input, activated by passing the ``--i`` flag when invoking.
47 changes: 44 additions & 3 deletions neo/Network/NodeLeader.py
@@ -8,7 +8,7 @@
from neo.Network.NeoNode import NeoNode
from neo.Settings import settings
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet import reactor
from twisted.internet import reactor, task


class NeoClientFactory(ReconnectingClientFactory):
@@ -37,7 +37,6 @@ class NodeLeader:
    _MissedBlocks = []

    BREQPART = 100
    NREQMAX = 500
    BREQMAX = 10000

    KnownHashes = []
@@ -49,6 +48,8 @@ class NodeLeader:

    ServiceEnabled = False

    peer_check_loop = None

    @staticmethod
    def Instance():
        """
@@ -83,6 +84,10 @@ def Setup(self):
        self.NodeId = random.randint(1294967200, 4294967200)

    def Restart(self):
        if self.peer_check_loop:
            self.peer_check_loop.stop()
            self.peer_check_loop = None

        if len(self.Peers) == 0:
            self.ADDRS = []
            self.Start()
@@ -97,6 +102,33 @@ def Start(self):
            reactor.callLater(start_delay, self.SetupConnection, host, port)
            start_delay += 1

        # check in on peers every 4 mins
        self.peer_check_loop = task.LoopingCall(self.PeerCheckLoop)
        self.peer_check_loop.start(240, now=False)

    def setBlockReqSizeAndMax(self, breqpart=0, breqmax=0):
        if breqpart > 0 and breqmax > 0 and breqmax > breqpart:
            self.BREQPART = breqpart
            self.BREQMAX = breqmax
            logger.info("Set each node to request %s blocks per request with a total of %s in queue" % (self.BREQPART, self.BREQMAX))
        else:
            logger.info("invalid values. Please specify a block request part and max size for each node, like 30 and 1000")

    def setBlockReqSizeByName(self, name):
        if name.lower() == 'slow':
            self.BREQPART = 15
            self.BREQMAX = 5000
        elif name.lower() == 'normal':
            self.BREQPART = 100
            self.BREQMAX = 10000
        elif name.lower() == 'fast':
            self.BREQPART = 250
            self.BREQMAX = 15000
        else:
            logger.info("configuration name %s not found. use 'slow', 'normal', or 'fast'" % name)

        logger.info("Set each node to request %s blocks per request with a total of %s in queue" % (self.BREQPART, self.BREQMAX))

    def RemoteNodePeerReceived(self, host, port, index):
        addr = '%s:%s' % (host, port)
        if addr not in self.ADDRS and len(self.Peers) < settings.CONNECTED_PEER_MAX:
@@ -109,6 +141,10 @@ def SetupConnection(self, host, port):

    def Shutdown(self):
        """Disconnect all connected peers."""
        if self.peer_check_loop:
            self.peer_check_loop.stop()
            self.peer_check_loop = None

        for p in self.Peers:
            p.Disconnect()

@@ -140,10 +176,15 @@ def RemoveConnectedPeer(self, peer):
            self.Peers.remove(peer)
        if peer.Address in self.ADDRS:
            self.ADDRS.remove(peer.Address)

    def PeerCheckLoop(self):
        # often times things will get stuck on 1 peer
        # with the below, we disconnect when there is only
        # 1 peer, then restart when peers has reached 0
        if len(self.Peers) == 1:
            self.Peers[0].Disconnect()
        elif len(self.Peers) == 0:
            reactor.callLater(10, self.Restart)
            self.Restart()

    def ResetBlockRequestsAndCache(self):
        """Reset the block request counter and its cache."""
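For context on the NodeLeader changes above: the new peer monitoring is built on Twisted's task.LoopingCall, started in Start() and stopped again in Restart() and Shutdown(). A minimal standalone sketch of the same pattern follows; the peers list and the print call are illustrative stand-ins, not neo-python code.

from twisted.internet import reactor, task

peers = []  # illustrative stand-in for NodeLeader.Peers

def peer_check():
    # same idea as PeerCheckLoop above: drop a lone peer, restart once none remain
    if len(peers) == 1:
        peers[0].Disconnect()
    elif len(peers) == 0:
        print("no peers left; NodeLeader would call Restart() here")

peer_check_loop = task.LoopingCall(peer_check)
peer_check_loop.start(240, now=False)  # fire every 4 minutes, skipping the immediate first call

reactor.run()  # the loop only fires while the reactor runs; call peer_check_loop.stop() before shutdown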
3 changes: 0 additions & 3 deletions neo/Network/test_node_leader.py
@@ -107,9 +107,6 @@ def mock_send_msg(node, message):
        for peer in peers:
            leader.RemoveConnectedPeer(peer)

        # and peers should be equal to the seed list
        self.assertEqual(len(leader.Peers), len(settings.SEED_LIST))

        # test reset
        leader.ResetBlockRequestsAndCache()
        self.assertEqual(Blockchain.Default()._block_cache, {})
148 changes: 74 additions & 74 deletions neo/SmartContract/tests/test_migrate_destroy.py
@@ -54,7 +54,7 @@ def on_destroyed(sc_event):

        output = Compiler.instance().load('%s/MigrateTest1.py' % os.path.dirname(__file__)).default
        script = output.write()
        print("script : %s " % script)

        tx, results, total_ops, engine = TestBuild(script, ['store_data', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')

        self.assertEqual(len(results), 1)
@@ -69,76 +69,76 @@ def on_destroyed(sc_event):
        tx, results, total_ops, engine = TestBuild(script, ['get_data', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')

        self.assertEqual(len(results), 1)
        # mylist = results[0].GetArray()
        #
        # self.assertEqual([item.GetByteArray() for item in mylist], [bytearray(b'\x01'), bytearray(b'abc'), bytearray(b'\x01\x02\x03')])
        #
        # tx, results, total_ops, engine = TestBuild(script, ['do_destroy', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')
        #
        # self.assertEqual(len(results), 1)
        # self.assertEqual(results[0].GetBoolean(), True)
        # self.assertEqual(len(destroyed_items), 1)
        #
        # destroyed_hash = destroyed_items[0].contract_hash.ToBytes()
        # script_table = engine._Table
        # self.assertIsNone(script_table.GetScript(destroyed_hash))

    # def test_build_contract_and_migrate(self):
        #
        # items = []
        # migrated_items = []
        #
        # def on_created(sc_event):
            # items.append(sc_event)
        #
        # def on_migrated(sc_event):
            # migrated_items.append(sc_event)
        #
        # events.on(SmartContractEvent.CONTRACT_CREATED, on_created)
        # events.on(SmartContractEvent.CONTRACT_MIGRATED, on_migrated)
        #
        # output = Compiler.instance().load('%s/MigrateTest1.py' % os.path.dirname(__file__)).default
        # script = output.write()
        # tx, results, total_ops, engine = TestBuild(script, ['store_data', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')
        #
        # self.assertEqual(len(results), 1)
        # self.assertEqual(results[0].GetBoolean(), True)
        #
        # self.assertEqual(len(items), 1)
        #
        # created_hash = items[0].contract_hash.ToBytes()
        # script_table = engine._Table
        # self.assertIsNotNone(script_table.GetScript(created_hash))
        #
        # migrateScript = Compiler.instance().load('%s/MigrateTest2.py' % os.path.dirname(__file__)).default.write()
        # tx, results, total_ops, engine = TestBuild(script, ['do_migrate', migrateScript], self.GetWallet1(), '0705', '05')
        #
        # self.assertEqual(len(results), 1)
        # new_contract = results[0].GetInterface()
        # self.assertIsInstance(new_contract, ContractState)
        #
        # self.assertEqual(len(migrated_items), 1)
        # self.assertEqual(new_contract, migrated_items[0].event_payload[0])
        #
        # # now make sure the original contract isnt there
        # script_table = engine._Table
        # self.assertIsNone(script_table.GetScript(created_hash))
        #
        # # and make sure the new one is there
        # migrated_hash = migrated_items[0].contract_hash
        #
        # self.assertIsNotNone(script_table.GetScript(migrated_hash.ToBytes()))
        #
        # # now make sure the new contract has the same storage
        #
        # tx, results, total_ops, engine = TestBuild(migrateScript, ['i1'], self.GetWallet1(), '07', '05')
        # self.assertEqual(len(results), 1)
        # self.assertEqual(results[0].GetByteArray(), bytearray(b'\x01'))
        #
        # tx, results, total_ops, engine = TestBuild(migrateScript, ['s2'], self.GetWallet1(), '07', '05')
        # self.assertEqual(len(results), 1)
        # self.assertEqual(results[0].GetByteArray(), bytearray(b'hello world'))
        #
        # tx, results, total_ops, engine = TestBuild(migrateScript, ['i4'], self.GetWallet1(), '07', '05')
        # self.assertEqual(len(results), 1)
        # self.assertEqual(results[0].GetBigInteger(), 400000000000)
        mylist = results[0].GetArray()

        self.assertEqual([item.GetByteArray() for item in mylist], [bytearray(b'\x01'), bytearray(b'abc'), bytearray(b'\x01\x02\x03')])

        tx, results, total_ops, engine = TestBuild(script, ['do_destroy', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')

        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetBoolean(), True)
        self.assertEqual(len(destroyed_items), 1)

        destroyed_hash = destroyed_items[0].contract_hash.ToBytes()
        script_table = engine._Table
        self.assertIsNone(script_table.GetScript(destroyed_hash))

    def test_build_contract_and_migrate(self):

        items = []
        migrated_items = []

        def on_created(sc_event):
            items.append(sc_event)

        def on_migrated(sc_event):
            migrated_items.append(sc_event)

        events.on(SmartContractEvent.CONTRACT_CREATED, on_created)
        events.on(SmartContractEvent.CONTRACT_MIGRATED, on_migrated)

        output = Compiler.instance().load('%s/MigrateTest1.py' % os.path.dirname(__file__)).default
        script = output.write()
        tx, results, total_ops, engine = TestBuild(script, ['store_data', bytearray(b'\x10')], self.GetWallet1(), '0705', '05')

        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetBoolean(), True)

        self.assertEqual(len(items), 1)

        created_hash = items[0].contract_hash.ToBytes()
        script_table = engine._Table
        self.assertIsNotNone(script_table.GetScript(created_hash))

        migrateScript = Compiler.instance().load('%s/MigrateTest2.py' % os.path.dirname(__file__)).default.write()
        tx, results, total_ops, engine = TestBuild(script, ['do_migrate', migrateScript], self.GetWallet1(), '0705', '05')

        self.assertEqual(len(results), 1)
        new_contract = results[0].GetInterface()
        self.assertIsInstance(new_contract, ContractState)

        self.assertEqual(len(migrated_items), 1)
        self.assertEqual(new_contract, migrated_items[0].event_payload.Value)

        # now make sure the original contract isnt there
        script_table = engine._Table
        self.assertIsNone(script_table.GetScript(created_hash))

        # and make sure the new one is there
        migrated_hash = migrated_items[0].contract_hash

        self.assertIsNotNone(script_table.GetScript(migrated_hash.ToBytes()))

        # now make sure the new contract has the same storage

        tx, results, total_ops, engine = TestBuild(migrateScript, ['i1'], self.GetWallet1(), '07', '05')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetByteArray(), bytearray(b'\x01'))

        tx, results, total_ops, engine = TestBuild(migrateScript, ['s2'], self.GetWallet1(), '07', '05')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetByteArray(), bytearray(b'hello world'))

        tx, results, total_ops, engine = TestBuild(migrateScript, ['i4'], self.GetWallet1(), '07', '05')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetBigInteger(), 400000000000)
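The re-enabled migration test above hinges on the smart contract event hooks used via events.on(SmartContractEvent.CONTRACT_CREATED, ...) and events.on(SmartContractEvent.CONTRACT_MIGRATED, ...). A minimal sketch of that subscription pattern is shown below; the import paths are assumed from the repository layout and the handler body is illustrative.

from neo.EventHub import events  # assumed import path
from neo.SmartContract.SmartContractEvent import SmartContractEvent  # assumed import path

migrated_items = []

def on_migrated(sc_event):
    # collect migration events for later inspection, as the test above does
    migrated_items.append(sc_event)

events.on(SmartContractEvent.CONTRACT_MIGRATED, on_migrated)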
7 changes: 7 additions & 0 deletions neo/bin/prompt.py
@@ -112,6 +112,8 @@ class PromptInterface:
                'config debug {on/off}',
                'config sc-events {on/off}',
                'config maxpeers {num_peers}',
                'config node-requests {reqsize} {queuesize}',
                'config node-requests {slow/normal/fast}',
                'build {path/to/file.py} (test {params} {returntype} {needs_storage} {needs_dynamic_invoke} [{test_params} or --i]) --no-parse-addr (parse address strings to script hash bytearray)',
                'load_run {path/to/file.avm} (test {params} {returntype} {needs_storage} {needs_dynamic_invoke} [{test_params} or --i]) --no-parse-addr (parse address strings to script hash bytearray)',
                'import wif {wif}',
@@ -914,6 +916,11 @@ def configure(self, args):
            else:
                print("Cannot configure VM instruction logging. Please specify on|off")

        elif what == 'node-requests':
            if len(args) == 3:
                NodeLeader.Instance().setBlockReqSizeAndMax(int(args[1]), int(args[2]))
            elif len(args) == 2:
                NodeLeader.Instance().setBlockReqSizeByName(args[1])
        else:
            print(
                "Cannot configure %s try 'config sc-events on|off', 'config debug on|off', 'config sc-debug-notify on|off' or 'config vm-log on|off'" % what)
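The prompt additions above forward the new 'config node-requests' arguments to the NodeLeader methods introduced in this commit. A short sketch of the equivalent direct calls, with illustrative values:

from neo.Network.NodeLeader import NodeLeader

leader = NodeLeader.Instance()

# equivalent of 'config node-requests 30 1000' in the prompt
leader.setBlockReqSizeAndMax(30, 1000)   # request 30 blocks at a time, queue up to 1000

# equivalent of 'config node-requests fast'
leader.setBlockReqSizeByName('fast')     # preset: BREQPART=250, BREQMAX=15000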
