output changes
== 1 conditions removed (flags are taken as True/False directly)
save peers only when updated, back up through a temporary file
hclivess committed May 4, 2019
1 parent 5404059 commit bff8064
Showing 6 changed files with 26 additions and 25 deletions.
7 changes: 4 additions & 3 deletions digest.py
@@ -116,14 +116,14 @@ def check_signature(block):
tx_presence_check = db_handler.h.fetchone()
if tx_presence_check:
# print(node.last_block)
raise ValueError(f"That transaction {entry_signature[:10]} is already in our ram ledger, block_height {tx_presence_check[0]}")
raise ValueError(f"That transaction {entry_signature[:10]} is already in our ledger, block_height {tx_presence_check[0]}")

db_handler.execute_param(db_handler.c, "SELECT block_height FROM transactions WHERE signature = ?;",
(entry_signature,))
tx_presence_check = db_handler.c.fetchone()
if tx_presence_check:
# print(node.last_block)
raise ValueError(f"That transaction {entry_signature[:10]} is already in our ledger, block_height {tx_presence_check[0]}")
raise ValueError(f"That transaction {entry_signature[:10]} is already in our RAM ledger, block_height {tx_presence_check[0]}")
else:
raise ValueError(f"Empty signature from {peer_ip}")

@@ -453,7 +453,8 @@ def check_signature(block):

finally:

db_to_drive(node, db_handler)
if node.ram:
db_to_drive(node, db_handler)

node.db_lock.release()
node.logger.app_log.warning(f"Database lock released")
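The flush in the finally block is now skipped for nodes that do not keep the ledger in RAM. A minimal sketch of the new guard, using stand-in objects rather than the real node and db_handler classes:

    import threading

    # Stand-ins for the real Bismuth objects; only the shape of the guard matters.
    class FakeNode:
        def __init__(self, ram):
            self.ram = ram
            self.db_lock = threading.Lock()

    def db_to_drive(node, db_handler):
        print("flushing RAM ledger to disk")

    def digest_block_sketch(node, db_handler=None):
        node.db_lock.acquire()
        try:
            pass  # block verification and insertion happen here in digest.py
        finally:
            if node.ram:                       # new: disk-only nodes have nothing to flush
                db_to_drive(node, db_handler)
            node.db_lock.release()
            print("Database lock released")

    digest_block_sketch(FakeNode(ram=False))   # releases the lock, no flush
    digest_block_sketch(FakeNode(ram=True))    # flushes, then releases the lock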
8 changes: 0 additions & 8 deletions essentials.py
@@ -158,19 +158,13 @@ def db_to_drive(node, db_handler):
(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11]))
db_handler.commit(db_handler.hdd)

#db_handler.execute_many(db_handler.h, "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", result1)



if not node.is_testnet and node.ram: # we want to save to hyper.db from RAM/hyper.db depending on ram conf
for x in result1:
db_handler.execute_param(db_handler.h2, "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",
(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11]))
db_handler.commit(db_handler.hdd2)

#db_handler.execute_many(db_handler.h2, "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", result1)


db_handler.execute_param(db_handler.c, "SELECT * FROM misc WHERE block_height > ? ORDER BY block_height ASC", (node.hdd_block,))
result2 = db_handler.c.fetchall()

@@ -187,8 +181,6 @@ def db_to_drive(node, db_handler):
db_handler.execute_param(db_handler.h2, "INSERT INTO misc VALUES (?,?)", (x[0], x[1]))
db_handler.commit(db_handler.hdd2)

#db_handler.execute_many(db_handler.h2, "INSERT INTO misc VALUES (?,?)", result2)

db_handler.execute(db_handler.h, "SELECT max(block_height) FROM transactions")
node.hdd_block = db_handler.h.fetchone()[0]

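db_to_drive copies every row above the last block height already on disk from the RAM cursor into the on-disk ledger(s), one INSERT per row with a single commit per database; the commented-out execute_many alternatives are simply dropped. A condensed sketch with plain sqlite3 connections in place of the db_handler wrapper, and the table reduced to two columns for brevity:

    import sqlite3

    # Both databases are in-memory here; the second stands in for hyper.db on disk.
    ram_db = sqlite3.connect(":memory:")
    hdd_db = sqlite3.connect(":memory:")
    for conn in (ram_db, hdd_db):
        conn.execute("CREATE TABLE transactions (block_height INTEGER, signature TEXT)")

    ram_db.executemany("INSERT INTO transactions VALUES (?, ?)",
                       [(1, "sig1"), (2, "sig2"), (3, "sig3")])

    hdd_block = 1  # highest block already persisted on disk (assumed sample value)
    rows = ram_db.execute("SELECT * FROM transactions WHERE block_height > ? "
                          "ORDER BY block_height ASC", (hdd_block,)).fetchall()

    for row in rows:                    # one INSERT per row, as in the original loop
        hdd_db.execute("INSERT INTO transactions VALUES (?, ?)", row)
    hdd_db.commit()                     # single commit once the batch is written

    hdd_block = hdd_db.execute("SELECT max(block_height) FROM transactions").fetchone()[0]
    print(hdd_block)                    # -> 3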
2 changes: 1 addition & 1 deletion mempool.py
@@ -651,6 +651,6 @@ def merge(self, data, peer_ip, c, size_bypass=False, wait=False, revert=False):
# TODO: Here maybe commit() on c to release the write lock?
except Exception as e:
self.app_log.warning("Mempool: Error processing: {} {}".format(data, e))
if self.config.debug == 1:
if self.config.debug:
raise
return mempool_result
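Because True == 1 in Python, dropping the comparison does not change behaviour for boolean or 0/1 config values; it reads better and also honours other truthy settings. A tiny illustration, with assumed sample values rather than real Bismuth config output:

    # Both forms agree for True/1/0/False; only the truthiness form catches
    # other truthy settings such as a non-empty string.
    for debug in (True, 1, 0, False, "yes"):
        print(repr(debug), "old check:", debug == 1, "new check:", bool(debug))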
4 changes: 2 additions & 2 deletions node.py
@@ -1741,8 +1741,8 @@ def ram_init(database):
query = "".join(line for line in source_db.iterdump())
database.to_ram.executescript(query)
source_db.close()

node.logger.app_log.warning("Status: Moved database to RAM")
node.logger.app_log.warning("Status: Hyperblock ledger moved to RAM")

#source = sqlite3.connect('existing_db.db')
#dest = sqlite3.connect(':memory:')
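ram_init copies the on-disk hyperblock ledger into an in-memory SQLite database by dumping the source with iterdump() and replaying the script. A standalone sketch of that pattern; the file name is an assumption, not the node's real ledger path:

    import sqlite3

    # Build a tiny throwaway source database standing in for the hyperblock ledger.
    source_db = sqlite3.connect("hyper_sketch.db")
    source_db.execute("CREATE TABLE IF NOT EXISTS transactions (block_height INTEGER)")
    source_db.commit()

    to_ram = sqlite3.connect(":memory:")
    query = "".join(line for line in source_db.iterdump())  # full SQL dump of the source
    to_ram.executescript(query)                             # replay it into the RAM copy
    source_db.close()

    print("Status: Hyperblock ledger moved to RAM")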
26 changes: 17 additions & 9 deletions peershandler.py
@@ -9,6 +9,7 @@
import sys
import threading
import time
import shutil

import socks

@@ -27,7 +28,7 @@ class Peers:
__slots__ = ('app_log','config','logstats','node','peersync_lock','startup_time','reset_time','warning_list','stats',
'connection_pool','peer_opinion_dict','consensus_percentage','consensus',
'tried','peer_dict','peerfile','suggested_peerfile','banlist','whitelist','ban_threshold',
'ip_to_mainnet', 'peers', 'first_run', 'accept_peers')
'ip_to_mainnet', 'peers', 'first_run', 'accept_peers', 'peerlist_updated')

def __init__(self, app_log, config=None, logstats=True, node=None):
self.app_log = app_log
@@ -56,6 +57,7 @@ def __init__(self, app_log, config=None, logstats=True, node=None):
self.peerfile = "peers.txt"
self.suggested_peerfile = "suggested_peers.txt"
self.first_run = True
self.peerlist_updated = False

self.node = node

@@ -118,6 +120,7 @@ def peer_dump(self, file, peer):
def peers_dump(self, file, peerdict):
"""Validates then adds a peer to the peer list on disk"""
# called by Sync, should not be an issue, but check if needs to be thread safe or not.
self.peerlist_updated = False

with open(file, "r") as peer_file:
peers_pairs = json.load(peer_file)
@@ -130,7 +133,7 @@ def peers_dump(self, file, peerdict):
if peer_ip not in peers_pairs:
self.app_log.warning(f"Testing connectivity to: {peer_ip}")
peer_test = socks.socksocket()
if self.config.tor == 1:
if self.config.tor:
peer_test.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
peer_test.connect((str(peer_ip), int(self.config.port))) # double parentheses mean tuple
self.app_log.info("Inbound: Distant peer connectible")
@@ -141,17 +144,22 @@ def peers_dump(self, file, peerdict):

peers_pairs[ip] = peer_port

with open(file, "w") as peer_file:
json.dump(peers_pairs, peer_file)

self.app_log.info(f"Inbound: Peer {peer_ip}:{peer_port} saved to peer list")
self.peerlist_updated = True

else:
self.app_log.info("Distant peer already in peer list")

except:
self.app_log.info("Inbound: Distant peer not connectible")
pass

if self.peerlist_updated:
self.app_log.warning("Peerlist updated")
with open(f"{file}.tmp", "w") as peer_file:
json.dump(peers_pairs, peer_file)
shutil.move(f"{file}.tmp",file)

def append_client(self, client):
"""
:param client: a string "ip:port"
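In the peers_dump() change above, the peer file is now rewritten once, after the loop, and only when peerlist_updated was set by a successfully tested new peer; the JSON goes to a temporary file first and is then moved over the original, so an interrupted write cannot leave a truncated peers.txt. A condensed sketch of that save path (file name and peer entries are assumptions):

    import json
    import shutil

    file = "peers_sketch.txt"                  # assumed name; the real code uses peers.txt
    peers_pairs = {"127.0.0.1": "5658"}        # assumed sample content
    peerlist_updated = True                    # set only when a connectible new peer was added

    if peerlist_updated:
        with open(f"{file}.tmp", "w") as peer_file:
            json.dump(peers_pairs, peer_file)
        shutil.move(f"{file}.tmp", file)       # replace the old list in one step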
@@ -267,14 +275,14 @@ def peers_test(self, peerfile):
try:
s = socks.socksocket()
s.settimeout(0.6)
if self.config.tor == 1:
if self.config.tor:
s.settimeout(5)
s.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s.connect((host, port))
s.close()
self.app_log.info(f"Connection to {host} {port} successful, keeping the peer")
except:
if self.config.purge == 1 and not self.is_testnet:
if self.config.purge and not self.is_testnet:
# remove from peerfile if not connectible

peers_remove[key] = value
@@ -312,7 +320,7 @@ def peersync(self, subdata):
self.app_log.info(f"Outbound: {pair} is a new peer, saving if connectible")
try:
s_purge = socks.socksocket()
if self.config.tor == 1:
if self.config.tor:
s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)

s_purge.connect((pair[0], int(pair[1]))) # save a new peer file with only active nodes
@@ -347,7 +355,7 @@ def peersync(self, subdata):
self.app_log.info(f"Outbound: {ip}:{port} is a new peer, saving if connectible")
try:
s_purge = socks.socksocket()
if self.config.tor == 1:
if self.config.tor:
s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s_purge.connect((ip, int(port))) # save a new peer file with only active nodes
s_purge.close()
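Each new peer learned via peersync is probed before it is saved, optionally through a local Tor SOCKS5 proxy. A self-contained sketch of that probe; the address, port, and tor flag are placeholders, and the timeout is an addition for the sketch:

    import socks

    tor_enabled = False                       # placeholder for self.config.tor
    ip, port = "127.0.0.1", "5658"            # placeholder peer

    s_purge = socks.socksocket()
    s_purge.settimeout(5)                     # added for the sketch; not in the original probe
    if tor_enabled:                           # route the probe through a local Tor SOCKS5 proxy
        s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
    try:
        s_purge.connect((ip, int(port)))
        print(f"Outbound: {ip}:{port} connectible, keeping it")
    except Exception:
        print(f"Outbound: {ip}:{port} not connectible, dropping it")
    finally:
        s_purge.close()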
4 changes: 2 additions & 2 deletions send_nogui.py
@@ -50,8 +50,8 @@

s = socks.socksocket()
s.settimeout(10)
#s.connect(("bismuth.live", 5658))
s.connect(("127.0.0.1", 5658))
s.connect(("bismuth.live", 5658))
#s.connect(("127.0.0.1", 5658))

connections.send (s, "balanceget", 10)
connections.send (s, address, 10) # change address here to view other people's transactions
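The balance query in send_nogui.py now targets the public bismuth.live node rather than a local one. A short sketch that tries the public endpoint and falls back to localhost; the fallback is an illustrative addition, the commit itself only swaps which line is commented out:

    import socks

    s = socks.socksocket()
    s.settimeout(10)
    try:
        s.connect(("bismuth.live", 5658))      # public node, as in the commit
    except OSError:
        s.connect(("127.0.0.1", 5658))         # local node, the previous default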
