Skip to content

Commit

Permalink
FASTER!!
Browse files Browse the repository at this point in the history
added caching to the hashing
  • Loading branch information
YeahNotSewerSide committed Apr 14, 2021
1 parent 4d5282e commit 1f7f3a5
Show file tree
Hide file tree
Showing 3 changed files with 108 additions and 47 deletions.
36 changes: 20 additions & 16 deletions cluster_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,14 @@

# https://github.com/DoctorEenot/DuinoCoin_android_cluster
'''
you must have config file "Miner_config.cfg"
For the simpler usage that miner uses the same config directory as official PC miner:
PCMiner_2.4_resources
So in that folder (PCMiner_2.4_resources) you must have config file "Miner_config.cfg"
For more details go to projects page:
https://github.com/DoctorEenot/DuinoCoin_cluster
https://github.com/DoctorEenot/DuinoCoin_android_cluster
'''

'''
Expand All @@ -29,6 +33,7 @@
INC_COEF = 0
TIME_FOR_DEVICE = 90 #Time for device to update its aliveness
DISABLE_LOGGING = True
PING_MASTER_SERVER = 40 # Seconds to ping master server

config = configparser.ConfigParser()
serveripfile = ("https://raw.githubusercontent.com/"
Expand Down Expand Up @@ -103,8 +108,6 @@ def loadConfig():





class Device:
def __init__(self,name,address):
self.name = name
Expand Down Expand Up @@ -254,6 +257,7 @@ def ping(dispatcher,event):
JOB_START_SECRET = 'ejnejkfnhiuhwefiy87usdf'
JOBS_TO_PROCESS = {}
HASH_COUNTER = 0
JOB_STARTED_TIME = 0


class Job:
Expand Down Expand Up @@ -288,13 +292,14 @@ def job_start(dispatcher,event):
global JOB_START_SECRET
global algorithm
global JOBS_TO_PROCESS
global JOB_STARTED_TIME

if event.secret != JOB_START_SECRET:
logger.warning('bad secret')
return

logger.info('Job is starting')

JOB_STARTED_TIME = time.time()

counter = 0
jobs = list(JOBS_TO_PROCESS.items())
Expand Down Expand Up @@ -358,10 +363,8 @@ def send_results(dispatcher,result):
'event':'connect_to_master'}
event = Event(event)
dispatcher.add_to_queue(event)
#connect_to_master()
logger.warning('Giving up on that hash')
break
#continue

if feedback == 'GOOD':
logger.info('Hash accepted')
Expand Down Expand Up @@ -458,6 +461,7 @@ def job_done(dispatcher,event):
global algorithm
global JOBS_TO_PROCESS
global HASH_COUNTER
global JOB_STARTED_TIME

logger.info('job done packet')
if (event.result[0] == 'None' \
Expand All @@ -481,7 +485,7 @@ def job_done(dispatcher,event):
device.job_stopped()

if JOB == None:
logger.info('Job is already over')
logger.debug('Job is already over')
data = b'{"t":"a",\
"status":"ok",\
"message":"No job to send"}'
Expand Down Expand Up @@ -519,9 +523,6 @@ def job_done(dispatcher,event):
device.job_stopped()
event.callback.sendto(data,device.address)
yield
#del JOBS_TO_PROCESS[recieved_start_end]
#CURRENT_JOB.unclaim()

else:
logger.debug('Old packet')

Expand All @@ -535,6 +536,7 @@ def job_done(dispatcher,event):
logger.warning('STOP JOB ON WRONG JOB')
return
HASH_COUNTER += event.result[1]
logger.info('HASHRATE: '+str(HASH_COUNTER//(time.time()-JOB_STARTED_TIME))+' H/s')
send_results(dispatcher,event.result)
JOBS_TO_PROCESS = {}
data_dict = {'t':'e',
Expand Down Expand Up @@ -608,10 +610,6 @@ def request_job(dispatcher,event):
master_server_is_connected = True
break
except:
#event = {'t':'e',
# 'event':'connect_to_master'}
#event = Event(event)
#dispatcher.add_to_queue(event)
yield
continue
job = job.split(",")
Expand Down Expand Up @@ -644,7 +642,10 @@ def request_job(dispatcher,event):
job_part = (real_difficulty//parts)

start = 0
end = job_part
if job_part == real_difficulty:
end = job_part+1
else:
end = job_part
while start<real_difficulty:
job_object = Job()
JOBS_TO_PROCESS[(start,end)] = job_object
Expand Down Expand Up @@ -740,6 +741,7 @@ def dispatch_event(self,count=1):
self.active_loop.append(activity)



def server():
global server_socket
global devices
Expand Down Expand Up @@ -768,6 +770,7 @@ def server():


last_devices_cleenup = time.time()
last_ping_master = time.time()

while True:
# receiving events
Expand Down Expand Up @@ -830,6 +833,7 @@ def server():




# clean up devices
if time.time()-last_devices_cleenup>TIME_FOR_DEVICE:
last_devices_cleenup = time.time()
Expand Down
55 changes: 42 additions & 13 deletions cluster_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import logging
import json
import types
import numpy

logger = logging.getLogger('Cluster_Client')
logger.setLevel(logging.DEBUG)
Expand Down Expand Up @@ -56,22 +57,38 @@ def ducos1(
expectedHash,
start,
end):
'''
Blatantly stolen from:
https://github.com/colonelwatch/nonceMiner/blob/master/src/mine_DUCO_S1.c
'''
global END_JOB,calculation_result
hashcount = 0
for ducos1xxres in range(int(start),int(end)):

base_hash = hashlib.sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
cache = []

for ducos1xxres in range(start,end):
if END_JOB:
logger.info('JOB TERMINATED')
calculation_result = [None,0,0,0,None]
return None
ducos1xx = hashlib.sha1(
(str(lastBlockHash) + str(ducos1xxres)).encode('utf-8'))
ducos1xx = ducos1xx.hexdigest()
# Increment hash counter for hashrate calculator
if ducos1xxres<10:
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1xxres).encode('ascii'))
elif ducos1xxres<end//10:
temp_hash = cache[int(ducos1xxres//10)].copy()
temp_hash.update(str(ducos1xxres%10).encode('ascii'))
else:
temp_hash = cache[ducos1xxres//100].copy()
temp_hash.update(str(ducos1xxres%100).encode('ascii'))
if(ducos1xxres<end//100):
cache.append(temp_hash)
ducos1xx = temp_hash.hexdigest()
hashcount += 1
# Check if result was found
if ducos1xx == expectedHash:
END_JOB = True
logger.debug('LEFT '+str(ducos1xxres))
logger.debug(str(ducos1xxres))
calculation_result = [ducos1xxres, hashcount,start,end,expectedHash]
return None
logger.info('Empty block')
Expand All @@ -85,17 +102,29 @@ def ducos1xxh(
end):
global END_JOB,calculation_result
hashcount = 0
for ducos1xxres in range(int(start),int(end)):

base_hash = xxhash.xxh64(str(lastBlockHash).encode('ascii'))
temp_hash = None
cache = []

for ducos1xxres in range(start,end):
if END_JOB:
logger.info('JOB TERMINATED')
calculation_result = [None,0,0,0,None]
return None
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Increment hash counter for hashrate calculator
if ducos1xxres<10:
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1xxres).encode('ascii'))
elif ducos1xxres<end//10:
temp_hash = cache[int(ducos1xxres//10)].copy()
temp_hash.update(str(ducos1xxres%10).encode('ascii'))
else:
temp_hash = cache[ducos1xxres//100].copy()
temp_hash.update(str(ducos1xxres%100).encode('ascii'))
if(ducos1xxres<end//100):
cache.append(temp_hash)
ducos1xx = temp_hash.hexdigest()
hashcount += 1
# Check if result was found
if ducos1xx == expectedHash:
END_JOB = True
logger.debug('LEFT '+str(ducos1xxres))
Expand Down
64 changes: 46 additions & 18 deletions cluster_worker_multiprocessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,25 +44,41 @@ def ducos1(
expectedHash,
start,
end):
'''
Blatantly stolen from:
https://github.com/colonelwatch/nonceMiner/blob/master/src/mine_DUCO_S1.c
'''
global END_JOB,calculation_result
hashcount = 0
for ducos1xxres in range(int(start),int(end)):

base_hash = hashlib.sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
cache = []

for ducos1xxres in range(start,end):
if END_JOB:
#logger.info('JOB TERMINATED')
logger.info('JOB TERMINATED')
calculation_result = [None,0,0,0,None]
return None
ducos1xx = hashlib.sha1(
(str(lastBlockHash) + str(ducos1xxres)).encode('utf-8'))
ducos1xx = ducos1xx.hexdigest()
# Increment hash counter for hashrate calculator
if ducos1xxres<10:
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1xxres).encode('ascii'))
elif ducos1xxres<end//10:
temp_hash = cache[int(ducos1xxres//10)].copy()
temp_hash.update(str(ducos1xxres%10).encode('ascii'))
else:
temp_hash = cache[ducos1xxres//100].copy()
temp_hash.update(str(ducos1xxres%100).encode('ascii'))
if(ducos1xxres<end//100):
cache.append(temp_hash)
ducos1xx = temp_hash.hexdigest()
hashcount += 1
# Check if result was found
if ducos1xx == expectedHash:
END_JOB = True
#logger.debug('LEFT '+str(ducos1xxres))
logger.debug(str(ducos1xxres))
calculation_result = [ducos1xxres, hashcount,start,end,expectedHash]
return None
#logger.info('Empty block')
logger.info('Empty block')
END_JOB = True
calculation_result = [None,hashcount,start,end,expectedHash]

Expand All @@ -73,23 +89,35 @@ def ducos1xxh(
end):
global END_JOB,calculation_result
hashcount = 0
for ducos1xxres in range(int(start),int(end)):

base_hash = xxhash.xxh64(str(lastBlockHash).encode('ascii'))
temp_hash = None
cache = []

for ducos1xxres in range(start,end):
if END_JOB:
#logger.info('JOB TERMINATED')
logger.info('JOB TERMINATED')
calculation_result = [None,0,0,0,None]
return None
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Increment hash counter for hashrate calculator
if ducos1xxres<10:
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1xxres).encode('ascii'))
elif ducos1xxres<end//10:
temp_hash = cache[int(ducos1xxres//10)].copy()
temp_hash.update(str(ducos1xxres%10).encode('ascii'))
else:
temp_hash = cache[ducos1xxres//100].copy()
temp_hash.update(str(ducos1xxres%100).encode('ascii'))
if(ducos1xxres<end//100):
cache.append(temp_hash)
ducos1xx = temp_hash.hexdigest()
hashcount += 1
# Check if result was found
if ducos1xx == expectedHash:
END_JOB = True
#logger.debug('LEFT '+str(ducos1xxres))
logger.debug('LEFT '+str(ducos1xxres))
calculation_result = [ducos1xxres, hashcount,start,end,expectedHash]
return None
#logger.info('Empty block')
logger.info('Empty block')
END_JOB = True
calculation_result = [None,hashcount,start,end,expectedHash]

Expand Down

0 comments on commit 1f7f3a5

Please sign in to comment.