reduce memory allocations
fireduck64 committed Jan 23, 2019
1 parent 34818a0 commit e7c19bb
Showing 5 changed files with 56 additions and 31 deletions.
5 changes: 5 additions & 0 deletions RELEASE-NOTES.md
@@ -1,3 +1,8 @@
## master

* Adding SurfMiner using new wave mining method
* Updating Arktika, PoolMiner and SurfMiner to use fewer memory allocations

## 1.5.0

* Adding benchmarking features to Arktika
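The pattern behind this commit is to swap `byte[] hash = md.digest()` (which allocates a fresh 32-byte array per call) for the `MessageDigest.digest(byte[] buf, int offset, int len)` overload, which writes the hash into a caller-owned buffer. A minimal standalone sketch of the two forms; the class name and input bytes are made up for illustration and are not Snowblossom code:

```java
import java.security.DigestException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class DigestReuseSketch
{
  public static void main(String[] args) throws NoSuchAlgorithmException, DigestException
  {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    byte[] input = new byte[]{1, 2, 3};

    // Allocating form: every call creates a new 32-byte array.
    md.update(input);
    byte[] fresh = md.digest();

    // Reusing form: the hash is written into a buffer owned by the caller,
    // so a tight loop produces no garbage per iteration.
    byte[] out = new byte[32];
    md.update(input);
    int written = md.digest(out, 0, out.length); // returns the number of bytes stored
    System.out.println(written + " bytes, same hash: " + MessageDigest.isEqual(fresh, out));
  }
}
```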
56 changes: 36 additions & 20 deletions lib/src/PowUtil.java
@@ -1,8 +1,6 @@
package snowblossom.lib;

import com.google.protobuf.ByteString;
import duckutil.TimeRecord;
import duckutil.TimeRecordAuto;
import org.junit.Assert;
import snowblossom.proto.BlockHeader;
import snowblossom.proto.BlockSummary;
@@ -28,8 +26,6 @@ public static byte[] hashHeaderBits(BlockHeader header, byte[] nonce)
}
public static byte[] hashHeaderBits(BlockHeader header, byte[] nonce, MessageDigest md)
{
try(TimeRecordAuto tra = TimeRecord.openAuto("PowUtil.hashHeaderBits"))
{

byte[] int_data = new byte[3*4 + 1*8];
ByteBuffer bb = ByteBuffer.wrap(int_data);
@@ -49,7 +45,6 @@ public static byte[] hashHeaderBits(BlockHeader header, byte[] nonce, MessageDig
md.update(header.getTarget().toByteArray());

return md.digest();
}
}

public static long getNextSnowFieldIndex(byte[] context, long word_count)
@@ -58,52 +53,73 @@ public static long getNextSnowFieldIndex(byte[] context, long word_count)
return getNextSnowFieldIndex(context, word_count, md);

}
public static long getNextSnowFieldIndex(byte[] context, long word_count, MessageDigest md)

public static long getNextSnowFieldIndex(byte[] context, long word_count, MessageDigest md, byte[] tmp_buff)
{
try(TimeRecordAuto tra = TimeRecord.openAuto("PowUtil.getNextSnowFieldIndex"))
try
{
md.update(context);
byte[] hash = md.digest();
md.digest(tmp_buff,0, Globals.BLOCKCHAIN_HASH_LEN);

byte[] longdata = new byte[8];

for(int i=1; i<8; i++)
{
longdata[i] = hash[i];
}
ByteBuffer bb = ByteBuffer.wrap(longdata);
tmp_buff[0] = 0;

ByteBuffer bb = ByteBuffer.wrap(tmp_buff);
long v = bb.getLong();

return v % word_count;

}
catch(java.security.DigestException e)
{
throw new RuntimeException(e);
}
}

@Deprecated
public static long getNextSnowFieldIndex(byte[] context, long word_count, MessageDigest md)
{
return getNextSnowFieldIndex(context, word_count, md, new byte[Globals.BLOCKCHAIN_HASH_LEN]);
}

public static byte[] getNextContext(byte[] prev_context, byte[] found_data)
{
MessageDigest md = DigestUtil.getMD();
return getNextContext(prev_context, found_data, md);

}

@Deprecated
public static byte[] getNextContext(byte[] prev_context, byte[] found_data, MessageDigest md)
{
try(TimeRecordAuto tra = TimeRecord.openAuto("PowUtil.getNextContext"))
{
md.update(prev_context);
md.update(found_data);
return md.digest();
}

/**
* Get next context without allocating anything
*/
public static void getNextContext(byte[] prev_context, byte[] found_data, MessageDigest md, byte[] new_context)
{
try
{
md.update(prev_context);
md.update(found_data);
md.digest(new_context, 0, Globals.BLOCKCHAIN_HASH_LEN);
}
catch(java.security.DigestException e)
{
throw new RuntimeException(e);
}
}

public static boolean lessThanTarget(byte[] found_hash, ByteString target)
{

try(TimeRecordAuto tra = TimeRecord.openAuto("PowUtil.lessThanTarget"))
{
ByteString found = ByteString.copyFrom(found_hash,0, Globals.TARGET_LENGTH);

return (ByteStringComparator.compareStatic(found, target) < 0);
}

}

public static BigInteger calcNextTarget(BlockSummary prev_summary, NetworkParams params, long clock_time)
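Taken together, the reworked PowUtil lets a miner run the whole look-pass loop against a handful of long-lived buffers. A rough sketch of the intended call pattern, assuming the changed PowUtil above; `readWord`, `header`, `nonce`, `total_words`, and `target` are placeholders for the miner's own work unit and field access, not Snowblossom identifiers:

```java
// Hypothetical helper, not part of the commit: one nonce attempt using the
// allocation-free PowUtil overloads.
boolean tryNonce(BlockHeader header, byte[] nonce, long total_words, ByteString target)
{
  MessageDigest md = DigestUtil.getMD();
  byte[] tmp_buff = new byte[Globals.BLOCKCHAIN_HASH_LEN];   // reused by every index lookup
  byte[] word_buff = new byte[SnowMerkle.HASH_LEN];          // reused by every field read

  byte[] context = PowUtil.hashHeaderBits(header, nonce, md);
  for (int pass = 0; pass < Globals.POW_LOOK_PASSES; pass++)
  {
    long word_idx = PowUtil.getNextSnowFieldIndex(context, total_words, md, tmp_buff);
    readWord(word_idx, word_buff);                           // placeholder for the snow field read
    PowUtil.getNextContext(context, word_buff, md, context); // digest written back into 'context'
  }
  return PowUtil.lessThanTarget(context, target);
}
```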
7 changes: 4 additions & 3 deletions miner/src/PartialWork.java
@@ -22,6 +22,7 @@ public class PartialWork implements Comparable<PartialWork>

byte[] word_buff = new byte[SnowMerkle.HASH_LEN];
ByteBuffer word_bb = ByteBuffer.wrap(word_buff);
byte[] tmp_buff = new byte[32];

@VisibleForTesting
public PartialWork(int pass_no)
@@ -37,7 +38,7 @@ public PartialWork(WorkUnit wu, Random rnd, MessageDigest md, long total_words)
wu.getHeader().getNonce().copyTo(nonce, 0);
context = PowUtil.hashHeaderBits(wu.getHeader(), nonce, md);

next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words, md);
next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words, md, tmp_buff);

}

@@ -68,12 +69,12 @@ public void doPass(byte[] word_buff, MessageDigest md, long total_words)
{
//System.out.println("Pass: " + passes_done);
Assert.assertTrue(next_word_idx >= 0);
context = PowUtil.getNextContext(context, word_buff, md);
PowUtil.getNextContext(context, word_buff, md, context);

passes_done++;
if (passes_done < Globals.POW_LOOK_PASSES)
{
next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words, md);
next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words, md, tmp_buff);
}
else
{
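One detail in doPass above: `PowUtil.getNextContext(context, word_buff, md, context)` passes the same array as both the previous context and the output buffer. That is safe because both `update()` calls copy their input into the digest state before `digest()` writes anything back out. A small standalone check of that property; array names and contents are illustrative only:

```java
import java.security.MessageDigest;
import java.util.Arrays;

public class InPlaceDigestCheck
{
  public static void main(String[] args) throws Exception
  {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    byte[] word = new byte[]{42};

    // Reference: digest into a separate array.
    byte[] ctx_a = new byte[32];
    md.update(ctx_a);
    md.update(word);
    byte[] separate = md.digest();

    // In-place: the same array is both the input context and the output buffer.
    byte[] ctx_b = new byte[32];
    md.update(ctx_b);
    md.update(word);
    md.digest(ctx_b, 0, ctx_b.length);

    System.out.println(Arrays.equals(separate, ctx_b)); // prints true
  }
}
```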
5 changes: 3 additions & 2 deletions miner/src/PoolMiner.java
@@ -209,6 +209,7 @@ public class MinerThread extends Thread
Random rnd;
MessageDigest md = DigestUtil.getMD();

byte[] tmp_buff = new byte[32];
byte[] word_buff = new byte[SnowMerkle.HASH_LEN];
ByteBuffer word_bb = ByteBuffer.wrap(word_buff);
SnowMerkleProof merkle_proof;
@@ -267,9 +268,9 @@ private void runPass() throws Exception
{
long word_idx;
((Buffer)word_bb).clear();
word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords(), md);
word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords(), md, tmp_buff);
if (!merkle_proof.readWord(word_idx, word_bb, pass)) { return;}
context = PowUtil.getNextContext(context, word_buff, md);
PowUtil.getNextContext(context, word_buff, md, context);
}
}

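In PoolMiner, each MinerThread keeps its own `md`, `word_buff`, and `tmp_buff`, since MessageDigest instances are not thread-safe and the point of the change is to avoid sharing or reallocating scratch space. A generic sketch of that per-thread scratch-buffer pattern; the class and field names are illustrative, not PoolMiner's:

```java
import java.security.DigestException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Illustrative worker: every thread owns its digest and scratch buffer,
// so repeated hashing allocates nothing after construction.
class HashWorker extends Thread
{
  private final MessageDigest md;
  private final byte[] tmp_buff = new byte[32];  // reused output buffer

  HashWorker() throws NoSuchAlgorithmException
  {
    md = MessageDigest.getInstance("SHA-256");   // one digest per thread; never shared
  }

  @Override
  public void run()
  {
    byte[] data = new byte[]{7};
    try
    {
      for (int i = 0; i < 1_000_000; i++)
      {
        md.update(data);
        md.digest(tmp_buff, 0, tmp_buff.length); // no new array per iteration
        data[0] = tmp_buff[0];                   // chain part of the hash back in
      }
    }
    catch (DigestException e)
    {
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) throws Exception
  {
    HashWorker a = new HashWorker();
    HashWorker b = new HashWorker();
    a.start(); b.start();
    a.join(); b.join();
  }
}
```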
14 changes: 8 additions & 6 deletions miner/src/surf/SurfMiner.java
@@ -146,7 +146,7 @@ public SurfMiner(Config config) throws Exception
}
total_blocks = (int) (field.getLength() / Globals.MINE_CHUNK_SIZE);

magic_queue = new MagicQueue(config.getIntWithDefault("buffer_size", 100000), total_blocks);
magic_queue = new MagicQueue(config.getIntWithDefault("buffer_size", 10000), total_blocks);
pool_client.subscribe();


@@ -315,6 +315,7 @@ public class WorkStarter extends Thread
ByteBuffer word_bb = ByteBuffer.wrap(word_buff);
int proof_field;
byte[] nonce = new byte[Globals.NONCE_LENGTH];
byte[] tmp_buff = new byte[32];

public WorkStarter()
{
@@ -356,7 +357,7 @@ private void runPass() throws Exception

byte[] context = PowUtil.hashHeaderBits(wu.getHeader(), nonce, md);

long word_idx = PowUtil.getNextSnowFieldIndex(context, field.getTotalWords(), md);
long word_idx = PowUtil.getNextSnowFieldIndex(context, field.getTotalWords(), md, tmp_buff);

int block = (int)(word_idx / WORDS_PER_CHUNK);

@@ -476,6 +477,7 @@ private void processBuffer(byte[] block_data, int block_number, ByteBuffer b, Se

byte[] nonce=new byte[Globals.NONCE_LENGTH];
byte[] context=new byte[Globals.BLOCKCHAIN_HASH_LEN];
byte[] tmp_buff=new byte[Globals.BLOCKCHAIN_HASH_LEN];

byte[] word_buff = new byte[SnowMerkle.HASH_LEN];
MessageDigest md = DigestUtil.getMD();
@@ -499,8 +501,6 @@ private void processBuffer(byte[] block_data, int block_number, ByteBuffer b, Se
System.arraycopy(block_data, word_offset_bytes, word_buff, 0, Globals.SNOW_MERKLE_HASH_LEN);
//logger.info(String.format("Word: %s", HexUtil.getHexString(word_buff)));

byte[] new_context = PowUtil.getNextContext(context, word_buff, md);
byte new_pass = pass; new_pass++;

if (pass == 6)
{
@@ -538,12 +538,14 @@ private void processBuffer(byte[] block_data, int block_number, ByteBuffer b, Se
}
else
{
long new_word_idx = PowUtil.getNextSnowFieldIndex(new_context, field.getTotalWords(), md);
byte new_pass = pass; new_pass++;
PowUtil.getNextContext(context, word_buff, md, context);
long new_word_idx = PowUtil.getNextSnowFieldIndex(context, field.getTotalWords(), md, tmp_buff);
int new_block = (int)(new_word_idx / WORDS_PER_CHUNK);

ByteBuffer bucket_buff = magic_queue.openWrite(new_block, getRecordSize());

writeRecord(bucket_buff, work_id, new_pass, new_word_idx, nonce, new_context);
writeRecord(bucket_buff, work_id, new_pass, new_word_idx, nonce, context);

}
}
