Skip to content

Commit

Permalink
Fastfail (#86)
Browse files Browse the repository at this point in the history
* fast fail memory mode

* adding min_depth_to_disk

* using constant for min_depth_to_disk default

* fixing bug causing bad fallback to disk

* fixing default

* fixing fallback to non-cached

* Release 1.1.1
  • Loading branch information
tster123 committed Jun 23, 2018
1 parent b1346b7 commit 753e050
Show file tree
Hide file tree
Showing 4 changed files with 52 additions and 63 deletions.
6 changes: 4 additions & 2 deletions miner/src/FieldScan.java
Expand Up @@ -2,6 +2,7 @@

import com.google.common.collect.ImmutableSortedMap;
import duckutil.Config;
import snowblossom.lib.Globals;
import snowblossom.lib.NetworkParams;
import snowblossom.lib.SnowFieldInfo;
import snowblossom.lib.Validation;
Expand Down Expand Up @@ -50,15 +51,16 @@ public void scan(boolean report)
try
{
double precacheGig = config.getDoubleWithDefault("memfield_precache_gb", 0);
int minDepthToDisk = config.getIntWithDefault("min_depth_to_disk", 0);
boolean memfield = config.getBoolean("memfield");
long precache = 0;
if (precacheGig > 0.01)
{
memfield = false;
precache = (long)(precacheGig * 1024.0 * 1024.0 * 1024.0);
}
System.out.println("creating field: " + field_folder + " memfield=" + memfield + ", precache=" + precache);
SnowMerkleProof proof = new SnowMerkleProof(field_folder, name, memfield, precache);
logger.info("creating field: " + field_folder + " memfield=" + memfield + ", precache=" + precache + ", minDepthToDisk=" + minDepthToDisk);
SnowMerkleProof proof = new SnowMerkleProof(field_folder, name, memfield, precache, minDepthToDisk);

for(int i = 0; i<16; i++)
{
Expand Down
8 changes: 6 additions & 2 deletions miner/src/PoolMiner.java
Expand Up @@ -321,7 +321,7 @@ private void runPass() throws Exception
long word_idx;
word_bb.clear();
word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords(), md);
merkle_proof.readWord(word_idx, word_bb);
if (!merkle_proof.readWord(word_idx, word_bb, pass)) { return;}
context = PowUtil.getNextContext(context, word_buff, md);
}
}
Expand Down Expand Up @@ -354,7 +354,11 @@ private void submitWork(WorkUnit wu, byte[] nonce, SnowMerkleProof merkle_proof)
word_bb.clear();

long word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords());
merkle_proof.readWord(word_idx, word_bb);
boolean gotData = merkle_proof.readWord(word_idx, word_bb, pass);
if (!gotData)
{
logger.log(Level.SEVERE, "readWord returned false on pass " + pass);
}
SnowPowProof proof = merkle_proof.getProof(word_idx);
header.addPowProof(proof);
context = PowUtil.getNextContext(context, word_buff);
Expand Down
8 changes: 6 additions & 2 deletions miner/src/SnowBlossomMiner.java
Expand Up @@ -327,7 +327,7 @@ private void runPass() throws Exception
long word_idx;
word_bb.clear();
word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords(), md);
merkle_proof.readWord(word_idx, word_bb);
merkle_proof.readWord(word_idx, word_bb, pass);
context = PowUtil.getNextContext(context, word_buff, md);
}
}
Expand Down Expand Up @@ -360,7 +360,11 @@ private void buildBlock(Block b, byte[] nonce, SnowMerkleProof merkle_proof) thr
word_bb.clear();

long word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords());
merkle_proof.readWord(word_idx, word_bb);
boolean gotData = merkle_proof.readWord(word_idx, word_bb, pass);
if (!gotData)
{
logger.log(Level.SEVERE, "readWord returned false on pass " + pass);
}
SnowPowProof proof = merkle_proof.getProof(word_idx);
header.addPowProof(proof);
context = PowUtil.getNextContext(context, word_buff);
Expand Down
93 changes: 36 additions & 57 deletions miner/src/SnowMerkleProof.java
Expand Up @@ -37,21 +37,22 @@ public class SnowMerkleProof
private long bytes_to_precache = 0;
private byte[][] mem_buff;
public static final int MEM_BLOCK = 1024 * 1024;
private int minDepthToDisk;

private final ThreadLocal<SnowMerkleProof> diskProof;

public SnowMerkleProof(File path, String base) throws java.io.IOException
{
this(path, base, false, 0);
this(path, base, false, 0, 6);
}

/**
* Only needed by miners
*/
public SnowMerkleProof(File path, String base, boolean memcache, long bytesToPreCache) throws java.io.IOException
public SnowMerkleProof(File path, String base, boolean memcache, long bytesToPreCache, int minDepthToDisk) throws java.io.IOException
{
this.memcache = memcache;

this.minDepthToDisk = minDepthToDisk;
snow_file = new RandomAccessFile(new File(path, base + ".snow"), "r");
snow_file_channel = snow_file.getChannel();

Expand Down Expand Up @@ -149,12 +150,15 @@ public void readChunk(long offset, ByteBuffer bb) throws java.io.IOException

}

long hits = 0;
long misses = 0;
long nextReportMillis = System.currentTimeMillis();
long reportInterval = 1 * 1000;
long maxReportInterval = 5 * 1000;
public void readWord(long word_index, ByteBuffer bb) throws java.io.IOException
/**
 * Reads a 16-byte section of the snowfield.
 * @param word_index which 16-byte section to read
 * @param bb a buffer to put the 16 bytes into
 * @param currentDepth a number 0-5 indicating which of the POW algorithm's successive reads into the snowfield this is.
 * @return true if the word was read; false if it wasn't (i.e., the read depth was too shallow to fall back to disk and the block was not precached).
 * @throws java.io.IOException if reading from the underlying snowfield file fails
 */
public boolean readWord(long word_index, ByteBuffer bb, int currentDepth) throws java.io.IOException
{
if (bytes_to_precache > 0)
{
Expand All @@ -169,7 +173,7 @@ public void readWord(long word_index, ByteBuffer bb) throws java.io.IOException
if (i % 1000 == 0)
{
int percentage = (int) ((100L * i) / blocksToPrecache);
logger.info("pre-caching snowfield: loaded " + (i / 1000) + " gb of " + (blocksToPrecache/1000) + " (" + percentage + "%)");
logger.info("pre-caching snowfield: loaded " + (i / 1000) + " gb of " + (blocksToPrecache / 1000) + " (" + percentage + "%)");
}
byte[] block_data = new byte[MEM_BLOCK];
long file_offset = i * (long) MEM_BLOCK;
Expand All @@ -181,61 +185,36 @@ public void readWord(long word_index, ByteBuffer bb) throws java.io.IOException
}
}

/*
if (bytes_to_precache == -1)

long word_pos = word_index * SnowMerkle.HASH_LEN_LONG;
int mem_block_index = (int) (word_pos / MEM_BLOCK);
if (mem_buff != null && mem_block_index < mem_buff.length)
{
if ((hits + misses) % 1024 == 0)
int off_in_block = (int) (word_pos % MEM_BLOCK);
if (mem_buff[mem_block_index] == null)
{
if (System.currentTimeMillis() > nextReportMillis)
Assert.assertEquals(0, bytes_to_precache);
//try (TimeRecordAuto tra2 = TimeRecord.openAuto("SnowMerkleProof.readBlock"))
{
double h = hits;
double m = misses;
double t = h + m;
double rate = h / t;
logger.info("memory snowfield hitrate: " + rate);
hits = 0;
misses = 0;
nextReportMillis = System.currentTimeMillis() + reportInterval;
reportInterval = Math.min((long)(reportInterval * 1.5), maxReportInterval);
byte[] block_data = new byte[MEM_BLOCK];
long file_offset = (long) mem_block_index * (long) MEM_BLOCK;
ChannelUtil.readFully(snow_file_channel, ByteBuffer.wrap(block_data), file_offset);
mem_buff[mem_block_index] = block_data;
}
}
bb.put(mem_buff[mem_block_index], off_in_block, SnowMerkle.HASH_LEN);
return true;
}
*/
//try (TimeRecordAuto tra = TimeRecord.openAuto("SnowMerkleProof.readWord"))
{
long word_pos = word_index * SnowMerkle.HASH_LEN_LONG;
int mem_block_index = (int) (word_pos / MEM_BLOCK);
if (mem_buff != null && mem_block_index < mem_buff.length)
{
hits++;
int off_in_block = (int) (word_pos % MEM_BLOCK);
if (mem_buff[mem_block_index] == null)
{
Assert.assertEquals(0, bytes_to_precache);
//try (TimeRecordAuto tra2 = TimeRecord.openAuto("SnowMerkleProof.readBlock"))
{
byte[] block_data = new byte[MEM_BLOCK];
long file_offset = (long) mem_block_index * (long) MEM_BLOCK;
ChannelUtil.readFully(snow_file_channel, ByteBuffer.wrap(block_data), file_offset);
mem_buff[mem_block_index] = block_data;
}
}
bb.put(mem_buff[mem_block_index], off_in_block, SnowMerkle.HASH_LEN);
return;

}
misses++;
if (diskProof != null)
{
diskProof.get().readWord(word_index, bb);
return;
}
//byte[] buff = new byte[SnowMerkle.HASH_LEN];
//ByteBuffer bb = ByteBuffer.wrap(buff);
ChannelUtil.readFully(snow_file_channel, bb, word_pos);

//return buff;
if (bytes_to_precache == -1 && minDepthToDisk > currentDepth) return false;
if (diskProof != null)
{
diskProof.get().readWord(word_index, bb, Globals.POW_LOOK_PASSES);
return true;
}

ChannelUtil.readFully(snow_file_channel, bb, word_pos);
return true;
}

/**
Expand Down

0 comments on commit 753e050

Please sign in to comment.