Commit 9c84fbe

this doesn't even build
fireduck64 committed Jun 30, 2018
1 parent 9f8a613 commit 9c84fbe
Showing 3 changed files with 181 additions and 6 deletions.
13 changes: 8 additions & 5 deletions miner/src/Arktika.java
@@ -62,14 +62,14 @@ public static void main(String args[]) throws Exception
}
}

- private volatile WorkUnit last_work_unit;
+ protected volatile WorkUnit last_work_unit;

private MiningPoolServiceStub asyncStub;
private MiningPoolServiceBlockingStub blockingStub;

private final NetworkParams params;

- private AtomicLong op_count = new AtomicLong(0L);
+ protected AtomicLong op_count = new AtomicLong(0L);
private long last_stats_time = System.currentTimeMillis();
private Config config;

@@ -78,9 +78,10 @@ public static void main(String args[]) throws Exception
private TimeRecord time_record;
private RateReporter rate_report=new RateReporter();

- private AtomicLong share_submit_count = new AtomicLong(0L);
- private AtomicLong share_reject_count = new AtomicLong(0L);
- private AtomicLong share_block_count = new AtomicLong(0L);
+ protected AtomicLong share_submit_count = new AtomicLong(0L);
+ protected AtomicLong share_reject_count = new AtomicLong(0L);
+ protected AtomicLong share_block_count = new AtomicLong(0L);
+

private final int selected_field;

@@ -306,6 +307,8 @@ public void onNext(WorkUnit wu)
.build();

last_work_unit = wu_new;
+
+ // If the block number changes, clear the queues
if (last_block != wu_new.getHeader().getBlockHeight())
{
for(MinMaxPriorityQueue<PartialWork> q : layer_to_queue_map.values())
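The hunk above drains the queues in layer_to_queue_map, and LayerWorkThread (added below) hands work back with arktika.enqueue(chunk, pw), but neither member appears in this diff. A minimal sketch of how that routing could look inside Arktika, assuming a chunk_to_layer_map that records which field layer holds each chunk and Guava's MinMaxPriorityQueue (already used in the hunk above); the names here are guesses, not the committed code:

// Hypothetical sketch: route a PartialWork to the queue of the layer
// that holds the chunk its next word falls in.
private Map<Integer, MinMaxPriorityQueue<PartialWork>> layer_to_queue_map;
private Map<Integer, Integer> chunk_to_layer_map;

public void enqueue(int chunk, PartialWork pw)
{
  int layer = chunk_to_layer_map.get(chunk);
  MinMaxPriorityQueue<PartialWork> q = layer_to_queue_map.get(layer);
  synchronized (q) // MinMaxPriorityQueue is not thread safe
  {
    q.add(pw); // bounded queue; may evict an element when full
  }
}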
2 changes: 1 addition & 1 deletion miner/src/FieldSource.java
@@ -14,7 +14,7 @@ public abstract class FieldSource
protected ImmutableSet<Integer> holding_set;
protected static final Logger logger = Logger.getLogger("snowblossom.miner");

- protected final long words_per_chunk = Globals.MINE_CHUNK_SIZE / SnowMerkle.HASH_LEN_LONG;
+ public final long words_per_chunk = Globals.MINE_CHUNK_SIZE / SnowMerkle.HASH_LEN_LONG;

/**
* Read the 16-byte word at word_index into the byte buffer
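The only change here widens words_per_chunk from protected to public, because LayerWorkThread (next file) turns a global word index into a chunk index by plain integer division. A worked example, assuming Globals.MINE_CHUNK_SIZE is 1 GiB and SnowMerkle.HASH_LEN_LONG is 16 (assumed values, not shown in this diff):

// words_per_chunk = 1_073_741_824 / 16 = 67_108_864 words per chunk
long next_word = 250_000_000L;                     // hypothetical word index
int chunk = (int)(next_word / fs.words_per_chunk); // 250_000_000 / 67_108_864 = 3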
172 changes: 172 additions & 0 deletions miner/src/LayerWorkThread.java
@@ -0,0 +1,172 @@
package snowblossom.miner;

import java.util.Queue;
import java.util.Random;
import java.security.MessageDigest;
import java.nio.ByteBuffer;
import snowblossom.lib.*;
import snowblossom.proto.*;
import duckutil.TimeRecord;
import duckutil.TimeRecordAuto;


import java.util.logging.Level;
import java.util.logging.Logger;

import com.google.protobuf.ByteString;


public class LayerWorkThread extends Thread
{
private static final Logger logger = Logger.getLogger("snowblossom.miner");

Random rnd;
MessageDigest md = DigestUtil.getMD();

byte[] word_buff = new byte[SnowMerkle.HASH_LEN];
ByteBuffer word_bb = ByteBuffer.wrap(word_buff);
int proof_field; // unused in this commit
FieldSource fs;
Arktika arktika;
Queue<PartialWork> queue;

// size of the snow field in 16-byte words; assumed to be handed in by the
// caller, since nothing in this commit wires it up
long total_words;

// assumed: a SnowMerkleProof over the selected field, needed by submitWork()
// below; also not initialized anywhere in this commit
SnowMerkleProof merkle_proof;

public LayerWorkThread(Arktika arktika, FieldSource fs, Queue<PartialWork> queue, long total_words)
{
  this.fs = fs;
  this.arktika = arktika;
  this.queue = queue;
  this.total_words = total_words;
  setName("LayerWorkThread(" + fs.toString() + ")");
  setDaemon(true);
  rnd = new Random();
}

private void runPass() throws Exception
{
PartialWork pw = queue.poll();
if (pw == null)
{
WorkUnit wu = arktika.last_work_unit;
if (wu == null)
{
sleep(250);
return;
}
pw = new PartialWork(wu, rnd, md, total_words);
}
else
{
pw.doPass(fs, md, total_words);
}
processPw(pw);
}

private void processPw(PartialWork pw) throws Exception
{
  if (pw.passes_done == Globals.POW_LOOK_PASSES)
  {
    // pw.context is assumed to hold the running hash after the final pass
    if (PowUtil.lessThanTarget(pw.context, pw.wu.getReportTarget()))
    {
      String str = HashUtils.getHexString(pw.context);
      logger.info("Found passable solution: " + str);
      submitWork(pw);
    }
    arktika.op_count.getAndIncrement();
  }
  else
  {
    long next_word = pw.getNextWordIdx();
    int chunk = (int)(next_word / fs.words_per_chunk);
    if (fs.skipQueueOnRehit() && (fs.hasChunk(chunk)))
    {
      // this source already holds the needed chunk, keep the work local
      pw.doPass(fs, md, total_words);
      processPw(pw);
    }
    else
    {
      arktika.enqueue(chunk, pw);
    }
  }
}

private void submitWork(PartialWork pw) throws Exception
{
  // the PartialWork is assumed to carry the work unit and nonce it started with
  WorkUnit wu = pw.wu;
  byte[] nonce = pw.nonce;

  byte[] first_hash = PowUtil.hashHeaderBits(wu.getHeader(), nonce);
  byte[] context = first_hash;

  BlockHeader.Builder header = BlockHeader.newBuilder();
  header.mergeFrom(wu.getHeader());
  header.setNonce(ByteString.copyFrom(nonce));

  // redo all the passes from the start, this time collecting the
  // merkle proofs the node needs to verify the solution
  for (int pass = 0; pass < Globals.POW_LOOK_PASSES; pass++)
  {
    word_bb.clear();

    long word_idx = PowUtil.getNextSnowFieldIndex(context, merkle_proof.getTotalWords());
    boolean gotData = merkle_proof.readWord(word_idx, word_bb, pass);
    if (!gotData)
    {
      logger.log(Level.SEVERE, "readWord returned false on pass " + pass);
    }
    SnowPowProof proof = merkle_proof.getProof(word_idx);
    header.addPowProof(proof);
    context = PowUtil.getNextContext(context, word_buff);
  }

  byte[] found_hash = context;

  header.setSnowHash(ByteString.copyFrom(found_hash));

  WorkSubmitRequest.Builder req = WorkSubmitRequest.newBuilder();
  req.setWorkId(wu.getWorkId());
  req.setHeader(header.build());

  // the stub and counters live on Arktika; this commit opens up the counters,
  // blockingStub is assumed to be made reachable the same way
  SubmitReply reply = arktika.blockingStub.submitWork(req.build());

  if (PowUtil.lessThanTarget(found_hash, header.getTarget()))
  {
    arktika.share_block_count.getAndIncrement();
  }
  logger.info("Work submit: " + reply);
  arktika.share_submit_count.getAndIncrement();
  if (!reply.getSuccess())
  {
    arktika.share_reject_count.getAndIncrement();
  }
}


public void run()
{
while (!arktika.isTerminated())
{
boolean err = false;
try (TimeRecordAuto tra = TimeRecord.openAuto("LayerWorkThread.runPass"))
{
runPass();
}
catch (Throwable t)
{
err = true;
logger.warning("Error: " + t);
}

if (err)
{

try (TimeRecordAuto tra = TimeRecord.openAuto("LayerWorkThread.errorSleep"))
{
Thread.sleep(5000);
}
catch (Throwable t)
{
}
}
}
}
}
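
LayerWorkThread leans on a PartialWork class that is not part of this commit. For the file above to build, PartialWork needs roughly the following shape; this is a sketch inferred from the call sites above (the field names, the FieldSource.readWord signature, and the pool nonce-prefix convention are all assumptions), not the real class:

package snowblossom.miner;

import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.Random;
import snowblossom.lib.*;
import snowblossom.proto.*;

// Assumed shape of PartialWork: one in-flight PoW attempt that can hop
// between layer queues, carrying its own nonce and running hash context.
public class PartialWork
{
  public WorkUnit wu;      // work unit this attempt belongs to
  public byte[] nonce;     // nonce chosen at creation
  public byte[] context;   // running hash after the passes so far
  public int passes_done;  // how many of POW_LOOK_PASSES are complete
  private long next_word_idx;

  private final byte[] word_buff = new byte[SnowMerkle.HASH_LEN];
  private final ByteBuffer word_bb = ByteBuffer.wrap(word_buff);

  public PartialWork(WorkUnit wu, Random rnd, MessageDigest md, long total_words)
  {
    this.wu = wu;
    nonce = new byte[Globals.NONCE_LENGTH];
    rnd.nextBytes(nonce);
    // the pool is assumed to fix the leading nonce bytes via the work unit
    wu.getHeader().getNonce().copyTo(nonce, 0);
    context = PowUtil.hashHeaderBits(wu.getHeader(), nonce);
    next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words);
  }

  public long getNextWordIdx() { return next_word_idx; }

  // read the next word from the given source and advance one pass
  public void doPass(FieldSource fs, MessageDigest md, long total_words) throws Exception
  {
    word_bb.clear();
    fs.readWord(next_word_idx, word_bb); // assumed FieldSource signature
    context = PowUtil.getNextContext(context, word_buff);
    passes_done++;
    next_word_idx = PowUtil.getNextSnowFieldIndex(context, total_words);
  }
}

With that in place, Arktika would presumably start one LayerWorkThread per field layer, along these lines (layer_to_fs_map is hypothetical):

for (int layer : layer_to_queue_map.keySet())
{
  FieldSource layer_fs = layer_to_fs_map.get(layer);
  new LayerWorkThread(this, layer_fs, layer_to_queue_map.get(layer), total_words).start();
}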
