551 hardfork16 #585

Merged
merged 15 commits on Nov 18, 2016
Changes from 1 commit

Update witness to mine with equihash

mvandeberg committed Nov 16, 2016
commit 7b199a99daa9f2a8cba6943bcf08b6781cddd1ad
@@ -33,6 +33,7 @@ namespace steemit { namespace witness_plugin {
using std::string;
using protocol::public_key_type;
using app::application;
using steemit::protocol::block_id_type;

namespace block_production_condition
{
@@ -96,6 +97,7 @@ class witness_plugin : public steemit::app::plugin
uint32_t _mining_threads = 0;

uint64_t _head_block_num = 0;
block_id_type _head_block_id = block_id_type();
uint64_t _total_hashes = 0;
fc::time_point _hash_start_time;

@@ -440,6 +440,7 @@ void witness_plugin::on_applied_block(const steemit::protocol::signed_block& b)


_head_block_num = b.block_num();
_head_block_id = b.id();
/// save these variables to be captured by worker lambda

for( const auto& miner : _miners ) {
@@ -468,88 +469,141 @@ void witness_plugin::start_mining(
const string& miner,
const steemit::protocol::signed_block& b )
{
static uint64_t seed = fc::time_point::now().time_since_epoch().count();
static uint64_t start = fc::city_hash64( (const char*)&seed, sizeof(seed) );
chain::database& db = database();

auto head_block_num = b.block_num();
auto head_block_time = b.timestamp;
auto block_id = b.id();


fc::thread* mainthread = &fc::thread::current();

_total_hashes = 0;
_hash_start_time = fc::time_point::now();

auto stop = head_block_time + fc::seconds( STEEMIT_BLOCK_INTERVAL * 2 );

uint32_t thread_num = 0;
uint32_t num_threads = _mining_threads;
uint32_t target = db.get_pow_summary_target();
const auto& acct_idx = db.get_index< chain::account_index >().indices().get< chain::by_name >();
auto acct_it = acct_idx.find( miner );
bool has_account = (acct_it != acct_idx.end());
for( auto& t : _thread_pool )
{
thread_num++;
t->async( [=]()
{
protocol::pow2_operation op;
protocol::pow2 work;
work.input.prev_block = block_id;
work.input.worker_account = miner;
work.input.nonce = start + thread_num;
op.props = _miner_prop_vote;
while( true )
{
// if( ((op.nonce/num_threads) % 1000) == 0 ) idump((op.nonce));
if( graphene::time::nonblocking_now() > stop )
{
// ilog( "stop mining due to time out, nonce: ${n}", ("n",op.nonce) );
return;
}
if( this->_head_block_num != head_block_num )
{
// wlog( "stop mining due new block arrival, nonce: ${n}", ("n",op.nonce));
return;
}
++this->_total_hashes;

work.input.nonce += num_threads;
work.create( block_id, miner, work.input.nonce );
if( work.pow_summary < target )
{
++this->_head_block_num; /// signal other workers to stop

protocol::signed_transaction trx;
op.work = work;
if( !has_account )
op.new_owner_key = pub;
trx.operations.push_back(op);
trx.ref_block_num = head_block_num;
trx.ref_block_prefix = work.input.prev_block._hash[1];
trx.set_expiration( head_block_time + STEEMIT_MAX_TIME_UNTIL_EXPIRATION );
trx.sign( pk, STEEMIT_CHAIN_ID );
mainthread->async( [this,miner,trx]()
{
try
{
database().push_transaction( trx );
ilog( "Broadcasting Proof of Work for ${miner}", ("miner",miner) );
p2p_node().broadcast( graphene::net::trx_message(trx) );
}
catch( const fc::exception& e )
{
// wdump((e.to_detail_string()));
}
} );
return;
}
}
} );
thread_num++;
}
static uint64_t seed = fc::time_point::now().time_since_epoch().count();
static uint64_t start = fc::city_hash64( (const char*)&seed, sizeof(seed) );
chain::database& db = database();
auto head_block_num = b.block_num();
auto head_block_time = b.timestamp;
auto block_id = b.id();
fc::thread* mainthread = &fc::thread::current();
_total_hashes = 0;
_hash_start_time = fc::time_point::now();
auto stop = head_block_time + fc::seconds( STEEMIT_BLOCK_INTERVAL * 2 );
uint32_t thread_num = 0;
uint32_t num_threads = _mining_threads;
uint32_t target = db.get_pow_summary_target();
const auto& acct_idx = db.get_index< chain::account_index >().indices().get< chain::by_name >();
auto acct_it = acct_idx.find( miner );
bool has_account = (acct_it != acct_idx.end());
bool has_hardfork_16 = db.has_hardfork( STEEMIT_HARDFORK_0_16__551 );
for( auto& t : _thread_pool )
{
thread_num++;
t->async( [=]()
{
if( has_hardfork_16 )
{
protocol::pow2_operation op;
protocol::equihash_pow work;
work.input.prev_block = block_id;
work.input.worker_account = miner;
work.input.nonce = start + thread_num;
op.props = _miner_prop_vote;

while( true )
{
if( graphene::time::nonblocking_now() > stop )
{
// ilog( "stop mining due to time out, nonce: ${n}", ("n",op.nonce) );
return;
}
if( this->_head_block_num != head_block_num )
{
// wlog( "stop mining due new block arrival, nonce: ${n}", ("n",op.nonce));
return;
}

++this->_total_hashes;
work.input.nonce += num_threads;
work.create( block_id, miner, work.input.nonce );

if( work.proof.is_valid() && work.pow_summary < target )
{
protocol::signed_transaction trx;
work.prev_block = this->_head_block_id;
op.work = work;
if( !has_account )
op.new_owner_key = pub;
trx.operations.push_back( op );
trx.ref_block_num = head_block_num;
trx.ref_block_prefix = work.input.prev_block._hash[1];
trx.set_expiration( head_block_time + STEEMIT_MAX_TIME_UNTIL_EXPIRATION );
trx.sign( pk, STEEMIT_CHAIN_ID );
++this->_head_block_num;
mainthread->async( [this,miner,trx]()
{
try
{
database().push_transaction( trx );
ilog( "Broadcasting Proof of Work for ${miner}", ("miner",miner) );
p2p_node().broadcast( graphene::net::trx_message(trx) );
}
catch( const fc::exception& e )
{
// wdump((e.to_detail_string()));
}
});
return;
}
}
}
else // delete after hardfork 16
{
protocol::pow2_operation op;
protocol::pow2 work;
work.input.prev_block = block_id;
work.input.worker_account = miner;
work.input.nonce = start + thread_num;
op.props = _miner_prop_vote;
while( true )
{
// if( ((op.nonce/num_threads) % 1000) == 0 ) idump((op.nonce));
if( graphene::time::nonblocking_now() > stop )
{
// ilog( "stop mining due to time out, nonce: ${n}", ("n",op.nonce) );
return;
}
if( this->_head_block_num != head_block_num )
{
// wlog( "stop mining due new block arrival, nonce: ${n}", ("n",op.nonce));
return;
}

++this->_total_hashes;
work.input.nonce += num_threads;
work.create( block_id, miner, work.input.nonce );
if( work.pow_summary < target )
{
++this->_head_block_num; /// signal other workers to stop
protocol::signed_transaction trx;
op.work = work;
if( !has_account )
op.new_owner_key = pub;
trx.operations.push_back(op);
trx.ref_block_num = head_block_num;
trx.ref_block_prefix = work.input.prev_block._hash[1];
trx.set_expiration( head_block_time + STEEMIT_MAX_TIME_UNTIL_EXPIRATION );
trx.sign( pk, STEEMIT_CHAIN_ID );
mainthread->async( [this,miner,trx]()
{
try
{
database().push_transaction( trx );
ilog( "Broadcasting Proof of Work for ${miner}", ("miner",miner) );
p2p_node().broadcast( graphene::net::trx_message(trx) );
}
catch( const fc::exception& e )
{
// wdump((e.to_detail_string()));
}
} );
return;
}
}
}
} );
thread_num++;
}
}

STEEMIT_DEFINE_PLUGIN( witness, steemit::witness_plugin::witness_plugin )
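
For readers skimming the extracted diff above: the old body of start_mining (the first copy of the worker lambda) is removed, and the new body gates the proof type on db.has_hardfork( STEEMIT_HARDFORK_0_16__551 ). Before the hardfork each worker keeps producing the legacy protocol::pow2 proof; after it, the worker builds a protocol::equihash_pow proof, additionally requires work.proof.is_valid() before accepting a solution, and stamps the solution with the newly tracked head block id (_head_block_id, the member added in the earlier hunk). Below is a condensed sketch of that per-thread branch. It is not a drop-in replacement: it assumes the same locals as in the diff (block_id, miner, start, thread_num, num_threads, target, stop, head_block_num, has_hardfork_16), and sign_and_broadcast() is a hypothetical stand-in for building, signing, and broadcasting the pow2_operation exactly as the diff does.

    // Condensed sketch of the per-worker branch added in this commit (assumptions noted above).
    if( has_hardfork_16 )
    {
       protocol::equihash_pow work;                        // new Equihash-based proof type
       work.input.prev_block     = block_id;
       work.input.worker_account = miner;
       work.input.nonce          = start + thread_num;

       while( graphene::time::nonblocking_now() <= stop
              && this->_head_block_num == head_block_num )  // stop on timeout or new block arrival
       {
          ++this->_total_hashes;
          work.input.nonce += num_threads;                  // each thread strides by the thread count
          work.create( block_id, miner, work.input.nonce );

          if( work.proof.is_valid() && work.pow_summary < target ) // extra validity check vs. pow2
          {
             work.prev_block = this->_head_block_id;        // uses the new _head_block_id member
             sign_and_broadcast( work );                     // hypothetical helper; see the diff for the real code
             return;
          }
       }
    }
    else // legacy path, removed once hardfork 16 activates
    {
       protocol::pow2 work;                                 // old proof-of-work type
       // ... same loop, but without the proof.is_valid() check or the prev_block stamp
    }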