diff --git a/contracts/eosio.system/delegate_bandwidth.cpp b/contracts/eosio.system/delegate_bandwidth.cpp index a2920d70295..a5e9ad14efe 100644 --- a/contracts/eosio.system/delegate_bandwidth.cpp +++ b/contracts/eosio.system/delegate_bandwidth.cpp @@ -193,7 +193,7 @@ namespace eosiosystem { auto fee = ( tokens_out.amount + 199 ) / 200; /// .5% fee (round up) // since tokens_out.amount was asserted to be at least 2 earlier, fee.amount < tokens_out.amount - + if( fee > 0 ) { INLINE_ACTION_SENDER(eosio::token, transfer)( N(eosio.token), {account,N(active)}, { account, N(eosio.ramfee), asset(fee), std::string("sell ram fee") } ); diff --git a/contracts/eosio.system/eosio.system.abi b/contracts/eosio.system/eosio.system.abi index 87937c787f9..f2f9f394cf9 100644 --- a/contracts/eosio.system/eosio.system.abi +++ b/contracts/eosio.system/eosio.system.abi @@ -575,4 +575,4 @@ ], "ricardian_clauses": [], "abi_extensions": [] -} +} \ No newline at end of file diff --git a/contracts/eosio.system/exchange_state.hpp b/contracts/eosio.system/exchange_state.hpp index 3705a9b8b98..e6434e32b49 100644 --- a/contracts/eosio.system/exchange_state.hpp +++ b/contracts/eosio.system/exchange_state.hpp @@ -28,7 +28,7 @@ namespace eosiosystem { uint64_t primary_key()const { return supply.symbol; } - asset convert_to_exchange( connector& c, asset in ); + asset convert_to_exchange( connector& c, asset in ); asset convert_from_exchange( connector& c, asset in ); asset convert( asset from, symbol_type to ); diff --git a/contracts/eosiolib/eosiolib.cpp b/contracts/eosiolib/eosiolib.cpp index 48d80b1037b..35bc7460c70 100644 --- a/contracts/eosiolib/eosiolib.cpp +++ b/contracts/eosiolib/eosiolib.cpp @@ -55,6 +55,13 @@ namespace eosio { ds >> params; } + void set_upgrade_parameters(const eosio::upgrade_parameters& params) { + char buf[sizeof(eosio::upgrade_parameters)]; + eosio::datastream ds( buf, sizeof(buf) ); + ds << params; + set_upgrade_parameters_packed( buf, ds.tellp() ); + } + using ::memset; using ::memcpy; diff --git a/contracts/eosiolib/privileged.h b/contracts/eosiolib/privileged.h index 8943a09db23..d6d6761e9e3 100644 --- a/contracts/eosiolib/privileged.h +++ b/contracts/eosiolib/privileged.h @@ -92,6 +92,7 @@ extern "C" { */ uint32_t get_blockchain_parameters_packed(char* data, uint32_t datalen); + void set_upgrade_parameters_packed(char* data, uint32_t datalen); /** * @brief Activate new feature * Activate new feature diff --git a/contracts/eosiolib/privileged.hpp b/contracts/eosiolib/privileged.hpp index 3091acf8b3b..e6541f31eb6 100644 --- a/contracts/eosiolib/privileged.hpp +++ b/contracts/eosiolib/privileged.hpp @@ -108,6 +108,14 @@ namespace eosio { ) }; + struct upgrade_parameters { + uint32_t target_block_num; + + EOSLIB_SERIALIZE(upgrade_parameters, + (target_block_num) + ) + }; + /** * @brief Set the blockchain parameters * Set the blockchain parameters @@ -122,6 +130,8 @@ namespace eosio { */ void get_blockchain_parameters(eosio::blockchain_parameters& params); + void set_upgrade_parameters(const eosio::upgrade_parameters& params); + ///@} priviledgedcppapi /** diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..8f765f91ec1 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -14,6 +14,8 @@ add_library( eosio_chain block_header_state.cpp block_state.cpp fork_database.cpp + pbft_database.cpp + pbft.cpp controller.cpp authorization_manager.cpp resource_limits.cpp diff --git a/libraries/chain/block_header_state.cpp 
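A note on the new eosiolib wrapper above: set_upgrade_parameters() packs an eosio::upgrade_parameters struct (currently only target_block_num) and forwards it to the set_upgrade_parameters_packed intrinsic declared in privileged.h. A minimal sketch of how a privileged system-contract action might call it follows; the action name setupgrade and the require_auth check are illustrative assumptions, not part of this patch.

    #include <eosiolib/eosio.hpp>
    #include <eosiolib/privileged.hpp>

    // Hypothetical privileged action that schedules the consensus upgrade; the
    // controller later marks the upgrade complete once DPoS LIB reaches this block.
    void setupgrade( uint32_t target_block_num ) {
       require_auth( N(eosio) );                  // assume only the system account may schedule it
       eosio::upgrade_parameters params;
       params.target_block_num = target_block_num;
       eosio::set_upgrade_parameters( params );   // packs and calls set_upgrade_parameters_packed
    }
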
b/libraries/chain/block_header_state.cpp index 70bcfa3a236..96838c2fc25 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -33,7 +33,7 @@ namespace eosio { namespace chain { * contain a transaction mroot, action mroot, or new_producers as those components * are derived from chain state. */ - block_header_state block_header_state::generate_next( block_timestamp_type when )const { + block_header_state block_header_state::generate_next( block_timestamp_type when, bool new_version )const { block_header_state result; if( when != block_timestamp_type() ) { @@ -62,9 +62,17 @@ namespace eosio { namespace chain { result.pending_schedule = pending_schedule; result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; result.bft_irreversible_blocknum = bft_irreversible_blocknum; + result.pbft_stable_checkpoint_blocknum = pbft_stable_checkpoint_blocknum; + + + if (new_version) { + result.dpos_irreversible_blocknum = dpos_irreversible_blocknum; + } else { + result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); + } + - result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); /// grow the confirmed count static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); @@ -73,23 +81,30 @@ namespace eosio { namespace chain { auto num_active_producers = active_schedule.producers.size(); uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; - if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { - result.confirm_count.reserve( confirm_count.size() + 1 ); - result.confirm_count = confirm_count; - result.confirm_count.resize( confirm_count.size() + 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } else { - result.confirm_count.resize( confirm_count.size() ); - memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); - result.confirm_count.back() = (uint8_t)required_confs; + if (!new_version) { + if (confirm_count.size() < config::maximum_tracked_dpos_confirmations) { + result.confirm_count.reserve(confirm_count.size() + 1); + result.confirm_count = confirm_count; + result.confirm_count.resize(confirm_count.size() + 1); + result.confirm_count.back() = (uint8_t) required_confs; + } else { + result.confirm_count.resize(confirm_count.size()); + memcpy(&result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1); + result.confirm_count.back() = (uint8_t) required_confs; + } } return result; } /// generate_next - bool block_header_state::maybe_promote_pending() { - if( pending_schedule.producers.size() && - dpos_irreversible_blocknum >= pending_schedule_lib_num ) + bool block_header_state::maybe_promote_pending( bool new_version ) { + + bool should_promote_pending = pending_schedule.producers.size(); + if ( !new_version ) { + should_promote_pending = should_promote_pending && dpos_irreversible_blocknum >= pending_schedule_lib_num; + } + + if (should_promote_pending) { active_schedule = move( pending_schedule ); @@ -99,7 +114,13 @@ namespace eosio { namespace chain { if( existing != producer_to_last_produced.end() ) { new_producer_to_last_produced[pro.producer_name] = existing->second; } else { - 
new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + //TODO: max of bft and dpos lib + if (new_version) { + new_producer_to_last_produced[pro.producer_name] = bft_irreversible_blocknum; + } else { + new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + } + } } @@ -109,7 +130,13 @@ namespace eosio { namespace chain { if( existing != producer_to_last_implied_irb.end() ) { new_producer_to_last_implied_irb[pro.producer_name] = existing->second; } else { - new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + //TODO: max of bft and dpos lib + if (new_version) { + new_producer_to_last_implied_irb[pro.producer_name] = bft_irreversible_blocknum; + } else { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + } + } } @@ -141,13 +168,13 @@ namespace eosio { namespace chain { * * If the header specifies new_producers then apply them accordingly. */ - block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { + block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee, bool new_version )const { EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) ); //EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" ); EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" ); - auto result = generate_next( h.timestamp ); + auto result = generate_next( h.timestamp, new_version); EOS_ASSERT( result.header.producer == h.producer, wrong_producer, "wrong producer specified" ); EOS_ASSERT( result.header.schedule_version == h.schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); @@ -161,9 +188,11 @@ namespace eosio { namespace chain { /// below this point is state changes that cannot be validated with headers alone, but never-the-less, /// must result in header state changes - result.set_confirmed( h.confirmed ); - auto was_pending_promoted = result.maybe_promote_pending(); + result.set_confirmed(h.confirmed, new_version); + + + auto was_pending_promoted = result.maybe_promote_pending(new_version); if( h.new_producers ) { EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); @@ -185,7 +214,7 @@ namespace eosio { namespace chain { return result; } /// next - void block_header_state::set_confirmed( uint16_t num_prev_blocks ) { + void block_header_state::set_confirmed( uint16_t num_prev_blocks, bool new_version ) { /* idump((num_prev_blocks)(confirm_count.size())); @@ -193,6 +222,10 @@ namespace eosio { namespace chain { std::cerr << "confirm_count["<() ) { static_cast(*block) = header; } - block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ) - :block_header_state( prev.next( *b, skip_validate_signee )), block( move(b) ) + block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee, bool new_version ) + :block_header_state( prev.next( *b, skip_validate_signee, new_version)), block( move(b) ) { } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2a5581a1c20..93c5b0c63fb 100644 --- a/libraries/chain/controller.cpp +++ 
b/libraries/chain/controller.cpp @@ -35,6 +35,7 @@ using controller_index_set = index_set< global_property_multi_index, global_property2_multi_index, dynamic_global_property_multi_index, + upgrade_property_multi_index, block_summary_multi_index, transaction_multi_index, generated_transaction_multi_index, @@ -122,6 +123,12 @@ struct controller_impl { chainbase::database reversible_blocks; ///< a special database to persist blocks that have successfully been applied but are still reversible block_log blog; optional pending; + optional pending_pbft_lib; + optional pending_pbft_checkpoint; + vector proposed_schedule_blocks; + vector promoted_schedule_blocks; + block_state_ptr pbft_prepared; + block_state_ptr my_prepare; block_state_ptr head; fork_database fork_db; wasm_interface wasmif; @@ -374,6 +381,9 @@ struct controller_impl { } } + //do upgrade migration if necessary; + migrate_upgrade(); + if( shutdown() ) return; const auto& ubi = reversible_blocks.get_index(); @@ -411,6 +421,17 @@ struct controller_impl { //*bos end* } + void migrate_upgrade() { + //generate upo. + try { + db.get(); + } catch( const boost::exception& e) { + wlog("no upo found, generating..."); + db.create([](auto&){}); + } + + } + ~controller_impl() { pending.reset(); @@ -497,6 +518,10 @@ struct controller_impl { section.add_row(conf.genesis, db); }); + snapshot->write_section([this]( auto §ion ){ + section.add_row(batch_pbft_snapshot_migration{}, db); + }); + snapshot->write_section([this]( auto §ion ){ section.template add_row(*fork_db.head(), db); }); @@ -529,18 +554,33 @@ struct controller_impl { header.validate(); }); + bool migrated = snapshot->has_section(); + if(migrated) { + snapshot->read_section([this](auto §ion) { + block_header_state head_header_state; + section.read_row(head_header_state, db); + + auto head_state = std::make_shared(head_header_state); + fork_db.set(head_state); + fork_db.set_validity(head_state, true); + fork_db.mark_in_current_chain(head_state, true); + head = head_state; + snapshot_head_block = head->block_num; + }); + }else{ + snapshot->read_section([this](snapshot_reader::section_reader §ion) { + block_header_state head_header_state; + section.read_pbft_migrate_row(head_header_state, db); + + auto head_state = std::make_shared(head_header_state); + fork_db.set(head_state); + fork_db.set_validity(head_state, true); + fork_db.mark_in_current_chain(head_state, true); + head = head_state; + snapshot_head_block = head->block_num; + }); - snapshot->read_section([this]( auto §ion ){ - block_header_state head_header_state; - section.read_row(head_header_state, db); - - auto head_state = std::make_shared(head_header_state); - fork_db.set(head_state); - fork_db.set_validity(head_state, true); - fork_db.mark_in_current_chain(head_state, true); - head = head_state; - snapshot_head_block = head->block_num; - }); + } controller_index_set::walk_indices([this, &snapshot]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; @@ -550,14 +590,16 @@ struct controller_impl { return; } - snapshot->read_section([this]( auto& section ) { - bool more = !section.empty(); - while(more) { - decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { - more = section.read_row(row, db); - }); - } - }); + if(snapshot->has_section()){ + snapshot->read_section([this]( auto& section ) { + bool more = !section.empty(); + while(more) { + decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, db); + }); + } + }); + } }); 
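The upgrade_property_object created by migrate_upgrade() above drives the upgrade_target_block() / upgrade_complete_block() / is_upgrading() / is_new_version() helpers added later in this controller.cpp diff. A worked illustration of their semantics, using hypothetical block numbers:

    // Assume set_upo(1000) has set upgrade_target_block_num = 1000, and DPoS LIB
    // first reaches 1000 while head is block 1327, so start_block() records
    // upgrade_complete_block_num = 1327.
    //
    //   head->block_num     is_upgrading()   is_new_version()
    //   ... 999             false            false   (old rules)
    //   1000 .. 1327        true             false   (producer-schedule promotion suspended)
    //   1328 ...            false            true    (new-version/PBFT rules from the next block)
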
read_contract_tables_from_snapshot(snapshot); @@ -659,6 +701,7 @@ struct controller_impl { // *bos end* + authorization.initialize_database(); resource_limits.initialize_database(); @@ -691,7 +734,7 @@ struct controller_impl { // remove action db_list U msig_list -> msig_list db_list U conf_list -> conf_list // msig_list-name_list -> msig_list conf_list - name_list -> conf_list msig_list->db_list // producer api set_whitelist_blacklist - // blacklst -> conf.xxx_blacklist conf_list U msig_list -> conf_list + // blacklst -> conf.xxx_blacklist conf_list U msig_list -> conf_list // remove_grey_list // check if remove acount in msig_list then assert fail could not remove account in msig blacklist void set_name_list(list_type list, list_action_type action, std::vector name_list) @@ -774,7 +817,7 @@ struct controller_impl { void check_msig_blacklist(list_type blacklist_type,account_name account) { auto check_blacklist = [&](const flat_set& msig_blacklist){ - EOS_ASSERT(msig_blacklist.find(account) == msig_blacklist.end(), transaction_exception, + EOS_ASSERT(msig_blacklist.find(account) == msig_blacklist.end(), transaction_exception, " do not remove account in multisig blacklist , account: ${account}", ("account", account)); }; @@ -799,7 +842,7 @@ struct controller_impl { { try{ auto merge_blacklist = [&](const shared_vector& msig_blacklist_in_db,flat_set& conf_blacklist){ - + for (auto& a : msig_blacklist_in_db) { conf_blacklist.insert(a); @@ -818,19 +861,63 @@ struct controller_impl { } // "bos end" + optional upgrade_target_block() { + + const auto& upo = db.get(); + if (upo.upgrade_target_block_num > 0) { + return upo.upgrade_target_block_num; + } else { + return optional{}; + } + } + + optional upgrade_complete_block() { + + const auto& upo = db.get(); + if (upo.upgrade_complete_block_num > 0) { + return upo.upgrade_complete_block_num; + } else { + return optional{}; + } + } + + bool is_new_version() { + auto ucb = upgrade_complete_block(); + //new version starts from the next block of ucb, this is to avoid inconsistency after pre calculation inside schedule loop. 
+ if (ucb) return head->block_num > *ucb; + return false; + } + + bool is_upgrading() { + auto utb = upgrade_target_block(); + auto ucb = upgrade_complete_block(); + auto is_upgrading = false; + if (utb) is_upgrading = head->block_num >= *utb; + if (ucb) is_upgrading = is_upgrading && head->block_num <= *ucb; + return is_upgrading; + } + /** * @post regardless of the success of commit block there is no active pending block */ void commit_block( bool add_to_fork_db ) { auto reset_pending_on_exit = fc::make_scoped_exit([this]{ pending.reset(); + set_pbft_lib(); + set_pbft_lscb(); }); try { + set_pbft_lib(); + set_pbft_lscb(); if (add_to_fork_db) { pending->_pending_block_state->validated = true; - auto new_bsp = fork_db.add(pending->_pending_block_state, true); + + auto new_version = is_new_version(); + + auto new_bsp = fork_db.add(pending->_pending_block_state, true, new_version); emit(self.accepted_block_header, pending->_pending_block_state); + head = fork_db.head(); EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); } @@ -1263,39 +1350,95 @@ struct controller_impl { pending.emplace(maybe_session()); } + auto utb = upgrade_target_block(); + auto ucb = upgrade_complete_block(); + if (utb && !ucb) { + if (head->dpos_irreversible_blocknum >= *utb) { + const auto& upo = db.get(); + db.modify( upo, [&]( auto& up ) { + up.upgrade_complete_block_num = head->block_num; + }); + wlog("system is going to be new version after the block ${b}", ("b", head->block_num)); + } + } + + auto new_version = is_new_version(); + auto upgrading = is_upgrading(); + pending->_block_status = s; pending->_producer_block_id = producer_block_id; pending->_signer = signer; - pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active + pending->_pending_block_state = std::make_shared( *head, when, new_version); // promotes pending schedule (if any) to active pending->_pending_block_state->in_current_chain = true; - pending->_pending_block_state->set_confirmed(confirm_block_count); + pending->_pending_block_state->set_confirmed(confirm_block_count, new_version); + - auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(); + auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(new_version); //modify state in speculative block only if we are speculative reads mode (other wise we need clean state for head or irreversible reads) if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { const auto& gpo = db.get(); - if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... - ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ) && // ... that has now become irreversible ... - pending->_pending_block_state->pending_schedule.producers.size() == 0 && // ... and there is room for a new pending schedule ... - !was_pending_promoted // ... and not just because it was promoted to active at the start of this block, then: - ) - { - // Promote proposed schedule to pending schedule. 
- if( !replaying ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) - ("lib", pending->_pending_block_state->dpos_irreversible_blocknum) - ("schedule", static_cast(gpo.proposed_schedule) ) ); - } - pending->_pending_block_state->set_new_producers( gpo.proposed_schedule ); - db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); - }); - } + + auto lib_num = std::max(pending->_pending_block_state->dpos_irreversible_blocknum, pending->_pending_block_state->bft_irreversible_blocknum); + auto lscb_num = pending->_pending_block_state->pbft_stable_checkpoint_blocknum; + + if (new_version && gpo.proposed_schedule_block_num) { + proposed_schedule_blocks.emplace_back(*gpo.proposed_schedule_block_num); + for ( auto itr = proposed_schedule_blocks.begin(); itr != proposed_schedule_blocks.end();) { + if ((*itr) < lscb_num) { + itr = proposed_schedule_blocks.erase(itr); + } else { + ++itr; + } + } + } + + bool should_promote_pending_schedule = false; + + should_promote_pending_schedule = gpo.proposed_schedule_block_num.valid() // if there is a proposed schedule that was proposed in a block ... + && pending->_pending_block_state->pending_schedule.producers.size() == 0 // ... and there is room for a new pending schedule ... + && !was_pending_promoted; // ... and not just because it was promoted to active at the start of this block, then: + + if (new_version) { + should_promote_pending_schedule = should_promote_pending_schedule + && pending->_pending_block_state->block_num > *gpo.proposed_schedule_block_num; + } else { + should_promote_pending_schedule = should_promote_pending_schedule + && ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ); + } + + if ( upgrading && !replaying) wlog("system is upgrading, no producer schedule promotion will happen until fully upgraded."); + + if ( should_promote_pending_schedule ) + { + if (!upgrading) { + // Promote proposed schedule to pending schedule. 
+ if (!replaying) { + ilog("promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) + ("lib", lib_num) + ("schedule", static_cast(gpo.proposed_schedule))); + } + pending->_pending_block_state->set_new_producers(gpo.proposed_schedule); + + if (new_version) { + promoted_schedule_blocks.emplace_back(pending->_pending_block_state->block_num); + for ( auto itr = promoted_schedule_blocks.begin(); itr != promoted_schedule_blocks.end();) { + if ((*itr) < lscb_num) { + itr = promoted_schedule_blocks.erase(itr); + } else { + ++itr; + } + } + } + } + db.modify( gpo, [&]( auto& gp ) { + gp.proposed_schedule_block_num = optional(); + gp.proposed_schedule.clear(); + }); + } try { auto onbtrx = std::make_shared( get_on_block_transaction() ); @@ -1333,7 +1476,7 @@ struct controller_impl { void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { - //EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); +// EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); auto producer_block_id = b->id(); start_block( b->timestamp, b->confirmed, s , producer_block_id); @@ -1351,7 +1494,15 @@ struct controller_impl { } pending->_pending_block_state->block->header_extensions = b->header_extensions; - pending->_pending_block_state->block->block_extensions = b->block_extensions; + + extensions_type pending_block_extensions; + for ( const auto& extn: b->block_extensions) { + if (extn.first != static_cast(block_extension_type::pbft_stable_checkpoint)) { + pending_block_extensions.emplace_back(extn); + } + } + + pending->_pending_block_state->block->block_extensions = pending_block_extensions; transaction_trace_ptr trace; @@ -1425,9 +1576,11 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + auto new_version = is_new_version(); + + return async_thread_pool( thread_pool, [b, prev, new_version]() { const bool skip_validate_signee = false; - return std::make_shared( *prev, move( b ), skip_validate_signee ); + return std::make_shared( *prev, move( b ), skip_validate_signee, new_version); } ); } @@ -1442,17 +1595,21 @@ struct controller_impl { auto& b = new_header_state->block; emit( self.pre_accepted_block, b ); - fork_db.add( new_header_state, false ); + auto new_version = is_new_version(); + fork_db.add( new_header_state, false, new_version); if (conf.trusted_producers.count(b->producer)) { trusted_producer_light_validation = true; - }; + } emit( self.accepted_block_header, new_header_state ); + set_pbft_lib(); + if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + maybe_switch_forks( s ); } + set_pbft_lscb(); } FC_LOG_AND_RETHROW( ) } @@ -1468,7 +1625,10 @@ struct controller_impl { block_validate_exception, "invalid block status for replay" ); emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto new_header_state = fork_db.add( b, skip_validate_signee ); + + auto new_version = is_new_version(); + + auto new_header_state = fork_db.add( b, skip_validate_signee, new_version); emit( self.accepted_block_header, new_header_state ); @@ -1476,6 +1636,17 @@ struct controller_impl { 
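The pbft_commit_local()/set_pbft_lib() and set_pbft_latest_checkpoint()/set_pbft_lscb() pairs defined below deliberately separate recording a PBFT decision from applying it to the fork database, so BFT finality only advances when no incomplete pending block is being assembled. A hedged sketch of the intended call pattern from an external PBFT component (the quorum-tracking logic is assumed and not shown in this patch):

    // Hypothetical call site inside a PBFT message handler, once block `id`
    // has gathered a sufficient quorum of commits from the active producers:
    void on_commit_quorum( eosio::chain::controller& chain,
                           const eosio::chain::block_id_type& id ) {
       chain.pbft_commit_local( id );   // queues the id; fork_db.set_bft_irreversible()
                                        // runs later via set_pbft_lib(), between blocks
    }
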
maybe_switch_forks( s ); } + // apply stable checkpoint when there is one + // TODO: verify required one more time? + for (const auto &extn: b->block_extensions) { + if (extn.first == static_cast(block_extension_type::pbft_stable_checkpoint)) { + pbft_commit_local(b->id()); + set_pbft_lib(); + set_pbft_latest_checkpoint(b->id()); + set_pbft_lscb(); + break; + } + } // on replay irreversible is not emitted by fork database, so emit it explicitly here if( s == controller::block_status::irreversible ) emit( self.irreversible_block, new_header_state ); @@ -1483,6 +1654,47 @@ struct controller_impl { } FC_LOG_AND_RETHROW( ) } + void pbft_commit_local( const block_id_type& id ) { + pending_pbft_lib.reset(); + pending_pbft_lib.emplace(id); + } + + void set_pbft_lib() { + + if ((!pending || pending->_block_status != controller::block_status::incomplete) && pending_pbft_lib ) { + fork_db.set_bft_irreversible(*pending_pbft_lib); + pending_pbft_lib.reset(); + + if (read_mode != db_read_mode::IRREVERSIBLE) { + maybe_switch_forks(controller::block_status::complete); + } + } + } + + void set_pbft_latest_checkpoint( const block_id_type& id ) { + pending_pbft_checkpoint.reset(); + pending_pbft_checkpoint.emplace(id); + } + + void set_pbft_lscb() { + if ((!pending || pending->_block_status != controller::block_status::incomplete) && pending_pbft_checkpoint ) { + + auto checkpoint_block_state = fork_db.get_block(*pending_pbft_checkpoint); + if (checkpoint_block_state) { + fork_db.set_latest_checkpoint(*pending_pbft_checkpoint); + auto checkpoint_num = checkpoint_block_state->block_num; + if (pbft_prepared && pbft_prepared->block_num < checkpoint_num) { + pbft_prepared.reset(); + } + if (my_prepare && my_prepare->block_num < checkpoint_num) { + my_prepare.reset(); + } + } + pending_pbft_checkpoint.reset(); + + } + } + void maybe_switch_forks( controller::block_status s ) { auto new_head = fork_db.head(); @@ -1583,7 +1795,14 @@ struct controller_impl { void set_ext_merkle() { vector ext_digests; - const auto& exts = pending->_pending_block_state->block->block_extensions; + extensions_type exts; + for ( const auto& extn: pending->_pending_block_state->block->block_extensions) { + if (extn.first != static_cast(block_extension_type::pbft_stable_checkpoint)) + { + exts.emplace_back(extn); + } + } + ext_digests.reserve( exts.size()); for( const auto& a : exts ) ext_digests.emplace_back( digest_type::hash(a) ); @@ -1912,6 +2131,13 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } +std::map controller::my_signature_providers()const{ + return my->conf.my_signature_providers; +} + +void controller::set_my_signature_providers(std::map msp){ + my->conf.my_signature_providers = msp; +} void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count, std::function signer) { validate_db_available_size(); @@ -1952,6 +2178,20 @@ void controller::push_block( std::future& block_state_future ) my->push_block( block_state_future ); } +void controller::pbft_commit_local( const block_id_type& id ) { + validate_db_available_size(); + my->pbft_commit_local(id); +} + +bool controller::pending_pbft_lib() { + if (my->pending_pbft_lib) return true; + return false; +} + +void controller::set_pbft_latest_checkpoint( const block_id_type& id ) { + my->set_pbft_latest_checkpoint(id); +} + transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t 
billed_cpu_time_us ) { validate_db_available_size(); EOS_ASSERT( get_read_mode() != chain::db_read_mode::READ_ONLY, transaction_type_exception, "push transaction not allowed in read-only mode" ); @@ -2079,6 +2319,33 @@ block_id_type controller::last_irreversible_block_id() const { } +uint32_t controller::last_stable_checkpoint_block_num() const { + return my->head->pbft_stable_checkpoint_blocknum; +} + +block_id_type controller::last_stable_checkpoint_block_id() const { + auto lscb_num = last_stable_checkpoint_block_num(); + const auto& tapos_block_summary = db().get((uint16_t)lscb_num); + + if( block_header::num_from_id(tapos_block_summary.block_id) == lscb_num ) + return tapos_block_summary.block_id; + + return fetch_block_by_number(lscb_num)->id(); +} + + +vector controller::proposed_schedule_block_nums() const { + return my->proposed_schedule_blocks; +} + +vector controller::promoted_schedule_block_nums() const { + return my->promoted_schedule_blocks; +} + +bool controller::is_replaying() const { + return my->replaying; +} + const dynamic_global_property_object& controller::get_dynamic_global_properties()const { return my->db.get(); } @@ -2253,6 +2520,34 @@ chain_id_type controller::get_chain_id()const { return my->chain_id; } +void controller::set_pbft_prepared(const block_id_type& id) const { + my->pbft_prepared.reset(); + auto bs = fetch_block_state_by_id(id); + if (bs) { + my->pbft_prepared = bs; + my->fork_db.mark_pbft_prepared_fork(bs); + } +} + +void controller::set_pbft_my_prepare(const block_id_type& id) const { + my->my_prepare.reset(); + auto bs = fetch_block_state_by_id(id); + if (bs) { + my->my_prepare = bs; + my->fork_db.mark_pbft_my_prepare_fork(bs); + } +} + +block_id_type controller::get_pbft_my_prepare() const { + if (my->my_prepare) return my->my_prepare->id; + return block_id_type{}; +} + +void controller::reset_pbft_my_prepare() const { + my->fork_db.remove_pbft_my_prepare_fork(); + if (my->my_prepare) my->my_prepare.reset(); +} + db_read_mode controller::get_read_mode()const { return my->read_mode; } @@ -2355,6 +2650,19 @@ void controller::validate_reversible_available_size() const { EOS_ASSERT(free >= guard, reversible_guard_exception, "reversible free: ${f}, guard size: ${g}", ("f", free)("g",guard)); } +path controller::state_dir() const { + return my->conf.state_dir; +} + +path controller::blocks_dir() const { + return my->conf.blocks_dir; +} + +producer_schedule_type controller::initial_schedule() const { + return producer_schedule_type{ 0, {{eosio::chain::config::system_account_name, my->conf.genesis.initial_key}} }; +} + + bool controller::is_known_unexpired_transaction( const transaction_id_type& id) const { return db().find(id); } @@ -2391,5 +2699,28 @@ void controller::set_name_list(int64_t list, int64_t action, std::vectordb.get(); +} + +bool controller::is_upgraded() const { + return my->is_new_version(); +} + +bool controller::under_upgrade() const { + return my->is_upgrading(); +} + +// this will be used in unit_test only, should not be called anywhere else. 
+void controller::set_upo(uint32_t target_block_num) { + try { + const auto& upo = my->db.get(); + my->db.modify( upo, [&]( auto& up ) { up.upgrade_target_block_num = (block_num_type)target_block_num;}); + } catch( const boost::exception& e) { + my->db.create([&](auto& up){ + up.upgrade_target_block_num = (block_num_type)target_block_num; + }); + } +} } } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 441677bb8a8..5509ff631b2 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -31,12 +31,14 @@ namespace eosio { namespace chain { composite_key_compare< std::less, std::greater > >, ordered_non_unique< tag, - composite_key< block_header_state, + composite_key< block_state, member, member, + member, + member, member >, - composite_key_compare< std::greater, std::greater, std::greater > + composite_key_compare< std::greater, std::greater, std::greater, std::greater, std::greater > > > > fork_multi_index_type; @@ -59,18 +61,59 @@ namespace eosio { namespace chain { if( fc::exists( fork_db_dat ) ) { string content; fc::read_file_contents( fork_db_dat, content ); - fc::datastream ds( content.data(), content.size() ); - unsigned_int size; fc::raw::unpack( ds, size ); - for( uint32_t i = 0, n = size.value; i < n; ++i ) { - block_state s; - fc::raw::unpack( ds, s ); - set( std::make_shared( move( s ) ) ); + + string version_label = content.substr(1,7);//start from position 1 because fc pack type in pos 0 + bool is_version_1 = version_label != "version"; + if(is_version_1){ + /*start upgrade migration and this is a hack and ineffecient, but lucky we only need to do it once */ + + auto start = ds.pos(); + unsigned_int size; fc::raw::unpack( ds, size ); + auto skipped_size_pos = ds.pos(); + + vector data(content.begin()+(skipped_size_pos - start), content.end()); + + for( uint32_t i = 0, n = size.value; i < n; ++i ) { + vector tmp = data; + tmp.insert(tmp.begin(), {0,0,0,0}); + fc::datastream tmp_ds(tmp.data(), tmp.size()); + block_state s; + fc::raw::unpack( tmp_ds, s ); + //prepend 4bytes for pbft_stable_checkpoint_blocknum and append 2 bytes for pbft_prepared and pbft_my_prepare + auto tmp_data_length = tmp_ds.tellp() - 6; + data.erase(data.begin(),data.begin()+tmp_data_length); + s.pbft_prepared = false; + s.pbft_my_prepare = false; + set( std::make_shared( move( s ) ) ); + } + fc::datastream head_id_stream(data.data(), data.size()); + block_id_type head_id; + fc::raw::unpack( head_id_stream, head_id ); + + my->head = get_block( head_id ); + /*end upgrade migration*/ + }else{ + //get version number + fc::raw::unpack( ds, version_label ); + EOS_ASSERT(version_label=="version", fork_database_exception, "invalid version label in forkdb.dat"); + uint8_t version_num; + fc::raw::unpack( ds, version_num ); + + EOS_ASSERT(version_num==2, fork_database_exception, "invalid version num in forkdb.dat"); + + unsigned_int size; fc::raw::unpack( ds, size ); + for( uint32_t i = 0, n = size.value; i < n; ++i ) { + block_state s; + fc::raw::unpack( ds, s ); + set( std::make_shared( move( s ) ) ); + } + block_id_type head_id; + fc::raw::unpack( ds, head_id ); + + my->head = get_block( head_id ); } - block_id_type head_id; - fc::raw::unpack( ds, head_id ); - my->head = get_block( head_id ); fc::remove( fork_db_dat ); } @@ -81,6 +124,12 @@ namespace eosio { namespace chain { auto fork_db_dat = my->datadir / config::forkdb_filename; std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | 
std::ofstream::trunc ); + + string version_label = "version"; + fc::raw::pack( out, version_label ); + uint8_t version_num = 2; + fc::raw::pack( out, version_num ); + uint32_t num_blocks_in_fork_db = my->index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); for( const auto& s : my->index ) { @@ -95,9 +144,10 @@ namespace eosio { namespace chain { /// we cannot normally prune the lib if it is the head block because /// the next block needs to build off of the head block. We are exiting /// now so we can prune this block as irreversible before exiting. - auto lib = my->head->dpos_irreversible_blocknum; + auto lib = std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + auto checkpoint = my->head->pbft_stable_checkpoint_blocknum; auto oldest = *my->index.get().begin(); - if( oldest->block_num <= lib ) { + if( oldest->block_num < lib && oldest->block_num < checkpoint ) { prune( oldest ); } @@ -123,7 +173,7 @@ namespace eosio { namespace chain { } } - block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous ) { + block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous, bool new_version ) { EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); @@ -136,19 +186,35 @@ namespace eosio { namespace chain { auto inserted = my->index.insert(n); EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added?" ); + auto prior = my->index.find( n->block->previous ); + + //TODO: to be optimised. + if (prior != my->index.end()) { + if ((*prior)->pbft_prepared) mark_pbft_prepared_fork(*prior); + if ((*prior)->pbft_my_prepare) mark_pbft_my_prepare_fork(*prior); + } + my->head = *my->index.get().begin(); - auto lib = my->head->dpos_irreversible_blocknum; + auto lib = std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + auto checkpoint = my->head->pbft_stable_checkpoint_blocknum; + auto oldest = *my->index.get().begin(); - if( oldest->block_num < lib ) { - prune( oldest ); + auto should_prune_oldest = oldest->block_num < lib; + + if (new_version) { + should_prune_oldest = should_prune_oldest && oldest->block_num < checkpoint; + } + + if ( should_prune_oldest ) { + prune( oldest ); } return n; } - block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee ) { + block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee, bool new_version ) { EOS_ASSERT( b, fork_database_exception, "attempt to add null block" ); EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); const auto& by_id_idx = my->index.get(); @@ -158,9 +224,9 @@ namespace eosio { namespace chain { auto prior = by_id_idx.find( b->previous ); EOS_ASSERT( prior != by_id_idx.end(), unlinkable_block_exception, "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); - auto result = std::make_shared( **prior, move(b), skip_validate_signee ); + auto result = std::make_shared( **prior, move(b), skip_validate_signee, new_version); EOS_ASSERT( result, fork_database_exception , "fail to add new block state" ); - return add(result, true); + return add(result, true, new_version); } const block_state_ptr& fork_database::head()const { return my->head; } @@ -275,16 +341,111 @@ namespace eosio { namespace chain { } } - block_state_ptr fork_database::get_block(const block_id_type& id)const { + block_state_ptr 
fork_database::get_block(const block_id_type& id) const { auto itr = my->index.find( id ); if( itr != my->index.end() ) return *itr; return block_state_ptr(); } + void fork_database::mark_pbft_prepared_fork(const block_state_ptr& h) { + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_prepared = true; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_prepared = true; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ h->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + + void fork_database::mark_pbft_my_prepare_fork(const block_state_ptr& h) { + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_my_prepare = true; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_my_prepare = true; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ h->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + + void fork_database::remove_pbft_my_prepare_fork() { + auto oldest = *my->index.get().begin(); + + auto& by_id_idx = my->index.get(); + auto itr = by_id_idx.find( oldest->id ); + by_id_idx.modify( itr, [&]( auto& bsp ) { bsp->pbft_my_prepare = false; }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + bsp->pbft_my_prepare = false; + updated.push_back( bsp->id ); + }); + ++pitr; + } + } + return updated; + }; + + vector queue{ oldest->id }; + while(!queue.empty()) { + queue = update( queue ); + } + my->head = *my->index.get().begin(); + } + block_state_ptr fork_database::get_block_in_current_chain_by_num( uint32_t n )const { const auto& numidx = my->index.get(); auto nitr = numidx.lower_bound( n ); + // following asserts removed so null can be returned //FC_ASSERT( nitr != numidx.end() && (*nitr)->block_num == n, // "could not find block in fork database with block number ${block_num}", ("block_num", n) ); @@ -314,10 +475,13 @@ namespace eosio { namespace chain { * This will require a search over all forks */ void fork_database::set_bft_irreversible( block_id_type id ) { - auto& idx = my->index.get(); - auto itr = idx.find(id); - uint32_t block_num = (*itr)->block_num; - idx.modify( itr, [&]( auto& bsp ) { + auto b = get_block( id ); + EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",id)); + + auto& idx = my->index.get(); + auto itr = idx.find(id); + uint32_t block_num = (*itr)->block_num; + idx.modify( itr, [&]( auto& bsp ) { 
bsp->bft_irreversible_blocknum = bsp->block_num; }); @@ -330,27 +494,65 @@ namespace eosio { namespace chain { auto update = [&]( const vector& in ) { vector updated; - for( const auto& i : in ) { - auto& pidx = my->index.get(); - auto pitr = pidx.lower_bound( i ); - auto epitr = pidx.upper_bound( i ); - while( pitr != epitr ) { - pidx.modify( pitr, [&]( auto& bsp ) { - if( bsp->bft_irreversible_blocknum < block_num ) { - bsp->bft_irreversible_blocknum = block_num; - updated.push_back( bsp->id ); - } - }); - ++pitr; - } - } - return updated; - }; + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + if( bsp->bft_irreversible_blocknum < block_num ) { + bsp->bft_irreversible_blocknum = block_num; + updated.push_back( bsp->id ); + } + }); + ++pitr; + } + } + return updated; + }; + + vector queue{id}; + while( queue.size() ) { + queue = update( queue ); + } + } - vector queue{id}; - while( queue.size() ) { - queue = update( queue ); - } + void fork_database::set_latest_checkpoint( block_id_type id) { + auto b = get_block( id ); + EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",id)); + + auto& idx = my->index.get(); + auto itr = idx.find(id); + uint32_t block_num = (*itr)->block_num; + idx.modify( itr, [&]( auto& bsp ) { + bsp->pbft_stable_checkpoint_blocknum = bsp->block_num; + }); + + auto update = [&]( const vector& in ) { + vector updated; + + for( const auto& i : in ) { + auto& pidx = my->index.get(); + auto pitr = pidx.lower_bound( i ); + auto epitr = pidx.upper_bound( i ); + while( pitr != epitr ) { + pidx.modify( pitr, [&]( auto& bsp ) { + if( bsp->pbft_stable_checkpoint_blocknum < block_num ) { + bsp->pbft_stable_checkpoint_blocknum = block_num; + updated.push_back( bsp->id ); + } + }); + ++pitr; + } + } + return updated; + }; + + vector queue{id}; + while( queue.size() ) { + queue = update( queue ); + } } -} } /// eosio::chain + + } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/block.hpp b/libraries/chain/include/eosio/chain/block.hpp index 9cd942026cf..246515e1200 100644 --- a/libraries/chain/include/eosio/chain/block.hpp +++ b/libraries/chain/include/eosio/chain/block.hpp @@ -52,7 +52,8 @@ namespace eosio { namespace chain { }; enum class block_extension_type : uint16_t { - bpsig_action_time_seed + bpsig_action_time_seed, + pbft_stable_checkpoint }; /** diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index 723824b5310..e1943741bc9 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -24,7 +24,7 @@ namespace eosio { namespace chain { * behavior. When producing a block a producer is always confirming at least the block he * is building off of. A producer cannot confirm "this" block, only prior blocks. 
*/ - uint16_t confirmed = 1; + uint16_t confirmed = 1; block_id_type previous; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index c318843d5df..26ba42cc9f8 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -10,12 +10,14 @@ namespace eosio { namespace chain { * @brief defines the minimum state necessary to validate transaction headers */ struct block_header_state { + uint32_t pbft_stable_checkpoint_blocknum = 0; block_id_type id; uint32_t block_num = 0; signed_block_header header; uint32_t dpos_proposed_irreversible_blocknum = 0; uint32_t dpos_irreversible_blocknum = 0; uint32_t bft_irreversible_blocknum = 0; + uint32_t pending_schedule_lib_num = 0; /// last irr block num digest_type pending_schedule_hash; producer_schedule_type pending_schedule; @@ -27,13 +29,13 @@ struct block_header_state { vector confirm_count; vector confirmations; - block_header_state next( const signed_block_header& h, bool trust = false )const; - block_header_state generate_next( block_timestamp_type when )const; + block_header_state next( const signed_block_header& h, bool trust = false, bool new_version = false)const; + block_header_state generate_next( block_timestamp_type when, bool new_version = false )const; void set_new_producers( producer_schedule_type next_pending ); - void set_confirmed( uint16_t num_prev_blocks ); + void set_confirmed( uint16_t num_prev_blocks, bool new_version = false ); void add_confirmation( const header_confirmation& c ); - bool maybe_promote_pending(); + bool maybe_promote_pending( bool new_version = false); bool has_pending_producers()const { return pending_schedule.producers.size(); } @@ -60,7 +62,9 @@ struct block_header_state { } } /// namespace eosio::chain FC_REFLECT( eosio::chain::block_header_state, + (pbft_stable_checkpoint_blocknum) (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum)(bft_irreversible_blocknum) + (pending_schedule_lib_num)(pending_schedule_hash) (pending_schedule)(active_schedule)(blockroot_merkle) (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 2292392ade4..3745beaaeaf 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -13,14 +13,16 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { explicit block_state( const block_header_state& cur ):block_header_state(cur){} - block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ); - block_state( const block_header_state& prev, block_timestamp_type when ); + block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee, bool new_version ); + block_state( const block_header_state& prev, block_timestamp_type when, bool new_version ); block_state() = default; /// weak_ptr prev_block_state.... 
signed_block_ptr block; bool validated = false; bool in_current_chain = false; + bool pbft_prepared = false; + bool pbft_my_prepare = false; /// this data is redundant with the data stored in block, but facilitates /// recapturing transactions when we pop a block @@ -31,4 +33,4 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain) ) +FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain)(pbft_prepared)(pbft_my_prepare) ) diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index 3b3e64f264f..884293360a5 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -29,6 +29,11 @@ struct chain_snapshot_header { } }; +struct batch_pbft_snapshot_migration{ + bool migrated = true; +}; + } } -FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file +FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) +FC_REFLECT(eosio::chain::batch_pbft_snapshot_migration,(migrated)) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index f34b7702095..cb294eecc7f 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -20,9 +20,12 @@ const static auto default_reversible_guard_size = 2*1024*1024ll;/// 1MB * 340 bl const static auto default_state_dir_name = "state"; const static auto forkdb_filename = "forkdb.dat"; +const static auto pbftdb_filename = "pbftdb.dat"; const static auto default_state_size = 1*1024*1024*1024ll; const static auto default_state_guard_size = 128*1024*1024ll; +const static auto checkpoints_filename = "checkpoints.dat"; + const static uint64_t system_account_name = N(eosio); const static uint64_t null_account_name = N(eosio.null); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 2257fa56210..392a052819d 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -30,6 +30,7 @@ namespace eosio { namespace chain { class dynamic_global_property_object; class global_property_object; class global_property2_object; // *bos* + class upgrade_property_object; class permission_object; class account_object; using resource_limits::resource_limits_manager; @@ -65,6 +66,8 @@ namespace eosio { namespace chain { // *bos end* + using signature_provider_type = std::function; + class controller { public: @@ -82,6 +85,8 @@ namespace eosio { namespace chain { uint64_t state_guard_size = chain::config::default_state_guard_size; uint64_t reversible_cache_size = chain::config::default_reversible_cache_size; uint64_t reversible_guard_size = chain::config::default_reversible_guard_size; + path checkpoints_dir = blocks_dir; + uint32_t sig_cpu_bill_pct = chain::config::default_sig_cpu_bill_pct; uint16_t thread_pool_size = chain::config::default_controller_thread_pool_size; bool read_only = false; @@ -98,6 +103,10 @@ namespace eosio { namespace chain { flat_set resource_greylist; flat_set trusted_producers; + + + std::map my_signature_providers; + std::set my_producers; }; enum class block_status { @@ -155,8 +164,24 @@ namespace eosio { namespace chain { const chainbase::database& db()const; + 
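The my_signature_providers()/set_my_signature_providers() accessors declared just below let the node hand its signing keys to the PBFT machinery. A minimal sketch of wiring one up, assuming signature_provider_type has the usual std::function<signature_type(digest_type)> shape used by producer_plugin and that the map is keyed by public_key_type (both assumptions; the template arguments are not visible in this patch excerpt), with purely illustrative key handling:

    using namespace eosio::chain;

    void wire_signature_provider( controller& chain, const fc::crypto::private_key& key ) {
       std::map<public_key_type, signature_provider_type> providers;
       providers[key.get_public_key()] =
          [key]( const digest_type& digest ) { return key.sign( digest ); };
       chain.set_my_signature_providers( providers );   // later queried via my_signature_providers()
    }
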
void pbft_commit_local( const block_id_type& id ); + + bool pending_pbft_lib(); + + vector proposed_schedule_block_nums()const; + vector promoted_schedule_block_nums()const; + + void set_pbft_latest_checkpoint( const block_id_type& id ); + uint32_t last_stable_checkpoint_block_num()const; + block_id_type last_stable_checkpoint_block_id()const; + + const fork_database& fork_db()const; + std::map my_signature_providers()const; + void set_my_signature_providers(std::map msp); + + const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; const dynamic_global_property_object& get_dynamic_global_properties()const; @@ -229,7 +254,7 @@ namespace eosio { namespace chain { // *bos begin* const global_property2_object& get_global_properties2()const; // *bos* void set_name_list(int64_t list, int64_t action, std::vector name_list); - + // *bos end* bool is_resource_greylisted(const account_name &name) const; @@ -259,6 +284,16 @@ namespace eosio { namespace chain { void set_subjective_cpu_leeway(fc::microseconds leeway); + path state_dir()const; + path blocks_dir()const; + producer_schedule_type initial_schedule()const; + bool is_replaying()const; + + void set_pbft_prepared(const block_id_type& id)const; + void set_pbft_my_prepare(const block_id_type& id)const; + block_id_type get_pbft_my_prepare()const; + void reset_pbft_my_prepare()const; + signal pre_accepted_block; signal accepted_block_header; signal accepted_block; @@ -268,6 +303,11 @@ namespace eosio { namespace chain { signal accepted_confirmation; signal bad_alloc; + const upgrade_property_object& get_upgrade_properties()const; + bool is_upgraded()const; + bool under_upgrade()const; + void set_upo(uint32_t target_block_num); + /* signal pre_apply_block; signal post_apply_block; @@ -324,6 +364,7 @@ FC_REFLECT( eosio::chain::controller::config, (state_dir) (state_size) (reversible_cache_size) + (checkpoints_dir) (read_only) (force_all_checks) (disable_replay_opts) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 6c3e504d349..65d9e8c0ce6 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -136,6 +136,8 @@ namespace eosio { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( fork_db_block_not_found, fork_database_exception, 3020001, "Block can not be found" ) + FC_DECLARE_DERIVED_EXCEPTION( pbft_exception, chain_exception, + 4010000, "PBFT exception" ) FC_DECLARE_DERIVED_EXCEPTION( block_validate_exception, chain_exception, 3030000, "Block exception" ) diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 998157ab41a..10eb41a6852 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -40,8 +40,8 @@ namespace eosio { namespace chain { * block_state and will return a pointer to the new block state or * throw on error. 
*/ - block_state_ptr add( signed_block_ptr b, bool skip_validate_signee ); - block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); + block_state_ptr add( signed_block_ptr b, bool skip_validate_signee, bool new_version ); + block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous, bool new_version ); void remove( const block_id_type& id ); void add( const header_confirmation& c ); @@ -69,9 +69,18 @@ namespace eosio { namespace chain { * it is removed unless it is the head block. */ signal irreversible; - - private: + void set_bft_irreversible( block_id_type id ); + + void set_latest_checkpoint( block_id_type id); + + void mark_pbft_prepared_fork(const block_state_ptr& h); + + void mark_pbft_my_prepare_fork(const block_state_ptr& h); + + void remove_pbft_my_prepare_fork(); + + private: unique_ptr my; }; diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index bdb49d3ce06..98f86939ad6 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -44,6 +44,16 @@ namespace eosio { namespace chain { guaranteed_minimum_resources gmr;//guaranteed_minimum_resources }; + class upgrade_property_object : public chainbase::object + { + OBJECT_CTOR(upgrade_property_object) + //TODO: should use a more complicated struct to include id, digest and status of every single upgrade. + + id_type id; + block_num_type upgrade_target_block_num = 0; + block_num_type upgrade_complete_block_num = 0; + }; + /** * @class dynamic_global_property_object @@ -89,6 +99,15 @@ namespace eosio { namespace chain { > > >; + + using upgrade_property_multi_index = chainbase::shared_multi_index_container< + upgrade_property_object, + indexed_by< + ordered_unique, + BOOST_MULTI_INDEX_MEMBER(upgrade_property_object, upgrade_property_object::id_type, id) + > + > + >; }} CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index) @@ -96,6 +115,7 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index) // *bos* CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property2_object, eosio::chain::global_property2_multi_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::upgrade_property_object, eosio::chain::upgrade_property_multi_index) FC_REFLECT(eosio::chain::dynamic_global_property_object, (global_action_sequence) @@ -107,4 +127,7 @@ FC_REFLECT(eosio::chain::global_property_object, // *bos* FC_REFLECT(eosio::chain::global_property2_object, (cfg)(gmr) + ) +FC_REFLECT(eosio::chain::upgrade_property_object, + (upgrade_target_block_num)(upgrade_complete_block_num) ) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/pbft.hpp b/libraries/chain/include/eosio/chain/pbft.hpp new file mode 100644 index 00000000000..bbca6a494ae --- /dev/null +++ b/libraries/chain/include/eosio/chain/pbft.hpp @@ -0,0 +1,232 @@ +#pragma once + +#include +#include +#include + +namespace eosio { + namespace chain { + using namespace std; + using namespace fc; + + struct psm_cache { + vector prepares_cache; + vector commits_cache; + vector view_changes_cache; + vector prepared_certificate; + vector view_changed_certificate; + }; + + class psm_machine { + class psm_state *current; + + public: + explicit psm_machine(pbft_database& pbft_db); + ~psm_machine(); + + void set_current(psm_state *s) { + current = s; + 
} + + void on_prepare(pbft_prepare &e); + void send_prepare(); + + void on_commit(pbft_commit &e); + void send_commit(); + + void on_view_change(pbft_view_change &e); + void send_view_change(); + + void on_new_view(pbft_new_view &e); + + template + void transit_to_committed_state(T const & s, bool to_new_view); + + template + void transit_to_prepared_state(T const & s); + + void send_pbft_view_change(); + + template + void transit_to_view_change_state(T const & s); + + template + void transit_to_new_view(const pbft_new_view &new_view, T const &s); + + const vector &get_prepares_cache() const; + + void set_prepares_cache(const vector &prepares_cache); + + const vector &get_commits_cache() const; + + void set_commits_cache(const vector &commits_cache); + + const vector &get_view_changes_cache() const; + + void set_view_changes_cache(const vector &view_changes_cache); + + const uint32_t &get_current_view() const; + + void set_current_view(const uint32_t ¤t_view); + + const vector &get_prepared_certificate() const; + + void set_prepared_certificate(const vector &prepared_certificate); + + const vector &get_view_changed_certificate() const; + + void set_view_changed_certificate(const vector &view_changed_certificate); + + const uint32_t &get_target_view_retries() const; + + void set_target_view_retries(const uint32_t &target_view_reties); + + const uint32_t &get_target_view() const; + + void set_target_view(const uint32_t &target_view); + + const uint32_t &get_view_change_timer() const; + + void set_view_change_timer(const uint32_t &view_change_timer); + + void manually_set_current_view(const uint32_t ¤t_view); + + protected: + psm_cache cache; + uint32_t current_view; + uint32_t target_view_retries; + uint32_t target_view; + uint32_t view_change_timer; + + private: + pbft_database &pbft_db; + + }; + + class psm_state { + + public: + psm_state(); + ~psm_state(); + + virtual void on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) = 0; + + virtual void send_prepare(psm_machine *m, pbft_database &pbft_db) = 0; + + virtual void on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) = 0; + + virtual void send_commit(psm_machine *m, pbft_database &pbft_db) = 0; + + virtual void on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) = 0; + + virtual void send_view_change(psm_machine *m, pbft_database &pbft_db) = 0; + + virtual void on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) = 0; + + virtual void manually_set_view(psm_machine *m, const uint32_t &view) = 0; + + }; + + class psm_prepared_state final: public psm_state { + + public: + psm_prepared_state(); + ~psm_prepared_state(); + + void on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) override; + + void send_prepare(psm_machine *m, pbft_database &pbft_db) override; + + void on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) override; + + void send_commit(psm_machine *m, pbft_database &pbft_db) override; + + void on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) override; + + void send_view_change(psm_machine *m, pbft_database &pbft_db) override; + + void on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) override; + + void manually_set_view(psm_machine *m, const uint32_t &view) override; + + bool pending_commit_local; + + }; + + class psm_committed_state final: public psm_state { + public: + psm_committed_state(); + ~psm_committed_state(); + + void on_prepare(psm_machine *m, pbft_prepare &e, 
pbft_database &pbft_db) override; + + void send_prepare(psm_machine *m, pbft_database &pbft_db) override; + + void on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) override; + + void send_commit(psm_machine *m, pbft_database &pbft_db) override; + + void on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) override; + + void send_view_change(psm_machine *m, pbft_database &pbft_db) override; + + void on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) override; + + void manually_set_view(psm_machine *m, const uint32_t &view) override; + + bool pending_commit_local; + }; + + class psm_view_change_state final: public psm_state { + public: + void on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) override; + + void send_prepare(psm_machine *m, pbft_database &pbft_db) override; + + void on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) override; + + void send_commit(psm_machine *m, pbft_database &pbft_db) override; + + void on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) override; + + void send_view_change(psm_machine *m, pbft_database &pbft_db) override; + + void on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) override; + + void manually_set_view(psm_machine *m, const uint32_t &view) override; + }; + + struct pbft_config { + uint32_t view_change_timeout = 6; + bool bp_candidate = false; + }; + + class pbft_controller { + public: + pbft_controller(controller& ctrl); + ~pbft_controller(); + + pbft_database pbft_db; + psm_machine state_machine; + pbft_config config; + + void maybe_pbft_prepare(); + void maybe_pbft_commit(); + void maybe_pbft_view_change(); + void send_pbft_checkpoint(); + + void on_pbft_prepare(pbft_prepare &p); + void on_pbft_commit(pbft_commit &c); + void on_pbft_view_change(pbft_view_change &vc); + void on_pbft_new_view(pbft_new_view &nv); + void on_pbft_checkpoint(pbft_checkpoint &cp); + + private: + fc::path datadir; + + + }; + } +} /// namespace eosio::chain + +FC_REFLECT(eosio::chain::pbft_controller, (pbft_db)(state_machine)(config)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/pbft_database.hpp b/libraries/chain/include/eosio/chain/pbft_database.hpp new file mode 100644 index 00000000000..01c36132a6b --- /dev/null +++ b/libraries/chain/include/eosio/chain/pbft_database.hpp @@ -0,0 +1,650 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace eosio { + namespace chain { + using boost::multi_index_container; + using namespace boost::multi_index; + using namespace std; + using boost::uuids::uuid; + + + struct block_info { + block_id_type block_id; + block_num_type block_num = 0; + }; + + struct pbft_prepare { + string uuid; + uint32_t view; + block_num_type block_num = 0; + block_id_type block_id; + public_key_type public_key; + chain_id_type chain_id = chain_id_type(""); + signature_type producer_signature; + time_point timestamp = time_point::now(); + + + bool operator==(const pbft_prepare &rhs) const { + return view == rhs.view + && block_num == rhs.block_num + && block_id == rhs.block_id + && public_key == rhs.public_key + && chain_id == rhs.chain_id + && timestamp == rhs.timestamp; + } + + bool operator!=(const pbft_prepare &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_prepare &rhs) const { + if (block_num < rhs.block_num) { + return true; + } else return block_num == 
rhs.block_num && view < rhs.view; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, view); + fc::raw::pack(enc, block_num); + fc::raw::pack(enc, block_id); + fc::raw::pack(enc, public_key); + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, timestamp); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_commit { + string uuid; + uint32_t view; + block_num_type block_num = 0; + block_id_type block_id; + public_key_type public_key; + chain_id_type chain_id = chain_id_type(""); + signature_type producer_signature; + time_point timestamp = time_point::now(); + + + bool operator==(const pbft_commit &rhs) const { + return view == rhs.view + && block_num == rhs.block_num + && block_id == rhs.block_id + && public_key == rhs.public_key + && chain_id == rhs.chain_id + && timestamp == rhs.timestamp; + } + + bool operator!=(const pbft_commit &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_commit &rhs) const { + if (block_num < rhs.block_num) { + return true; + } else return block_num == rhs.block_num && view < rhs.view; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, view); + fc::raw::pack(enc, block_num); + fc::raw::pack(enc, block_id); + fc::raw::pack(enc, public_key); + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, timestamp); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_checkpoint { + string uuid; + block_num_type block_num = 0; + block_id_type block_id; + public_key_type public_key; + chain_id_type chain_id = chain_id_type(""); + signature_type producer_signature; + time_point timestamp = time_point::now(); + + bool operator==(const pbft_checkpoint &rhs) const { + return block_num == rhs.block_num + && block_id == rhs.block_id + && public_key == rhs.public_key + && chain_id == rhs.chain_id + && timestamp == rhs.timestamp; + + } + + bool operator!=(const pbft_checkpoint &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_checkpoint &rhs) const { + return block_num < rhs.block_num; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, block_num); + fc::raw::pack(enc, block_id); + fc::raw::pack(enc, public_key); + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, timestamp); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_stable_checkpoint { + block_num_type block_num = 0; + block_id_type block_id; + vector checkpoints; + chain_id_type chain_id = chain_id_type(""); + + bool operator==(const pbft_stable_checkpoint &rhs) const { + return block_id == rhs.block_id + && block_num == rhs.block_num + && checkpoints == rhs.checkpoints + && chain_id == rhs.chain_id; + } + + bool operator!=(const pbft_stable_checkpoint &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_stable_checkpoint &rhs) const { + return block_num < rhs.block_num; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, block_num); + 
fc::raw::pack(enc, block_id); + fc::raw::pack(enc, checkpoints); + fc::raw::pack(enc, chain_id); + return enc.result(); + } + }; + + struct pbft_prepared_certificate { + block_id_type block_id; + block_num_type block_num = 0; + vector prepares; + + public_key_type public_key; + signature_type producer_signature; + + bool operator==(const pbft_prepared_certificate &rhs) const { + return block_num == rhs.block_num + && block_id == rhs.block_id + && prepares == rhs.prepares + && public_key == rhs.public_key; + } + + bool operator!=(const pbft_prepared_certificate &rhs) const { + return !(*this == rhs); + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, block_id); + fc::raw::pack(enc, block_num); + fc::raw::pack(enc, prepares); + fc::raw::pack(enc, public_key); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_view_change { + string uuid; + uint32_t current_view; + uint32_t target_view; + pbft_prepared_certificate prepared; + pbft_stable_checkpoint stable_checkpoint; + public_key_type public_key; + chain_id_type chain_id = chain_id_type(""); + signature_type producer_signature; + time_point timestamp = time_point::now(); + + bool operator==(const pbft_view_change &rhs) const { + return current_view == rhs.current_view + && target_view == rhs.target_view + && prepared == rhs.prepared + && stable_checkpoint == rhs.stable_checkpoint + && public_key == rhs.public_key + && chain_id == rhs.chain_id + && timestamp == rhs.timestamp; + } + + bool operator!=(const pbft_view_change &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_view_change &rhs) const { + return target_view < rhs.target_view; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, current_view); + fc::raw::pack(enc, target_view); + fc::raw::pack(enc, prepared); + fc::raw::pack(enc, stable_checkpoint); + fc::raw::pack(enc, public_key); + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, timestamp); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_view_changed_certificate { + uint32_t view; + vector view_changes; + + public_key_type public_key; + signature_type producer_signature; + + bool operator==(const pbft_view_changed_certificate &rhs) const { + return view == rhs.view + && view_changes == rhs.view_changes + && public_key == rhs.public_key; + } + + bool operator!=(const pbft_view_changed_certificate &rhs) const { + return !(*this == rhs); + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, view); + fc::raw::pack(enc, view_changes); + fc::raw::pack(enc, public_key); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_new_view { + string uuid; + uint32_t view; + pbft_prepared_certificate prepared; + pbft_stable_checkpoint stable_checkpoint; + pbft_view_changed_certificate view_changed; + public_key_type public_key; + chain_id_type chain_id = chain_id_type(""); + signature_type producer_signature; + time_point timestamp = 
time_point::now(); + + bool operator==(const pbft_new_view &rhs) const { + return view == rhs.view + && prepared == rhs.prepared + && stable_checkpoint == rhs.stable_checkpoint + && view_changed == rhs.view_changed + && public_key == rhs.public_key + && chain_id == rhs.chain_id + && timestamp == rhs.timestamp; + } + + bool operator!=(const pbft_new_view &rhs) const { + return !(*this == rhs); + } + + bool operator<(const pbft_new_view &rhs) const { + return view < rhs.view; + } + + digest_type digest() const { + digest_type::encoder enc; + fc::raw::pack(enc, view); + fc::raw::pack(enc, prepared); + fc::raw::pack(enc, stable_checkpoint); + fc::raw::pack(enc, view_changed); + fc::raw::pack(enc, public_key); + fc::raw::pack(enc, chain_id); + fc::raw::pack(enc, timestamp); + return enc.result(); + } + + bool is_signature_valid() const { + try { + auto pk = crypto::public_key(producer_signature, digest(), true); + return public_key == pk; + } catch (fc::exception & /*e*/) { + return false; + } + } + }; + + struct pbft_state { + block_id_type block_id; + block_num_type block_num = 0; + vector prepares; + bool should_prepared = false; + vector commits; + bool should_committed = false; + }; + + struct pbft_view_state { + uint32_t view; + vector view_changes; + bool should_view_changed = false; + }; + + struct pbft_checkpoint_state { + block_id_type block_id; + block_num_type block_num = 0; + vector checkpoints; + bool is_stable = false; + }; + + using pbft_state_ptr = std::shared_ptr; + using pbft_view_state_ptr = std::shared_ptr; + using pbft_checkpoint_state_ptr = std::shared_ptr; + + struct by_block_id; + struct by_num; + struct by_prepare_and_num; + struct by_commit_and_num; + typedef multi_index_container< + pbft_state_ptr, + indexed_by< + hashed_unique< + tag, + member, + std::hash + >, + ordered_non_unique< + tag, + composite_key< + pbft_state, + member + >, + composite_key_compare> + >, + ordered_non_unique< + tag, + composite_key< + pbft_state, + member, + member + >, + composite_key_compare, greater<>> + >, + ordered_non_unique< + tag, + composite_key< + pbft_state, + member, + member + >, + composite_key_compare, greater<>> + > + > + > + pbft_state_multi_index_type; + + struct by_view; + struct by_count_and_view; + typedef multi_index_container< + pbft_view_state_ptr, + indexed_by< + hashed_unique< + tag, + member, + std::hash + >, + ordered_non_unique< + tag, + composite_key< + pbft_view_state, + member, + member + >, + composite_key_compare, greater<>> + > + > + > + pbft_view_state_multi_index_type; + + struct by_block_id; + struct by_num; + typedef multi_index_container< + pbft_checkpoint_state_ptr, + indexed_by< + hashed_unique< + tag, + member, + std::hash + >, + ordered_non_unique< + tag, + composite_key< + pbft_checkpoint_state, + member + >, + composite_key_compare> + > + > + > + pbft_checkpoint_state_multi_index_type; + + class pbft_database { + public: + explicit pbft_database(controller &ctrl); + + ~pbft_database(); + + void close(); + + bool should_prepared(); + + bool should_committed(); + + uint32_t should_view_change(); + + bool should_new_view(uint32_t target_view); + + bool is_new_primary(uint32_t target_view); + + uint32_t get_proposed_new_view_num(); + + void add_pbft_prepare(pbft_prepare &p); + + void add_pbft_commit(pbft_commit &c); + + void add_pbft_view_change(pbft_view_change &vc); + + void add_pbft_checkpoint(pbft_checkpoint &cp); + + vector send_and_add_pbft_prepare( + const vector &pv = vector{}, + uint32_t current_view = 0); + + vector 
send_and_add_pbft_commit( + const vector &cv = vector{}, + uint32_t current_view = 0); + + vector send_and_add_pbft_view_change( + const vector &vcv = vector{}, + const vector &ppc = vector{}, + uint32_t current_view = 0, + uint32_t new_view = 1); + + pbft_new_view send_pbft_new_view( + const vector &vcc = vector{}, + uint32_t current_view = 1); + + vector generate_and_add_pbft_checkpoint(); + + bool is_valid_prepare(const pbft_prepare &p); + + bool is_valid_commit(const pbft_commit &c); + + void commit_local(); + + bool pending_pbft_lib(); + + void prune_pbft_index(); + + uint32_t get_committed_view(); + + chain_id_type chain_id(); + + vector generate_prepared_certificate(); + + vector generate_view_changed_certificate(uint32_t target_view); + + pbft_stable_checkpoint get_stable_checkpoint_by_id(const block_id_type &block_id); + + pbft_stable_checkpoint fetch_stable_checkpoint_from_blk_extn(const signed_block_ptr &b); + + block_info cal_pending_stable_checkpoint() const; + + bool should_send_pbft_msg(); + + bool should_recv_pbft_msg(const public_key_type &pub_key); + + void send_pbft_checkpoint(); + + bool is_valid_checkpoint(const pbft_checkpoint &cp); + + bool is_valid_stable_checkpoint(const pbft_stable_checkpoint &scp); + + signal pbft_outgoing_prepare; + signal pbft_incoming_prepare; + + signal pbft_outgoing_commit; + signal pbft_incoming_commit; + + signal pbft_outgoing_view_change; + signal pbft_incoming_view_change; + + signal pbft_outgoing_new_view; + signal pbft_incoming_new_view; + + signal pbft_outgoing_checkpoint; + signal pbft_incoming_checkpoint; + + bool is_valid_view_change(const pbft_view_change &vc); + + bool is_valid_new_view(const pbft_new_view &nv); + + bool should_stop_view_change(const pbft_view_change &vc); + + block_num_type get_current_pbft_watermark(); + controller &ctrl; + private: + pbft_state_multi_index_type pbft_state_index; + pbft_view_state_multi_index_type view_state_index; + pbft_checkpoint_state_multi_index_type checkpoint_index; + fc::path pbft_db_dir; + fc::path checkpoints_dir; + boost::uuids::random_generator uuid_generator; + vector prepare_watermarks; + + bool is_valid_prepared_certificate(const pbft_prepared_certificate &certificate); + + public_key_type get_new_view_primary_key(uint32_t target_view); + + vector> fetch_fork_from(vector block_infos); + + vector fetch_first_fork_from(vector &bi); + + producer_schedule_type lib_active_producers() const; + + template + void emit(const Signal &s, Arg &&a); + + void set(pbft_state_ptr s); + + void set(pbft_checkpoint_state_ptr s); + + void prune(const pbft_state_ptr &h); + + }; + + } +} /// namespace eosio::chain + +FC_REFLECT(eosio::chain::block_info, (block_id)(block_num)) +FC_REFLECT(eosio::chain::pbft_prepare, + (uuid)(view)(block_num)(block_id)(public_key)(chain_id)(producer_signature)(timestamp)) +FC_REFLECT(eosio::chain::pbft_commit, + (uuid)(view)(block_num)(block_id)(public_key)(chain_id)(producer_signature)(timestamp)) +FC_REFLECT(eosio::chain::pbft_view_change, + (uuid)(current_view)(target_view)(prepared)(stable_checkpoint)(public_key)(chain_id)(producer_signature)( + timestamp)) +FC_REFLECT(eosio::chain::pbft_new_view, + (uuid)(view)(prepared)(stable_checkpoint)(view_changed)(public_key)(chain_id)(producer_signature)(timestamp)) +FC_REFLECT(eosio::chain::pbft_state, (block_id)(block_num)(prepares)(should_prepared)(commits)(should_committed)) +FC_REFLECT(eosio::chain::pbft_prepared_certificate, (block_id)(block_num)(prepares)(public_key)(producer_signature)) 
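// Editorial sketch, not part of the patch: pbft_database sizes its quorums from
// the number n of producers in the active schedule. Prepares, commits and view
// changes advance the protocol once n * 2 / 3 + 1 distinct schedule members have
// signed (the "2f+1" referred to in the comments further down), while n / 3 + 1
// view changes for a higher view ("f+1") are already enough to join that view
// change. A minimal, hypothetical helper showing the same arithmetic:

#include <cstdint>

struct pbft_quorum_sketch {          // illustrative only, not part of the codebase
    uint32_t n;                      // producers in the active schedule

    uint32_t strong() const { return n * 2 / 3 + 1; } // prepare/commit/view-change quorum
    uint32_t weak()   const { return n / 3 + 1; }     // enough to join a view change
};

// e.g. for a 21-producer schedule: strong() == 15, weak() == 8.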
+FC_REFLECT(eosio::chain::pbft_view_changed_certificate, (view)(view_changes)(public_key)(producer_signature)) +FC_REFLECT(eosio::chain::pbft_checkpoint, + (uuid)(block_num)(block_id)(public_key)(chain_id)(producer_signature)(timestamp)) +FC_REFLECT(eosio::chain::pbft_stable_checkpoint, (block_num)(block_id)(checkpoints)(chain_id)) +FC_REFLECT(eosio::chain::pbft_checkpoint_state, (block_id)(block_num)(checkpoints)(is_stable)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 499fbe29960..70486b747f9 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -219,10 +220,53 @@ namespace eosio { namespace chain { T& data; }; + template + struct snapshot_pbft_migrate_row_reader : abstract_snapshot_row_reader { + explicit snapshot_pbft_migrate_row_reader( T& data ) + :data(data) {} + + + void provide(std::istream& in) const override { + row_validation_helper::apply(data, [&in,this](){ + if(typeid(T)== typeid(eosio::chain::block_header_state)){ + std::ostringstream sstream; + sstream << in.rdbuf(); + std::string str(sstream.str()); + //prepend uint32_t 0 + std::vector tmp(str.begin(), str.end()); + tmp.insert(tmp.begin(), {0,0,0,0}); + fc::datastream tmp_ds(tmp.data(), tmp.size()); + fc::raw::unpack(tmp_ds, data); + auto original_data_length = tmp_ds.tellp() - 4; + in.seekg(original_data_length); + }else{ + fc::raw::unpack(in, data); + } + }); + } + + void provide(const fc::variant& var) const override { + row_validation_helper::apply(data, [&var,this]() { + fc::from_variant(var, data); + }); + } + + std::string row_type_name() const override { + return boost::core::demangle( typeid( T ).name() ); + } + + T& data; + }; + template snapshot_row_reader make_row_reader( T& data ) { return snapshot_row_reader(data); } + + template + snapshot_pbft_migrate_row_reader make_pbft_migrate_row_reader( T& data ) { + return snapshot_pbft_migrate_row_reader(data); + } } class snapshot_reader { @@ -249,6 +293,12 @@ namespace eosio { namespace chain { return result; } + template + auto read_pbft_migrate_row( T& out, chainbase::database& db ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + auto reader = detail::make_pbft_migrate_row_reader(out); + return _reader.read_row(reader); + } + bool empty() { return _reader.empty(); } diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index bddeb1dd553..f8360c0bc7d 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -189,6 +189,7 @@ namespace eosio { namespace chain { account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, + upgrade_property_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; diff --git a/libraries/chain/pbft.cpp b/libraries/chain/pbft.cpp new file mode 100644 index 00000000000..08c52087fd4 --- /dev/null +++ b/libraries/chain/pbft.cpp @@ -0,0 +1,631 @@ +#include +#include +#include + +namespace eosio { + namespace chain { + + pbft_controller::pbft_controller(controller &ctrl) : pbft_db(ctrl), state_machine(pbft_db) { + config.view_change_timeout = 6; + config.bp_candidate = true; + datadir = ctrl.state_dir(); + + if 
(!fc::is_directory(datadir)) + fc::create_directories(datadir); + + auto pbft_db_dat = datadir / config::pbftdb_filename; + if (fc::exists(pbft_db_dat)) { + string content; + fc::read_file_contents(pbft_db_dat, content); + + fc::datastream ds(content.data(), content.size()); + uint32_t current_view; + fc::raw::unpack(ds, current_view); + state_machine.set_current_view(current_view); + + state_machine.set_target_view(state_machine.get_current_view() + 1); + ilog("current view: ${cv}", ("cv", current_view)); + } + + fc::remove(pbft_db_dat); + } + + pbft_controller::~pbft_controller() { + fc::path pbft_db_dat = datadir / config::pbftdb_filename; + std::ofstream out(pbft_db_dat.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::trunc); + + uint32_t current_view = state_machine.get_current_view(); + fc::raw::pack(out, current_view); + } + + void pbft_controller::maybe_pbft_prepare() { + if (!pbft_db.should_send_pbft_msg()) return; + state_machine.send_prepare(); + } + + void pbft_controller::maybe_pbft_commit() { + if (!pbft_db.should_send_pbft_msg()) return; + state_machine.send_commit(); + } + + void pbft_controller::maybe_pbft_view_change() { + if (!pbft_db.should_send_pbft_msg()) return; + if (state_machine.get_view_change_timer() <= config.view_change_timeout) { + if (!state_machine.get_view_changes_cache().empty()) { + pbft_db.send_and_add_pbft_view_change(state_machine.get_view_changes_cache()); + } + state_machine.set_view_change_timer(state_machine.get_view_change_timer() + 1); + } else { + state_machine.set_view_change_timer(0); + state_machine.send_view_change(); + } + } + + void pbft_controller::on_pbft_prepare(pbft_prepare &p) { + if (!config.bp_candidate) return; + state_machine.on_prepare(p); + } + + void pbft_controller::on_pbft_commit(pbft_commit &c) { + if (!config.bp_candidate) return; + state_machine.on_commit(c); + } + + void pbft_controller::on_pbft_view_change(pbft_view_change &vc) { + if (!config.bp_candidate) return; + state_machine.on_view_change(vc); + } + + void pbft_controller::on_pbft_new_view(pbft_new_view &nv) { + if (!config.bp_candidate) return; + state_machine.on_new_view(nv); + } + + void pbft_controller::send_pbft_checkpoint() { + if (!pbft_db.should_send_pbft_msg()) return; + pbft_db.send_pbft_checkpoint(); + } + + void pbft_controller::on_pbft_checkpoint(pbft_checkpoint &cp) { + pbft_db.add_pbft_checkpoint(cp); + } + + psm_state::psm_state() = default; + + psm_state::~psm_state() = default; + + + psm_machine::psm_machine(pbft_database &pbft_db) : pbft_db(pbft_db) { + this->set_current(new psm_committed_state); + + this->set_prepares_cache(vector{}); + this->set_commits_cache(vector{}); + this->set_view_changes_cache(vector{}); + + this->set_prepared_certificate(vector{}); + this->set_view_changed_certificate(vector{}); + + this->view_change_timer = 0; + this->target_view_retries = 0; + this->current_view = 0; + this->target_view = this->current_view + 1; + } + + psm_machine::~psm_machine() = default; + + void psm_machine::on_prepare(pbft_prepare &e) { + current->on_prepare(this, e, pbft_db); + } + + void psm_machine::send_prepare() { + current->send_prepare(this, pbft_db); + } + + void psm_machine::on_commit(pbft_commit &e) { + current->on_commit(this, e, pbft_db); + } + + void psm_machine::send_commit() { + current->send_commit(this, pbft_db); + } + + void psm_machine::on_view_change(pbft_view_change &e) { + current->on_view_change(this, e, pbft_db); + } + + void psm_machine::send_view_change() { + 
current->send_view_change(this, pbft_db); + } + + void psm_machine::on_new_view(pbft_new_view &e) { + current->on_new_view(this, e, pbft_db); + } + + void psm_machine::manually_set_current_view(const uint32_t ¤t_view) { + current->manually_set_view(this, current_view); + } + + /** + * psm_prepared_state + */ + + psm_prepared_state::psm_prepared_state() { + pending_commit_local = false; + } + + psm_prepared_state::~psm_prepared_state() = default; + + void psm_prepared_state::on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) { + //ignore + } + + void psm_prepared_state::send_prepare(psm_machine *m, pbft_database &pbft_db) { + //retry + if (m->get_prepares_cache().empty()) return; + + pbft_db.send_and_add_pbft_prepare(m->get_prepares_cache(), m->get_current_view()); + } + + void psm_prepared_state::on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) { + + if (e.view < m->get_current_view()) return; + + pbft_db.add_pbft_commit(e); + + //`pending_commit_local` is used to mark committed local status in psm machine; + //`pbft_db.pending_pbft_lib()` is used to mark commit local status in controller; + // following logic is implemented to resolve async problem during lib committing; + + if (pbft_db.should_committed() && !pending_commit_local) { + pbft_db.commit_local(); + pending_commit_local = true; + } + + if (pending_commit_local && !pbft_db.pending_pbft_lib()) { + pbft_db.send_pbft_checkpoint(); + m->transit_to_committed_state(this, false); + } + } + + + void psm_prepared_state::send_commit(psm_machine *m, pbft_database &pbft_db) { + auto commits = pbft_db.send_and_add_pbft_commit(m->get_commits_cache(), m->get_current_view()); + + if (!commits.empty()) { + m->set_commits_cache(commits); + } + + if (pbft_db.should_committed() && !pending_commit_local) { + pbft_db.commit_local(); + pending_commit_local = true; + } + + if (pending_commit_local && !pbft_db.pending_pbft_lib()) { + pbft_db.send_pbft_checkpoint(); + m->transit_to_committed_state(this, false); + } + } + + void psm_prepared_state::on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) { + + if (e.target_view <= m->get_current_view()) return; + + pbft_db.add_pbft_view_change(e); + + //if received >= f+1 view_change on some view, transit to view_change and send view change + auto target_view = pbft_db.should_view_change(); + if (target_view > 0 && target_view > m->get_current_view()) { + m->set_target_view(target_view); + m->transit_to_view_change_state(this); + } + } + + void psm_prepared_state::send_view_change(psm_machine *m, pbft_database &pbft_db) { + m->transit_to_view_change_state(this); + } + + void psm_prepared_state::on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) { + + if (e.view <= m->get_current_view()) return; + + try { + m->transit_to_new_view(e, this); + } catch(const fc::exception& ex) { + wlog("bad new view, ${s} ", ("s",ex.to_string())); + } + } + + void psm_prepared_state::manually_set_view(psm_machine *m, const uint32_t ¤t_view) { + m->set_current_view(current_view); + m->set_target_view(current_view+1); + m->transit_to_view_change_state(this); + } + + psm_committed_state::psm_committed_state() { + pending_commit_local = false; + } + + psm_committed_state::~psm_committed_state() = default; + + /** + * psm_committed_state + */ + void psm_committed_state::on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) { + //validate + if (e.view < m->get_current_view()) return; + + //do action add prepare + pbft_db.add_pbft_prepare(e); + + 
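                // (editorial note) add_pbft_prepare() records this prepare on every
                // ancestor of e.block_id down to the current LIB, and flips
                // should_prepared once more than two thirds of the active producers
                // have prepared a block above LIB; the check below then moves the
                // machine from committed to prepared.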
//if prepare >= 2f+1, transit to prepared + if (pbft_db.should_prepared()) m->transit_to_prepared_state(this); + } + + void psm_committed_state::send_prepare(psm_machine *m, pbft_database &pbft_db) { + + auto prepares = pbft_db.send_and_add_pbft_prepare(m->get_prepares_cache(), m->get_current_view()); + + if (!prepares.empty()) { + m->set_prepares_cache(prepares); + } + + //if prepare >= 2f+1, transit to prepared + if (pbft_db.should_prepared()) m->transit_to_prepared_state(this); + } + + void psm_committed_state::on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) { + + if (e.view < m->get_current_view()) return; + + pbft_db.add_pbft_commit(e); + } + + void psm_committed_state::send_commit(psm_machine *m, pbft_database &pbft_db) { + + if (m->get_commits_cache().empty()) return; + pbft_db.send_and_add_pbft_commit(m->get_commits_cache(), m->get_current_view()); + + } + + void psm_committed_state::on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) { + + if (e.target_view <= m->get_current_view()) return; + + pbft_db.add_pbft_view_change(e); + + //if received >= f+1 view_change on some view, transit to view_change and send view change + auto new_view = pbft_db.should_view_change(); + if (new_view > 0 && new_view > m->get_current_view()) { + m->set_target_view(new_view); + m->transit_to_view_change_state(this); + } + } + + void psm_committed_state::send_view_change(psm_machine *m, pbft_database &pbft_db) { + m->transit_to_view_change_state(this); + } + + void psm_committed_state::on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) { + + if (e.view <= m->get_current_view()) return; + + try { + m->transit_to_new_view(e, this); + } catch(const fc::exception& ex) { + wlog("bad new view, ${s} ", ("s",ex.to_string())); + } + } + + void psm_committed_state::manually_set_view(psm_machine *m, const uint32_t ¤t_view) { + m->set_current_view(current_view); + m->set_target_view(current_view+1); + m->transit_to_view_change_state(this); + } + + /** + * psm_view_change_state + */ + void psm_view_change_state::on_prepare(psm_machine *m, pbft_prepare &e, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::send_prepare(psm_machine *m, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::on_commit(psm_machine *m, pbft_commit &e, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::send_commit(psm_machine *m, pbft_database &pbft_db) { + //ignore; + } + + void psm_view_change_state::on_view_change(psm_machine *m, pbft_view_change &e, pbft_database &pbft_db) { + + //skip from view change state if my lib is higher than my view change state height. 
+ auto vc = m->get_view_changes_cache(); + if (!vc.empty() && pbft_db.should_stop_view_change(vc.front())) { + m->transit_to_committed_state(this, false); + return; + } + + if (e.target_view <= m->get_current_view()) return; + + pbft_db.add_pbft_view_change(e); + + //if view_change >= 2f+1, calculate next primary, send new view if is primary + auto nv = m->get_target_view(); + if (pbft_db.should_new_view(nv) && pbft_db.is_new_primary(nv)) { + + m->set_view_changed_certificate(pbft_db.generate_view_changed_certificate(nv)); + + auto new_view = pbft_db.get_proposed_new_view_num(); + if (new_view != nv) return; + + auto nv_msg = pbft_db.send_pbft_new_view( + m->get_view_changed_certificate(), + new_view); + + if (nv_msg == pbft_new_view{}) return; + + try { + m->transit_to_new_view(nv_msg, this); + } catch(const fc::exception& ex) { + wlog("bad new view, ${s} ", ("s",ex.to_string())); + } + return; + } + } + + void psm_view_change_state::send_view_change(psm_machine *m, pbft_database &pbft_db) { + + //skip from view change state if my lib is higher than my view change state height. + auto vc = m->get_view_changes_cache(); + if (!vc.empty() && pbft_db.should_stop_view_change(vc.front())) { + m->transit_to_committed_state(this, false); + return; + } + + m->send_pbft_view_change(); + + //if view_change >= 2f+1, calculate next primary, send new view if is primary + auto nv = m->get_target_view(); + if (pbft_db.should_new_view(nv) && pbft_db.is_new_primary(nv)) { + + m->set_view_changed_certificate(pbft_db.generate_view_changed_certificate(nv)); + + auto new_view = pbft_db.get_proposed_new_view_num(); + if (new_view != nv) return; + + auto nv_msg = pbft_db.send_pbft_new_view( + m->get_view_changed_certificate(), + new_view); + + if (nv_msg == pbft_new_view{}) return; + + try { + m->transit_to_new_view(nv_msg, this); + } catch(const fc::exception& ex) { + wlog("bad new view, ${s} ", ("s",ex.to_string())); + } + return; + } + } + + + void psm_view_change_state::on_new_view(psm_machine *m, pbft_new_view &e, pbft_database &pbft_db) { + + if (e.view <= m->get_current_view()) return; + + try { + m->transit_to_new_view(e, this); + } catch(const fc::exception& ex) { + wlog("bad new view, ${s} ", ("s",ex.to_string())); + } + } + + void psm_view_change_state::manually_set_view(psm_machine *m, const uint32_t ¤t_view) { + m->set_current_view(current_view); + m->set_target_view(current_view+1); + m->transit_to_view_change_state(this); + } + + template + void psm_machine::transit_to_committed_state(T const & s, bool to_new_view) { + + if (!to_new_view) { + auto nv = pbft_db.get_committed_view(); + if (nv > this->get_current_view()) this->set_current_view(nv); + this->set_target_view(this->get_current_view() + 1); + } + + auto prepares = this->pbft_db.send_and_add_pbft_prepare(vector{}, this->get_current_view()); + set_prepares_cache(prepares); + + this->set_view_changes_cache(vector{}); + this->set_view_change_timer(0); + + this->set_current(new psm_committed_state); + delete s; + } + + template + void psm_machine::transit_to_prepared_state(T const & s) { + + auto commits = this->pbft_db.send_and_add_pbft_commit(vector{}, this->get_current_view()); + set_commits_cache(commits); + + this->set_view_changes_cache(vector{}); + + this->set_current(new psm_prepared_state); + delete s; + } + + template + void psm_machine::transit_to_view_change_state(T const &s) { + + this->set_commits_cache(vector{}); + this->set_prepares_cache(vector{}); + + this->set_view_change_timer(0); + this->set_target_view_retries(0); + + 
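            // (editorial note) resetting the retry counter here feeds the exponential
            // back-off in send_pbft_view_change(): a view change for target_view is
            // retried on the order of 2^(target_view - current_view - 1) times before
            // the machine gives up on that view and bumps target_view by one, so the
            // wait between successive view jumps keeps doubling.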
this->set_current(new psm_view_change_state); + if (pbft_db.should_send_pbft_msg()) this->send_pbft_view_change(); + + delete s; + } + + template + void psm_machine::transit_to_new_view(const pbft_new_view &new_view, T const &s) { + + auto valid_nv = false; + try { + valid_nv = pbft_db.is_valid_new_view(new_view); + } catch (const fc::exception& ex) { + throw; + } + EOS_ASSERT(valid_nv, pbft_exception, "new view is not valid, waiting for next round.."); + + this->set_current_view(new_view.view); + this->set_target_view(new_view.view + 1); + + this->set_prepares_cache(vector{}); + + this->set_view_change_timer(0); + this->set_target_view_retries(0); + + this->pbft_db.prune_pbft_index(); + + if (!(new_view.stable_checkpoint == pbft_stable_checkpoint{})) { + for (auto cp :new_view.stable_checkpoint.checkpoints) { + try { + pbft_db.add_pbft_checkpoint(cp); + } catch (...) { + wlog( "checkpoint insertion failure: ${cp}", ("cp", cp)); + } + } + } + + if (!new_view.prepared.prepares.empty()) { + for (auto p: new_view.prepared.prepares) { + try { + pbft_db.add_pbft_prepare(p); + } catch (...) { + wlog("prepare insertion failure: ${p}", ("p", p)); + } + } + if (pbft_db.should_prepared()) { + transit_to_prepared_state(s); + return; + } + } + + transit_to_committed_state(s, true); + } + + void psm_machine::send_pbft_view_change() { + + if (this->get_target_view_retries() == 0) { + this->set_view_changes_cache(vector{}); + this->set_prepared_certificate(pbft_db.generate_prepared_certificate()); + } + + EOS_ASSERT((this->get_target_view() > this->get_current_view()), pbft_exception, + "target view should be always greater than current view"); + + if (this->get_target_view_retries() < pow(2, this->get_target_view() - this->get_current_view() - 1)) { + this->set_target_view_retries(this->get_target_view_retries() + 1); + } else { + this->set_target_view_retries(0); + this->set_target_view(this->get_target_view() + 1); + this->set_view_changes_cache(vector{}); + } + + auto view_changes = pbft_db.send_and_add_pbft_view_change( + this->get_view_changes_cache(), + this->get_prepared_certificate(), + this->get_current_view(), + this->get_target_view()); + + if (!view_changes.empty()) { + this->set_view_changes_cache(view_changes); + } + } + + const vector &psm_machine::get_prepares_cache() const { + return this->cache.prepares_cache; + } + + void psm_machine::set_prepares_cache(const vector &prepares_cache) { + this->cache.prepares_cache = prepares_cache; + } + + const vector &psm_machine::get_commits_cache() const { + return this->cache.commits_cache; + } + + void psm_machine::set_commits_cache(const vector &commits_cache) { + this->cache.commits_cache = commits_cache; + } + + const vector &psm_machine::get_view_changes_cache() const { + return this->cache.view_changes_cache; + } + + void psm_machine::set_view_changes_cache(const vector &view_changes_cache) { + this->cache.view_changes_cache = view_changes_cache; + } + + const uint32_t &psm_machine::get_current_view() const { + return this->current_view; + } + + void psm_machine::set_current_view(const uint32_t ¤t_view) { + this->current_view = current_view; + } + + const vector &psm_machine::get_prepared_certificate() const { + return this->cache.prepared_certificate; + } + + void psm_machine::set_prepared_certificate(const vector &prepared_certificate) { + this->cache.prepared_certificate = prepared_certificate; + } + + const vector &psm_machine::get_view_changed_certificate() const { + return this->cache.view_changed_certificate; + } + + void 
psm_machine::set_view_changed_certificate( + const vector &view_changed_certificate) { + this->cache.view_changed_certificate = view_changed_certificate; + } + + const uint32_t &psm_machine::get_target_view_retries() const { + return this->target_view_retries; + } + + void psm_machine::set_target_view_retries(const uint32_t &target_view_reties) { + this->target_view_retries = target_view_reties; + } + + const uint32_t &psm_machine::get_target_view() const { + return this->target_view; + } + + void psm_machine::set_target_view(const uint32_t &target_view) { + this->target_view = target_view; + } + + const uint32_t &psm_machine::get_view_change_timer() const { + return this->view_change_timer; + } + + void psm_machine::set_view_change_timer(const uint32_t &view_change_timer) { + this->view_change_timer = view_change_timer; + } + } +} \ No newline at end of file diff --git a/libraries/chain/pbft_database.cpp b/libraries/chain/pbft_database.cpp new file mode 100644 index 00000000000..6d6f7b4e575 --- /dev/null +++ b/libraries/chain/pbft_database.cpp @@ -0,0 +1,1339 @@ +#include +#include +#include +#include + +namespace eosio { + namespace chain { + + pbft_database::pbft_database(controller &ctrl) : + ctrl(ctrl) { + checkpoint_index = pbft_checkpoint_state_multi_index_type{}; + view_state_index = pbft_view_state_multi_index_type{}; + prepare_watermarks = vector{}; + pbft_db_dir = ctrl.state_dir(); + checkpoints_dir = ctrl.blocks_dir(); + + if (!fc::is_directory(pbft_db_dir)) fc::create_directories(pbft_db_dir); + + auto pbft_db_dat = pbft_db_dir / config::pbftdb_filename; + if (fc::exists(pbft_db_dat)) { + string content; + fc::read_file_contents(pbft_db_dat, content); + + fc::datastream ds(content.data(), content.size()); + + // keep these unused variables. 
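                // (editorial note) the pbftdb file is written in two steps on shutdown:
                // ~pbft_controller() truncates it and stores current_view, then
                // pbft_database::close() appends the serialized pbft_state entries
                // followed by the prepare watermarks -- which is exactly the order
                // unpacked below.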
+ uint32_t current_view; + fc::raw::unpack(ds, current_view); + + unsigned_int size; + fc::raw::unpack(ds, size); + for (uint32_t i = 0, n = size.value; i < n; ++i) { + pbft_state s; + fc::raw::unpack(ds, s); + set(std::make_shared(move(s))); + } + + unsigned_int watermarks_size; + fc::raw::unpack(ds, watermarks_size); + for (uint32_t i = 0, n = watermarks_size.value; i < n; ++i) { + block_num_type h; + fc::raw::unpack(ds, h); + prepare_watermarks.emplace_back(h); + } + sort(prepare_watermarks.begin(), prepare_watermarks.end()); + + } else { + pbft_state_index = pbft_state_multi_index_type{}; + } + + if (!fc::is_directory(checkpoints_dir)) fc::create_directories(checkpoints_dir); + + auto checkpoints_db = checkpoints_dir / config::checkpoints_filename; + if (fc::exists(checkpoints_db)) { + string content; + fc::read_file_contents(checkpoints_db, content); + + fc::datastream ds(content.data(), content.size()); + + unsigned_int checkpoint_size; + fc::raw::unpack(ds, checkpoint_size); + for (uint32_t j = 0, m = checkpoint_size.value; j < m; ++j) { + pbft_checkpoint_state cs; + fc::raw::unpack(ds, cs); + set(std::make_shared(move(cs))); + } + ilog("checkpoint index size: ${cs}", ("cs", checkpoint_index.size())); + } else { + checkpoint_index = pbft_checkpoint_state_multi_index_type{}; + } + } + + void pbft_database::close() { + + + fc::path checkpoints_db = checkpoints_dir / config::checkpoints_filename; + std::ofstream c_out(checkpoints_db.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::trunc); + + uint32_t num_records_in_checkpoint_db = checkpoint_index.size(); + fc::raw::pack(c_out, unsigned_int{num_records_in_checkpoint_db}); + + for (const auto &s: checkpoint_index) { + fc::raw::pack(c_out, *s); + } + + fc::path pbft_db_dat = pbft_db_dir / config::pbftdb_filename; + std::ofstream out(pbft_db_dat.generic_string().c_str(), + std::ios::out | std::ios::binary | std::ofstream::app); + uint32_t num_records_in_db = pbft_state_index.size(); + fc::raw::pack(out, unsigned_int{num_records_in_db}); + + for (const auto &s : pbft_state_index) { + fc::raw::pack(out, *s); + } + + + uint32_t watermarks_size = prepare_watermarks.size(); + fc::raw::pack(out, unsigned_int{watermarks_size}); + + for (const auto &n: prepare_watermarks) { + fc::raw::pack(out, n); + } + + pbft_state_index.clear(); + checkpoint_index.clear(); + prepare_watermarks.clear(); + } + + pbft_database::~pbft_database() { + close(); + } + + + void pbft_database::add_pbft_prepare(pbft_prepare &p) { + + if (!is_valid_prepare(p)) return; + + auto &by_block_id_index = pbft_state_index.get(); + + auto current = ctrl.fetch_block_state_by_id(p.block_id); + + while ((current) && (current->block_num > ctrl.last_irreversible_block_num())) { + auto curr_itr = by_block_id_index.find(current->id); + + if (curr_itr == by_block_id_index.end()) { + try { + auto curr_ps = pbft_state{current->id, current->block_num, {p}}; + auto curr_psp = make_shared(curr_ps); + pbft_state_index.insert(curr_psp); + } catch (...) 
{ + wlog( "prepare insert failure: ${p}", ("p", p)); + } + } else { + auto prepares = (*curr_itr)->prepares; + auto p_itr = find_if(prepares.begin(), prepares.end(), + [&](const pbft_prepare &prep) { + return prep.public_key == p.public_key && prep.view == p.view; + }); + if (p_itr == prepares.end()) { + by_block_id_index.modify(curr_itr, [&](const pbft_state_ptr &psp) { + psp->prepares.emplace_back(p); + std::sort(psp->prepares.begin(), psp->prepares.end(), less<>()); + }); + } + } + curr_itr = by_block_id_index.find(current->id); + if (curr_itr == by_block_id_index.end()) return; + + auto prepares = (*curr_itr)->prepares; + auto as = current->active_schedule.producers; + flat_map prepare_count; + for (const auto &pre: prepares) { + if (prepare_count.find(pre.view) == prepare_count.end()) prepare_count[pre.view] = 0; + } + + if (!(*curr_itr)->should_prepared) { + for (auto const &sp: as) { + for (auto const &pp: prepares) { + if (sp.block_signing_key == pp.public_key) prepare_count[pp.view] += 1; + } + } + for (auto const &e: prepare_count) { + if (e.second >= as.size() * 2 / 3 + 1) { + by_block_id_index.modify(curr_itr, + [&](const pbft_state_ptr &psp) { psp->should_prepared = true; }); + } + } + } + current = ctrl.fetch_block_state_by_id(current->prev()); + } + } + + + vector pbft_database::send_and_add_pbft_prepare(const vector &pv, uint32_t current_view) { + + auto head_block_num = ctrl.head_block_num(); + if (head_block_num <= 1) return vector{}; + auto my_prepare = ctrl.get_pbft_my_prepare(); + + auto reserve_prepare = [&](const block_id_type &in) { + if (in == block_id_type{} || !ctrl.fetch_block_state_by_id(in)) return false; + auto lib = ctrl.last_irreversible_block_id(); + if (lib == block_id_type{}) return true; + auto forks = ctrl.fork_db().fetch_branch_from(in, lib); + return !forks.first.empty() && forks.second.empty(); + }; + + vector new_pv; + if (!pv.empty()) { + for (auto p : pv) { + //change uuid, sign again, update cache, then emit + auto uuid = boost::uuids::to_string(uuid_generator()); + p.uuid = uuid; + p.timestamp = time_point::now(); + p.producer_signature = ctrl.my_signature_providers()[p.public_key](p.digest()); + emit(pbft_outgoing_prepare, p); + } + return vector{}; + } else if (reserve_prepare(my_prepare)) { + for (auto const &sp : ctrl.my_signature_providers()) { + auto uuid = boost::uuids::to_string(uuid_generator()); + auto my_prepare_num = ctrl.fetch_block_state_by_id(my_prepare)->block_num; + auto p = pbft_prepare{uuid, current_view, my_prepare_num, my_prepare, sp.first, chain_id()}; + p.producer_signature = sp.second(p.digest()); + emit(pbft_outgoing_prepare, p); + new_pv.emplace_back(p); + } + return new_pv; + } else { + + auto current_watermark = get_current_pbft_watermark(); + auto lib = ctrl.last_irreversible_block_num(); + + uint32_t high_water_mark_block_num = head_block_num; + + if ( current_watermark > 0 ) { + high_water_mark_block_num = std::min(head_block_num, current_watermark); + } + + if (high_water_mark_block_num <= lib) return vector{}; + + block_id_type high_water_mark_block_id = ctrl.get_block_id_for_num(high_water_mark_block_num); + for (auto const &sp : ctrl.my_signature_providers()) { + auto uuid = boost::uuids::to_string(uuid_generator()); + auto p = pbft_prepare{uuid, current_view, high_water_mark_block_num, high_water_mark_block_id, + sp.first, chain_id()}; + p.producer_signature = sp.second(p.digest()); + add_pbft_prepare(p); + emit(pbft_outgoing_prepare, p); + new_pv.emplace_back(p); + 
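                    // (editorial note) remembering which block we just prepared lets the
                    // next round re-send a prepare for the same block (via the
                    // reserve_prepare() check above) instead of chasing a newer head
                    // while the quorum is still forming.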
ctrl.set_pbft_my_prepare(high_water_mark_block_id); + } + return new_pv; + } + } + + bool pbft_database::should_prepared() { + + const auto &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) return false; + + pbft_state_ptr psp = *itr; + auto current_watermark = get_current_pbft_watermark(); + + if (psp->block_num > current_watermark && current_watermark > 0) return false; + + if (psp->should_prepared && (psp->block_num > ctrl.last_irreversible_block_num())) { + ctrl.set_pbft_prepared((*itr)->block_id); + return true; + } + return false; + } + + bool pbft_database::is_valid_prepare(const pbft_prepare &p) { + if (p.chain_id != chain_id()) return false; + // a prepare msg under lscb (which is no longer in fork_db), can be treated as null, thus true. + if (p.block_num <= ctrl.last_stable_checkpoint_block_num()) return true; + if (!p.is_signature_valid()) return false; + return should_recv_pbft_msg(p.public_key); + } + + void pbft_database::add_pbft_commit(pbft_commit &c) { + + if (!is_valid_commit(c)) return; + auto &by_block_id_index = pbft_state_index.get(); + + auto current = ctrl.fetch_block_state_by_id(c.block_id); + + while ((current) && (current->block_num > ctrl.last_irreversible_block_num())) { + + auto curr_itr = by_block_id_index.find(current->id); + + if (curr_itr == by_block_id_index.end()) { + try { + auto curr_ps = pbft_state{current->id, current->block_num, .commits={c}}; + auto curr_psp = make_shared(curr_ps); + pbft_state_index.insert(curr_psp); + } catch (...) { + wlog("commit insertion failure: ${c}", ("c", c)); + } + } else { + auto commits = (*curr_itr)->commits; + auto p_itr = find_if(commits.begin(), commits.end(), + [&](const pbft_commit &comm) { + return comm.public_key == c.public_key && comm.view == c.view; + }); + if (p_itr == commits.end()) { + by_block_id_index.modify(curr_itr, [&](const pbft_state_ptr &psp) { + psp->commits.emplace_back(c); + std::sort(psp->commits.begin(), psp->commits.end(), less<>()); + }); + } + } + + curr_itr = by_block_id_index.find(current->id); + if (curr_itr == by_block_id_index.end()) return; + + auto commits = (*curr_itr)->commits; + + auto as = current->active_schedule; + flat_map commit_count; + for (const auto &com: commits) { + if (commit_count.find(com.view) == commit_count.end()) commit_count[com.view] = 0; + } + + if (!(*curr_itr)->should_committed) { + + for (auto const &sp: as.producers) { + for (auto const &pc: commits) { + if (sp.block_signing_key == pc.public_key) commit_count[pc.view] += 1; + } + } + + for (auto const &e: commit_count) { + if (e.second >= current->active_schedule.producers.size() * 2 / 3 + 1) { + by_block_id_index.modify(curr_itr, + [&](const pbft_state_ptr &psp) { psp->should_committed = true; }); + } + } + } + current = ctrl.fetch_block_state_by_id(current->prev()); + } + } + + vector pbft_database::send_and_add_pbft_commit(const vector &cv, uint32_t current_view) { + if (!cv.empty()) { + for (auto c : cv) { + //change uuid, sign again, update cache, then emit + auto uuid = boost::uuids::to_string(uuid_generator()); + c.uuid = uuid; + c.timestamp = time_point::now(); + c.producer_signature = ctrl.my_signature_providers()[c.public_key](c.digest()); + emit(pbft_outgoing_commit, c); + } + return vector{}; + } else { + const auto &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) { + return vector{}; + } + vector new_cv; + 
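            // (editorial note) a fresh commit is only signed for the block at the front
            // of the by_prepare_and_num index, i.e. the highest block already marked
            // should_prepared, and only while it is still above the last irreversible
            // block; one commit is produced per locally configured signature provider.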
pbft_state_ptr psp = *itr; + auto bs = ctrl.fork_db().get_block(psp->block_id); + + if (psp->should_prepared && (psp->block_num > ctrl.last_irreversible_block_num())) { + + for (auto const &sp : ctrl.my_signature_providers()) { + auto uuid = boost::uuids::to_string(uuid_generator()); + auto c = pbft_commit{uuid, current_view, psp->block_num, psp->block_id, sp.first, chain_id()}; + c.producer_signature = sp.second(c.digest()); + add_pbft_commit(c); + emit(pbft_outgoing_commit, c); + new_cv.emplace_back(c); + } + } + return new_cv; + } + } + + bool pbft_database::should_committed() { + const auto &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end()) return false; + pbft_state_ptr psp = *itr; + + auto current_watermark = get_current_pbft_watermark(); + + if (psp->block_num > current_watermark && current_watermark > 0) return false; + + return (psp->should_committed && (psp->block_num > ctrl.last_irreversible_block_num())); + } + + uint32_t pbft_database::get_committed_view() { + uint32_t new_view = 0; + if (!should_committed()) return new_view; + + const auto &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + pbft_state_ptr psp = *itr; + + auto blk_state = ctrl.fetch_block_state_by_id((*itr)->block_id); + if (!blk_state) return new_view; + auto as = blk_state->active_schedule.producers; + + auto commits = (*itr)->commits; + + flat_map commit_count; + for (const auto &com: commits) { + if (commit_count.find(com.view) == commit_count.end()) { + commit_count[com.view] = 1; + } else { + commit_count[com.view] += 1; + } + } + + for (auto const &e: commit_count) { + if (e.second >= as.size() * 2 / 3 + 1 && e.first > new_view) { + new_view = e.first; + } + } + return new_view; + } + + bool pbft_database::is_valid_commit(const pbft_commit &c) { + if (c.chain_id != chain_id()) return false; + if (c.block_num <= ctrl.last_stable_checkpoint_block_num()) return true; + if (!c.is_signature_valid()) return false; + return should_recv_pbft_msg(c.public_key); + } + + void pbft_database::commit_local() { + const auto &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end()) return; + + pbft_state_ptr psp = *itr; + + ctrl.pbft_commit_local(psp->block_id); + } + + bool pbft_database::pending_pbft_lib() { + return ctrl.pending_pbft_lib(); + } + + void pbft_database::add_pbft_view_change(pbft_view_change &vc) { + if (!is_valid_view_change(vc)) return; + auto active_bps = lib_active_producers().producers; + + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(vc.target_view); + if (itr == by_view_index.end()) { + auto vs = pbft_view_state{vc.target_view, .view_changes={vc}}; + auto vsp = make_shared(vs); + view_state_index.insert(vsp); + } else { + auto pvs = (*itr); + auto view_changes = pvs->view_changes; + auto p_itr = find_if(view_changes.begin(), view_changes.end(), + [&](const pbft_view_change &existed) { + return existed.public_key == vc.public_key; + }); + if (p_itr == view_changes.end()) { + by_view_index.modify(itr, [&](const pbft_view_state_ptr &pvsp) { + pvsp->view_changes.emplace_back(vc); + }); + } + } + + itr = by_view_index.find(vc.target_view); + if (itr == by_view_index.end()) return; + + auto vc_count = 0; + if (!(*itr)->should_view_changed) { + for (auto const &sp: active_bps) { + for (auto const &v: (*itr)->view_changes) { + if (sp.block_signing_key == 
v.public_key) vc_count += 1; + } + } + if (vc_count >= active_bps.size() * 2 / 3 + 1) { + by_view_index.modify(itr, [&](const pbft_view_state_ptr &pvsp) { pvsp->should_view_changed = true; }); + } + } + } + + uint32_t pbft_database::should_view_change() { + uint32_t nv = 0; + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.begin(); + if (itr == by_view_index.end()) return nv; + + while (itr != by_view_index.end()) { + auto active_bps = lib_active_producers().producers; + auto vc_count = 0; + auto pvs = (*itr); + + for (auto const &bp: active_bps) { + for (auto const &pp: pvs->view_changes) { + if (bp.block_signing_key == pp.public_key) vc_count += 1; + } + } + //if contains self or view_change >= f+1, transit to view_change and send view change + if (vc_count >= active_bps.size() / 3 + 1) { + nv = pvs->view; + break; + } + ++itr; + } + return nv; + } + + vector pbft_database::send_and_add_pbft_view_change( + const vector &vcv, + const vector &ppc, + uint32_t current_view, + uint32_t new_view) { + if (!vcv.empty()) { + for (auto vc : vcv) { + //change uuid, sign again, update cache, then emit + auto uuid = boost::uuids::to_string(uuid_generator()); + vc.uuid = uuid; + vc.timestamp = time_point::now(); + vc.producer_signature = ctrl.my_signature_providers()[vc.public_key](vc.digest()); + emit(pbft_outgoing_view_change, vc); + } + return vector{}; + } else { + vector new_vcv; + + for (auto const &my_sp : ctrl.my_signature_providers()) { + auto ppc_ptr = find_if(ppc.begin(), ppc.end(), + [&](const pbft_prepared_certificate &v) { + return v.public_key == my_sp.first; + }); + + auto my_ppc = pbft_prepared_certificate{}; + if (ppc_ptr != ppc.end()) my_ppc = *ppc_ptr; + auto my_lsc = get_stable_checkpoint_by_id(ctrl.last_stable_checkpoint_block_id()); + auto uuid = boost::uuids::to_string(uuid_generator()); + auto vc = pbft_view_change{uuid, current_view, new_view, my_ppc, my_lsc, my_sp.first, chain_id()}; + vc.producer_signature = my_sp.second(vc.digest()); + emit(pbft_outgoing_view_change, vc); + add_pbft_view_change(vc); + new_vcv.emplace_back(vc); + } + return new_vcv; + } + } + + bool pbft_database::should_new_view(const uint32_t target_view) { + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(target_view); + if (itr == by_view_index.end()) return false; + return (*itr)->should_view_changed; + } + + uint32_t pbft_database::get_proposed_new_view_num() { + auto &by_count_and_view_index = view_state_index.get(); + auto itr = by_count_and_view_index.begin(); + if (itr == by_count_and_view_index.end() || !(*itr)->should_view_changed) return 0; + return (*itr)->view; + } + + bool pbft_database::is_new_primary(const uint32_t target_view) { + + auto primary_key = get_new_view_primary_key(target_view); + if (primary_key == public_key_type{}) return false; + auto sps = ctrl.my_signature_providers(); + auto sp_itr = sps.find(primary_key); + return sp_itr != sps.end(); + } + + void pbft_database::prune_pbft_index() { + pbft_state_index.clear(); + view_state_index.clear(); + ctrl.reset_pbft_my_prepare(); + } + + pbft_new_view pbft_database::send_pbft_new_view( + const vector &vcc, + uint32_t current_view) { + + auto primary_key = get_new_view_primary_key(current_view); + if (!is_new_primary(current_view)) return pbft_new_view{}; + + //`sp_itr` is not possible to be the end iterator, since it's already been checked in `is_new_primary`. 
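These view-change tallies, like the prepare, commit and checkpoint tallies elsewhere in pbft_database, hinge on two integer thresholds over the active producer schedule: n * 2 / 3 + 1 (a strong, 2f+1 quorum) and n / 3 + 1 (a weak, f+1 quorum). A minimal standalone sketch of that arithmetic, illustrative only and not part of the patch:

    // Illustrative only: the integer quorum thresholds the pbft checks above rely on.
    #include <cstdint>
    #include <cstdio>

    int main() {
        for (uint32_t n : {4u, 7u, 21u}) {           // n = size of the active producer schedule
            uint32_t strong_quorum = n * 2 / 3 + 1;  // >= 2f+1 signers: enough to prepare/commit or complete a view change
            uint32_t weak_quorum   = n / 3 + 1;      // >= f+1 signers: at least one honest producer, enough to join a view change
            std::printf("n=%u strong=%u weak=%u\n", n, strong_quorum, weak_quorum);
        }
        return 0;
    }

For n = 21 producers this yields a strong quorum of 15 (2f+1 with f = 7) and a weak quorum of 8 (f+1), which is exactly what the `* 2 / 3 + 1` and `/ 3 + 1` expressions above compute.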
+ auto my_sps = ctrl.my_signature_providers(); + auto sp_itr = my_sps.find(primary_key); + + auto vcc_ptr = find_if(vcc.begin(), vcc.end(), + [&](const pbft_view_changed_certificate &v) { return v.public_key == primary_key; }); + + if (vcc_ptr == vcc.end()) return pbft_new_view{}; + + auto highest_ppc = pbft_prepared_certificate{}; + auto highest_sc = pbft_stable_checkpoint{}; + + for (const auto &vc: vcc_ptr->view_changes) { + if (vc.prepared.block_num > highest_ppc.block_num && is_valid_prepared_certificate(vc.prepared)) { + highest_ppc = vc.prepared; + } + + if (vc.stable_checkpoint.block_num > highest_sc.block_num && + is_valid_stable_checkpoint(vc.stable_checkpoint)) { + highest_sc = vc.stable_checkpoint; + } + } + + auto uuid = boost::uuids::to_string(uuid_generator()); + + auto nv = pbft_new_view{uuid, current_view, highest_ppc, highest_sc, *vcc_ptr, sp_itr->first, chain_id()}; + + nv.producer_signature = sp_itr->second(nv.digest()); + emit(pbft_outgoing_new_view, nv); + return nv; + } + + vector pbft_database::generate_prepared_certificate() { + auto ppc = vector{}; + const auto &by_prepare_and_num_index = pbft_state_index.get(); + auto itr = by_prepare_and_num_index.begin(); + if (itr == by_prepare_and_num_index.end()) return vector{}; + pbft_state_ptr psp = *itr; + + auto prepared_block_state = ctrl.fetch_block_state_by_id(psp->block_id); + if (!prepared_block_state) return vector{}; + + auto as = prepared_block_state->active_schedule.producers; + if (psp->should_prepared && (psp->block_num > (ctrl.last_irreversible_block_num()))) { + for (auto const &my_sp : ctrl.my_signature_providers()) { + auto prepares = psp->prepares; + auto valid_prepares = vector{}; + + flat_map prepare_count; + flat_map> prepare_msg; + + for (const auto &pre: prepares) { + if (prepare_count.find(pre.view) == prepare_count.end()) prepare_count[pre.view] = 0; + prepare_msg[pre.view].emplace_back(pre); + } + + for (auto const &sp: as) { + for (auto const &pp: prepares) { + if (sp.block_signing_key == pp.public_key) prepare_count[pp.view] += 1; + } + } + + for (auto const &e: prepare_count) { + if (e.second >= as.size() * 2 / 3 + 1) { + valid_prepares = prepare_msg[e.first]; + } + } + + if (valid_prepares.empty()) return vector{}; + + auto pc = pbft_prepared_certificate{psp->block_id, psp->block_num, valid_prepares, my_sp.first}; + pc.producer_signature = my_sp.second(pc.digest()); + ppc.emplace_back(pc); + } + return ppc; + } else return vector{}; + } + + vector pbft_database::generate_view_changed_certificate(uint32_t target_view) { + auto vcc = vector{}; + + auto &by_view_index = view_state_index.get(); + auto itr = by_view_index.find(target_view); + if (itr == by_view_index.end()) return vcc; + + auto pvs = *itr; + + if (pvs->should_view_changed) { + for (auto const &my_sp : ctrl.my_signature_providers()) { + auto pc = pbft_view_changed_certificate{pvs->view, pvs->view_changes, my_sp.first}; + pc.producer_signature = my_sp.second(pc.digest()); + vcc.emplace_back(pc); + } + return vcc; + } else return vector{}; + } + + bool pbft_database::is_valid_prepared_certificate(const eosio::chain::pbft_prepared_certificate &certificate) { + // an empty certificate is valid since it acts as a null digest in pbft. + if (certificate == pbft_prepared_certificate{}) return true; + // a certificate under lscb (no longer in fork_db) is also treated as null. 
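The two comments above describe a null-certificate convention that the early returns just below implement: a value-initialized certificate, or one at or below the last stable checkpoint, is accepted without further inspection because the corresponding block state is no longer in fork_db. A reduced sketch of the pattern, using a hypothetical `cert` stand-in rather than the real pbft_prepared_certificate:

    // Reduced sketch of the null-certificate convention; `cert` is a hypothetical stand-in.
    #include <cassert>
    #include <cstdint>

    struct cert {
        uint32_t block_num = 0;   // value-initialized means "no certificate"
        bool operator==(const cert &rhs) const { return block_num == rhs.block_num; }
    };

    bool is_valid(const cert &c, uint32_t last_stable_checkpoint_num) {
        if (c == cert{}) return true;                                // empty certificate acts as a null digest
        if (c.block_num <= last_stable_checkpoint_num) return true;  // already behind the stable checkpoint, treated as null
        return false;                                                // the real code goes on to signature and quorum checks
    }

    int main() {
        assert(is_valid(cert{}, 100));
        assert(is_valid(cert{50}, 100));
        assert(!is_valid(cert{200}, 100));
        return 0;
    }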
+ if (certificate.block_num <= ctrl.last_stable_checkpoint_block_num()) return true; + + auto valid = true; + valid = valid && certificate.is_signature_valid(); + for (auto const &p : certificate.prepares) { + valid = valid && is_valid_prepare(p); + if (!valid) return false; + } + + auto cert_id = certificate.block_id; + auto cert_bs = ctrl.fetch_block_state_by_id(cert_id); + auto producer_schedule = lib_active_producers(); + if (certificate.block_num > 0 && cert_bs) { + producer_schedule = cert_bs->active_schedule; + } + auto bp_threshold = producer_schedule.producers.size() * 2 / 3 + 1; + + auto prepares = certificate.prepares; + flat_map prepare_count; + + for (const auto &pre: prepares) { + if (prepare_count.find(pre.view) == prepare_count.end()) prepare_count[pre.view] = 0; + } + + for (auto const &sp: producer_schedule.producers) { + for (auto const &pp: prepares) { + if (sp.block_signing_key == pp.public_key) prepare_count[pp.view] += 1; + } + } + + auto should_prepared = false; + + for (auto const &e: prepare_count) { + if (e.second >= bp_threshold) { + should_prepared = true; + } + } + + if (!should_prepared) return false; + + { + //validate prepare + auto lscb = ctrl.last_stable_checkpoint_block_num(); + auto non_fork_bp_count = 0; + vector prepare_infos(certificate.prepares.size()); + for (auto const &p : certificate.prepares) { + //only search in fork db + if (p.block_num <= lscb) { + ++non_fork_bp_count; + } else { + prepare_infos.emplace_back(block_info{p.block_id, p.block_num}); + } + } + + auto prepare_forks = fetch_fork_from(prepare_infos); + vector longest_fork; + for (auto const &f : prepare_forks) { + if (f.size() > longest_fork.size()) { + longest_fork = f; + } + } + if (longest_fork.size() + non_fork_bp_count < bp_threshold) return false; + + if (longest_fork.empty()) return true; + + auto calculated_block_info = longest_fork.back(); + + auto current_bs = ctrl.fetch_block_state_by_id(calculated_block_info.block_id); + while (current_bs) { + if (certificate.block_id == current_bs->id && certificate.block_num == current_bs->block_num) { + return true; + } + current_bs = ctrl.fetch_block_state_by_id(current_bs->prev()); + } + return false; + } + } + + bool pbft_database::is_valid_view_change(const pbft_view_change &vc) { + if (vc.chain_id != chain_id()) return false; + + return vc.is_signature_valid() + && should_recv_pbft_msg(vc.public_key); + // No need to check prepared cert and stable checkpoint, until generate or validate a new view msg + } + + + bool pbft_database::is_valid_new_view(const pbft_new_view &nv) { + //all signatures should be valid + + EOS_ASSERT(nv.chain_id == chain_id(), pbft_exception, "wrong chain."); + + EOS_ASSERT(is_valid_prepared_certificate(nv.prepared), pbft_exception, + "bad prepared certificate: ${pc}", ("pc", nv.prepared)); + + EOS_ASSERT(is_valid_stable_checkpoint(nv.stable_checkpoint), pbft_exception, + "bad stable checkpoint: ${scp}", ("scp", nv.stable_checkpoint)); + + EOS_ASSERT(nv.view_changed.is_signature_valid(), pbft_exception, "bad view changed signature"); + + EOS_ASSERT(nv.is_signature_valid(), pbft_exception, "bad new view signature"); + + EOS_ASSERT(nv.view_changed.view == nv.view, pbft_exception, "target view not match"); + + vector lib_producers; + for (const auto& pk: lib_active_producers().producers) { + lib_producers.emplace_back(pk.block_signing_key); + } + auto schedule_threshold = lib_producers.size() * 2 / 3 + 1; + + vector view_change_producers; + + for (auto vc: nv.view_changed.view_changes) { + if 
(is_valid_view_change(vc)) { + add_pbft_view_change(vc); + view_change_producers.emplace_back(vc.public_key); + } + } + + vector intersection; + std::set_intersection(lib_producers.begin(),lib_producers.end(), + view_change_producers.begin(),view_change_producers.end(), + back_inserter(intersection)); + + EOS_ASSERT(intersection.size() >= schedule_threshold, pbft_exception, "view changes count not enough"); + + EOS_ASSERT(should_new_view(nv.view), pbft_exception, "should not enter new view: ${nv}", ("nv", nv.view)); + + auto highest_ppc = pbft_prepared_certificate{}; + auto highest_scp = pbft_stable_checkpoint{}; + + for (const auto &vc: nv.view_changed.view_changes) { + if (vc.prepared.block_num > highest_ppc.block_num + && is_valid_prepared_certificate(vc.prepared)) { + highest_ppc = vc.prepared; + } + + if (vc.stable_checkpoint.block_num > highest_scp.block_num + && is_valid_stable_checkpoint(vc.stable_checkpoint)) { + highest_scp = vc.stable_checkpoint; + } + } + + EOS_ASSERT(highest_ppc == nv.prepared, pbft_exception, + "prepared certificate not match, should be ${hpcc} but ${pc} given", + ("hpcc",highest_ppc)("pc", nv.prepared)); + + EOS_ASSERT(highest_scp == nv.stable_checkpoint, pbft_exception, + "stable checkpoint not match, should be ${hscp} but ${scp} given", + ("hpcc",highest_scp)("pc", nv.stable_checkpoint)); + + return true; + } + + bool pbft_database::should_stop_view_change(const pbft_view_change &vc) { + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + return lscb_num > vc.prepared.block_num + && lscb_num > vc.stable_checkpoint.block_num; + } + + vector> pbft_database::fetch_fork_from(const vector block_infos) { + auto bi = block_infos; + + vector> result; + if (bi.empty()) { + return result; + } + if (bi.size() == 1) { + result.emplace_back(initializer_list{bi.front()}); + return result; + } + + sort(bi.begin(), bi.end(), + [](const block_info &a, const block_info &b) -> bool { return a.block_num > b.block_num; }); + + while (!bi.empty()) { + auto fork = fetch_first_fork_from(bi); + if (!fork.empty()) { + result.emplace_back(fork); + } + } + return result; + } + + vector pbft_database::fetch_first_fork_from(vector &bi) { + vector result; + if (bi.empty()) { + return result; + } + if (bi.size() == 1) { + result.emplace_back(bi.front()); + bi.clear(); + return result; + } + //bi should be sorted desc + auto high = bi.front().block_num; + auto low = bi.back().block_num; + + auto id = bi.front().block_id; + auto num = bi.front().block_num; + while (num <= high && num >= low && !bi.empty()) { + auto bs = ctrl.fetch_block_state_by_id(id); + + for (auto it = bi.begin(); it != bi.end();) { + if (it->block_id == id) { + if (bs) { + //add to result only if b exist + result.emplace_back((*it)); + } + it = bi.erase(it); + } else { + it++; + } + } + if (bs) { + id = bs->prev(); + num--; + } else { + break; + } + } + + return result; + } + + pbft_stable_checkpoint pbft_database::fetch_stable_checkpoint_from_blk_extn(const signed_block_ptr &b) { + try { + if (b) { + auto &ext = b->block_extensions; + + for (auto it = ext.begin(); it != ext.end();) { + if (it->first == static_cast(block_extension_type::pbft_stable_checkpoint)) { + auto scp_v = it->second; + fc::datastream ds_decode(scp_v.data(), scp_v.size()); + + pbft_stable_checkpoint scp_decode; + fc::raw::unpack(ds_decode, scp_decode); + + if (is_valid_stable_checkpoint(scp_decode)) { + return scp_decode; + } else { + it = ext.erase(it); + } + } else { + it++; + } + } + } + } catch(...) 
{ + wlog("no stable checkpoints found in the block extension"); + } + return pbft_stable_checkpoint{}; + } + + pbft_stable_checkpoint pbft_database::get_stable_checkpoint_by_id(const block_id_type &block_id) { + const auto &by_block = checkpoint_index.get(); + auto itr = by_block.find(block_id); + if (itr == by_block.end()) { + auto blk = ctrl.fetch_block_by_id(block_id); + return fetch_stable_checkpoint_from_blk_extn(blk); + } + + auto cpp = *itr; + + if (cpp->is_stable) { + if (ctrl.my_signature_providers().empty()) return pbft_stable_checkpoint{}; + auto psc = pbft_stable_checkpoint{cpp->block_num, cpp->block_id, cpp->checkpoints, chain_id()}; + return psc; + } else return pbft_stable_checkpoint{}; + } + + block_info pbft_database::cal_pending_stable_checkpoint() const { + + //TODO: maybe use watermarks instead? + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + auto lscb_id = ctrl.last_stable_checkpoint_block_id(); + auto lscb_info = block_info{lscb_id, lscb_num}; + + const auto &by_blk_num = checkpoint_index.get(); + auto itr = by_blk_num.lower_bound(lscb_num); + if (itr == by_blk_num.end()) return lscb_info; + + while (itr != by_blk_num.end()) { + if ((*itr)->is_stable && ctrl.fetch_block_state_by_id((*itr)->block_id)) { + auto lscb = ctrl.fetch_block_state_by_number(ctrl.last_stable_checkpoint_block_num()); + + auto head_checkpoint_schedule = ctrl.fetch_block_state_by_id( + (*itr)->block_id)->active_schedule; + + producer_schedule_type current_schedule; + producer_schedule_type new_schedule; + + if (lscb_num == 0) { + const auto& ucb = ctrl.get_upgrade_properties().upgrade_complete_block_num; + if (ucb == 0) return lscb_info; + auto bs = ctrl.fetch_block_state_by_number(ucb); + if (!bs) return lscb_info; + current_schedule = bs->active_schedule; + new_schedule = bs->pending_schedule; + } else if (lscb) { + current_schedule = lscb->active_schedule; + new_schedule = lscb->pending_schedule; + } else { + return lscb_info; + } + + if ((*itr)->is_stable + && (head_checkpoint_schedule == current_schedule || head_checkpoint_schedule == new_schedule)) { + lscb_info.block_id = (*itr)->block_id; + lscb_info.block_num = (*itr)->block_num; + } + } + ++itr; + } + return lscb_info; + } + + vector pbft_database::generate_and_add_pbft_checkpoint() { + auto new_pc = vector{}; + + const auto &by_commit_and_num_index = pbft_state_index.get(); + auto itr = by_commit_and_num_index.begin(); + if (itr == by_commit_and_num_index.end() || !(*itr)->should_committed) return new_pc; + + pbft_state_ptr psp = (*itr); + + vector pending_checkpoint_block_num; + + block_num_type my_latest_checkpoint = 0; + + auto checkpoint = [&](const block_num_type &in) { + const auto& ucb = ctrl.get_upgrade_properties().upgrade_complete_block_num; + if (ucb == 0) return false; + return in >= ucb + && (in % 100 == 1 || std::find(prepare_watermarks.begin(), prepare_watermarks.end(), in) != prepare_watermarks.end()); + }; + + for (auto i = psp->block_num; + i > std::max(ctrl.last_stable_checkpoint_block_num(), static_cast(1)); --i) { + if (checkpoint(i)) { + my_latest_checkpoint = max(i, my_latest_checkpoint); + auto &by_block = checkpoint_index.get(); + auto c_itr = by_block.find(ctrl.get_block_id_for_num(i)); + if (c_itr == by_block.end()) { + pending_checkpoint_block_num.emplace_back(i); + } else { + auto checkpoints = (*c_itr)->checkpoints; + bool contains_mine = false; + for (auto const &my_sp : ctrl.my_signature_providers()) { + auto p_itr = find_if(checkpoints.begin(), checkpoints.end(), + [&](const pbft_checkpoint 
&ext) { return ext.public_key == my_sp.first; }); + if (p_itr != checkpoints.end()) contains_mine = true; + } + if (!contains_mine) { + pending_checkpoint_block_num.emplace_back(i); + } + } + } + } + + if (!pending_checkpoint_block_num.empty()) { + std::sort(pending_checkpoint_block_num.begin(), pending_checkpoint_block_num.end()); + for (auto h: pending_checkpoint_block_num) { + for (auto const &my_sp : ctrl.my_signature_providers()) { + auto uuid = boost::uuids::to_string(uuid_generator()); + auto cp = pbft_checkpoint{uuid, h, ctrl.get_block_id_for_num(h), + my_sp.first, .chain_id=chain_id()}; + cp.producer_signature = my_sp.second(cp.digest()); + add_pbft_checkpoint(cp); + new_pc.emplace_back(cp); + } + } + } else if (my_latest_checkpoint > 1) { + auto lscb_id = ctrl.get_block_id_for_num(my_latest_checkpoint); + auto &by_block = checkpoint_index.get(); + auto h_itr = by_block.find(lscb_id); + if (h_itr != by_block.end()) { + auto checkpoints = (*h_itr)->checkpoints; + for (auto const &my_sp : ctrl.my_signature_providers()) { + for (auto const &cp: checkpoints) { + if (my_sp.first == cp.public_key) { + auto retry_cp = cp; + auto uuid = boost::uuids::to_string(uuid_generator()); + retry_cp.uuid = uuid; + retry_cp.timestamp = time_point::now(); + retry_cp.producer_signature = my_sp.second(retry_cp.digest()); + new_pc.emplace_back(retry_cp); + } + } + } + } + } + + return new_pc; + } + + void pbft_database::add_pbft_checkpoint(pbft_checkpoint &cp) { + + if (!is_valid_checkpoint(cp)) return; + + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + + auto cp_block_state = ctrl.fetch_block_state_by_id(cp.block_id); + if (!cp_block_state) return; + auto active_bps = cp_block_state->active_schedule.producers; + auto checkpoint_count = count_if(active_bps.begin(), active_bps.end(), [&](const producer_key &p) { + return p.block_signing_key == cp.public_key; + }); + if (checkpoint_count == 0) return; + + auto &by_block = checkpoint_index.get(); + auto itr = by_block.find(cp.block_id); + if (itr == by_block.end()) { + auto cs = pbft_checkpoint_state{cp.block_id, cp.block_num, .checkpoints={cp}}; + auto csp = make_shared(cs); + checkpoint_index.insert(csp); + itr = by_block.find(cp.block_id); + } else { + auto csp = (*itr); + auto checkpoints = csp->checkpoints; + auto p_itr = find_if(checkpoints.begin(), checkpoints.end(), + [&](const pbft_checkpoint &existed) { + return existed.public_key == cp.public_key; + }); + if (p_itr == checkpoints.end()) { + by_block.modify(itr, [&](const pbft_checkpoint_state_ptr &pcp) { + csp->checkpoints.emplace_back(cp); + }); + } + } + + auto csp = (*itr); + auto cp_count = 0; + if (!csp->is_stable) { + for (auto const &sp: active_bps) { + for (auto const &pp: csp->checkpoints) { + if (sp.block_signing_key == pp.public_key) cp_count += 1; + } + } + if (cp_count >= active_bps.size() * 2 / 3 + 1) { + by_block.modify(itr, [&](const pbft_checkpoint_state_ptr &pcp) { csp->is_stable = true; }); + auto id = csp->block_id; + auto blk = ctrl.fetch_block_by_id(id); + + if (blk && (blk->block_extensions.empty() || blk->block_extensions.back().first != static_cast(block_extension_type::pbft_stable_checkpoint))) { + auto scp = get_stable_checkpoint_by_id(id); + auto scp_size = fc::raw::pack_size(scp); + + auto buffer = std::make_shared>(scp_size); + fc::datastream ds( buffer->data(), scp_size); + fc::raw::pack( ds, scp ); + + blk->block_extensions.emplace_back(); + auto &extension = blk->block_extensions.back(); + extension.first = 
static_cast(block_extension_type::pbft_stable_checkpoint ); + extension.second.resize(scp_size); + std::copy(buffer->begin(),buffer->end(), extension.second.data()); + } + } + } + + auto lscb_info = cal_pending_stable_checkpoint(); + auto pending_num = lscb_info.block_num; + auto pending_id = lscb_info.block_id; + if (pending_num > lscb_num) { + ctrl.set_pbft_latest_checkpoint(pending_id); + if (ctrl.last_irreversible_block_num() < pending_num) ctrl.pbft_commit_local(pending_id); + const auto &by_block_id_index = pbft_state_index.get(); + auto pitr = by_block_id_index.find(pending_id); + if (pitr != by_block_id_index.end()) { + prune(*pitr); + } + } + + } + + void pbft_database::send_pbft_checkpoint() { + auto cps_to_send = generate_and_add_pbft_checkpoint(); + for (auto const &cp: cps_to_send) { + emit(pbft_outgoing_checkpoint, cp); + } + } + + bool pbft_database::is_valid_checkpoint(const pbft_checkpoint &cp) { + + if (cp.block_num > ctrl.head_block_num() + || cp.block_num <= ctrl.last_stable_checkpoint_block_num() + || !cp.is_signature_valid()) + return false; + auto bs = ctrl.fetch_block_state_by_id(cp.block_id); + if (bs) { + auto active_bps = bs->active_schedule.producers; + for (const auto &bp: active_bps) { + if (bp.block_signing_key == cp.public_key) return true; + } + } + return false; + } + + bool pbft_database::is_valid_stable_checkpoint(const pbft_stable_checkpoint &scp) { + if (scp.block_num <= ctrl.last_stable_checkpoint_block_num()) + // the stable checkpoint is way behind lib, no way getting the block state, + // it will not be applied nor saved, thus considered safe. + return true; + + auto valid = true; + for (const auto &c: scp.checkpoints) { + valid = valid && is_valid_checkpoint(c) + && c.block_id == scp.block_id + && c.block_num == scp.block_num; + if (!valid) return false; + } + + auto bs = ctrl.fetch_block_state_by_number(scp.block_num); + if (bs) { + auto as = bs->active_schedule; + auto cp_count = 0; + for (auto const &sp: as.producers) { + for (auto const &v: scp.checkpoints) { + if (sp.block_signing_key == v.public_key) cp_count += 1; + } + } + valid = valid && cp_count >= as.producers.size() * 2 / 3 + 1; + } else { + return false; + } + return valid; + } + + bool pbft_database::should_send_pbft_msg() { + + //use last_stable_checkpoint producer schedule + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + + auto as = lib_active_producers(); + auto my_sp = ctrl.my_signature_providers(); + + for (auto i = lscb_num; i <= ctrl.head_block_num(); ++i) { + for (auto const &bp: as.producers) { + for (auto const &my: my_sp) { + if (bp.block_signing_key == my.first) return true; + } + } + auto bs = ctrl.fetch_block_state_by_number(i); + if (bs && bs->active_schedule != as) as = bs->active_schedule; + } + return false; + } + + bool pbft_database::should_recv_pbft_msg(const public_key_type &pub_key) { + auto lscb_num = ctrl.last_stable_checkpoint_block_num(); + + auto as = lib_active_producers(); + auto my_sp = ctrl.my_signature_providers(); + + for (auto i = lscb_num; i <= ctrl.head_block_num(); ++i) { + for (auto const &bp: as.producers) { + if (bp.block_signing_key == pub_key) return true; + } + auto bs = ctrl.fetch_block_state_by_number(i); + if (bs && bs->active_schedule != as) as = bs->active_schedule; + } + return false; + } + + public_key_type pbft_database::get_new_view_primary_key(const uint32_t target_view) { + + auto active_bps = lib_active_producers().producers; + if (active_bps.empty()) return public_key_type{}; + + return active_bps[target_view % 
active_bps.size()].block_signing_key; + } + + producer_schedule_type pbft_database::lib_active_producers() const { + auto lib_num = ctrl.last_irreversible_block_num(); + if (lib_num == 0) return ctrl.initial_schedule(); + + auto lib_state = ctrl.fetch_block_state_by_number(lib_num); + if (!lib_state) return ctrl.initial_schedule(); + + if (lib_state->pending_schedule.producers.empty()) return lib_state->active_schedule; + return lib_state->pending_schedule; + } + + chain_id_type pbft_database::chain_id() { + return ctrl.get_chain_id(); + } + + block_num_type pbft_database::get_current_pbft_watermark() { + auto unique_merge = [&](vector &v1, vector &v2) + { + std::sort(v1.begin(), v1.end()); + v1.reserve(v1.size() + v2.size()); + v1.insert(v1.end(), v2.begin(), v2.end()); + + sort( v1.begin(), v1.end() ); + v1.erase( unique( v1.begin(), v1.end() ), v1.end() ); + }; + + auto lib = ctrl.last_irreversible_block_num(); + auto lscb = ctrl.last_stable_checkpoint_block_num(); + + auto proposed_schedule_blocks = ctrl.proposed_schedule_block_nums(); + auto promoted_schedule_blocks = ctrl.promoted_schedule_block_nums(); + unique_merge(prepare_watermarks, proposed_schedule_blocks); + unique_merge(prepare_watermarks, promoted_schedule_blocks); + + + for ( auto itr = prepare_watermarks.begin(); itr != prepare_watermarks.end();) { + if ((*itr) <= lscb) { + itr = prepare_watermarks.erase(itr); + } else { + ++itr; + } + } + std::sort(prepare_watermarks.begin(), prepare_watermarks.end()); + + + if (prepare_watermarks.empty()) return 0; + + auto cw = *std::upper_bound(prepare_watermarks.begin(), prepare_watermarks.end(), lib); + + if (cw > lib) return cw; else return 0; + } + + void pbft_database::set(pbft_state_ptr s) { + auto result = pbft_state_index.insert(s); + + EOS_ASSERT(result.second, pbft_exception, "unable to insert pbft state, duplicate state detected"); + } + + void pbft_database::set(pbft_checkpoint_state_ptr s) { + auto result = checkpoint_index.insert(s); + + EOS_ASSERT(result.second, pbft_exception, "unable to insert pbft checkpoint index, duplicate state detected"); + } + + void pbft_database::prune(const pbft_state_ptr &h) { + auto num = h->block_num; + + auto &by_bn = pbft_state_index.get(); + auto bni = by_bn.begin(); + while (bni != by_bn.end() && (*bni)->block_num < num) { + prune(*bni); + bni = by_bn.begin(); + } + + auto itr = pbft_state_index.find(h->block_id); + if (itr != pbft_state_index.end()) { + pbft_state_index.erase(itr); + } + } + + template + void pbft_database::emit(const Signal &s, Arg &&a) { + try { + s(std::forward(a)); + } catch (boost::interprocess::bad_alloc &e) { + wlog("bad alloc"); + throw e; + } catch (controller_emit_signal_exception &e) { + wlog("${details}", ("details", e.to_detail_string())); + throw e; + } catch (fc::exception &e) { + wlog("${details}", ("details", e.to_detail_string())); + } catch (...) 
{ + wlog("signal handler threw exception"); + } + } + } +} \ No newline at end of file diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 7d3553e379b..bc0a764ed10 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -193,6 +193,23 @@ class privileged_api : public context_aware_api { }); } + void set_upgrade_parameters_packed( array_ptr packed_upgrade_parameters, size_t datalen) { + datastream ds( packed_upgrade_parameters, datalen ); + uint32_t target_num; + fc::raw::unpack(ds, target_num); + + EOS_ASSERT( context.control.head_block_num() < target_num - 100, wasm_execution_error, "upgrade target block is too close"); + + EOS_ASSERT( !context.control.is_upgraded(), wasm_execution_error, "the system has already upgraded to the new version"); + + EOS_ASSERT( !context.control.under_upgrade(), wasm_execution_error, "the system is currently under upgrade"); + + context.db.modify( context.control.get_upgrade_properties(), + [&]( auto& uprops ) { + uprops.upgrade_target_block_num = target_num; + }); + } + // *bos begin* void set_name_list_packed(int64_t list, int64_t action, array_ptr packed_name_list, size_t datalen) { @@ -1832,6 +1849,7 @@ REGISTER_INTRINSICS(privileged_api, (set_guaranteed_minimum_resources, void(int64_t,int64_t,int64_t) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) + (set_upgrade_parameters_packed, void(int, int) ) ); REGISTER_INJECTED_INTRINSICS(transaction_context, diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 5f0a5206fdc..5a9db207780 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -291,6 +292,7 @@ namespace eosio { namespace testing { fc::temp_directory tempdir; public: unique_ptr control; + unique_ptr pbft_ctrl; std::map block_signing_private_keys; protected: controller::config cfg; diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 0b42bdf41b0..d81bd231a45 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -9,6 +9,7 @@ add_subdirectory(producer_plugin) add_subdirectory(producer_api_plugin) add_subdirectory(history_plugin) add_subdirectory(history_api_plugin) +add_subdirectory(pbft_plugin) add_subdirectory(state_history_plugin) add_subdirectory(wallet_plugin) diff --git a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp index b62915b5220..766a73df56f 100644 --- a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp +++ b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace eosio { namespace chain { namespace plugin_interface { using namespace eosio::chain; @@ -61,4 +62,24 @@ namespace eosio { namespace chain { namespace plugin_interface { } } + namespace pbft { + namespace incoming { + using prepare_channel = channel_decl; + using commit_channel = channel_decl; + using view_change_channel = channel_decl; + using new_view_channel = channel_decl; + using checkpoint_channel = channel_decl; + + } + + namespace outgoing { + using prepare_channel = channel_decl; + using commit_channel = channel_decl; + using view_change_channel = channel_decl; + using new_view_channel = channel_decl; + using checkpoint_channel = channel_decl; + + } + } + } } } diff --git 
a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index f3fddf4764a..965acc44a35 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -150,6 +150,17 @@ class chain_plugin_impl { ,incoming_block_channel(app().get_channel()) ,incoming_block_sync_method(app().get_method()) ,incoming_transaction_async_method(app().get_method()) + //pbft channels + ,pbft_outgoing_prepare_channel(app().get_channel()) + ,pbft_incoming_prepare_channel(app().get_channel()) + ,pbft_outgoing_commit_channel(app().get_channel()) + ,pbft_incoming_commit_channel(app().get_channel()) + ,pbft_outgoing_view_change_channel(app().get_channel()) + ,pbft_incoming_view_change_channel(app().get_channel()) + ,pbft_outgoing_new_view_channel(app().get_channel()) + ,pbft_incoming_new_view_channel(app().get_channel()) + ,pbft_outgoing_checkpoint_channel(app().get_channel()) + ,pbft_incoming_checkpoint_channel(app().get_channel()) {} bfs::path blocks_dir; @@ -161,11 +172,17 @@ class chain_plugin_impl { fc::optional chain_config; fc::optional chain; fc::optional chain_id; + fc::optional pbft_ctrl; //txn_msg_rate_limits rate_limits; fc::optional wasm_runtime; fc::microseconds abi_serializer_max_time_ms; fc::optional snapshot_path; + void on_pbft_incoming_prepare(pbft_prepare p); + void on_pbft_incoming_commit(pbft_commit c); + void on_pbft_incoming_view_change(pbft_view_change vc); + void on_pbft_incoming_new_view(pbft_new_view nv); + void on_pbft_incoming_checkpoint(pbft_checkpoint cp); // retained references to channels for easy publication channels::pre_accepted_block::channel_type& pre_accepted_block_channel; @@ -196,7 +213,31 @@ class chain_plugin_impl { fc::optional applied_transaction_connection; fc::optional accepted_confirmation_connection; - + //pbft + fc::optional pbft_outgoing_prepare_connection; + pbft::incoming::prepare_channel::channel_type::handle pbft_incoming_prepare_subscription; + pbft::outgoing::prepare_channel::channel_type& pbft_outgoing_prepare_channel; + pbft::incoming::prepare_channel::channel_type& pbft_incoming_prepare_channel; + + fc::optional pbft_outgoing_commit_connection; + pbft::incoming::commit_channel::channel_type::handle pbft_incoming_commit_subscription; + pbft::outgoing::commit_channel::channel_type& pbft_outgoing_commit_channel; + pbft::incoming::commit_channel::channel_type& pbft_incoming_commit_channel; + + fc::optional pbft_outgoing_view_change_connection; + pbft::incoming::view_change_channel::channel_type::handle pbft_incoming_view_change_subscription; + pbft::outgoing::view_change_channel::channel_type& pbft_outgoing_view_change_channel; + pbft::incoming::view_change_channel::channel_type& pbft_incoming_view_change_channel; + + fc::optional pbft_outgoing_new_view_connection; + pbft::incoming::new_view_channel::channel_type::handle pbft_incoming_new_view_subscription; + pbft::outgoing::new_view_channel::channel_type& pbft_outgoing_new_view_channel; + pbft::incoming::new_view_channel::channel_type& pbft_incoming_new_view_channel; + + fc::optional pbft_outgoing_checkpoint_connection; + pbft::incoming::checkpoint_channel::channel_type::handle pbft_incoming_checkpoint_subscription; + pbft::outgoing::checkpoint_channel::channel_type& pbft_outgoing_checkpoint_channel; + pbft::incoming::checkpoint_channel::channel_type& pbft_incoming_checkpoint_channel; }; chain_plugin::chain_plugin() @@ -294,12 +335,25 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip } +template +T dejsonify(const string& 
s) { + return fc::json::from_string(s).as(); +} + #define LOAD_VALUE_SET(options, name, container) \ if( options.count(name) ) { \ const std::vector& ops = options[name].as>(); \ std::copy(ops.begin(), ops.end(), std::inserter(container, container.end())); \ } +static signature_provider_type +make_key_signature_provider(const private_key_type& key) { + return [key]( const chain::digest_type& digest ) { + return key.sign(digest); + }; +} + + fc::time_point calculate_genesis_timestamp( string tstr ) { fc::time_point genesis_timestamp; if( strcasecmp (tstr.c_str(), "now") == 0 ) { @@ -350,6 +404,48 @@ void chain_plugin::plugin_initialize(const variables_map& options) { LOAD_VALUE_SET( options, "actor-blacklist", my->chain_config->actor_blacklist ); LOAD_VALUE_SET( options, "contract-whitelist", my->chain_config->contract_whitelist ); LOAD_VALUE_SET( options, "contract-blacklist", my->chain_config->contract_blacklist ); + LOAD_VALUE_SET( options, "producer-name", my->chain_config->my_producers); + if( options.count("private-key") ) + { + const std::vector key_id_to_wif_pair_strings = options["private-key"].as>(); + for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) + { + try { + auto key_id_to_wif_pair = dejsonify>(key_id_to_wif_pair_string); + my->chain_config->my_signature_providers[key_id_to_wif_pair.first] = make_key_signature_provider(key_id_to_wif_pair.second); + auto blanked_privkey = std::string(std::string(key_id_to_wif_pair.second).size(), '*' ); + wlog("\"private-key\" is DEPRECATED, use \"signature-provider=${pub}=KEY:${priv}\"", ("pub",key_id_to_wif_pair.first)("priv", blanked_privkey)); + } catch ( fc::exception& e ) { + elog("Malformed private key pair"); + } + } + } + + if( options.count("signature-provider") ) { + const std::vector key_spec_pairs = options["signature-provider"].as>(); + for (const auto& key_spec_pair : key_spec_pairs) { + try { + auto delim = key_spec_pair.find("="); + EOS_ASSERT(delim != std::string::npos, plugin_config_exception, "Missing \"=\" in the key spec pair"); + auto pub_key_str = key_spec_pair.substr(0, delim); + auto spec_str = key_spec_pair.substr(delim + 1); + + auto spec_delim = spec_str.find(":"); + EOS_ASSERT(spec_delim != std::string::npos, plugin_config_exception, "Missing \":\" in the key spec pair"); + auto spec_type_str = spec_str.substr(0, spec_delim); + auto spec_data = spec_str.substr(spec_delim + 1); + + auto pubkey = public_key_type(pub_key_str); + + if (spec_type_str == "KEY") { + my->chain_config->my_signature_providers[pubkey] = make_key_signature_provider(private_key_type(spec_data)); + } + + } catch (...) 
{ + elog("Malformed signature provider: \"${val}\", ignoring!", ("val", key_spec_pair)); + } + } + } LOAD_VALUE_SET( options, "trusted-producer", my->chain_config->trusted_producers ); @@ -641,6 +737,9 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain.emplace( *my->chain_config ); my->chain_id.emplace( my->chain->get_chain_id()); + ilog("include pbft controller..."); + my->pbft_ctrl.emplace(*my->chain); + // set up method providers my->get_block_by_number_provider = app().get_method().register_provider( [this]( uint32_t block_num ) -> signed_block_ptr { @@ -703,11 +802,81 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->accepted_confirmation_channel.publish( conf ); } ); + + + //pbft + my->pbft_incoming_prepare_subscription = my->pbft_incoming_prepare_channel.subscribe( [this]( pbft_prepare p ){ + my->on_pbft_incoming_prepare(p); + }); + + my->pbft_incoming_commit_subscription = my->pbft_incoming_commit_channel.subscribe( [this]( pbft_commit c ){ + my->on_pbft_incoming_commit(c); + }); + + my->pbft_incoming_view_change_subscription = my->pbft_incoming_view_change_channel.subscribe( [this]( pbft_view_change vc ){ + my->on_pbft_incoming_view_change(vc); + }); + + my->pbft_incoming_new_view_subscription = my->pbft_incoming_new_view_channel.subscribe( [this]( pbft_new_view nv ){ + my->on_pbft_incoming_new_view(nv); + }); + + my->pbft_incoming_checkpoint_subscription = my->pbft_incoming_checkpoint_channel.subscribe( [this]( pbft_checkpoint cp ){ + my->on_pbft_incoming_checkpoint(cp); + }); + + my->pbft_outgoing_prepare_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_prepare.connect( + [this]( const pbft_prepare& prepare ) { + my->pbft_outgoing_prepare_channel.publish( prepare ); + }); + + my->pbft_outgoing_commit_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_commit.connect( + [this]( const pbft_commit& commit ) { + my->pbft_outgoing_commit_channel.publish( commit ); + }); + + my->pbft_outgoing_view_change_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_view_change.connect( + [this]( const pbft_view_change& view_change ) { + my->pbft_outgoing_view_change_channel.publish( view_change ); + }); + + my->pbft_outgoing_new_view_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_new_view.connect( + [this]( const pbft_new_view& new_view ) { + my->pbft_outgoing_new_view_channel.publish( new_view ); + }); + + my->pbft_outgoing_checkpoint_connection = my->pbft_ctrl->pbft_db.pbft_outgoing_checkpoint.connect( + [this]( const pbft_checkpoint& checkpoint ) { + my->pbft_outgoing_checkpoint_channel.publish( checkpoint ); + }); + my->chain->add_indices(); } FC_LOG_AND_RETHROW() + +} + +void chain_plugin_impl::on_pbft_incoming_prepare(pbft_prepare p){ + pbft_ctrl->on_pbft_prepare(p); +} + +void chain_plugin_impl::on_pbft_incoming_commit(pbft_commit c){ + pbft_ctrl->on_pbft_commit(c); +} + +void chain_plugin_impl::on_pbft_incoming_view_change(pbft_view_change vc){ + pbft_ctrl->on_pbft_view_change(vc); +} + +void chain_plugin_impl::on_pbft_incoming_new_view(pbft_new_view nv){ + pbft_ctrl->on_pbft_new_view(nv); } +void chain_plugin_impl::on_pbft_incoming_checkpoint(pbft_checkpoint cp){ + pbft_ctrl->on_pbft_checkpoint(cp); +} + + void chain_plugin::plugin_startup() { try { try { @@ -979,6 +1148,8 @@ bool chain_plugin::export_reversible_blocks( const fc::path& reversible_dir, controller& chain_plugin::chain() { return *my->chain; } const controller& chain_plugin::chain() const { return *my->chain; } +pbft_controller& chain_plugin::pbft_ctrl() { return 
*my->pbft_ctrl; } +const pbft_controller& chain_plugin::pbft_ctrl() const { return *my->pbft_ctrl; } chain::chain_id_type chain_plugin::get_chain_id()const { EOS_ASSERT( my->chain_id.valid(), chain_id_type_exception, "chain ID has not been initialized yet" ); @@ -1038,6 +1209,9 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params db.fork_db_head_block_id(), db.fork_db_head_block_time(), db.fork_db_head_block_producer(), + pbft_ctrl.state_machine.get_current_view(), + pbft_ctrl.state_machine.get_target_view(), + db.last_stable_checkpoint_block_num(), rm.get_virtual_block_cpu_limit(), rm.get_virtual_block_net_limit(), rm.get_block_cpu_limit(), diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index aa493e045fd..036d70358fb 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -20,6 +20,7 @@ #include #include +#include namespace fc { class variant; } @@ -73,12 +74,13 @@ class read_only { const controller& db; const fc::microseconds abi_serializer_max_time; bool shorten_abi_errors = true; + const chain::pbft_controller& pbft_ctrl; public: static const string KEYi64; - read_only(const controller& db, const fc::microseconds& abi_serializer_max_time) - : db(db), abi_serializer_max_time(abi_serializer_max_time) {} + read_only(const controller& db, const fc::microseconds& abi_serializer_max_time, const chain::pbft_controller& pbft_ctrl) + : db(db), abi_serializer_max_time(abi_serializer_max_time), pbft_ctrl(pbft_ctrl) {} void validate() const {} @@ -95,7 +97,9 @@ class read_only { chain::block_id_type head_block_id; fc::time_point head_block_time; account_name head_block_producer; - + uint32_t current_view = 0; + uint32_t target_view = 0; + uint32_t last_stable_checkpoint_block_num = 0; uint64_t virtual_block_cpu_limit = 0; uint64_t virtual_block_net_limit = 0; @@ -660,7 +664,7 @@ class chain_plugin : public plugin { void plugin_startup(); void plugin_shutdown(); - chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time()); } + chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time(), pbft_ctrl()); } chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time()); } void accept_block( const chain::signed_block_ptr& block ); @@ -689,6 +693,11 @@ class chain_plugin : public plugin { // Only call this after plugin_initialize()! const controller& chain() const; + // Only call this after plugin_initialize()! + chain::pbft_controller& pbft_ctrl(); + // Only call this after plugin_initialize()! 
+ const chain::pbft_controller& pbft_ctrl() const; + chain::chain_id_type get_chain_id() const; fc::microseconds get_abi_serializer_max_time() const; @@ -706,7 +715,7 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(current_view)(target_view)(last_stable_checkpoint_block_num)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index d732b18cf0c..7fb193a979b 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -30,12 +30,14 @@ namespace eosio { void plugin_startup(); void plugin_shutdown(); + void broadcast_block(const chain::signed_block &sb); string connect( const string& endpoint ); string disconnect( const string& endpoint ); optional status( const string& endpoint )const; vector connections()const; + bool is_syncing()const; size_t num_peers() const; private: diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 76f11da2411..65b81583364 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include #include namespace eosio { @@ -132,6 +133,11 @@ namespace eosio { uint32_t end_block; }; + struct checkpoint_request_message { + uint32_t start_block; + uint32_t end_block; + }; + struct request_p2p_message{ bool discoverable; }; @@ -150,7 +156,14 @@ struct request_p2p_message{ signed_block, // which = 7 packed_transaction, // which = 8 response_p2p_message, - request_p2p_message>; + request_p2p_message, + pbft_prepare, + pbft_commit, + pbft_view_change, + pbft_new_view, + pbft_checkpoint, + pbft_stable_checkpoint, + checkpoint_request_message>; } // namespace eosio FC_REFLECT( eosio::select_ids, (mode)(pending)(ids) ) @@ -170,6 +183,9 @@ FC_REFLECT( eosio::request_message, (req_trx)(req_blocks) ) FC_REFLECT( eosio::sync_request_message, (start_block)(end_block) ) FC_REFLECT( eosio::request_p2p_message, (discoverable) ) FC_REFLECT( eosio::response_p2p_message, (discoverable)(p2p_peer_list) ) +FC_REFLECT( eosio::checkpoint_request_message, (start_block)(end_block) ) + + /** * Goals of Network Code diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8161a7119cf..7321c5de820 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -26,6 +26,8 @@ #include #include +#include + using namespace eosio::chain::plugin_interface::compat; namespace fc { @@ -106,7 +108,11 @@ namespace eosio { bool connected; }; class 
net_plugin_impl { + private: + std::shared_ptr> encode_pbft_message(const net_message &msg)const; public: + net_plugin_impl(); + unique_ptr acceptor; tcp::endpoint listen_endpoint; string p2p_address; @@ -140,10 +146,14 @@ namespace eosio { unique_ptr connector_check; unique_ptr transaction_check; unique_ptr keepalive_timer; + unique_ptr pbft_message_cache_timer; + unique_ptr connection_monitor_timer; boost::asio::steady_timer::duration connector_period; boost::asio::steady_timer::duration txn_exp_period; boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; + boost::asio::steady_timer::duration pbft_message_cache_tick_interval{std::chrono::seconds{10}}; + boost::asio::steady_timer::duration connection_monitor_tick_interval{std::chrono::seconds{2}}; int max_cleanup_time_ms = 0; const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. @@ -163,7 +173,22 @@ namespace eosio { bool use_socket_read_watermark = false; + std::unordered_map pbft_message_cache{}; + const int pbft_message_cache_TTL = 600; + const int pbft_message_TTL = 10; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + eosio::chain::plugin_interface::pbft::outgoing::prepare_channel::channel_type::handle pbft_outgoing_prepare_subscription; + eosio::chain::plugin_interface::pbft::outgoing::commit_channel::channel_type::handle pbft_outgoing_commit_subscription; + eosio::chain::plugin_interface::pbft::outgoing::view_change_channel::channel_type::handle pbft_outgoing_view_change_subscription; + eosio::chain::plugin_interface::pbft::outgoing::new_view_channel::channel_type::handle pbft_outgoing_new_view_subscription; + eosio::chain::plugin_interface::pbft::outgoing::checkpoint_channel::channel_type::handle pbft_outgoing_checkpoint_subscription; + + eosio::chain::plugin_interface::pbft::incoming::prepare_channel::channel_type& pbft_incoming_prepare_channel; + eosio::chain::plugin_interface::pbft::incoming::commit_channel::channel_type& pbft_incoming_commit_channel; + eosio::chain::plugin_interface::pbft::incoming::view_change_channel::channel_type& pbft_incoming_view_change_channel; + eosio::chain::plugin_interface::pbft::incoming::new_view_channel::channel_type& pbft_incoming_new_view_channel; + eosio::chain::plugin_interface::pbft::incoming::checkpoint_channel::channel_type& pbft_incoming_checkpoint_channel; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -214,6 +239,32 @@ namespace eosio { void handle_message( connection_ptr c, const request_p2p_message &msg); void handle_message( connection_ptr c, const response_p2p_message &msg); + //pbft messages + bool maybe_add_pbft_cache(const string &uuid); + void clean_expired_pbft_cache(); + template + bool is_pbft_msg_outdated(M const & msg); + template + bool is_pbft_msg_valid(M const & msg); + + void bcast_pbft_msg(const net_message &msg); + + void forward_pbft_msg(connection_ptr c, const net_message &msg); + + void pbft_outgoing_prepare(const pbft_prepare &prepare); + void pbft_outgoing_commit(const pbft_commit &commit); + void pbft_outgoing_view_change(const pbft_view_change &view_change); + void pbft_outgoing_new_view(const pbft_new_view &new_view); + void pbft_outgoing_checkpoint(const pbft_checkpoint &checkpoint); + + void handle_message( connection_ptr c, 
const pbft_prepare &msg); + void handle_message( connection_ptr c, const pbft_commit &msg); + void handle_message( connection_ptr c, const pbft_view_change &msg); + void handle_message( connection_ptr c, const pbft_new_view &msg); + void handle_message( connection_ptr c, const pbft_checkpoint &msg); + void handle_message( connection_ptr c, const pbft_stable_checkpoint &msg); + void handle_message( connection_ptr c, const checkpoint_request_message &msg); + void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_txn_timer(); void start_monitors(); @@ -221,6 +272,9 @@ namespace eosio { void expire_txns(); void expire_local_txns(); void connection_monitor(std::weak_ptr from_connection); + + void pbft_message_cache_ticker(); + void connection_monitor_ticker(); /** \name Peer Timestamps * Time message handling * @{ @@ -436,6 +490,8 @@ namespace eosio { uint32_t write_queue_size() const { return _write_queue_size; } + uint32_t out_queue_size() const { return _out_queue.size(); } + bool is_out_queue_empty() const { return _out_queue.empty(); } bool ready_to_send() const { @@ -497,6 +553,12 @@ namespace eosio { deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; + public: + //used for pbft msgs sending only + void push_to_out_queue( const queued_write& m) { + _out_queue.emplace_back( m ); + } + }; // queued_buffer @@ -517,6 +579,15 @@ namespace eosio { fc::optional outstanding_read_bytes; + struct queued_pbft_message { + std::shared_ptr> message; + fc::time_point_sec deadline; + }; + const int OUT_QUEUE_SIZE_LIMIT_FROM_WRITE_QUEUE = 100; + const int OUT_QUEUE_SIZE_LIMIT = 200; + + deque pbft_queue; + queued_buffer buffer_queue; uint32_t reads_in_flight = 0; @@ -527,6 +598,8 @@ namespace eosio { int16_t sent_handshake_count = 0; bool connecting = false; bool syncing = false; + int connecting_timeout_in_seconds = 10; + fc::time_point_sec connecting_deadline; uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; @@ -565,6 +638,7 @@ namespace eosio { bool connected(); bool current(); + bool pbft_ready(); void reset(); void close(); void send_handshake(); @@ -602,11 +676,15 @@ namespace eosio { void enqueue_buffer( const std::shared_ptr>& send_buffer, bool trigger_send, go_away_reason close_after_send, bool to_sync_queue = false); + void enqueue_pbft( const std::shared_ptr>& m, const time_point_sec deadline); + bool pbft_read_to_send(); + void cancel_sync(go_away_reason); void flush_queues(); bool enqueue_sync_block(); void request_sync_blocks(uint32_t start, uint32_t end); + void request_sync_checkpoints(uint32_t start, uint32_t end); void cancel_wait(); void sync_wait(); void fetch_wait(); @@ -618,6 +696,7 @@ namespace eosio { std::function callback, bool to_sync_queue = false); void do_queue_write(); + void fill_out_buffer_with_pbft_queue(std::vector &bufs); void send_p2p_request(bool discoverable); void send_p2p_response(bool discoverable,string p2p_peer_list); @@ -726,6 +805,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + bool is_syncing(); + void set_in_sync(); }; class dispatch_manager { @@ -784,6 +865,7 @@ namespace eosio { sent_handshake_count(0), connecting(true), syncing(false), + connecting_deadline(fc::time_point::now()+fc::seconds(connecting_timeout_in_seconds)), 
protocol_version(0), peer_addr(), response_expected(), @@ -815,6 +897,10 @@ namespace eosio { return (connected() && !syncing); } + bool connection::pbft_ready(){ + return current(); + } + void connection::reset() { peer_requested.reset(); blk_state.clear(); @@ -823,6 +909,7 @@ namespace eosio { void connection::flush_queues() { buffer_queue.clear_write_queue(); + pbft_queue.clear(); } void connection::close() { @@ -834,6 +921,7 @@ namespace eosio { } flush_queues(); connecting = false; + connecting_deadline = fc::time_point::min(); syncing = false; if( last_req ) { my_impl->dispatcher->retry_fetch(shared_from_this()); @@ -850,6 +938,7 @@ namespace eosio { void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); uint32_t head_num = cc.fork_db_head_block_num(); notice_message note; note.known_blocks.mode = normal; @@ -995,9 +1084,14 @@ namespace eosio { } } + bool connection::pbft_read_to_send() { + return !pbft_queue.empty() && buffer_queue.is_out_queue_empty(); + } + void connection::do_queue_write() { - if( !buffer_queue.ready_to_send() ) - return; + if( !(buffer_queue.ready_to_send() || pbft_read_to_send()) ) + return; + connection_wptr c(shared_from_this()); if(!socket->is_open()) { fc_elog(logger,"socket not open to ${p}",("p",peer_name())); @@ -1005,7 +1099,10 @@ namespace eosio { return; } std::vector bufs; + buffer_queue.fill_out_buffer( bufs ); + fill_out_buffer_with_pbft_queue( bufs ); + boost::asio::async_write(*socket, bufs, [c](boost::system::error_code ec, std::size_t w) { try { auto conn = c.lock(); @@ -1047,6 +1144,50 @@ namespace eosio { }); } + void connection::fill_out_buffer_with_pbft_queue(std::vector &bufs){ + //delete timeout pbft message + auto now = time_point::now(); + int drop_pbft_count = 0; + while (pbft_queue.size()>0) { + if (pbft_queue.front().deadline <= now) { + pbft_queue.pop_front(); + ++drop_pbft_count; + } else { + break; + } + } + + //drop timeout messages in mem, init send buffer only when actual send happens + //copied from a previous version of connection::enqueue + connection_wptr weak_this = shared_from_this(); + go_away_reason close_after_send = no_reason; + std::function callback = [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { + connection_ptr conn = weak_this.lock(); + if (conn) { + if (close_after_send != no_reason) { + elog ("sent a go away message: ${r}, closing connection to ${p}",("r", reason_str(close_after_send))("p", conn->peer_name())); + my_impl->close(conn); + return; + } + } else { + fc_wlog(logger, "connection expired before enqueued net_message called callback!"); + } + }; + + //push to out queue + while (buffer_queue.out_queue_size() < OUT_QUEUE_SIZE_LIMIT) { + if (pbft_queue.empty()) break; + + queued_pbft_message pbft = pbft_queue.front(); + pbft_queue.pop_front(); + auto m = pbft.message; + if (m) { + bufs.push_back(boost::asio::buffer(*m)); + buffer_queue.push_to_out_queue( {m, callback} ); + } + } + } + void connection::cancel_sync(go_away_reason reason) { fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); @@ -1147,6 +1288,15 @@ namespace eosio { to_sync_queue); } + void connection::enqueue_pbft(const std::shared_ptr>& m, + const time_point_sec deadline = time_point_sec(static_cast(600))) + { + pbft_queue.push_back(queued_pbft_message{m, deadline }); + if (buffer_queue.is_out_queue_empty()) { + 
do_queue_write(); + } + } + void connection::cancel_wait() { if (response_expected) response_expected->cancel(); @@ -1223,6 +1373,42 @@ namespace eosio { sync_wait(); } + void connection::request_sync_checkpoints(uint32_t start, uint32_t end) { + fc_dlog(logger, "request sync checkpoints"); + checkpoint_request_message crm = {start,end}; + enqueue( net_message(crm)); + sync_wait(); + } + +// bool connection::process_next_message(net_plugin_impl& impl, uint32_t message_length) { +// vector tmp_data; +// tmp_data.resize(message_length); +// +// try { +// auto ds = pending_message_buffer.create_datastream(); +// auto read_index = pending_message_buffer.read_index(); +// pending_message_buffer.peek(tmp_data.data(),message_length,read_index); +// +// net_message msg; +// fc::raw::unpack(ds, msg); +// msg_handler m(impl, shared_from_this() ); +// if( msg.contains() ) { +// m( std::move( msg.get() ) ); +// } else if( msg.contains() ) { +// m( std::move( msg.get() ) ); +// } else { +// msg.visit( m ); +// } +// } catch( const fc::exception& e ) { +// wlog("error message length: ${l}", ("l", message_length)); +// wlog("error raw bytes ${s}", ("s", tmp_data)); +// edump((e.to_detail_string() )); +// impl.close( shared_from_this() ); +// return false; +// } +// return true; +// } + bool connection::process_next_message(net_plugin_impl& impl, uint32_t message_length) { try { auto ds = pending_message_buffer.create_datastream(); @@ -1242,7 +1428,7 @@ namespace eosio { return false; } return true; - } + } bool connection::add_peer_block(const peer_block_state& entry) { auto bptr = blk_state.get().find(entry.id); @@ -1325,6 +1511,14 @@ namespace eosio { chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); } + bool sync_manager::is_syncing() { + return state != in_sync; + } + + void sync_manager::set_in_sync() { + set_state(in_sync); + } + void sync_manager::request_next_chunk( const connection_ptr& conn ) { uint32_t head_block = chain_plug->chain().fork_db_head_block_num(); @@ -1455,6 +1649,7 @@ namespace eosio { void sync_manager::recv_handshake(const connection_ptr& c, const handshake_message& msg) { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); + uint32_t lscb_num = cc.last_stable_checkpoint_block_num(); uint32_t peer_lib = msg.last_irreversible_block_num; reset_lib_num(c); c->syncing = false; @@ -1473,6 +1668,13 @@ namespace eosio { uint32_t head = cc.fork_db_head_block_num(); block_id_type head_id = cc.fork_db_head_block_id(); + auto upgraded = cc.is_upgraded(); + if (peer_lib > lscb_num && upgraded) { + //there might be a better way to sync checkpoints, yet we do not want to modify the existing handshake msg. 
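// A small sketch (hypothetical helper name) of the range decision made right below: once the
// chain has switched to the new consensus rules and the peer's reported LIB is ahead of our
// last stable checkpoint, request the missing stable checkpoints over [lscb_num, peer_lib],
// which is exactly the start/end pair carried by checkpoint_request_message.
#include <cstdint>
#include <optional>
#include <utility>

inline std::optional<std::pair<uint32_t, uint32_t>>
checkpoint_sync_range(uint32_t my_lscb, uint32_t peer_lib, bool upgraded) {
    if (!upgraded || peer_lib <= my_lscb)
        return std::nullopt;                         // nothing worth requesting
    return std::make_pair(my_lscb, peer_lib);        // becomes start_block / end_block of the request
}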
+ fc_dlog(logger, "request sync checkpoints"); + c->request_sync_checkpoints(lscb_num, peer_lib); + } + if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); // notify peer of our pending transactions @@ -1864,6 +2066,21 @@ namespace eosio { } //------------------------------------------------------------------------ + std::shared_ptr> net_plugin_impl::encode_pbft_message(const net_message &msg) const { + + uint32_t payload_size = fc::raw::pack_size( msg ); + + char* header = reinterpret_cast(&payload_size); + size_t header_size = sizeof(payload_size); + size_t buffer_size = header_size + payload_size; + + auto send_buffer = std::make_shared>(buffer_size); + fc::datastream ds( send_buffer->data(), buffer_size); + ds.write( header, header_size ); + fc::raw::pack( ds, msg ); + + return send_buffer; + } void net_plugin_impl::connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { @@ -1916,13 +2133,14 @@ namespace eosio { ++endpoint_itr; c->connecting = true; c->pending_message_buffer.reset(); + c->connecting_deadline = fc::time_point::now()+fc::seconds(c->connecting_timeout_in_seconds); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this] ( const boost::system::error_code& err ) { auto c = weak_conn.lock(); if (!c) return; if( !err && c->socket->is_open() ) { if (start_session( c )) { - c->send_handshake (); + c->send_handshake(); send_p2p_request(c); } } else { @@ -2571,6 +2789,30 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } + void net_plugin_impl::handle_message( connection_ptr c, const checkpoint_request_message &msg) { + + if ( msg.end_block == 0 || msg.end_block < msg.start_block) return; + + fc_dlog(logger, "received checkpoint request message"); + vector scp_stack; + controller &cc = my_impl->chain_plug->chain(); + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + for (auto i = msg.end_block; i >= msg.start_block && i>0; --i) { + auto bid = cc.get_block_id_for_num(i); + auto scp = pcc.pbft_db.get_stable_checkpoint_by_id(bid); + if (scp != pbft_stable_checkpoint{}) { + scp_stack.push_back(scp); + } + } + fc_dlog(logger, "sent ${n} stable checkpoints on my node",("n",scp_stack.size())); + + while (scp_stack.size()) { + c->enqueue(scp_stack.back()); + scp_stack.pop_back(); + } + } + void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2639,6 +2881,13 @@ namespace eosio { try { chain_plug->accept_block(msg); //, sync_master->is_active(c)); reason = no_reason; + auto blk = msg; + auto &pcc = chain_plug->pbft_ctrl(); + auto scp = pcc.pbft_db.fetch_stable_checkpoint_from_blk_extn(blk); + + if (scp != pbft_stable_checkpoint{}) { + handle_message(c, scp); + } } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); reason = unlinkable; @@ -2679,6 +2928,210 @@ namespace eosio { } } + + template + bool net_plugin_impl::is_pbft_msg_outdated(M const & msg) { + return (time_point_sec(time_point::now()) > time_point_sec(msg.timestamp) + pbft_message_TTL); + } + + template + bool net_plugin_impl::is_pbft_msg_valid(M const & msg) { + // Do some basic validations of an incoming pbft msg, bad msgs should be quickly discarded without affecting state. 
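// A standalone restatement of the cheap pre-checks applied below to every incoming PBFT
// message: the chain id must match, the message timestamp must still be within the message
// TTL, and the node must not be syncing. The struct and the 10-minute default are
// illustrative placeholders; the plugin's actual pbft_message_TTL is defined elsewhere.
#include <chrono>
#include <string>

struct pbft_msg_header {                                   // stand-in for the common fields of pbft messages
    std::string chain_id;
    std::chrono::system_clock::time_point timestamp;
};

inline bool passes_basic_checks(const pbft_msg_header& msg,
                                const std::string& local_chain_id,
                                bool node_is_syncing,
                                std::chrono::seconds ttl = std::chrono::minutes{10}) {
    const bool outdated = std::chrono::system_clock::now() > msg.timestamp + ttl;
    return msg.chain_id == local_chain_id && !outdated && !node_is_syncing;
}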
+ return (chain_id == msg.chain_id && !is_pbft_msg_outdated(msg) && !sync_master->is_syncing()); + } + + void net_plugin_impl::bcast_pbft_msg(const net_message &msg) { + if (sync_master->is_syncing()) return; + + auto deadline = time_point_sec(time_point::now()) + pbft_message_TTL; + + for (auto &conn: connections) { + if (conn->pbft_ready()) { + conn->enqueue_pbft(encode_pbft_message(msg), deadline); + } + } + } + + void net_plugin_impl::forward_pbft_msg(connection_ptr c, const net_message &msg) { + auto deadline = time_point_sec(time_point::now()) + pbft_message_TTL; + + for (auto &conn: connections) { + if (conn != c && conn->pbft_ready()) { + conn->enqueue_pbft(encode_pbft_message(msg), deadline); + } + } + } + + void net_plugin_impl::pbft_outgoing_prepare(const pbft_prepare &msg) { + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_prepare(msg)) return; + + bcast_pbft_msg(msg); + fc_ilog( logger, "sent prepare at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_num)("v", msg.view)("k", msg.public_key)); + } + + void net_plugin_impl::pbft_outgoing_commit(const pbft_commit &msg) { + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_commit(msg)) return; + + bcast_pbft_msg(msg); + fc_ilog( logger, "sent commit at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_num)("v", msg.view)("k", msg.public_key)); + } + + void net_plugin_impl::pbft_outgoing_view_change(const pbft_view_change &msg) { + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_view_change(msg)) return; + + bcast_pbft_msg(msg); + fc_ilog( logger, "sent view change {cv: ${cv}, tv: ${tv}} from ${v}", ("cv", msg.current_view)("tv", msg.target_view)("v", msg.public_key)); + } + + void net_plugin_impl::pbft_outgoing_new_view(const pbft_new_view &msg) { + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_new_view(msg)) return; + + bcast_pbft_msg(msg); + fc_ilog( logger, "sent new view at view: ${v}, from ${k}, ", ("v", msg.view)("k", msg.public_key)); + } + + void net_plugin_impl::pbft_outgoing_checkpoint(const pbft_checkpoint &msg) { + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_checkpoint(msg)) return; + + bcast_pbft_msg(msg); + } + + bool net_plugin_impl::maybe_add_pbft_cache(const string &uuid){ + auto itr = pbft_message_cache.find(uuid); + if (itr == pbft_message_cache.end()) { + //add to cache + pbft_message_cache[uuid] = time_point_sec(time_point::now()) + pbft_message_cache_TTL; + return true; + } + return false; + } + + void net_plugin_impl::clean_expired_pbft_cache(){ + auto itr = pbft_message_cache.begin(); + auto now = time_point::now(); + + while (itr != pbft_message_cache.end()) { + if (itr->second <= now) { + itr = pbft_message_cache.erase(itr); + } else + itr++; + } + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_prepare &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_prepare(msg)) return; + 
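// A compact standalone model of the uuid de-duplication used by maybe_add_pbft_cache() /
// clean_expired_pbft_cache() above: each message uuid is remembered until its TTL expires,
// and a message is relayed only the first time it is seen, which keeps the gossip-style
// forward_pbft_msg() from re-broadcasting endlessly. Class name and TTL default are
// illustrative.
#include <chrono>
#include <string>
#include <unordered_map>

class seen_cache {
public:
    explicit seen_cache(std::chrono::seconds ttl = std::chrono::minutes{10}) : ttl_(ttl) {}

    // Returns true exactly once per uuid within the TTL window ("newly added").
    bool add_if_absent(const std::string& uuid) {
        return cache_.try_emplace(uuid, std::chrono::steady_clock::now() + ttl_).second;
    }

    // Invoked periodically by a ticker so the cache stays bounded.
    void clean_expired() {
        const auto now = std::chrono::steady_clock::now();
        for (auto it = cache_.begin(); it != cache_.end(); ) {
            if (it->second <= now) it = cache_.erase(it);
            else ++it;
        }
    }

private:
    std::chrono::seconds ttl_;
    std::unordered_map<std::string, std::chrono::steady_clock::time_point> cache_;
};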
+ forward_pbft_msg(c, msg); + fc_ilog( logger, "received prepare at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_num)("v", msg.view)("k", msg.public_key)); + + pbft_incoming_prepare_channel.publish(msg); + + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_commit &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_commit(msg)) return; + + forward_pbft_msg(c, msg); + fc_ilog( logger, "received commit at height: ${n}, view: ${v}, from ${k}, ", ("n", msg.block_num)("v", msg.view)("k", msg.public_key)); + + pbft_incoming_commit_channel.publish(msg); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_view_change &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_view_change(msg)) return; + + forward_pbft_msg(c, msg); + fc_ilog( logger, "received view change {cv: ${cv}, tv: ${tv}} from ${v}", ("cv", msg.current_view)("tv", msg.target_view)("v", msg.public_key)); + + pbft_incoming_view_change_channel.publish(msg); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_new_view &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_new_view(msg)) return; + + forward_pbft_msg(c, msg); + fc_dlog( logger, "received new view: ${n}, from ${v}", ("n", msg)("v", msg.public_key)); + + pbft_incoming_new_view_channel.publish(msg); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_checkpoint &msg) { + + if (!is_pbft_msg_valid(msg)) return; + + auto added = maybe_add_pbft_cache(msg.uuid); + if (!added) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + if (!pcc.pbft_db.is_valid_checkpoint(msg)) return; + + forward_pbft_msg(c, msg); + fc_dlog( logger, "received checkpoint at ${n}, from ${v}", ("n", msg.block_num)("v", msg.public_key)); + + pbft_incoming_checkpoint_channel.publish(msg); + } + + void net_plugin_impl::handle_message( connection_ptr c, const pbft_stable_checkpoint &msg) { + if (chain_id != msg.chain_id) return; + + pbft_controller &pcc = my_impl->chain_plug->pbft_ctrl(); + + if (pcc.pbft_db.is_valid_stable_checkpoint(msg)) { + fc_ilog(logger, "received stable checkpoint at ${n}, from ${v}", ("n", msg.block_num)("v", c->peer_name())); + for (auto cp: msg.checkpoints) { + pbft_incoming_checkpoint_channel.publish(cp); + } + } + } + void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { connector_check->expires_from_now( du); connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { @@ -2705,6 +3158,86 @@ namespace eosio { }); } + void net_plugin_impl::pbft_message_cache_ticker() { + pbft_message_cache_timer->expires_from_now (pbft_message_cache_tick_interval); + pbft_message_cache_timer->async_wait ([this](boost::system::error_code ec) { + pbft_message_cache_ticker (); + if (ec) { + wlog ("pbft message cache ticker error: ${m}", ("m", ec.message())); + } + clean_expired_pbft_cache(); + }); + } + + void net_plugin_impl::connection_monitor_ticker() { + connection_monitor_timer->expires_from_now (connection_monitor_tick_interval); + 
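// The tickers added above (pbft_message_cache_ticker / connection_monitor_ticker) follow the
// usual Asio "re-arm inside the handler" pattern. A self-contained sketch of that pattern,
// with a generic callback standing in for the cache cleanup or connection report; like the
// plugin's tickers it re-arms before inspecting the error code.
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>
#include <utility>

class periodic_ticker {
public:
    periodic_ticker(boost::asio::io_context& io, std::chrono::milliseconds interval, std::function<void()> work)
        : timer_(io), interval_(interval), work_(std::move(work)) {}

    void start() {
        timer_.expires_after(interval_);
        timer_.async_wait([this](const boost::system::error_code& ec) {
            start();                                                   // schedule the next tick first
            if (ec) { std::cerr << "ticker error: " << ec.message() << "\n"; return; }
            work_();                                                   // then do the periodic work
        });
    }

private:
    boost::asio::steady_timer timer_;
    std::chrono::milliseconds interval_;
    std::function<void()> work_;
};

// usage: periodic_ticker t{io, std::chrono::seconds{10}, []{ /* e.g. clean expired cache entries */ }};
//        t.start(); io.run();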
connection_monitor_timer->async_wait ([this](boost::system::error_code ec) { + connection_monitor_ticker (); + if (ec) { + wlog ("connection monitor ticker error: ${m}", ("m", ec.message())); + } + int total=0; + int current=0; + for(auto &conn: connections){ + if(conn->current()){ + ++current; + } + ++total; + auto is_open = conn->socket && conn->socket->is_open(); +// auto paddr = conn->peer_addr; +// paddr.insert(0, 20 - paddr.length(), ' '); + std::ostringstream ss; + + auto so = is_open?"1":"0"; + auto con = conn->connecting ?"1":"0"; + auto syn = conn->syncing ?"1":"0"; + auto cur = conn->current() ?"1":"0"; + ss << so << con << syn << cur ; + auto status = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(22) << conn->peer_addr; + auto paddr = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->buffer_queue.write_queue_size(); + auto write_queue = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->buffer_queue.out_queue_size(); + auto out_queue = ss.str(); + + ss.str(""); + ss.clear(); + + ss << std::setfill(' ') << std::setw(6) << conn->pbft_queue.size(); + auto pbft_queue = ss.str(); + + auto conn_str = conn->peer_addr; + if(conn_str.empty()) { + try { + conn_str = boost::lexical_cast(conn->socket->remote_endpoint()); + } catch (...) { + + } + } + + dlog("connection: ${conn} \tstatus(socket|connecting|syncing|current): ${status}\t|\twrite_queue: ${write}\t|\tout_queue: ${out}\t|\tpbft_queue: ${pbft}", ("status",status)("conn",conn_str)("write",write_queue)("out",out_queue)("pbft",pbft_queue)); + } + dlog("connections stats: current : ${current}\t total : ${total} ",("current",current)("total",total)); + dlog("================================================================================================"); + auto local_trx_pool_size = local_txns.size(); + fc_dlog(logger, "local trx pool size: ${local_trx_pool_size}",("local_trx_pool_size",local_trx_pool_size)); + fc_dlog(logger, "================================================================================================"); + }); + } + void net_plugin_impl::ticker() { keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait([this](boost::system::error_code ec) { @@ -2781,6 +3314,14 @@ namespace eosio { it = connections.erase(it); continue; } + }else if((*it)->connecting && (*it)->connecting_deadline < fc::time_point::now()){ + if( (*it)->peer_addr.length() > 0) { + close(*it); + } + else { + it = connections.erase(it); + continue; + } } ++it; } @@ -3092,6 +3633,10 @@ namespace eosio { my->keepalive_timer.reset( new boost::asio::steady_timer( app().get_io_service())); my->ticker(); + my->pbft_message_cache_timer.reset( new boost::asio::steady_timer( app().get_io_service())); + my->connection_monitor_timer.reset( new boost::asio::steady_timer( app().get_io_service())); + my->pbft_message_cache_ticker(); +// my->connection_monitor_ticker(); } FC_LOG_AND_RETHROW() } @@ -3117,6 +3662,16 @@ namespace eosio { } my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->pbft_outgoing_prepare_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_prepare, my.get(), _1)); + my->pbft_outgoing_commit_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_commit, my.get(), _1)); + my->pbft_outgoing_view_change_subscription = 
app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_view_change, my.get(), _1)); + my->pbft_outgoing_new_view_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_new_view, my.get(), _1)); + my->pbft_outgoing_checkpoint_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::pbft_outgoing_checkpoint, my.get(), _1)); if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; @@ -3208,6 +3763,19 @@ namespace eosio { } return result; } + + bool net_plugin::is_syncing()const { + return my->sync_master->is_syncing(); + } + + net_plugin_impl::net_plugin_impl(): + pbft_incoming_prepare_channel(app().get_channel()), + pbft_incoming_commit_channel(app().get_channel()), + pbft_incoming_view_change_channel(app().get_channel()), + pbft_incoming_new_view_channel(app().get_channel()), + pbft_incoming_checkpoint_channel(app().get_channel()) + {} + connection_ptr net_plugin_impl::find_connection(const string& host )const { for( const auto& c : connections ) if( c->peer_addr == host ) return c; diff --git a/plugins/pbft_plugin/CMakeLists.txt b/plugins/pbft_plugin/CMakeLists.txt new file mode 100644 index 00000000000..9ca17f811f9 --- /dev/null +++ b/plugins/pbft_plugin/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB HEADERS "include/eosio/pbft_plugin/*.hpp") +add_library( pbft_plugin + pbft_plugin.cpp + ${HEADERS} ) + +target_link_libraries( pbft_plugin appbase fc eosio_chain chain_plugin net_plugin) +target_include_directories( pbft_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include") diff --git a/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp b/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp new file mode 100644 index 00000000000..d94c74785e3 --- /dev/null +++ b/plugins/pbft_plugin/include/eosio/pbft_plugin/pbft_plugin.hpp @@ -0,0 +1,28 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once +#include + +namespace eosio { + +using namespace appbase; + +class pbft_plugin : public appbase::plugin { +public: + pbft_plugin(); + virtual ~pbft_plugin(); + + APPBASE_PLUGIN_REQUIRES() + virtual void set_program_options(options_description&, options_description& cfg) override; + + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + +private: + std::unique_ptr my; +}; + +} diff --git a/plugins/pbft_plugin/pbft_plugin.cpp b/plugins/pbft_plugin/pbft_plugin.cpp new file mode 100644 index 00000000000..3b1762ba744 --- /dev/null +++ b/plugins/pbft_plugin/pbft_plugin.cpp @@ -0,0 +1,154 @@ +#include + +#include +#include +#include +#include +#include +#include + +namespace eosio { + static appbase::abstract_plugin &_pbft_plugin = app().register_plugin(); + using namespace std; + using namespace eosio::chain; + + class pbft_plugin_impl { + public: + unique_ptr prepare_timer; + unique_ptr commit_timer; + unique_ptr view_change_timer; + unique_ptr checkpoint_timer; + + boost::asio::steady_timer::duration prepare_timeout{std::chrono::milliseconds{1000}}; + boost::asio::steady_timer::duration commit_timeout{std::chrono::milliseconds{1000}}; + boost::asio::steady_timer::duration view_change_timeout{std::chrono::seconds{5}}; + boost::asio::steady_timer::duration checkpoint_timeout{std::chrono::seconds{50}}; + + void prepare_timer_tick(); + + void commit_timer_tick(); + + void view_change_timer_tick(); + + void checkpoint_timer_tick(); + + private: + bool 
upgraded = false; + bool is_replaying(); + bool is_syncing(); + bool pbft_ready(); + }; + + pbft_plugin::pbft_plugin() : my(new pbft_plugin_impl()) {} + + pbft_plugin::~pbft_plugin() = default; + + void pbft_plugin::set_program_options(options_description &, options_description &cfg) { + } + + void pbft_plugin::plugin_initialize(const variables_map &options) { + ilog("Initialize pbft plugin"); + my->prepare_timer = std::make_unique(app().get_io_service()); + my->commit_timer = std::make_unique(app().get_io_service()); + my->view_change_timer = std::make_unique(app().get_io_service()); + my->checkpoint_timer = std::make_unique(app().get_io_service()); + } + + void pbft_plugin::plugin_startup() { + my->prepare_timer_tick(); + my->commit_timer_tick(); + my->view_change_timer_tick(); + my->checkpoint_timer_tick(); + } + + void pbft_plugin::plugin_shutdown() { + } + + void pbft_plugin_impl::prepare_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + prepare_timer->expires_from_now(prepare_timeout); + prepare_timer->async_wait([&](boost::system::error_code ec) { + prepare_timer_tick(); + if (ec) { + wlog ("pbft plugin prepare timer tick error: ${m}", ("m", ec.message())); + } else { + if (pbft_ready()) pbft_ctrl.maybe_pbft_prepare(); + } + }); + } + + void pbft_plugin_impl::commit_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + commit_timer->expires_from_now(commit_timeout); + commit_timer->async_wait([&](boost::system::error_code ec) { + commit_timer_tick(); + if (ec) { + wlog ("pbft plugin commit timer tick error: ${m}", ("m", ec.message())); + } else { + if (pbft_ready()) pbft_ctrl.maybe_pbft_commit(); + } + }); + } + + void pbft_plugin_impl::view_change_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + try { + view_change_timer->cancel(); + } catch (boost::system::system_error &e) { + elog("view change timer cancel error: ${e}", ("e", e.what())); + } + view_change_timer->expires_from_now(view_change_timeout); + view_change_timer->async_wait([&](boost::system::error_code ec) { + view_change_timer_tick(); + if (ec) { + wlog ("pbft plugin view change timer tick error: ${m}", ("m", ec.message())); + } else { + if (pbft_ready()) pbft_ctrl.maybe_pbft_view_change(); + } + }); + } + + void pbft_plugin_impl::checkpoint_timer_tick() { + chain::pbft_controller &pbft_ctrl = app().get_plugin().pbft_ctrl(); + checkpoint_timer->expires_from_now(checkpoint_timeout); + checkpoint_timer->async_wait([&](boost::system::error_code ec) { + checkpoint_timer_tick(); + if (ec) { + wlog ("pbft plugin checkpoint timer tick error: ${m}", ("m", ec.message())); + } else { + if (pbft_ready()) pbft_ctrl.send_pbft_checkpoint(); + } + }); + } + + bool pbft_plugin_impl::is_replaying() { + return app().get_plugin().chain().is_replaying(); + } + + bool pbft_plugin_impl::is_syncing() { + return app().get_plugin().is_syncing(); + } + + bool pbft_plugin_impl::pbft_ready() { + // only trigger pbft related logic if I am in sync and replayed. + + auto& chain = app().get_plugin().chain(); + auto new_version = chain.is_upgraded(); + + if (new_version && !upgraded) { + wlog( "\n" + "******** BATCH-PBFT ENABLED ********\n" + "* *\n" + "* -- The blockchain -- *\n" + "* - has successfully switched - *\n" + "* - into the new version - *\n" + "* - Please enjoy a - *\n" + "* - better performance! 
- *\n" + "* *\n" + "************************************\n" ); + upgraded = true; + } + + return (new_version && (!is_syncing() && !is_replaying())); + } +} diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 7fcde1ac98c..62f4664ed82 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -90,6 +90,8 @@ void producer_api_plugin::plugin_startup() { INVOKE_R_V(producer, get_integrity_hash), 201), CALL(producer, producer, create_snapshot, INVOKE_R_V(producer, create_snapshot), 201), + CALL(producer, producer, set_pbft_current_view, + INVOKE_V_R(producer, set_pbft_current_view, uint32_t), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index c43f0e0f38b..f6d10eb277b 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -82,6 +82,8 @@ class producer_plugin : public appbase::plugin { integrity_hash_information get_integrity_hash() const; snapshot_information create_snapshot() const; + void set_pbft_current_view(const uint32_t view); + signal confirmed_block; private: std::shared_ptr my; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 7bcfb7042b7..61f6ac15f12 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -224,10 +224,13 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader; new_block_header.timestamp = new_block_header.timestamp.next(); new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); + + auto new_version = chain.is_upgraded(); + + auto new_bs = bsp->generate_next(new_block_header.timestamp, new_version); // for newly installed producers we can set their watermarks to the block they became active - if (new_bs.maybe_promote_pending() && bsp->active_schedule.version != new_bs.active_schedule.version) { + if (new_bs.maybe_promote_pending(new_version) && bsp->active_schedule.version != new_bs.active_schedule.version) { flat_set new_producers; new_producers.reserve(new_bs.active_schedule.producers.size()); for( const auto& p: new_bs.active_schedule.producers) { @@ -298,7 +301,8 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); - /* de-dupe here... no point in aborting block if we already know the block */ + + /* de-dupe here... no point in aborting block if we already know the block */ auto existing = chain.fetch_block_by_id( id ); if( existing ) { return; } @@ -338,11 +342,22 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (block->block_num() % 1000 == 0) ) { - ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", - ("p",block->producer)("id",fc::variant(block->id()).as_string().substr(8,16)) - ("n",block_header::num_from_id(block->id()))("t",block->timestamp) - ("count",block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); + if (chain.is_upgraded()) { + ilog("Received block ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, lscb: ${lscb}, latency: ${latency} ms]", + ("p", block->producer)("id", fc::variant(block->id()).as_string().substr(8, 16)) + ("n", block_header::num_from_id(block->id()))("t", block->timestamp) + ("count", block->transactions.size())("lib", chain.last_irreversible_block_num()) + ("lscb", chain.last_stable_checkpoint_block_num()) + ("latency", (fc::time_point::now() - block->timestamp).count() / 1000)); + } else { + ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", + ("p",block->producer)("id",fc::variant(block->id()).as_string().substr(8,16)) + ("n",block_header::num_from_id(block->id()))("t",block->timestamp) + ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) + ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); + } } } @@ -964,6 +979,12 @@ producer_plugin::snapshot_information producer_plugin::create_snapshot() const { return {head_id, snapshot_path}; } +void producer_plugin::set_pbft_current_view(const uint32_t view) { + //this is used to recover from a disaster, do not set this unless you have to do so. + pbft_controller& pbft_ctrl = app().get_plugin().pbft_ctrl(); + pbft_ctrl.state_machine.manually_set_current_view(view); +} + optional producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const { chain::controller& chain = chain_plug->chain(); const auto& hbs = chain.head_block_state(); @@ -1028,7 +1049,7 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const { fc::time_point block_time = base + fc::microseconds(min_time_to_next_block); - if((block_time - now) < fc::microseconds(config::block_interval_us/10) ) { // we must sleep for at least 50ms + if((block_time - now) < fc::microseconds(config::block_interval_us/5) ) { // we must sleep for at least 50ms block_time += fc::microseconds(config::block_interval_us); } return block_time; @@ -1083,7 +1104,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { _pending_block_mode = pending_block_mode::speculating; } - if (_pending_block_mode == pending_block_mode::producing) { + auto new_version = chain.is_upgraded(); + + if (_pending_block_mode == pending_block_mode::producing && !new_version) { // determine if our watermark excludes us from producing at this point if (currrent_watermark_itr != _producer_watermarks.end()) { if (currrent_watermark_itr->second >= hbs->block_num + 1) { @@ -1105,7 +1128,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { uint16_t blocks_to_confirm = 0; - if (_pending_block_mode == pending_block_mode::producing) { + if (_pending_block_mode == pending_block_mode::producing && !new_version) { // determine how many blocks this producer can confirm // 1) if it is not a producer from this node, assume no confirmations (we will discard this block anyway) // 2) if it is a producer on this node that has never produced, the conservative approach is to assume no @@ -1584,11 +1607,10 @@ void producer_plugin_impl::produce_block() { block_state_ptr new_bs = chain.head_block_state(); _producer_watermarks[new_bs->header.producer] = chain.head_block_num(); - ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", + ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, lscb: ${lscb}]", ("p",new_bs->header.producer)("id",fc::variant(new_bs->id).as_string().substr(0,16)) ("n",new_bs->block_num)("t",new_bs->header.timestamp) - ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); - + ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("lscb", chain.last_stable_checkpoint_block_num())); } } // namespace eosio diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 707f75bd9de..3a2c4fed5fb 100644 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -270,7 +270,7 @@ struct txn_test_gen_plugin_impl { act_a_to_b.name = N(transfer); act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", fc::mutable_variant_object()("l", salt))), abi_serializer_max_time); @@ -278,7 +278,7 @@ struct txn_test_gen_plugin_impl { act_b_to_a.name = N(transfer); act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", fc::mutable_variant_object()("l", salt))), abi_serializer_max_time); diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 7d03ce0001f..9399302f246 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -57,6 +57,7 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_api_plugin -Wl,${no_whole_archive_flag} +# PRIVATE -Wl,${whole_archive_flag} pbft_plugin -Wl,${no_whole_archive_flag} # PRIVATE -Wl,${whole_archive_flag} faucet_testnet_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} txn_test_gen_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} db_size_api_plugin -Wl,${no_whole_archive_flag} @@ -64,7 +65,7 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} test_control_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} test_control_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} - PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin + PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin pbft_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) if(BUILD_MONGO_DB_PLUGIN) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 52eb9a0e9ab..a21babac350 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -101,7 +102,7 @@ int main(int argc, char** argv) 
.default_unix_socket_path = "", .default_http_port = 8888 }); - if(!app().initialize(argc, argv)) + if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); ilog("nodeos version ${ver}", ("ver", app().version_string())); diff --git a/tests/chain_plugin_tests.cpp b/tests/chain_plugin_tests.cpp index 5a489c255b4..9d94e384708 100644 --- a/tests/chain_plugin_tests.cpp +++ b/tests/chain_plugin_tests.cpp @@ -90,7 +90,7 @@ BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, TESTER ) try { char headnumstr[20]; sprintf(headnumstr, "%d", headnum); chain_apis::read_only::get_block_params param{headnumstr}; - chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); // block should be decoded successfully std::string block_str = json::to_pretty_string(plugin.get_block(param)); diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index bb332b9a000..8084ddd2f93 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -72,7 +72,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { produce_blocks(1); // iterate over scope - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_by_scope_params param{N(eosio.token), N(accounts), "inita", "", 10}; eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); @@ -193,7 +193,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio.token); p.scope = "inita"; @@ -361,7 +361,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX), *(this->pbft_ctrl)); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio); p.scope = "eosio"; diff --git a/unittests/pbft_tests.cpp b/unittests/pbft_tests.cpp new file mode 100644 index 00000000000..50d8e573fe4 --- /dev/null +++ b/unittests/pbft_tests.cpp @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +using namespace eosio::chain; +using namespace eosio::testing; + + +BOOST_AUTO_TEST_SUITE(pbft_tests) + + BOOST_AUTO_TEST_CASE(can_init) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + + tester.produce_block(); + auto p = pbft_ctrl.pbft_db.should_prepared(); + BOOST_CHECK(!p); + } + + BOOST_AUTO_TEST_CASE(can_advance_lib_in_old_version) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + + auto privkey = tester::get_private_key( N(eosio), "active" ); + auto pubkey = tester::get_public_key( N(eosio), "active"); + auto sp = [privkey]( const eosio::chain::digest_type& digest ) { + return privkey.sign(digest); + }; + std::map msp; + msp[pubkey]=sp; + ctrl.set_my_signature_providers(msp); + + tester.produce_block();//produce block num 
2 + BOOST_REQUIRE_EQUAL(ctrl.last_irreversible_block_num(), 0); + BOOST_REQUIRE_EQUAL(ctrl.head_block_num(), 2); + tester.produce_block(); + BOOST_REQUIRE_EQUAL(ctrl.last_irreversible_block_num(), 2); + BOOST_REQUIRE_EQUAL(ctrl.head_block_num(), 3); + } + +BOOST_AUTO_TEST_CASE(can_advance_lib_after_upgrade) { + tester tester; + controller &ctrl = *tester.control.get(); + pbft_controller pbft_ctrl{ctrl}; + ctrl.set_upo(150); + + const auto& upo = ctrl.db().get(); + const auto upo_upgrade_target_block_num = upo.upgrade_target_block_num; + BOOST_CHECK_EQUAL(upo_upgrade_target_block_num, 150); + + + auto privkey = tester::get_private_key( N(eosio), "active" ); + auto pubkey = tester::get_public_key( N(eosio), "active"); + auto sp = [privkey]( const eosio::chain::digest_type& digest ) { + return privkey.sign(digest); + }; + std::map msp; + msp[pubkey]=sp; + ctrl.set_my_signature_providers(msp); + + auto is_upgraded = ctrl.is_upgraded(); + + BOOST_CHECK_EQUAL(is_upgraded, false); + + tester.produce_block();//produce block num 2 + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 0); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 2); + tester.produce_blocks(150); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 151); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 152); + + is_upgraded = ctrl.is_upgraded(); + BOOST_CHECK_EQUAL(is_upgraded, true); + + tester.produce_blocks(10); + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), false); + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 151); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 162); + + pbft_ctrl.maybe_pbft_prepare(); + pbft_ctrl.maybe_pbft_commit(); + + BOOST_CHECK_EQUAL(ctrl.pending_pbft_lib(), true); + tester.produce_block(); //set lib using pending pbft lib + + BOOST_CHECK_EQUAL(ctrl.last_irreversible_block_num(), 162); + BOOST_CHECK_EQUAL(ctrl.head_block_num(), 163); +} + + +BOOST_AUTO_TEST_SUITE_END()
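// Both test cases above install the eosio@active key as the node's signature provider with
// the same inline boilerplate; further pbft tests could factor it into a helper along these
// lines. Only calls already used in this file appear below; the helper name is new, and the
// map's mapped type is written as a plain std::function because the flattened diff drops the
// original template arguments, so match it to the alias set_my_signature_providers expects.
#include <functional>
#include <map>

namespace {
    void install_eosio_signature_provider(eosio::chain::controller& ctrl) {
        auto privkey = eosio::testing::tester::get_private_key(N(eosio), "active");
        auto pubkey  = eosio::testing::tester::get_public_key(N(eosio), "active");
        std::map<eosio::chain::public_key_type,
                 std::function<eosio::chain::signature_type(const eosio::chain::digest_type&)>> msp;
        msp[pubkey] = [privkey](const eosio::chain::digest_type& digest) {
            return privkey.sign(digest);           // sign with the producer key, as the cases above do
        };
        ctrl.set_my_signature_providers(msp);
    }
}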