diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 5b28012..92cbde8 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -235,7 +235,6 @@ namespace detail { ~application_impl() { - fc::remove_all(_data_dir / "blockchain/dblock"); } void register_builtin_apis() @@ -248,29 +247,23 @@ namespace detail { void startup() { try { - bool clean = !fc::exists(_data_dir / "blockchain/dblock"); - fc::create_directories(_data_dir / "blockchain/dblock"); + fc::create_directories(_data_dir / "blockchain"); fc::create_directories(_data_dir / "node/transaction_history"); - - - auto initial_state = [&] { + auto initial_state = [this] { ilog("Initializing database..."); - if( _options->count("genesis-json") ){ - //FC_ASSERT( egenesis_json != "" ); - //FC_ASSERT( muse::egenesis::get_egenesis_json_hash() == fc::sha256::hash( egenesis_json ) ); + if( _options->count("genesis-json") ) + { fc::path genesis_path(_options->at("genesis-json").as()); auto genesis = fc::json::from_file( genesis_path ).as(); - genesis.initial_chain_id = MUSE_CHAIN_ID; //fc::sha256::hash( egenesis_json ); + genesis.initial_chain_id = MUSE_CHAIN_ID; return genesis; } else { std::string egenesis_json; muse::egenesis::compute_egenesis_json(egenesis_json); - //FC_ASSERT( egenesis_json != "" ); - //FC_ASSERT( muse::egenesis::get_egenesis_json_hash() == fc::sha256::hash( egenesis_json ) ); auto genesis = fc::json::from_string(egenesis_json).as(); - genesis.initial_chain_id = MUSE_CHAIN_ID; //fc::sha256::hash( egenesis_json ); + genesis.initial_chain_id = MUSE_CHAIN_ID; return genesis; } }; @@ -295,53 +288,18 @@ namespace detail { _chain_db->add_checkpoints( loaded_checkpoints ); if( _options->count("replay-blockchain") ) - { - ilog("Replaying blockchain on user request."); - _chain_db->reindex(_data_dir/"blockchain", initial_state() ); - } else if( clean ) { + _chain_db->wipe( _data_dir / "blockchain", false ); - auto is_new = [&]() -> bool - { - // directory doesn't exist - if( !fc::exists( _data_dir ) ) - return true; - // if directory exists but is empty, return true; else false. - return ( fc::directory_iterator( _data_dir ) == fc::directory_iterator() ); - }; - - auto is_outdated = [&]() -> bool - { - if( !fc::exists( _data_dir / "db_version" ) ) - return true; - std::string version_str; - fc::read_file_contents( _data_dir / "db_version", version_str ); - return (version_str != GRAPHENE_CURRENT_DB_VERSION); - }; - if( !is_new() && is_outdated() ) - { - ilog("Replaying blockchain due to version upgrade"); - - fc::remove_all( _data_dir / "db_version" ); - _chain_db->reindex(_data_dir / "blockchain", initial_state() ); - - // doing this down here helps ensure that DB will be wiped - // if any of the above steps were interrupted on a previous run - if( !fc::exists( _data_dir / "db_version" ) ) - { - std::ofstream db_version( - (_data_dir / "db_version").generic_string().c_str(), - std::ios::out | std::ios::binary | std::ios::trunc ); - std::string version_string = GRAPHENE_CURRENT_DB_VERSION; - db_version.write( version_string.c_str(), version_string.size() ); - db_version.close(); - } - } else { - _chain_db->open(_data_dir / "blockchain", initial_state() ); - } - } else { - wlog("Detected unclean shutdown. 
Replaying blockchain..."); - _chain_db->reindex(_data_dir / "blockchain", initial_state() ); + try + { + _chain_db->open( _data_dir / "blockchain", initial_state(), GRAPHENE_CURRENT_DB_VERSION ); } + catch( const fc::exception& e ) + { + elog( "Caught exception ${e} in open(), you might want to force a replay", ("e", e.to_detail_string()) ); + throw; + } + _pending_trx_db->open(_data_dir / "node/transaction_history" ); if( _options->count("force-validate") ) diff --git a/libraries/chain/block_database.cpp b/libraries/chain/block_database.cpp index c90dc85..8c0b8c4 100644 --- a/libraries/chain/block_database.cpp +++ b/libraries/chain/block_database.cpp @@ -20,14 +20,15 @@ void block_database::open( const fc::path& dbdir ) _block_num_to_pos.exceptions(std::ios_base::failbit | std::ios_base::badbit); _blocks.exceptions(std::ios_base::failbit | std::ios_base::badbit); - if( !fc::exists( dbdir/"index" ) ) + _index_filename = dbdir / "index"; + if( !fc::exists( _index_filename ) ) { - _block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); + _block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); _blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc); } else { - _block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); + _block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); _blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out ); } } FC_CAPTURE_AND_RETHROW( (dbdir) ) } @@ -96,7 +97,7 @@ bool block_database::contains( const block_id_type& id )const index_entry e; auto index_pos = sizeof(e)*block_header::num_from_id(id); _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); - if ( _block_num_to_pos.tellg() <= index_pos ) + if ( _block_num_to_pos.tellg() < index_pos + sizeof(e) ) return false; _block_num_to_pos.seekg( index_pos ); _block_num_to_pos.read( (char*)&e, sizeof(e) ); @@ -181,34 +182,47 @@ optional block_database::fetch_by_number( uint32_t block_num )cons return optional(); } -optional block_database::last()const -{ +optional block_database::last_index_entry()const { try { index_entry e; + _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); + std::streampos pos = _block_num_to_pos.tellg(); + if( pos < sizeof(index_entry) ) + return optional(); - if( _block_num_to_pos.tellp() < sizeof(index_entry) ) - return optional(); + pos -= pos % sizeof(index_entry); - _block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - uint64_t pos = _block_num_to_pos.tellg(); - while( e.block_size == 0 && pos > 0 ) + _blocks.seekg( 0, _block_num_to_pos.end ); + const std::streampos blocks_size = _blocks.tellg(); + while( pos > 0 ) { pos -= sizeof(index_entry); _block_num_to_pos.seekg( pos ); _block_num_to_pos.read( (char*)&e, sizeof(e) ); + if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0 + && e.block_pos + e.block_size <= blocks_size ) + try + { + vector data( e.block_size ); + _blocks.seekg( e.block_pos ); + _blocks.read( data.data(), e.block_size ); + if( _blocks.gcount() == e.block_size ) + { + const signed_block block = fc::raw::unpack(data); + if( 
block.id() == e.block_id ) + return e; + } + } + catch (const fc::exception&) + { + } + catch (const std::exception&) + { + } + fc::resize_file( _index_filename, pos ); } - - if( e.block_size == 0 ) - return optional(); - - vector data( e.block_size ); - _blocks.seekg( e.block_pos ); - _blocks.read( data.data(), e.block_size ); - auto result = fc::raw::unpack(data); - return result; } catch (const fc::exception&) { @@ -216,42 +230,21 @@ optional block_database::last()const catch (const std::exception&) { } + return optional(); +} + +optional block_database::last()const +{ + optional entry = last_index_entry(); + if( entry.valid() ) return fetch_by_number( block_header::num_from_id(entry->block_id) ); return optional(); } optional block_database::last_id()const { - try - { - index_entry e; - _block_num_to_pos.seekg( 0, _block_num_to_pos.end ); - - if( _block_num_to_pos.tellp() < sizeof(index_entry) ) - return optional(); - - _block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - uint64_t pos = _block_num_to_pos.tellg(); - while( e.block_size == 0 && pos > 0 ) - { - pos -= sizeof(index_entry); - _block_num_to_pos.seekg( pos ); - _block_num_to_pos.read( (char*)&e, sizeof(e) ); - } - - if( e.block_size == 0 ) - return optional(); - - return e.block_id; - } - catch (const fc::exception&) - { - } - catch (const std::exception&) - { - } + optional entry = last_index_entry(); + if( entry.valid() ) return entry->block_id; return optional(); } - } } diff --git a/libraries/chain/database.cpp b/libraries/chain/database.cpp index 0e9f4ef..63da45e 100644 --- a/libraries/chain/database.cpp +++ b/libraries/chain/database.cpp @@ -88,10 +88,29 @@ database::~database() clear_pending(); } -void database::open( const fc::path& data_dir, const genesis_state_type& initial_allocation ) +void database::open( const fc::path& data_dir, const genesis_state_type& initial_allocation, + const std::string& db_version ) { try { + bool wipe_object_db = false; + if( !fc::exists( data_dir / "db_version" ) ) + wipe_object_db = true; + else + { + std::string version_string; + fc::read_file_contents( data_dir / "db_version", version_string ); + wipe_object_db = ( version_string != db_version ); + } + if( wipe_object_db ) { + ilog("Wiping object_database due to missing or wrong version"); + object_database::wipe( data_dir ); + std::ofstream version_file( (data_dir / "db_version").generic_string().c_str(), + std::ios::out | std::ios::binary | std::ios::trunc ); + version_file.write( db_version.c_str(), db_version.size() ); + version_file.close(); + } + object_database::open(data_dir); _block_id_to_block.open(data_dir / "database" / "block_num_to_block"); @@ -101,31 +120,65 @@ void database::open( const fc::path& data_dir, const genesis_state_type& initial init_hardforks(); - fc::optional last_block = _block_id_to_block.last(); + fc::optional last_block = _block_id_to_block.last_id(); if( last_block.valid() ) { - _fork_db.start_block( *last_block ); - idump((last_block->id())(last_block->block_num())); - if( last_block->id() != head_block_id() ) - { - FC_ASSERT( head_block_num() == 0, "last block ID does not match current chain state", - ("last_block->block_num()",last_block->block_num())( "head_block_num", head_block_num()) ); - } + FC_ASSERT( *last_block >= head_block_id(), + "last block ID does not match current chain state", + ("last_block->id", last_block)("head_block_id",head_block_num()) ); + reindex( data_dir ); } } FC_CAPTURE_LOG_AND_RETHROW( (data_dir) 
) } -void database::reindex(fc::path data_dir, const genesis_state_type& initial_allocation ) +/** Cuts blocks from the end of the block database. + * + * @param blocks the block database from which to remove blocks + * @param until the last block number to keep in the database + */ +static void cutoff_blocks( block_database& blocks, uint32_t until ) { - try + uint32_t count = 0; + fc::optional< block_id_type > last_id = blocks.last_id(); + while( last_id.valid() && block_header::num_from_id( *last_id ) > until ) { - ilog( "reindexing blockchain" ); - wipe(data_dir, false); - open(data_dir, initial_allocation); - _fork_db.reset(); // override effect of _fork_db.start_block() call in open() + blocks.remove( *last_id ); + count++; + last_id = blocks.last_id(); + } + wlog( "Dropped ${n} blocks from after the gap", ("n", count) ); +} - auto start = fc::time_point::now(); +/** Reads blocks number from start_block_num until last_block_num (inclusive) + * from the blocks database and pushes/applies them. Returns early if a block + * cannot be read from blocks. + * @return the number of the block following the last successfully read, + * usually last_block_num+1 + */ +static uint32_t reindex_range( block_database& blocks, uint32_t start_block_num, uint32_t last_block_num, + std::function push_or_apply ) +{ + for( uint32_t i = start_block_num; i <= last_block_num; ++i ) + { + if( i % 100000 == 0 ) + ilog( "${pct}% ${i} of ${n}", ("pct",double(i*100)/last_block_num)("i",i)("n",last_block_num) ); + fc::optional< signed_block > block = blocks.fetch_by_number(i); + if( !block.valid() ) + { + wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) ); + cutoff_blocks( blocks, i ); + return i; + } + push_or_apply( *block ); + } + return last_block_num + 1; +}; + +void database::reindex( fc::path data_dir ) +{ + try + { auto last_block = _block_id_to_block.last(); if( !last_block ) { @@ -133,71 +186,60 @@ void database::reindex(fc::path data_dir, const genesis_state_type& initial_allo edump((last_block)); return; } + if( last_block->block_num() <= head_block_num()) return; ilog( "Replaying blocks..." 
); _undo_db.disable(); - auto reindex_range = [&]( uint32_t start_block_num, uint32_t last_block_num, uint32_t skip, bool do_push ) - { - for( uint32_t i = start_block_num; i <= last_block_num; ++i ) + auto start = fc::time_point::now(); + const uint32_t last_block_num_in_file = last_block->block_num(); + const uint32_t initial_undo_blocks = MUSE_MAX_UNDO_HISTORY; + + uint32_t first = head_block_num() + 1; + if( last_block_num_in_file > 2 * initial_undo_blocks + && first < last_block_num_in_file - 2 * initial_undo_blocks ) + { + first = reindex_range( _block_id_to_block, first, last_block_num_in_file - 2 * initial_undo_blocks, + [this]( const signed_block& block ) { + apply_block( block, skip_witness_signature | + skip_transaction_signatures | + skip_transaction_dupe_check | + skip_tapos_check | + skip_witness_schedule_check | + skip_authority_check | + skip_validate | /// no need to validate operations + skip_validate_invariants ); + } ); + if( first > last_block_num_in_file - 2 * initial_undo_blocks ) { - if( i % 100000 == 0 ) - std::cerr << " " << double(i*100)/last_block_num << "% "< block = _block_id_to_block.fetch_by_number(i); - if( !block.valid() ) - { - // TODO gap handling may not properly init fork db - wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) ); - uint32_t dropped_count = 0; - while( true ) - { - fc::optional< block_id_type > last_id = _block_id_to_block.last_id(); - // this can trigger if we attempt to e.g. read a file that has block #2 but no block #1 - if( !last_id.valid() ) - break; - // we've caught up to the gap - if( block_header::num_from_id( *last_id ) <= i ) - break; - _block_id_to_block.remove( *last_id ); - dropped_count++; - } - wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) ); - break; - } - if( do_push ) - push_block( *block, skip ); - else - apply_block( *block, skip ); + ilog( "Writing database to disk at block ${i}", ("i",first-1) ); + flush(); + ilog( "Done" ); } - }; - - const uint32_t last_block_num_in_file = last_block->block_num(); - const uint32_t initial_undo_blocks = 100; - - uint32_t first = 1; - - if( last_block_num_in_file > initial_undo_blocks ) - { - uint32_t last = last_block_num_in_file - initial_undo_blocks; - reindex_range( 1, last, - skip_witness_signature | - skip_transaction_signatures | - skip_transaction_dupe_check | - skip_tapos_check | - skip_witness_schedule_check | - skip_authority_check | - skip_validate | /// no need to validate operations - skip_validate_invariants, false ); - first = last+1; - _fork_db.start_block( *_block_id_to_block.fetch_by_number( last ) ); } - else - { - _fork_db.start_block( *_block_id_to_block.fetch_by_number( last_block_num_in_file ) ); + if( last_block_num_in_file > initial_undo_blocks + && first < last_block_num_in_file - initial_undo_blocks ) + { + first = reindex_range( _block_id_to_block, first, last_block_num_in_file - initial_undo_blocks, + [this]( const signed_block& block ) { + apply_block( block, skip_witness_signature | + skip_transaction_signatures | + skip_transaction_dupe_check | + skip_tapos_check | + skip_witness_schedule_check | + skip_authority_check | + skip_validate | /// no need to validate operations + skip_validate_invariants ); + } ); } + if( first > 1 ) + _fork_db.start_block( *_block_id_to_block.fetch_by_number( first - 1 ) ); _undo_db.enable(); - reindex_range( first, last_block_num_in_file, skip_nothing, true ); + reindex_range( _block_id_to_block, first, last_block_num_in_file, + [this]( const signed_block& block ) { + 
push_block( block, skip_nothing ); + } ); auto end = fc::time_point::now(); ilog( "Done reindexing, elapsed time: ${t} sec", ("t",double((end-start).count())/1000000.0 ) ); @@ -220,7 +262,7 @@ void database::close(bool rewind) try { if( !_block_id_to_block.is_open() ) return; - //ilog( "Closing database" ); + ilog( "Closing database" ); // pop all of the blocks that we can given our undo history, this should // throw when there is no more undo history to pop @@ -229,7 +271,6 @@ void database::close(bool rewind) try { uint32_t cutoff = get_dynamic_global_properties().last_irreversible_block_num; - //ilog( "rewinding to last irreversible block number ${c}", ("c",cutoff) ); clear_pending(); while( head_block_num() > cutoff ) @@ -237,24 +278,14 @@ void database::close(bool rewind) block_id_type popped_block_id = head_block_id(); pop_block(); _fork_db.remove(popped_block_id); // doesn't throw on missing - try - { - _block_id_to_block.remove(popped_block_id); - } - catch (const fc::key_not_found_exception&) - { - ilog( "key not found" ); - } } - //idump((head_block_num())(get_dynamic_global_properties().last_irreversible_block_num)); } catch ( const fc::exception& e ) { - // ilog( "exception on rewind ${e}", ("e",e.to_detail_string()) ); + ilog( "exception on rewind ${e}", ("e",e.to_detail_string()) ); } } - //ilog( "Clearing pending state" ); // Since pop_block() will move tx's in the popped blocks into pending, // we have to clear_pending() after we're done popping to get a clean // DB state (issue #336). @@ -864,7 +895,6 @@ void database::pop_block() MUSE_ASSERT( head_block.valid(), pop_empty_chain, "there are no blocks to pop" ); _fork_db.pop_block(); - _block_id_to_block.remove( head_id ); pop_undo(); _popped_tx.insert( _popped_tx.begin(), head_block->transactions.begin(), head_block->transactions.end() ); diff --git a/libraries/chain/include/muse/chain/block_database.hpp b/libraries/chain/include/muse/chain/block_database.hpp index 9b3a9f0..2f591e9 100644 --- a/libraries/chain/include/muse/chain/block_database.hpp +++ b/libraries/chain/include/muse/chain/block_database.hpp @@ -3,6 +3,8 @@ #include namespace muse { namespace chain { + class index_entry; + class block_database { public: @@ -21,6 +23,8 @@ namespace muse { namespace chain { optional last()const; optional last_id()const; private: + optional last_index_entry()const; + fc::path _index_filename; mutable std::fstream _blocks; mutable std::fstream _block_num_to_pos; }; diff --git a/libraries/chain/include/muse/chain/database.hpp b/libraries/chain/include/muse/chain/database.hpp index 2c4af0b..6b439bb 100644 --- a/libraries/chain/include/muse/chain/database.hpp +++ b/libraries/chain/include/muse/chain/database.hpp @@ -66,8 +66,12 @@ namespace muse { namespace chain { * will be initialized with the default state. 
* * @param data_dir Path to open or create database in + * @param genesis_loader A callable object which returns the genesis state to initialize new databases on + * @param db_version a version string that changes when the internal database format and/or logic is modified */ - void open( const fc::path& data_dir, const genesis_state_type& initial_allocation = genesis_state_type() ); + void open( const fc::path& data_dir, + const genesis_state_type& initial_allocation, + const std::string& db_version ); /** * @brief Rebuild object graph from block history and open detabase @@ -75,7 +79,7 @@ namespace muse { namespace chain { * This method may be called after or instead of @ref database::open, and will rebuild the object graph by * replaying blockchain history. When this method exits successfully, the database will be open. */ - void reindex(fc::path data_dir, const genesis_state_type& initial_allocation = genesis_state_type() ); + void reindex( fc::path data_dir ); /** * @brief wipe Delete database from disk, and potentially the raw chain as well. diff --git a/libraries/chain/include/muse/chain/streaming_platform_objects.hpp b/libraries/chain/include/muse/chain/streaming_platform_objects.hpp index cc2740c..d8d6769 100644 --- a/libraries/chain/include/muse/chain/streaming_platform_objects.hpp +++ b/libraries/chain/include/muse/chain/streaming_platform_objects.hpp @@ -74,6 +74,7 @@ namespace muse { namespace chain { * @ingroup object_index */ struct by_name; + struct by_vote_name; typedef multi_index_container< streaming_platform_object, indexed_by< diff --git a/libraries/db/include/graphene/db/undo_database.hpp b/libraries/db/include/graphene/db/undo_database.hpp index f75cf86..ff8c5a0 100644 --- a/libraries/db/include/graphene/db/undo_database.hpp +++ b/libraries/db/include/graphene/db/undo_database.hpp @@ -136,6 +136,7 @@ namespace graphene { namespace db { void undo(); void merge(); void commit(); + void rollback_state(); uint32_t _active_sessions = 0; bool _disabled = true; diff --git a/libraries/db/object_database.cpp b/libraries/db/object_database.cpp index 8e90780..6d9e358 100644 --- a/libraries/db/object_database.cpp +++ b/libraries/db/object_database.cpp @@ -40,6 +40,7 @@ object_database::~object_database(){} void object_database::close() { + // nothing to do } const object* object_database::find_object( object_id_type id )const @@ -70,15 +71,20 @@ index& object_database::get_mutable_index(uint8_t space_id, uint8_t type_id) void object_database::flush() { - //ilog("Save object_database in ${d}", ("d", _data_dir)); + fc::create_directories( _data_dir / "object_database.tmp" / "lock" ); for( uint32_t space = 0; space < _index.size(); ++space ) { - fc::create_directories( _data_dir / "object_database" / fc::to_string(space) ); + fc::create_directories( _data_dir / "object_database.tmp" / fc::to_string(space) ); const auto types = _index[space].size(); for( uint32_t type = 0; type < types; ++type ) if( _index[space][type] ) - _index[space][type]->save( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) ); + _index[space][type]->save( _data_dir / "object_database.tmp" / fc::to_string(space)/fc::to_string(type) ); } + fc::remove_all( _data_dir / "object_database.tmp" / "lock" ); + if( fc::exists( _data_dir / "object_database" ) ) + fc::rename( _data_dir / "object_database", _data_dir / "object_database.old" ); + fc::rename( _data_dir / "object_database.tmp", _data_dir / "object_database" ); + fc::remove_all( _data_dir / "object_database.old" ); } void 
object_database::wipe(const fc::path& data_dir) @@ -91,14 +97,17 @@ void object_database::wipe(const fc::path& data_dir) void object_database::open(const fc::path& data_dir) { try { - //ilog("Opening object database from ${d} ...", ("d", data_dir)); _data_dir = data_dir; + if( fc::exists( _data_dir / "object_database" / "lock" ) ) + { + wlog("Ignoring locked object_database"); + return; + } + ilog("Opening object database from ${d} ...", ("d", data_dir)); for( uint32_t space = 0; space < _index.size(); ++space ) for( uint32_t type = 0; type < _index[space].size(); ++type ) if( _index[space][type] ) _index[space][type]->open( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) ); - //ilog( "Done opening object database." ); - } FC_CAPTURE_AND_RETHROW( (data_dir) ) } diff --git a/libraries/db/undo_database.cpp b/libraries/db/undo_database.cpp index 608209d..66e662a 100644 --- a/libraries/db/undo_database.cpp +++ b/libraries/db/undo_database.cpp @@ -92,16 +92,12 @@ void undo_database::on_remove( const object& obj ) state.removed[obj.id] = obj.clone(); } -void undo_database::undo() +void undo_database::rollback_state() { try { - FC_ASSERT( !_disabled ); - FC_ASSERT( _active_sessions > 0 ); - disable(); - auto& state = _stack.back(); for( auto& item : state.old_values ) { - _db.modify( _db.get_object( item.second->id ), [&]( object& obj ){ obj.move_from( *item.second ); } ); + _db.modify( _db.get_object( item.second->id ), [&item]( object& obj ){ obj.move_from( *item.second ); } ); } for( auto ritr = state.new_ids.begin(); ritr != state.new_ids.end(); ++ritr ) @@ -118,8 +114,14 @@ void undo_database::undo() _db.insert( std::move(*item.second) ); _stack.pop_back(); - if( _stack.empty() ) - _stack.emplace_back(); +} FC_CAPTURE_AND_RETHROW() } + +void undo_database::undo() +{ try { + FC_ASSERT( !_disabled ); + FC_ASSERT( _active_sessions > 0 ); + disable(); + rollback_state(); enable(); --_active_sessions; } FC_CAPTURE_AND_RETHROW() } @@ -127,6 +129,12 @@ void undo_database::undo() void undo_database::merge() { FC_ASSERT( _active_sessions > 0 ); + if( _active_sessions == 1 && _stack.size() == 1 ) + { + _stack.pop_back(); + --_active_sessions; + return; + } FC_ASSERT( _stack.size() >=2 ); auto& state = _stack.back(); auto& prev_state = _stack[_stack.size()-2]; @@ -251,27 +259,7 @@ void undo_database::pop_commit() disable(); try { - auto& state = _stack.back(); - - for( auto& item : state.old_values ) - { - _db.modify( _db.get_object( item.second->id ), [&]( object& obj ){ obj.move_from( *item.second ); } ); - } - - for( auto ritr = state.new_ids.begin(); ritr != state.new_ids.end(); ++ritr ) - { - _db.remove( _db.get_object(*ritr) ); - } - - for( auto& item : state.old_index_next_ids ) - { - _db.get_mutable_index( item.first.space(), item.first.type() ).set_next_id( item.second ); - } - - for( auto& item : state.removed ) - _db.insert( std::move(*item.second) ); - - _stack.pop_back(); + rollback_state(); } catch ( const fc::exception& e ) { diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 9b9254a..368d924 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -109,7 +109,7 @@ live_database_fixture::live_database_fixture() _chain_dir = fc::current_path() / "test_blockchain"; FC_ASSERT( fc::exists( _chain_dir ), "Requires blockchain to test on in ./test_blockchain" ); - db.open( _chain_dir ); + db.open( _chain_dir, genesis_state_type(), "TEST" ); graphene::time::now(); auto ahplugin = app.register_plugin< 
muse::account_history::account_history_plugin >(); @@ -168,7 +168,7 @@ void database_fixture::open_database() if( !data_dir ) { data_dir = fc::temp_directory( graphene::utilities::temp_directory_path() ); const genesis_state_type genesis = prepare_genesis(); - db.open( data_dir->path(), genesis ); + db.open( data_dir->path(), genesis, "test" ); } } diff --git a/tests/tests/block_tests.cpp b/tests/tests/block_tests.cpp index c4a6396..b0a91de 100644 --- a/tests/tests/block_tests.cpp +++ b/tests/tests/block_tests.cpp @@ -136,9 +136,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) // TODO: Don't generate this here signed_block cutoff_block; + uint32_t last_block; { database db; - db.open(data_dir.path(), genesis ); + db.open(data_dir.path(), genesis, "TEST" ); init_witness_keys( db ); b = db.generate_block(db.get_slot_time(1), db.get_scheduled_witness(1), init_account_priv_key(), database::skip_nothing); @@ -156,6 +157,7 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) if( cutoff_height >= 200 ) { cutoff_block = *(db.fetch_block_by_number( cutoff_height )); + last_block = db.head_block_num(); break; } } @@ -163,9 +165,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks ) } { database db; - db.open(data_dir.path(), genesis ); - init_witness_keys( db ); - BOOST_CHECK_EQUAL( db.head_block_num(), cutoff_block.block_num() ); + db.open(data_dir.path(), genesis, "TEST" ); + BOOST_CHECK_EQUAL( db.head_block_num(), last_block ); + while( db.head_block_num() > cutoff_block.block_num() ) + db.pop_block(); b = cutoff_block; for( uint32_t i = 0; i < 200; ++i ) { @@ -192,7 +195,7 @@ BOOST_AUTO_TEST_CASE( undo_block ) fc::temp_directory data_dir( graphene::utilities::temp_directory_path() ); { database db; - db.open(data_dir.path(), genesis ); + db.open(data_dir.path(), genesis, "TEST" ); init_witness_keys( db ); fc::time_point_sec now( MUSE_TESTING_GENESIS_TIMESTAMP ); std::vector< time_point_sec > time_stack; @@ -246,10 +249,10 @@ BOOST_AUTO_TEST_CASE( fork_blocks ) //TODO This test needs 6-7 ish witnesses prior to fork database db1; - db1.open( data_dir1.path(), genesis ); + db1.open( data_dir1.path(), genesis, "TEST" ); init_witness_keys( db1 ); database db2; - db2.open( data_dir2.path(), genesis ); + db2.open( data_dir2.path(), genesis, "TEST" ); init_witness_keys( db2 ); for( uint32_t i = 0; i < 10; ++i ) @@ -314,9 +317,9 @@ BOOST_AUTO_TEST_CASE( switch_forks_undo_create ) database db1, db2; - db1.open( dir1.path(), genesis ); + db1.open( dir1.path(), genesis, "TEST" ); init_witness_keys( db1 ); - db2.open( dir2.path(), genesis ); + db2.open( dir2.path(), genesis, "TEST" ); init_witness_keys( db2 ); const graphene::db::index& account_idx = db1.get_index(implementation_ids, impl_account_object_type); @@ -375,9 +378,9 @@ BOOST_AUTO_TEST_CASE( duplicate_transactions ) database db1, db2; - db1.open(dir1.path(), genesis ); + db1.open(dir1.path(), genesis, "TEST" ); init_witness_keys( db1 ); - db2.open(dir2.path(), genesis ); + db2.open(dir2.path(), genesis, "TEST" ); init_witness_keys( db2 ); BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() ); @@ -428,7 +431,7 @@ BOOST_AUTO_TEST_CASE( tapos ) genesis.init_supply = INITIAL_TEST_SUPPLY; database db1; - db1.open(dir1.path(), genesis ); + db1.open(dir1.path(), genesis, "TEST" ); init_witness_keys( db1 ); auto b = db1.generate_block( db1.get_slot_time(1), db1.get_scheduled_witness( 1 ), init_account_priv_key(), database::skip_nothing); diff --git a/tests/tests/database_tests.cpp b/tests/tests/database_tests.cpp new file mode 100644 index 0000000..31a63fb 
--- /dev/null
+++ b/tests/tests/database_tests.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017 Cryptonomex, Inc., and contributors.
+ *
+ * The MIT License
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include
+
+#include
+
+#include
+
+#include "../common/database_fixture.hpp"
+
+using namespace muse::chain;
+
+BOOST_FIXTURE_TEST_SUITE( database_tests, database_fixture )
+
+BOOST_AUTO_TEST_CASE( undo_test )
+{
+   try {
+      database db;
+      auto ses = db._undo_db.start_undo_session();
+      const auto& sp_obj1 = db.create<streaming_platform_object>( [&]( streaming_platform_object& obj ){
+         // no owner right now
+      });
+      auto id1 = sp_obj1.id;
+      // abandon changes
+      ses.undo();
+      // start a new session
+      ses = db._undo_db.start_undo_session();
+
+      const auto& sp_obj2 = db.create<streaming_platform_object>( [&]( streaming_platform_object& obj ){
+         // no owner right now
+      });
+      auto id2 = sp_obj2.id;
+      BOOST_CHECK( id1 == id2 );
+   } catch ( const fc::exception& e )
+   {
+      edump( (e.to_detail_string()) );
+      throw;
+   }
+}
+
+BOOST_AUTO_TEST_CASE( merge_test )
+{
+   try {
+      database db;
+      auto ses = db._undo_db.start_undo_session();
+      db.create<streaming_platform_object>( [&]( streaming_platform_object& obj ){
+         obj.owner = "42";
+      });
+      ses.merge();
+
+      auto sp = db.get_streaming_platform( "42" );
+      BOOST_CHECK_EQUAL( "42", sp.owner );
+   } catch ( const fc::exception& e )
+   {
+      edump( (e.to_detail_string()) );
+      throw;
+   }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/tests/tests/muse_tests.cpp b/tests/tests/muse_tests.cpp
index 9f4863d..dea9371 100644
--- a/tests/tests/muse_tests.cpp
+++ b/tests/tests/muse_tests.cpp
@@ -1540,7 +1540,7 @@ BOOST_AUTO_TEST_CASE( balance_object_test )
    auto _sign = [&]( signed_transaction& tx, const private_key_type& key )
    {   tx.sign( key, db.get_chain_id() );   };

-   db.open( td.path(), genesis_state );
+   db.open( td.path(), genesis_state, "TEST" );
    const balance_object& balance = balance_id_type()(db);
    BOOST_CHECK_EQUAL(1, balance.balance.amount.value);
    BOOST_CHECK_EQUAL(10, balance_id_type(1)(db).balance.amount.value);
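
Taken together, the application.cpp and database.cpp hunks move the old dblock/db_version juggling out of the application and into database::open(), which now wipes the object database when the stored version marker is missing or stale and rewrites the marker afterwards. The following is a minimal standalone sketch of that gate, assuming std::filesystem and plain fstreams in place of the fc:: helpers used by the patch; ensure_db_version is a hypothetical name chosen for illustration.

#include <filesystem>
#include <fstream>
#include <iterator>
#include <string>

namespace fs = std::filesystem;

// Returns true if the stored marker was missing or stale, i.e. the object
// database had to be discarded before opening.
bool ensure_db_version( const fs::path& data_dir, const std::string& db_version )
{
   const fs::path marker = data_dir / "db_version";

   bool wipe = true;
   if( fs::exists( marker ) )
   {
      std::ifstream in( marker, std::ios::binary );
      std::string stored( (std::istreambuf_iterator<char>(in)),
                          std::istreambuf_iterator<char>() );
      wipe = ( stored != db_version );
   }

   if( wipe )
   {
      // In the patch this is where object_database::wipe( data_dir ) runs;
      // here we only rewrite the marker so the next startup sees a clean state.
      std::ofstream out( marker, std::ios::binary | std::ios::trunc );
      out << db_version;
   }
   return wipe;
}

Callers now pass the version string explicitly, as the application.cpp hunk does with GRAPHENE_CURRENT_DB_VERSION and the tests do with "TEST".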
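
The new block_database::last_index_entry() walks the index file backwards, keeps the first entry whose block bytes are fully present and whose unpacked id matches, and truncates the index past everything that fails that check. A simplified sketch of the recovery loop follows, with in-memory stand-ins (a plain struct for index_entry, a vector in place of the index file, a caller-supplied id check) instead of the real muse types; only the control flow mirrors the patch.

#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

struct index_entry
{
   uint64_t block_pos  = 0;
   uint32_t block_size = 0;
   uint64_t block_id   = 0;   // stand-in for block_id_type
};

std::optional<index_entry> last_valid_entry(
      std::vector<index_entry>& index,   // stand-in for the on-disk index
      uint64_t blocks_size,              // current size of the blocks file
      const std::function<bool(const index_entry&)>& block_matches )
{
   while( !index.empty() )
   {
      const index_entry& e = index.back();
      if( e.block_size > 0
          && e.block_pos + e.block_size <= blocks_size
          && block_matches( e ) )           // unpack the block and compare ids
         return e;
      index.pop_back();   // corresponds to fc::resize_file() on the index file
   }
   return std::nullopt;
}

Both last() and last_id() are then thin wrappers over this helper, as the later hunks show.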
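
database::reindex() now replays in stages rather than wiping and reopening: blocks far behind the head are applied with the undo database disabled and most validation skipped, the object database is flushed once mid-replay, and only the most recent blocks go through the normal push path. The sketch below condenses that staging arithmetic; apply_fast, push_checked and flush_state are hypothetical callbacks standing in for apply_block() with skip flags, push_block() and flush(), and the real code additionally stops at a gap in the block file, truncates past it via cutoff_blocks(), and skips the flush in that case.

#include <cstdint>
#include <functional>

void staged_reindex( uint32_t head_block, uint32_t last_in_file, uint32_t undo_blocks,
                     const std::function<void(uint32_t)>& apply_fast,
                     const std::function<void(uint32_t)>& push_checked,
                     const std::function<void()>& flush_state )
{
   uint32_t first = head_block + 1;

   // Stage 1: blocks well behind the head are applied without undo tracking
   // and with signature/validation checks skipped, then the state is flushed
   // once so an interrupted replay can resume from roughly this point.
   if( last_in_file > 2 * undo_blocks && first < last_in_file - 2 * undo_blocks )
   {
      while( first <= last_in_file - 2 * undo_blocks )
         apply_fast( first++ );
      flush_state();
   }

   // Stage 2: the next batch is applied the same way, but without a flush.
   if( last_in_file > undo_blocks && first < last_in_file - undo_blocks )
      while( first <= last_in_file - undo_blocks )
         apply_fast( first++ );

   // Stage 3: the remaining blocks are pushed through the fully validated,
   // undo-tracked path. (The patch also seeds _fork_db with block first-1
   // before this stage.)
   while( first <= last_in_file )
      push_checked( first++ );
}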
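
object_database::flush() now writes each index into object_database.tmp, marks that directory with a "lock" subdirectory while it is incomplete, and only then swaps it into place; open() refuses to load a snapshot that still carries the lock marker, so a crash mid-flush leads to a replay instead of loading a torn snapshot. A sketch of that ordering follows, using std::filesystem instead of the fc:: wrappers, with save_indexes standing in for the patch's per-space/per-type save loop.

#include <filesystem>
#include <functional>

namespace fs = std::filesystem;

void atomic_flush( const fs::path& data_dir,
                   const std::function<void(const fs::path&)>& save_indexes )
{
   const fs::path tmp = data_dir / "object_database.tmp";
   const fs::path fin = data_dir / "object_database";
   const fs::path old = data_dir / "object_database.old";

   fs::create_directories( tmp / "lock" );   // marks the snapshot as incomplete
   save_indexes( tmp );                      // one file per index, as before
   fs::remove_all( tmp / "lock" );           // snapshot is now complete

   if( fs::exists( fin ) )
      fs::rename( fin, old );                // keep the previous snapshot briefly
   fs::rename( tmp, fin );                   // publish the new snapshot
   fs::remove_all( old );
}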