Merge remote-tracking branch 'url/dev_master' into testnet
RootkitKiller committed Jun 13, 2019
2 parents e08218a + 832129f commit a75421b
Showing 9 changed files with 199 additions and 39 deletions.
65 changes: 65 additions & 0 deletions contracts/examples/test/testmapset/testmapset.cpp
@@ -0,0 +1,65 @@
#include <graphenelib/contract.hpp>
#include <graphenelib/dispatcher.hpp>
#include <graphenelib/print.hpp>
#include <graphenelib/types.h>
#include <graphenelib/multi_index.hpp>
#include <map>

using namespace graphene;

class testmapset : public contract
{
public:
testmapset(uint64_t id)
: contract(id), mapsettabs(_self, _self)
{
}

/// @abi action
void mapaction(std::map<std::string,uint64_t> mappar)
{
auto itor = mappar.begin();
auto str = itor->first;
auto num = itor->second;
for (int i = 0; i < 1; ++i) {
print("str:",str,"\n");
print("num:",num,"\n");

}
uint64_t pk = mapsettabs.available_primary_key();
mapsettabs.emplace(0, [&](auto &o) {
o.id = pk;
o.mymap = mappar;
});
}

// @abi action
void setaction(std::set<std::string> setpar)
{
auto itor = setpar.begin();
print("set: ",*itor,"\n");

uint64_t pk = mapsettabs.available_primary_key();
mapsettabs.emplace(0, [&](auto &o) {
o.id = pk;
o.myset = setpar;
});
}

//@abi table mapsettab i64
struct mapsettab {
uint64_t id;
std::map<std::string,uint64_t> mymap;
std::set<std::string> myset;

uint64_t primary_key() const { return id; }

GRAPHENE_SERIALIZE(mapsettab, (id)(mymap)(myset))
};

typedef graphene::multi_index<N(mapsettab), mapsettab> mapset_index;
mapset_index mapsettabs;

};

GRAPHENE_ABI(testmapset, (mapaction)(setaction))
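
The test contract above only prints the first entry of the map and set it receives. As a point of comparison, here is a minimal sketch (not part of the commit) of how mapaction could walk every key/value pair using the same graphenelib calls already shown in the file:

/// Sketch only: iterate the whole map instead of just mappar.begin().
void mapaction(std::map<std::string, uint64_t> mappar)
{
    for (const auto &kv : mappar) {
        print("str:", kv.first, "\n");   // key
        print("num:", kv.second, "\n");  // value
    }
    uint64_t pk = mapsettabs.available_primary_key();
    mapsettabs.emplace(0, [&](auto &o) {
        o.id = pk;
        o.mymap = mappar;
    });
}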
77 changes: 76 additions & 1 deletion libraries/abi_generator/abi_generator.cpp
@@ -446,13 +446,23 @@ bool abi_generator::is_vector(const clang::QualType& vqt) {
qt = qt->getAs<clang::ElaboratedType>()->getNamedType();

return isa<clang::TemplateSpecializationType>(qt.getTypePtr()) \
&& boost::starts_with( get_type_name(qt, false), "vector");
&& (boost::starts_with( get_type_name(qt, false), "vector")||boost::starts_with( get_type_name(qt, false), "set"));
}

bool abi_generator::is_vector(const string& type_name) {
return boost::ends_with(type_name, "[]");
}

bool abi_generator::is_map(const clang::QualType& mqt) {
QualType qt(mqt);

if ( is_elaborated(qt) )
qt = qt->getAs<clang::ElaboratedType>()->getNamedType();

return isa<clang::TemplateSpecializationType>(qt.getTypePtr()) \
&& boost::starts_with( get_type_name(qt, false), "map");
}

bool abi_generator::is_struct_specialization(const clang::QualType& qt) {
return is_struct(qt) && isa<clang::TemplateSpecializationType>(qt.getTypePtr());
}
@@ -476,6 +486,17 @@ string abi_generator::get_vector_element_type(const string& type_name) {
return type_name;
}

std::vector<clang::QualType> abi_generator::get_map_element_type(const clang::QualType& qt)
{
const auto* tst = clang::dyn_cast<const clang::TemplateSpecializationType>(qt.getTypePtr());
ABI_ASSERT(tst != nullptr);
const clang::TemplateArgument& arg0 = tst->getArg(0);
const clang::TemplateArgument& arg1 = tst->getArg(1);
std::vector<clang::QualType> varg;
varg.emplace_back(arg0.getAsType());
varg.emplace_back(arg1.getAsType());
return varg;
}
string abi_generator::get_type_name(const clang::QualType& qt, bool with_namespace=false) {
auto name = clang::TypeName::getFullyQualifiedName(qt, *ast_context);
if(!with_namespace)
@@ -557,6 +578,7 @@ string abi_generator::add_vector(const clang::QualType& vqt, size_t recursion_de

auto vector_element_type = get_vector_element_type(qt);
ABI_ASSERT(!is_vector(vector_element_type), "Only one-dimensional arrays are supported");
ABI_ASSERT(!is_map(vector_element_type), "Only one-dimensional maps are supported");

add_type(vector_element_type, recursion_depth);

@@ -566,6 +588,54 @@ string abi_generator::add_vector(const clang::QualType& vqt, size_t recursion_de
return vector_element_type_str;
}

string abi_generator::add_map(const clang::QualType& mqt, size_t recursion_depth)
{
ABI_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );

clang::QualType qt(get_named_type_if_elaborated(mqt));

auto map_element_type_list = get_map_element_type(qt);
ABI_ASSERT(!is_map(map_element_type_list[0]), "Only one-dimensional maps are supported");
ABI_ASSERT(!is_map(map_element_type_list[1]), "Only one-dimensional maps are supported");

add_type(map_element_type_list[0], recursion_depth);
add_type(map_element_type_list[1], recursion_depth);

std::string map_element_type_str_0;
std::string map_element_type_str_1;
if(is_vector(map_element_type_list[0]))
map_element_type_str_0 = add_vector(map_element_type_list[0],recursion_depth);
else
map_element_type_str_0 = translate_type(get_type_name(map_element_type_list[0]));
if(is_vector(map_element_type_list[1]))
map_element_type_str_1 = add_vector(map_element_type_list[1],recursion_depth);
else
map_element_type_str_1 = translate_type(get_type_name(map_element_type_list[1]));

static uint64_t index = 1;
std::string index_name = std::to_string(index);
std::string map_element_type_str = "map" + index_name + "[]";
index++;

// add struct
struct_def map_def;
map_def.name = map_element_type_str.substr(0, map_element_type_str.length() - 2);
map_def.base = "";

std::string key_field_name = "key";
std::string key_field_type_name = map_element_type_str_0;
field_def key_struct_field{key_field_name, key_field_type_name};

std::string value_field_name = "value";
std::string value_field_type_name = map_element_type_str_1;
field_def value_struct_field{value_field_name, value_field_type_name};

map_def.fields.push_back(key_struct_field);
map_def.fields.push_back(value_struct_field);

output->structs.push_back(map_def);
return map_element_type_str;
}
string abi_generator::add_type(const clang::QualType& tqt, size_t recursion_depth) {

ABI_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
@@ -593,6 +663,11 @@ string abi_generator::add_type(const clang::QualType& tqt, size_t recursion_dept
return is_type_def ? type_name : vector_type_name;
}

if( is_map(qt) ){
auto map_type_name = add_map(qt, recursion_depth);
return is_type_def ? type_name : map_type_name;
}

if( is_struct(qt) ) {
return add_struct(qt, full_type_name, recursion_depth);
}
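
Taken together, add_map expands a std::map action parameter into an auto-generated key/value struct plus an array type. As an illustration only (not output produced by this commit), the mappar parameter of type std::map<std::string,uint64_t> from the test contract would be described roughly as:

{"name": "map1", "base": "", "fields": [
    {"name": "key",   "type": "string"},
    {"name": "value", "type": "uint64"}
]}

with the parameter itself typed "map1[]"; the numeric suffix comes from the static index counter in add_map.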
@@ -188,6 +188,8 @@ namespace graphene {
bool is_vector(const string& type_name);
string add_vector(const clang::QualType& qt, size_t recursion_depth);

bool is_map(const clang::QualType& qt);
string add_map(const clang::QualType& qt, size_t recursion_depth);
bool is_struct(const clang::QualType& qt);
string add_struct(const clang::QualType& qt, string full_type_name, size_t recursion_depth);

@@ -200,6 +202,7 @@ namespace graphene {
QualType get_vector_element_type(const clang::QualType& qt);
string get_vector_element_type(const string& type_name);

std::vector<clang::QualType> get_map_element_type(const clang::QualType& qt);
clang::QualType get_named_type_if_elaborated(const clang::QualType& qt);

const clang::RecordDecl::field_range get_struct_fields(const clang::QualType& qt);
@@ -61,7 +61,7 @@ namespace graphene { namespace app {
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
void broadcast_transaction(const signed_transaction& trx);
transaction_id_type broadcast_transaction(const signed_transaction& trx);

/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
3 changes: 2 additions & 1 deletion libraries/app/network_broadcast_api.cpp
@@ -57,14 +57,15 @@ namespace graphene { namespace app {
}
}

void network_broadcast_api::broadcast_transaction(const signed_transaction& trx)
transaction_id_type network_broadcast_api::broadcast_transaction(const signed_transaction& trx)
{
dlog("received trx message ${trx}",("trx",trx));

trx.validate();
_app.chain_database()->push_transaction(trx);// evaluatet trx
if (_app.p2p_node() != nullptr)
_app.p2p_node()->broadcast_transaction(trx); // broadcast trx
return trx.id();
}

fc::variant network_broadcast_api::broadcast_transaction_synchronous(const signed_transaction& trx)
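
With broadcast_transaction now returning the transaction id, callers can report the id without recomputing it. A minimal caller sketch (network_api and signed_trx are placeholder names, not part of this commit):

// Assumes an existing network_broadcast_api handle and an already-signed transaction.
auto txid = network_api->broadcast_transaction(signed_trx); // validates, pushes, broadcasts, returns trx.id()
ilog("broadcast transaction ${id}", ("id", txid));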
4 changes: 2 additions & 2 deletions libraries/chain/db_update.cpp
@@ -96,13 +96,13 @@ void database::update_global_dynamic_data( const signed_block& b )

if( !(get_node_properties().skip_flags & skip_undo_history_check) )
{
GRAPHENE_ASSERT( head_number - _dgp.last_irreversible_block_num < GRAPHENE_MAX_UNDO_HISTORY, undo_database_exception,
GRAPHENE_ASSERT( _dgp.head_block_number - _dgp.last_irreversible_block_num < GRAPHENE_MAX_UNDO_HISTORY, undo_database_exception,
"The database does not have enough undo history to support a blockchain with so many missed blocks. "
"Please add a checkpoint if you would like to continue applying blocks beyond this point.",
("last_irreversible_block_num",_dgp.last_irreversible_block_num)("head", _dgp.head_block_number)
("recently_missed",_dgp.recently_missed_count)("max_undo",GRAPHENE_MAX_UNDO_HISTORY) );
}
_undo_db.set_max_size(head_number - _dgp.last_irreversible_block_num + 1);
_undo_db.set_max_size(_dgp.head_block_number - _dgp.last_irreversible_block_num + 1);
_fork_db.set_max_size(head_number - _dgp.last_irreversible_block_num + 1);
}

51 changes: 30 additions & 21 deletions libraries/plugins/elasticsearch/elasticsearch_plugin.cpp
@@ -60,6 +60,7 @@ class elasticsearch_plugin_impl
std::string _elasticsearch_index_prefix = "gxchain";
bool _elasticsearch_operation_object = true;
uint32_t _elasticsearch_start_es_after_block = 0;
uint64_t _elasticsearch_max_ops_per_account = 0;
CURL *curl; // curl handler
vector <string> bulk_lines; // vector of op lines
vector<std::string> prepare;
@@ -322,27 +323,31 @@ void elasticsearch_plugin_impl::cleanObjects(const account_transaction_history_i
{
graphene::chain::database& db = database();
// remove everything except current object from ath
const auto &his_idx = db.get_index_type<account_transaction_history_index>();
const auto &by_seq_idx = his_idx.indices().get<by_seq>();
auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0));
if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath_id) {
// if found, remove the entry
const auto remove_op_id = itr->operation_id;
const auto itr_remove = itr;
++itr;
db.remove( *itr_remove );
// modify previous node's next pointer
// this should be always true, but just have a check here
if( itr != by_seq_idx.end() && itr->account == account_id )
{
db.modify( *itr, [&]( account_transaction_history_object& obj ){
obj.next = account_transaction_history_id_type();
});
}
// do the same on oho
const auto &by_opid_idx = his_idx.indices().get<by_opid>();
if (by_opid_idx.find(remove_op_id) == by_opid_idx.end()) {
db.remove(remove_op_id(db));
const auto& stats_obj = account_id(db).statistics(db);
if( stats_obj.total_ops - stats_obj.removed_ops > _elasticsearch_max_ops_per_account )
{
const auto &his_idx = db.get_index_type<account_transaction_history_index>();
const auto &by_seq_idx = his_idx.indices().get<by_seq>();
auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0));
if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath_id) {
// if found, remove the entry
const auto remove_op_id = itr->operation_id;
const auto itr_remove = itr;
++itr;
db.remove( *itr_remove );
// modify previous node's next pointer
// this should be always true, but just have a check here
if( itr != by_seq_idx.end() && itr->account == account_id )
{
db.modify( *itr, [&]( account_transaction_history_object& obj ){
obj.next = account_transaction_history_id_type();
});
}
// do the same on oho
const auto &by_opid_idx = his_idx.indices().get<by_opid>();
if (by_opid_idx.find(remove_op_id) == by_opid_idx.end()) {
db.remove(remove_op_id(db));
}
}
}
}
@@ -392,6 +397,7 @@ void elasticsearch_plugin::plugin_set_program_options(
("elasticsearch-index-prefix", boost::program_options::value<std::string>(), "Add a prefix to the index(gxchain)")
("elasticsearch-operation-object", boost::program_options::value<bool>(), "Save operation as object(true)")
("elasticsearch-start-es-after-block", boost::program_options::value<uint32_t>(), "Start doing ES job after block(0)")
("elasticsearch-max-ops-per-account", boost::program_options::value<uint64_t>(), "Maximum number of operations per account will be kept in memory")
;
cfg.add(cli);
}
@@ -428,6 +434,9 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia
if (options.count("elasticsearch-start-es-after-block")) {
my->_elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as<uint32_t>();
}
if (options.count("elasticsearch-max-ops-per-account")) {
my->_elasticsearch_max_ops_per_account = options["elasticsearch-max-ops-per-account"].as<uint64_t>();
}
}

void elasticsearch_plugin::plugin_startup()
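
The new threshold is registered as an ordinary plugin option, so it can be supplied in config.ini or on the command line like the other elasticsearch-* settings. The value below is purely illustrative (the member defaults to 0, as shown in the diff):

# config.ini (illustrative value)
elasticsearch-max-ops-per-account = 1000
# or as a command-line flag: --elasticsearch-max-ops-per-account=1000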
25 changes: 16 additions & 9 deletions libraries/plugins/query_txid/CMakeLists.txt
@@ -3,15 +3,22 @@ file(GLOB HEADERS "include/graphene/query_txid/*.hpp")
add_library( graphene_query_txid
query_txid_plugin.cpp
)
find_path(LevelDB_INCLUDE_PATH NAMES leveldb/db.h leveldb/write_batch.h)
find_library(LevelDB_LIBRARY NAMES libleveldb.a)
find_library(Snappy_LIBRARY NAMES libsnappy.a)

target_link_libraries( graphene_query_txid graphene_chain graphene_app leveldb)
target_include_directories( graphene_query_txid
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
if(LevelDB_INCLUDE_PATH AND LevelDB_LIBRARY AND Snappy_LIBRARY)
target_link_libraries( graphene_query_txid graphene_chain graphene_app ${LevelDB_LIBRARY} ${Snappy_LIBRARY})
target_include_directories( graphene_query_txid
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ${LevelDB_INCLUDE_PATH})
install( TARGETS
graphene_query_txid

install( TARGETS
graphene_query_txid
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)
else(LevelDB_INCLUDE_PATH AND LevelDB_LIBRARY AND Snappy_LIBRARY)
message(FATAL_ERROR "You need leveldb and snappy")
endif()

RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)
8 changes: 4 additions & 4 deletions libraries/plugins/query_txid/query_txid_plugin.cpp
@@ -42,19 +42,19 @@ class query_txid_plugin_impl
fc::signal<void()> sig_db_write;
fc::signal<void(const uint64_t)> sig_remove;

static DB *leveldb;
static leveldb::DB *leveldb;
void consume_block(); //Consume block
void remove_trx_index(const uint64_t trx_entry_id); //Remove trx_index in db
};
DB *query_txid_plugin_impl::leveldb = nullptr;
leveldb::DB *query_txid_plugin_impl::leveldb = nullptr;

void query_txid_plugin_impl::init()
{
try {
//Create leveldb
Options options;
leveldb::Options options;
options.create_if_missing = true;
Status s = DB::Open(options, db_path, &leveldb);
leveldb::Status s = leveldb::DB::Open(options, db_path, &leveldb);

// Respond to the sig_db_write signale
sig_db_write.connect([&]() { consume_block(); });
