Skip to content

Commit

Permalink
Added publicly available network map
Browse files Browse the repository at this point in the history
  • Loading branch information
Daniel Graczer committed May 17, 2021
1 parent 4972cc2 commit 0489738
Show file tree
Hide file tree
Showing 12 changed files with 1,099 additions and 6 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Expand Up @@ -65,7 +65,7 @@ jobs:
if: runner.os == 'Linux'
run: |
sudo apt-get update
sudo apt-get install libsodium-dev libsqlite3-dev clang
sudo apt-get install libsodium-dev libsqlite3-dev clang libmaxminddb-dev
- name: '[Windows] Install dependencies & setup environment'
if: runner.os == 'Windows'
Expand Down
2 changes: 1 addition & 1 deletion Dockerfile
Expand Up @@ -19,7 +19,7 @@ COPY devel/dotgdbinit /root/.gdbinit
COPY --from=Builder /root/packages/ /root/packages/
RUN apk --no-cache add --allow-untrusted -X /root/packages/build/ ldc-runtime=1.26.0-r0 \
&& rm -rf /root/packages/
RUN apk --no-cache add llvm-libunwind libgcc libsodium libstdc++ sqlite-libs
RUN apk --no-cache add llvm-libunwind libgcc libsodium libstdc++ sqlite-libs libmaxminddb
COPY --from=Builder /root/agora/talos/build/ /usr/share/agora/talos/
COPY --from=Builder /root/agora/build/agora /usr/local/bin/agora
WORKDIR /agora/
Expand Down
14 changes: 14 additions & 0 deletions doc/config.example.yaml
Expand Up @@ -55,6 +55,20 @@ node:
# after `relay_tx_cache_exp_secs`.
relay_tx_cache_exp_secs : 1200

# true, if this node should collect statistics about other
# nodes in the network, including their geographical location and OS
collect_network_statistics : true

# The number of network crawlers that will be instantiated to collect
# statistics about other nodes in the network
num_of_crawlers : 3

# The number of seconds one crawler should wait after successfully contacting a node
crawling_interval_secs : 2

# The path to a file containing IP address -> geographical location mapping
ipdb_path : "data/ipdb.mmdb"

# Each entry in this array is an interface Agora will listen to, allowing to
# expose the same node on more than one network interface or with different
# API, such as having one interface using HTTP+JSON and the other TCP+binary.
Expand Down
1 change: 1 addition & 0 deletions dub.json
Expand Up @@ -69,6 +69,7 @@
"dflags": [ "-extern-std=c++14", "-preview=in" ],
"lflags-posix": [ "-lstdc++", "-lsqlite3" ],
"lflags-windows": [ "sqlite3.lib", "/nodefaultlib:msvcetd.lib" ],
"libs": [ "maxminddb" ],
"libs-windows": [ "iphlpapi" ],
"buildRequirements": [ "allowWarnings" ],

Expand Down
21 changes: 21 additions & 0 deletions source/agora/api/FullNode.d
Expand Up @@ -21,6 +21,7 @@ import agora.consensus.data.PreImageInfo;
import agora.common.Types;
import agora.common.Set;
import agora.consensus.data.Transaction;
import agora.network.Crawler : CrawlResultHolder;

import vibe.data.serialization;
import vibe.http.common;
Expand All @@ -41,6 +42,9 @@ public struct NodeInfo

/// Partial or full view of the addresses of the node's quorum (based on is_complete)
public Set!string addresses;

/// String representation of the OS the node is running on
public string os;
}

/*******************************************************************************
Expand Down Expand Up @@ -227,6 +231,23 @@ public interface API

public PreImageInfo getPreimage (Hash enroll_key);

/***************************************************************************
Get the information about the network and its reachable nodes.
The information includes the network address and the geographical location
of the nodes including the continent/country/city/latitude/longitude.
API:
GET /network_info
Returns: information about the network and about its nodes
***************************************************************************/

public CrawlResultHolder getNetworkInfo ();

/***************************************************************************
Get validators' pre-image information
Expand Down
23 changes: 23 additions & 0 deletions source/agora/common/Config.d
Expand Up @@ -228,6 +228,21 @@ public struct NodeConfig
/// Transaction put into the relay queue will expire, and will be removed
/// after `relay_tx_cache_exp`.
public Duration relay_tx_cache_exp;

/// true, if this node should collect statistics about other
/// nodes in the network, including their geographical location and OS
public bool collect_network_statistics;

/// The number of network crawlers that will be instantiated to collect
/// statistics about other nodes in the network
public ubyte num_of_crawlers;

/// The number of seconds one crawler should wait after successfully contacting
/// a node
public Duration crawling_interval;

/// The path to a file containing IP address -> geographical location mapping
public string ipdb_path;
}

/// Validator config
Expand Down Expand Up @@ -524,6 +539,10 @@ private NodeConfig parseNodeConfig (Node* node, in CommandLine cmdln)
Duration relay_tx_interval = opt!(ulong, "node", "relay_tx_interval_secs")(cmdln, node, 15).seconds;
const relay_tx_min_fee = Amount(opt!(ulong, "node", "relay_tx_min_fee")(cmdln, node, 0));
Duration relay_tx_cache_exp = opt!(ulong, "node", "relay_tx_cache_exp_secs")(cmdln, node, 1200).seconds;
const collect_network_statistics = opt!(bool, "node", "collect_network_statistics")(cmdln, node, true);
const num_of_crawlers = opt!(ubyte, "node", "num_of_crawlers")(cmdln, node, 1);
Duration crawling_interval = opt!(ulong, "node", "crawling_interval_secs")(cmdln, node, 3).seconds;
const ipdb_path = opt!(string, "node", "ipdb_path")(cmdln, node, "data/ipdb.mmdb");

NodeConfig result = {
min_listeners : min_listeners,
Expand All @@ -545,6 +564,10 @@ private NodeConfig parseNodeConfig (Node* node, in CommandLine cmdln)
relay_tx_interval : relay_tx_interval,
relay_tx_min_fee : relay_tx_min_fee,
relay_tx_cache_exp : relay_tx_cache_exp,
collect_network_statistics : collect_network_statistics,
num_of_crawlers : num_of_crawlers,
crawling_interval : crawling_interval,
ipdb_path : ipdb_path,
};
return result;
}
Expand Down

0 comments on commit 0489738

Please sign in to comment.