Skip to content

Commit

Permalink
Merge pull request #986 from input-output-hk/node_state
Browse files Browse the repository at this point in the history
Add node state to REST node stats
  • Loading branch information
NicolasDP committed Oct 18, 2019
2 parents 1217834 + 17fe56b commit e6610fc
Show file tree
Hide file tree
Showing 5 changed files with 188 additions and 94 deletions.
25 changes: 22 additions & 3 deletions doc/jcli/rest.md
Expand Up @@ -33,9 +33,28 @@ YAML printed on success

```yaml
---
blockRecvCnt: 7 # Blocks received by node
txRecvCnt: 90 # Transactions received by node
uptime: 2101 # Node uptime in seconds
# Number of blocks received by node
blockRecvCnt: 1102
# The epoch and slot number of the block (optional)
lastBlockDate: "20.29"
# Sum of all fee values in all transactions in last block
lastBlockFees: 534
# The block hash, its unique identifier in the blockchain (optional)
lastBlockHash: b9597b45a402451540e6aabb58f2ee4d65c67953b338e04c52c00aa0886bd1f0
# The block number, in order, since the block0 (optional)
lastBlockHeight: 202901
# Sum of all input values in all transactions in last block
lastBlockSum: 51604
# When last block was created, not set if none was created yet (optional)
lastBlockTime: 2019-08-12T11:20:52.316544007+00:00
# Number of transactions in last block
lastBlockTx: 2
# State of the node
state: Running
# Number of transactions received by node
txRecvCnt: 5440
# Node uptime in seconds
uptime: 20032
```

## Whole UTXO
Expand Down
32 changes: 18 additions & 14 deletions doc/openapi.yaml
Expand Up @@ -422,16 +422,25 @@ paths:
application/json:
schema:
type: object
required: [blockRecvCnt, lastBlockFees, lastBlockSum, lastBlockTx, txRecvCnt, uptime]
required: [blockRecvCnt, lastBlockFees, lastBlockSum, lastBlockTx, state, txRecvCnt, uptime]
properties:
blockRecvCnt:
description: Number of blocks received by node
type: integer
minimum: 0
lastBlockDate:
description: The epoch and slot number of the block
type: string
lastBlockFees:
description: Sum of all fee values in all transactions in last block
type: integer
minimum: 0
lastBlockHash:
description: The block hash, its unique identifier in the blockchain
type: string
lastBlockHeight:
description: The block number, in order, since the block0
type: number
lastBlockSum:
description: Sum of all input values in all transactions in last block
type: integer
Expand All @@ -444,6 +453,10 @@ paths:
description: Number of transactions in last block
type: integer
minimum: 0
state:
description: State of the node
type: string
enum: [StartingRestServer, PreparingStorage, PreparingBlock0, Bootstrapping, StartingWorkers, Running]
txRecvCnt:
description: Number of transactions received by node
type: integer
Expand All @@ -452,26 +465,17 @@ paths:
description: Node uptime in seconds
type: integer
minimum: 0
lastBlockDate:
description: The Epoch and slot Number of the block.
type: string
lastBlockHeight:
description: The block number, in order, since the block0
type: number
minimum: 0
lastBlockHash:
description: The block hash, it's unique identifier in the blockchain.
type: string
example: |
{
"blockRecvCnt": 1102,
"lastBlockDate": "20.29",
"lastBlockFees": 534,
"lastBlockHash": "b9597b45a402451540e6aabb58f2ee4d65c67953b338e04c52c00aa0886bd1f0",
"lastBlockHeight": 202901,
"lastBlockSum": 51604,
"lastBlockTime": "2019-08-12T11:20:52.316544007+00:00",
"lastBlockDate": "20.29",
"lastBlockHeight": 202901,
"lastBlockHash": "b9597b45a402451540e6aabb58f2ee4d65c67953b338e04c52c00aa0886bd1f0",
"lastBlockTx": 2,
"state": "Running",
"txRecvCnt": 5440,
"uptime": 20032
}
Expand Down
41 changes: 29 additions & 12 deletions jormungandr/src/main.rs
Expand Up @@ -55,6 +55,7 @@ extern crate tokio;
use crate::{
blockcfg::{HeaderHash, Leader},
blockchain::Blockchain,
rest::NodeState,
secure::enclave::Enclave,
settings::start::Settings,
utils::{async_msg, task::Services},
Expand Down Expand Up @@ -101,13 +102,17 @@ pub struct BootstrappedNode {
new_epoch_notifier: tokio::sync::mpsc::Receiver<self::leadership::NewEpochToSchedule>,
logger: Logger,
explorer_db: Option<explorer::ExplorerDB>,
rest_server: Option<(rest::Server, rest::Context)>,
rest_context: Option<rest::Context>,
}

const FRAGMENT_TASK_QUEUE_LEN: usize = 1024;
const NETWORK_TASK_QUEUE_LEN: usize = 32;

fn start_services(bootstrapped_node: BootstrappedNode) -> Result<(), start_up::Error> {
if let Some(context) = bootstrapped_node.rest_context.as_ref() {
context.set_node_state(NodeState::StartingWorkers)
}

let mut services = Services::new(bootstrapped_node.logger.clone());

// initialize the network propagation channel
Expand Down Expand Up @@ -263,8 +268,8 @@ fn start_services(bootstrapped_node: BootstrappedNode) -> Result<(), start_up::E
});
}

let rest_server = match bootstrapped_node.rest_server {
Some((server, context)) => {
let rest_server = match bootstrapped_node.rest_context {
Some(rest_context) => {
let logger = bootstrapped_node
.logger
.new(o!(::log::KEY_TASK => "rest"))
Expand All @@ -281,8 +286,9 @@ fn start_services(bootstrapped_node: BootstrappedNode) -> Result<(), start_up::E
enclave,
explorer: explorer.as_ref().map(|(_msg_box, context)| context.clone()),
};
context.set_full(full_context);
Some(server)
rest_context.set_full(full_context);
rest_context.set_node_state(NodeState::Running);
Some(rest_context.server())
}
None => None,
};
Expand Down Expand Up @@ -314,8 +320,13 @@ fn bootstrap(initialized_node: InitializedNode) -> Result<BootstrappedNode, star
block0,
storage,
logger,
rest_server,
rest_context,
} = initialized_node;

if let Some(context) = rest_context.as_ref() {
context.set_node_state(NodeState::Bootstrapping)
}

let bootstrap_logger = logger.new(o!(log::KEY_TASK => "bootstrap"));

let (new_epoch_announcements, new_epoch_notifier) = tokio::sync::mpsc::channel(100);
Expand Down Expand Up @@ -366,7 +377,7 @@ fn bootstrap(initialized_node: InitializedNode) -> Result<BootstrappedNode, star
new_epoch_notifier,
logger,
explorer_db,
rest_server,
rest_context,
})
}

Expand All @@ -375,7 +386,7 @@ pub struct InitializedNode {
pub block0: blockcfg::Block,
pub storage: start_up::NodeStorage,
pub logger: Logger,
pub rest_server: Option<(rest::Server, rest::Context)>,
pub rest_context: Option<rest::Context>,
}

fn initialize_node() -> Result<InitializedNode, start_up::Error> {
Expand All @@ -402,19 +413,25 @@ fn initialize_node() -> Result<InitializedNode, start_up::Error> {
info!(init_logger, "Starting {}", env!("FULL_VERSION"),);
let settings = raw_settings.try_into_settings(&init_logger)?;

let rest_server = match &settings.rest {
let rest_context = match &settings.rest {
Some(rest) => {
let context = rest::Context::new();
let server = rest::start_rest_server(rest, settings.explorer, context.clone())?;
Some((server, context))
rest::start_rest_server(rest, settings.explorer, &context)?;
Some(context)
}
None => None,
};

if let Some(context) = rest_context.as_ref() {
context.set_node_state(NodeState::PreparingStorage)
}
let storage = start_up::prepare_storage(&settings, &init_logger)?;

// TODO: load network module here too (if needed)

if let Some(context) = rest_context.as_ref() {
context.set_node_state(NodeState::PreparingBlock0)
}
let block0 = start_up::prepare_block_0(
&settings,
&storage,
Expand All @@ -426,7 +443,7 @@ fn initialize_node() -> Result<InitializedNode, start_up::Error> {
block0,
storage,
logger,
rest_server,
rest_context,
})
}

Expand Down
40 changes: 33 additions & 7 deletions jormungandr/src/rest/mod.rs
Expand Up @@ -8,7 +8,7 @@ pub mod v0;
pub use self::server::{Error, Server};

use actix_web::dev::Resource;
use actix_web::error::{Error as ActixError, ErrorInternalServerError, ErrorServiceUnavailable};
use actix_web::error::{Error as ActixError, ErrorServiceUnavailable};
use actix_web::middleware::cors::Cors;
use actix_web::App;
use futures::{Future, IntoFuture};
Expand All @@ -29,13 +29,15 @@ use crate::utils::async_msg::MessageBox;
/// Shared, thread-safe state handed to the REST server and progressively
/// filled in by the node as it starts up.
pub struct Context {
    /// Full REST context; empty until all services are up (populated via
    /// `set_full` once the worker tasks are started).
    full: Arc<RwLock<Option<Arc<FullContext>>>>,
    /// Handle to the running REST server; empty until the server is started
    /// (populated via `set_server`).
    server: Arc<RwLock<Option<Arc<Server>>>>,
    /// Current lifecycle state of the node, updated at each startup phase
    /// and exposed through the REST node stats.
    node_state: Arc<RwLock<NodeState>>,
}

impl Context {
pub fn new() -> Self {
Context {
full: Default::default(),
server: Default::default(),
node_state: Arc::new(RwLock::new(NodeState::StartingRestServer)),
}
}

Expand All @@ -59,12 +61,26 @@ impl Context {
*self.server.write().expect("Context server poisoned") = Some(Arc::new(server));
}

pub fn server(&self) -> Result<Arc<Server>, ActixError> {
pub fn server(&self) -> Arc<Server> {
self.server
.read()
.expect("Context server poisoned")
.clone()
.ok_or_else(|| ErrorInternalServerError("Server not set in context"))
.expect("Context server not set")
}

/// Records the node's current lifecycle state.
///
/// # Panics
///
/// Panics if the node state lock is poisoned.
pub fn set_node_state(&self, node_state: NodeState) {
    let mut guard = self
        .node_state
        .write()
        .expect("Context node state poisoned");
    *guard = node_state;
}

/// Returns a copy of the node's current lifecycle state.
///
/// # Panics
///
/// Panics if the node state lock is poisoned.
pub fn node_state(&self) -> NodeState {
    let guard = self
        .node_state
        .read()
        .expect("Context node state poisoned");
    guard.clone()
}
}

Expand All @@ -82,11 +98,21 @@ pub struct FullContext {
pub explorer: Option<crate::explorer::Explorer>,
}

/// Lifecycle state of the node, serialized into the REST node stats
/// response. Variants are listed in the order the node passes through them
/// during startup.
///
/// `Copy`/`PartialEq`/`Eq` are derived eagerly: the enum is fieldless, so
/// copies are free and equality comparisons let callers test the current
/// phase directly.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
pub enum NodeState {
    /// Initial state: the REST server itself is being brought up.
    StartingRestServer,
    /// Node storage is being prepared.
    PreparingStorage,
    /// Block 0 is being prepared/loaded.
    PreparingBlock0,
    /// Node is bootstrapping its blockchain state.
    Bootstrapping,
    /// Service worker tasks are being started.
    StartingWorkers,
    /// Node is fully started and running.
    Running,
}

pub fn start_rest_server(
config: &Rest,
explorer_enabled: bool,
context: Context,
) -> Result<Server, ConfigError> {
context: &Context,
) -> Result<(), ConfigError> {
let app_context = context.clone();
let cors_cfg = config.cors.clone();
let server = Server::start(config.pkcs12.clone(), config.listen.clone(), move || {
Expand All @@ -108,8 +134,8 @@ pub fn start_rest_server(

apps
})?;
context.set_server(server.clone());
Ok(server)
context.set_server(server);
Ok(())
}

fn build_app<S, P, R>(state: S, prefix: P, resources: R, cors_cfg: &Option<CorsConfig>) -> App<S>
Expand Down

0 comments on commit e6610fc

Please sign in to comment.