2 changes: 1 addition & 1 deletion core/Cargo.toml
@@ -21,7 +21,7 @@ blake2b_simd = "0.5.10"
openssl = { version = "0.10", features = ["vendored"] }
log = "0.4.11"
log-panics = "2.0.0"
log4rs = "1.1.1"
log4rs = "1.2.0"
crossbeam = "0.8"
tokio = { version = "1", features = ["full"] }
tower-http = { version = "0.3.0", features = ["cors"] }
4 changes: 1 addition & 3 deletions core/src/actions.rs
@@ -44,9 +44,7 @@ pub fn execute_action(action: PoolAction) -> Result<(), ActionExecError> {
if msg.as_str() == "Double spending attempt"
|| msg.contains("it is invalidated earlier or the pool is full") =>
{
-log::info!(
-"Node rejected tx, probably due to our previous tx is still in the mempool)"
-);
+log::info!("Node rejected tx, probably because this tx is already in the mempool");
Ok(())
}
Err(e) => Err(e),
3 changes: 2 additions & 1 deletion core/src/cli_commands/bootstrap.rs
@@ -72,13 +72,14 @@ pub fn bootstrap(config_file_name: String) -> Result<(), BootstrapError> {
debug!("Change address: {}", change_address_str);

let change_address = AddressEncoder::unchecked_parse_address_from_str(&change_address_str)?;
+let erg_value_per_box = config.oracle_contract_parameters.min_storage_rent;
let input = BootstrapInput {
config,
wallet: &node as &dyn WalletDataSource,
tx_signer: &node as &dyn SignTransaction,
submit_tx: &node as &dyn SubmitTransaction,
tx_fee: *BASE_FEE,
-erg_value_per_box: *BASE_FEE,
+erg_value_per_box,
change_address,
height: node.current_block_height()? as u32,
};
10 changes: 9 additions & 1 deletion core/src/logging.rs
@@ -5,6 +5,7 @@ use log4rs::append::rolling_file::policy::compound::trigger::size::SizeTrigger;
use log4rs::append::rolling_file::policy::compound::CompoundPolicy;
use log4rs::append::rolling_file::RollingFileAppender;
use log4rs::config::Appender;
+use log4rs::config::Logger;
use log4rs::config::Root;
use log4rs::Config;

@@ -61,11 +62,18 @@ pub fn setup_log(override_log_level: Option<LevelFilter>) {
),
),
)
+.logger(
+Logger::builder()
+.appender("logfile")
+.appender("stdout")
+.additive(false)
+.build("oracle_core", log_level),
+)
.build(
Root::builder()
.appender("stdout")
.appender("logfile")
-.build(log_level),
+.build(LevelFilter::Info),
)
.unwrap();

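Note on the logging change above: the new Logger entry scopes the configured log level to the oracle_core target (with additive(false), so its messages are not duplicated through the root), while the root logger is now pinned to Info, keeping dependency crates from flooding the log at debug/trace. Below is a minimal, self-contained sketch of the same log4rs pattern, assuming log4rs 1.2 and only a console appender (not the crate's actual appender setup):

    use log::LevelFilter;
    use log4rs::append::console::ConsoleAppender;
    use log4rs::config::{Appender, Logger, Root};
    use log4rs::Config;

    fn main() {
        let stdout = ConsoleAppender::builder().build();
        let config = Config::builder()
            .appender(Appender::builder().build("stdout", Box::new(stdout)))
            // Messages from the `oracle_core` target follow the chosen level...
            .logger(
                Logger::builder()
                    .appender("stdout")
                    .additive(false)
                    .build("oracle_core", LevelFilter::Debug),
            )
            // ...while everything else (dependencies) is filtered at Info by the root logger.
            .build(Root::builder().appender("stdout").build(LevelFilter::Info))
            .unwrap();
        log4rs::init_config(config).unwrap();
        log::info!("logging initialized");
    }

With this split, a debug! call inside oracle_core is emitted, while debug output from a dependency crate is dropped by the Info root.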
7 changes: 3 additions & 4 deletions core/src/main.rs
@@ -246,8 +246,7 @@ fn handle_oracle_command(command: Command) {
}
loop {
if let Err(e) = main_loop_iteration(&op, read_only) {
error!("Fatal error: {:?}", e);
std::process::exit(exitcode::SOFTWARE);
error!("error: {:?}", e);
}
// Delay loop restart
thread::sleep(Duration::new(30, 0));
@@ -363,7 +362,7 @@ fn main_loop_iteration(op: &OraclePool, read_only: bool) -> std::result::Result<
network_change_address.address(),
);
if let Some(action) =
-continue_if_non_fatal(network_change_address.network(), build_action_res)?
+log_and_continue_if_non_fatal(network_change_address.network(), build_action_res)?
{
if !read_only {
execute_action(action)?;
@@ -373,7 +372,7 @@ fn main_loop_iteration(op: &OraclePool, read_only: bool) -> std::result::Result<
Ok(())
}

-fn continue_if_non_fatal(
+fn log_and_continue_if_non_fatal(
network_prefix: NetworkPrefix,
res: Result<PoolAction, PoolCommandError>,
) -> Result<Option<PoolAction>, PoolCommandError> {
4 changes: 2 additions & 2 deletions core/src/node_interface.rs
@@ -129,12 +129,12 @@ pub fn submit_transaction(signed_tx: &Transaction) -> Result<TxId> {
/// Sign an `UnsignedTransaction` and then submit it to the mempool.
pub fn sign_and_submit_transaction(unsigned_tx: &UnsignedTransaction) -> Result<TxId> {
let node = new_node_interface();
-log::debug!(
+log::trace!(
"Signing transaction: {}",
serde_json::to_string_pretty(&unsigned_tx).unwrap()
);
let signed_tx = node.sign_transaction(unsigned_tx, None, None)?;
-log::debug!(
+log::trace!(
"Submitting signed transaction: {}",
serde_json::to_string_pretty(&signed_tx).unwrap()
);
17 changes: 12 additions & 5 deletions core/src/oracle_state.rs
@@ -161,7 +161,6 @@ pub struct UpdateBoxScan<'a> {
#[derive(Debug, Clone)]
pub struct LiveEpochState {
pub pool_box_epoch_id: u32,
-// TODO: newtypes fo epoch id, height, datapoint
pub local_datapoint_box_state: Option<LocalDatapointState>,
pub latest_pool_datapoint: u64,
pub latest_pool_box_height: u32,
@@ -423,13 +422,21 @@ impl StageDataSource for Stage {

impl<'a> DatapointBoxesSource for DatapointStage<'a> {
fn get_oracle_datapoint_boxes(&self) -> Result<Vec<PostedOracleBox>> {
-let res = self
+let oracle_boxes: Vec<OracleBoxWrapper> = self
.stage
.get_boxes()?
.into_iter()
-.map(|b| PostedOracleBox::new(b, self.oracle_box_wrapper_inputs))
-.collect::<std::result::Result<Vec<PostedOracleBox>, OracleBoxError>>()?;
-Ok(res)
+.map(|b| OracleBoxWrapper::new(b, self.oracle_box_wrapper_inputs))
+.collect::<std::result::Result<Vec<OracleBoxWrapper>, _>>()?;
+
+let posted_boxes = oracle_boxes
+.into_iter()
+.filter_map(|b| match b {
+OracleBoxWrapper::Posted(p) => Some(p),
+OracleBoxWrapper::Collected(_) => None,
+})
+.collect();
+Ok(posted_boxes)
}
}

35 changes: 2 additions & 33 deletions core/src/pool_commands/publish_datapoint.rs
@@ -55,7 +55,7 @@ pub fn build_subsequent_publish_datapoint_action(
change_address: Address,
datapoint_source: &dyn DataPointSource,
new_epoch_counter: u32,
-pool_datapoint: i64,
+_pool_datapoint: i64,
) -> Result<PublishDataPointAction, PublishDatapointActionError> {
let new_datapoint = datapoint_source.get_datapoint_retry(3)?;
let in_oracle_box = local_datapoint_box;
@@ -66,7 +66,7 @@ pub fn build_subsequent_publish_datapoint_action(
let output_candidate = make_oracle_box_candidate(
in_oracle_box.contract(),
in_oracle_box.public_key(),
-compute_new_datapoint(new_datapoint, pool_datapoint),
+new_datapoint,
new_epoch_counter,
in_oracle_box.oracle_token(),
in_oracle_box.reward_token(),
@@ -162,37 +162,6 @@ pub fn build_publish_first_datapoint_action(
Ok(PublishDataPointAction { tx })
}

-fn compute_new_datapoint(datapoint: i64, old_datapoint: i64) -> i64 {
-// Difference calc
-let difference = datapoint as f64 / old_datapoint as f64;
-
-// If the new datapoint is twice as high, post the new datapoint
-#[allow(clippy::if_same_then_else)]
-if difference > 2.00 {
-datapoint
-}
-// If the new datapoint is half, post the new datapoint
-else if difference < 0.50 {
-datapoint
-}
-// TODO: remove 0.5% cap, kushti asked on TG:
-// >Lets run 2.0 with no delay in data update in the default data provider
-// >No, data provider currently cap oracle price change at 0.5 percent per epoch
-//
-// If the new datapoint is 0.49% to 50% lower, post 0.49% lower than old
-else if difference < 0.9951 {
-(old_datapoint as f64 * 0.9951) as i64
-}
-// If the new datapoint is 0.49% to 100% higher, post 0.49% higher than old
-else if difference > 1.0049 {
-(old_datapoint as f64 * 1.0049) as i64
-}
-// Else if the difference is within 0.49% either way, post the new datapoint
-else {
-datapoint
-}
-}

#[cfg(test)]
mod tests {
use std::convert::TryInto;
5 changes: 4 additions & 1 deletion core/src/pool_commands/refresh.rs
@@ -54,6 +54,8 @@ pub enum RefreshActionError {
TxBuilderError(TxBuilderError),
#[error("box builder error: {0}")]
ErgoBoxCandidateBuilderError(ErgoBoxCandidateBuilderError),
#[error("failed to found my own oracle box in the filtered posted oracle boxes")]
MyOracleBoxNoFound,
}

#[allow(clippy::too_many_arguments)]
@@ -120,7 +122,8 @@ pub fn build_refresh_action(
let my_input_oracle_box_index: i32 = valid_in_oracle_boxes
.iter()
.position(|b| b.public_key().h.as_ref() == my_oracle_pk)
-.unwrap() as i32; // TODO: handle error
+.ok_or(RefreshActionError::MyOracleBoxNotFound)?
+as i32;

let mut valid_in_oracle_raw_boxes = valid_in_oracle_boxes
.clone()
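Note on the refresh change above: looking up the oracle's own box among the posted boxes now returns a typed error instead of panicking on unwrap(). A stripped-down sketch of the same pattern, with simplified types and hypothetical names rather than the crate's real structs:

    #[derive(Debug)]
    enum RefreshError {
        MyOracleBoxNotFound,
    }

    // Return the index of our own box, or a typed error instead of panicking.
    fn my_box_index(oracle_pks: &[String], my_pk: &str) -> Result<i32, RefreshError> {
        oracle_pks
            .iter()
            .position(|pk| pk.as_str() == my_pk)
            .map(|i| i as i32)
            .ok_or(RefreshError::MyOracleBoxNotFound)
    }

    fn main() {
        let pks = vec!["a".to_string(), "b".to_string()];
        assert_eq!(my_box_index(&pks, "b").unwrap(), 1);
        assert!(my_box_index(&pks, "z").is_err());
    }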
1 change: 0 additions & 1 deletion core/src/scans.rs
@@ -264,7 +264,6 @@ pub fn register_ballot_box_scan(
Scan::register("Ballot Box Scan", scan_json)
}

-// TODO: We don't currently scan for ErgoTree, since config does not store min_votes
pub fn register_update_box_scan(update_nft_token_id: &TokenId) -> Result<Scan> {
let scan_json = json! ( {
"predicate": "and",
10 changes: 8 additions & 2 deletions core/src/state.rs
@@ -7,7 +7,6 @@ pub struct EpochState {
epoch_start_height: u64,
}

-// TODO: remove NeedsBootstrap and use LiveEpochState?
/// Enum for the state that the oracle pool is currently in
#[derive(Debug, Clone)]
pub enum PoolState {
@@ -33,7 +32,14 @@ pub fn process(
if let Some(local_datapoint_box_state) = live_epoch.local_datapoint_box_state {
match local_datapoint_box_state {
Collected { height: _ } => {
-Some(PoolCommand::PublishSubsequentDataPoint { republish: false })
+// Publish the datapoint only after some blocks have passed since the pool box was published,
+// so that oracle boxes don't become stale by the next refresh (a datapoint posted in the
+// first block of an epoch would fall out of the epoch window too quickly).
+if current_height > live_epoch.latest_pool_box_height + epoch_length / 2 {
+Some(PoolCommand::PublishSubsequentDataPoint { republish: false })
+} else {
+None
+}
}
Posted { epoch_id, height } => {
if height < min_start_height || epoch_id != live_epoch.pool_box_epoch_id {
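Note on the state change above: after a Collected local datapoint box, republishing now waits until the second half of the current epoch. A tiny standalone sketch of the condition, using a hypothetical helper name and illustrative numbers (not the crate's actual API):

    /// Republish only in the second half of the epoch so the fresh datapoint
    /// stays inside the epoch window until the next refresh.
    fn should_republish(current_height: u32, latest_pool_box_height: u32, epoch_length: u32) -> bool {
        current_height > latest_pool_box_height + epoch_length / 2
    }

    fn main() {
        // Example with a 30-block epoch and the pool box published at height 1000:
        assert!(!should_republish(1010, 1000, 30)); // still in the first half, wait
        assert!(should_republish(1016, 1000, 30)); // past the half-way point, publish
    }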