15 changes: 15 additions & 0 deletions docs/rpc/api/core-node/get-burn-ops-peg-handoff.example.json
@@ -0,0 +1,15 @@
{
"peg_handoff": [
{
"amount": 1337,
"block_height": 218,
"burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4",
"memo": "",
"next_peg_wallet": "tb1pqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqkgkkf5",
"reward_cycle": 12,
"recipient": "S0000000000000000000002AA028H.awesome_contract",
"txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771",
"vtxindex": 2
}
]
}
5 changes: 4 additions & 1 deletion docs/rpc/openapi.yaml
@@ -42,7 +42,7 @@ paths:
/v2/burn_ops/{burn_height}/{op_type}:
get:
summary: Get burn operations
description: Get all burn operations of type `op_type` successfully read at `burn_height`. Valid `op_type`s are `peg_in`, `peg_out_request` and `peg_out_fulfill`.
description: Get all burn operations of type `op_type` successfully read at `burn_height`. Valid `op_type`s are `peg_handoff`, `peg_in`, `peg_out_request` and `peg_out_fulfill`.
tags:
- Info
operationId: get_burn_ops
@@ -52,6 +52,9 @@
content:
application/json:
examples:
peg_handoff:
value:
$ref: ./api/core-node/get-burn-ops-peg-handoff.example.json
peg_in:
value:
$ref: ./api/core-node/get-burn-ops-peg-in.example.json
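With this change, `peg_handoff` becomes a valid `op_type` segment for `GET /v2/burn_ops/{burn_height}/{op_type}`. The sketch below is not part of this PR; it shows one way a client might deserialize the example payload documented above, assuming `serde` (with the `derive` feature) and `serde_json` are available. The struct layout mirrors the example JSON, and the numeric field types are assumptions rather than the node's canonical serialization code.

// Client-side sketch only: mirrors docs/rpc/api/core-node/get-burn-ops-peg-handoff.example.json.
// Field names come from the example; the numeric types are assumptions.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct PegHandoffEntry {
    amount: u64,
    block_height: u64,
    burn_header_hash: String,
    memo: String,
    next_peg_wallet: String,
    reward_cycle: u64,
    txid: String,
    vtxindex: u32,
}

#[derive(Debug, Deserialize)]
struct BurnOpsPegHandoff {
    peg_handoff: Vec<PegHandoffEntry>,
}

fn main() -> Result<(), serde_json::Error> {
    // Stand-in for a response body from /v2/burn_ops/218/peg_handoff.
    let body = r#"{
        "peg_handoff": [{
            "amount": 1337,
            "block_height": 218,
            "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4",
            "memo": "",
            "next_peg_wallet": "tb1pqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqkgkkf5",
            "reward_cycle": 12,
            "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771",
            "vtxindex": 2
        }]
    }"#;
    let parsed: BurnOpsPegHandoff = serde_json::from_str(body)?;
    assert_eq!(parsed.peg_handoff[0].reward_cycle, 12);
    println!(
        "{} peg_handoff op(s) at burn height {}",
        parsed.peg_handoff.len(),
        parsed.peg_handoff[0].block_height
    );
    Ok(())
}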
21 changes: 19 additions & 2 deletions src/burnchains/burnchain.rs
@@ -62,8 +62,8 @@ use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn, Sort
use crate::chainstate::burn::distribution::BurnSamplePoint;
use crate::chainstate::burn::operations::{
leader_block_commit::MissedBlockCommit, BlockstackOperationType, DelegateStxOp,
LeaderBlockCommitOp, LeaderKeyRegisterOp, PegInOp, PegOutFulfillOp, PegOutRequestOp, PreStxOp,
StackStxOp, TransferStxOp, UserBurnSupportOp,
LeaderBlockCommitOp, LeaderKeyRegisterOp, PegHandoffOp, PegInOp, PegOutFulfillOp,
PegOutRequestOp, PreStxOp, StackStxOp, TransferStxOp, UserBurnSupportOp,
};
use crate::chainstate::burn::{BlockSnapshot, Opcodes};
use crate::chainstate::coordinator::comm::CoordinatorChannels;
@@ -149,6 +149,9 @@ impl BurnchainStateTransition {
BlockstackOperationType::LeaderKeyRegister(_) => {
accepted_ops.push(block_ops[i].clone());
}
BlockstackOperationType::PegHandoff(_) => {
accepted_ops.push(block_ops[i].clone());
}
BlockstackOperationType::PegIn(_) => {
accepted_ops.push(block_ops[i].clone());
}
@@ -904,6 +907,20 @@ impl Burnchain {
}
}

x if x == Opcodes::PegHandoff as u8 => {
match PegHandoffOp::from_tx(block_header, burn_tx) {
Ok(op) => Some(BlockstackOperationType::PegHandoff(op)),
Err(e) => {
warn!("Failed to parse peg handoff tx";
"txid" => %burn_tx.txid(),
"data" => %to_hex(&burn_tx.data()),
"error" => ?e,
);
None
}
}
}

x if x == Opcodes::PegIn as u8 => match PegInOp::from_tx(block_header, burn_tx) {
Ok(op) => Some(BlockstackOperationType::PegIn(op)),
Err(e) => {
7 changes: 7 additions & 0 deletions src/chainstate/burn/db/processing.rs
@@ -106,6 +106,13 @@ impl<'a> SortitionHandleTx<'a> {
);
BurnchainError::OpError(e)
}),
BlockstackOperationType::PegHandoff(ref op) => op.check().map_err(|e| {
warn!(
"REJECTED({}) peg handoff op {} at {},{}: {:?}",
op.block_height, &op.txid, op.block_height, op.vtxindex, &e
);
BurnchainError::OpError(e)
}),
BlockstackOperationType::PegIn(ref op) => op.check().map_err(|e| {
warn!(
"REJECTED({}) peg in op {} at {},{}: {:?}",
161 changes: 158 additions & 3 deletions src/chainstate/burn/db/sortdb.rs
@@ -60,8 +60,8 @@ use crate::burnchains::{
use crate::chainstate::burn::operations::DelegateStxOp;
use crate::chainstate::burn::operations::{
leader_block_commit::{MissedBlockCommit, RewardSetInfo, OUTPUTS_PER_COMMIT},
BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, PegInOp, PegOutFulfillOp,
PegOutRequestOp, PreStxOp, StackStxOp, TransferStxOp, UserBurnSupportOp,
BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, PegHandoffOp, PegInOp,
PegOutFulfillOp, PegOutRequestOp, PreStxOp, StackStxOp, TransferStxOp, UserBurnSupportOp,
};
use crate::chainstate::burn::ConsensusHashExtensions;
use crate::chainstate::burn::Opcodes;
@@ -406,6 +406,40 @@ impl FromRow<DelegateStxOp> for DelegateStxOp {
}
}

impl FromRow<PegHandoffOp> for PegHandoffOp {
fn from_row<'a>(row: &'a Row) -> Result<Self, db_error> {
let txid = Txid::from_column(row, "txid")?;
let vtxindex: u32 = row.get("vtxindex")?;
let block_height = u64::from_column(row, "block_height")?;
let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;

let next_peg_wallet = PoxAddress::from_column(row, "next_peg_wallet")?;
let amount = row
.get::<_, String>("amount")?
.parse()
.map_err(|_| db_error::ParseError)?;
let reward_cycle = row
.get::<_, String>("reward_cycle")?
.parse()
.map_err(|_| db_error::ParseError)?;

let memo_hex: String = row.get_unwrap("memo");
let memo_bytes = hex_bytes(&memo_hex).map_err(|_e| db_error::ParseError)?;
let memo = memo_bytes.to_vec();

Ok(Self {
txid,
vtxindex,
block_height,
burn_header_hash,
next_peg_wallet,
amount,
reward_cycle,
memo,
})
}
}

impl FromRow<PegInOp> for PegInOp {
fn from_row<'a>(row: &'a Row) -> Result<Self, db_error> {
let txid = Txid::from_column(row, "txid")?;
@@ -808,9 +842,23 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[
];

// update this to add new indexes
const LAST_SORTITION_DB_INDEX: &'static str = "index_peg_out_fulfill_burn_header_hash ";
const LAST_SORTITION_DB_INDEX: &'static str = "index_peg_handoff_burn_header_hash ";

const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[
r#"
CREATE TABLE peg_handoff (
txid TEXT NOT NULL,
vtxindex INTEGER NOT NULL,
block_height INTEGER NOT NULL,
burn_header_hash TEXT NOT NULL,

next_peg_wallet TEXT NOT NULL,
amount TEXT NOT NULL,
reward_cycle TEXT NOT NULL,
memo TEXT,

PRIMARY KEY(txid, burn_header_hash)
);"#,
r#"
CREATE TABLE peg_in (
txid TEXT NOT NULL,
@@ -884,6 +932,7 @@ const SORTITION_DB_INDEXES: &'static [&'static str] = &[
"CREATE INDEX IF NOT EXISTS index_peg_in_burn_header_hash ON peg_in(burn_header_hash);",
"CREATE INDEX IF NOT EXISTS index_peg_out_request_burn_header_hash ON peg_out_requests(burn_header_hash);",
"CREATE INDEX IF NOT EXISTS index_peg_out_fulfill_burn_header_hash ON peg_out_fulfillments(burn_header_hash);",
"CREATE INDEX IF NOT EXISTS index_peg_handoff_burn_header_hash ON peg_handoff(burn_header_hash);",
];

pub struct SortitionDB {
@@ -4054,6 +4103,20 @@ impl SortitionDB {
)
}

/// Get the list of Peg-Handoff operations processed in a given burnchain block.
/// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic
/// to reject them.
pub fn get_peg_handoff_ops(
conn: &Connection,
burn_header_hash: &BurnchainHeaderHash,
) -> Result<Vec<PegHandoffOp>, db_error> {
query_rows(
conn,
"SELECT * FROM peg_handoff WHERE burn_header_hash = ?",
&[burn_header_hash],
)
}

/// Get the list of Peg-In operations processed in a given burnchain block.
/// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic
/// to reject them.
@@ -4956,6 +5019,13 @@ impl<'a> SortitionHandleTx<'a> {
);
self.insert_delegate_stx(op)
}
BlockstackOperationType::PegHandoff(ref op) => {
info!(
"ACCEPTED({}) sBTC peg handoff opt {} at {},{}",
op.block_height, &op.txid, op.block_height, op.vtxindex
);
self.insert_peg_handoff_sbtc(op)
}
BlockstackOperationType::PegIn(ref op) => {
info!(
"ACCEPTED({}) sBTC peg in opt {} at {},{}",
@@ -5044,6 +5114,24 @@ impl<'a> SortitionHandleTx<'a> {
Ok(())
}

/// Insert a peg-handoff op
fn insert_peg_handoff_sbtc(&mut self, op: &PegHandoffOp) -> Result<(), db_error> {
let args: &[&dyn ToSql] = &[
&op.txid,
&op.vtxindex,
&u64_to_sql(op.block_height)?,
&op.burn_header_hash,
&op.next_peg_wallet.to_string(),
&op.amount.to_string(),
&op.reward_cycle.to_string(),
&to_hex(&op.memo),
];

self.execute("REPLACE INTO peg_handoff (txid, vtxindex, block_height, burn_header_hash, next_peg_wallet, amount, reward_cycle, memo) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?;

Ok(())
}

/// Insert a peg-in op
fn insert_peg_in_sbtc(&mut self, op: &PegInOp) -> Result<(), db_error> {
let args: &[&dyn ToSql] = &[
@@ -6516,6 +6604,73 @@ pub mod tests {
}
}

#[test]
fn test_insert_peg_handoff() {
let block_height = 123;

let peg_handoff_op = |burn_header_hash, amount| {
let txid = Txid([0; 32]);
let vtxindex = 456;
let next_peg_wallet =
PoxAddress::Addr32(false, address::PoxAddressType32::P2TR, [0; 32]);
let reward_cycle = 42;
let memo = vec![1, 3, 3, 7];

PegHandoffOp {
next_peg_wallet,
amount,
reward_cycle,
memo,

txid,
vtxindex,
block_height,
burn_header_hash,
}
};

let burn_header_hash_1 = BurnchainHeaderHash([0x01; 32]);
let burn_header_hash_2 = BurnchainHeaderHash([0x02; 32]);

let peg_handoff_1 = peg_handoff_op(burn_header_hash_1, 1337);
let peg_handoff_2 = peg_handoff_op(burn_header_hash_2, 42);

let first_burn_hash = BurnchainHeaderHash::from_hex(
"0000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap();

let epochs = StacksEpoch::unit_test(StacksEpochId::Epoch21, block_height);
let mut db =
SortitionDB::connect_test_with_epochs(block_height, &first_burn_hash, epochs).unwrap();

let snapshot_1 = test_append_snapshot(
&mut db,
burn_header_hash_1,
&vec![BlockstackOperationType::PegHandoff(peg_handoff_1.clone())],
);

let snapshot_2 = test_append_snapshot(
&mut db,
burn_header_hash_2,
&vec![BlockstackOperationType::PegHandoff(peg_handoff_2.clone())],
);

let res_peg_handoffs_1 =
SortitionDB::get_peg_handoff_ops(db.conn(), &snapshot_1.burn_header_hash)
.expect("Failed to get peg-in ops from sortition DB");

assert_eq!(res_peg_handoffs_1.len(), 1);
assert_eq!(res_peg_handoffs_1[0], peg_handoff_1);

let res_peg_handoffs_2 =
SortitionDB::get_peg_handoff_ops(db.conn(), &snapshot_2.burn_header_hash)
.expect("Failed to get peg-in ops from sortition DB");

assert_eq!(res_peg_handoffs_2.len(), 1);
assert_eq!(res_peg_handoffs_2[0], peg_handoff_2);
}

#[test]
fn test_insert_peg_in() {
let block_height = 123;
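A note on the sortdb.rs changes above: `amount` and `reward_cycle` are declared as TEXT columns and parsed back with `.parse()` in `FromRow<PegHandoffOp>`, matching the existing peg tables. The likely reason (an inference, not stated in this diff) is that SQLite's INTEGER type is a signed 64-bit value, so large unsigned amounts are stored as decimal strings instead. A standalone round-trip illustration, assuming `amount` is a `u64` as in the other peg ops:

fn main() {
    // What insert_peg_handoff_sbtc writes: the decimal string form of the amount.
    let amount: u64 = u64::MAX; // larger than i64::MAX, so not safe as an SQLite INTEGER
    let stored = amount.to_string();
    // What FromRow<PegHandoffOp> does when reading the row back.
    let read_back: u64 = stored.parse().expect("decimal string round-trips to u64");
    assert_eq!(read_back, amount);
    println!("round-tripped {} via a TEXT column", read_back);
}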
4 changes: 4 additions & 0 deletions src/chainstate/burn/mod.rs
@@ -73,6 +73,7 @@ pub enum Opcodes {
PreStx = 'p' as u8,
TransferStx = '$' as u8,
DelegateStx = '#' as u8,
PegHandoff = 'H' as u8,
PegIn = '<' as u8,
PegOutRequest = '>' as u8,
PegOutFulfill = '!' as u8,
@@ -196,6 +197,7 @@ impl Opcodes {
const HTTP_PRE_STX: &'static str = "pre_stx";
const HTTP_TRANSFER_STX: &'static str = "transfer_stx";
const HTTP_DELEGATE_STX: &'static str = "delegate_stx";
const HTTP_PEG_HANDOFF: &'static str = "peg_handoff";
const HTTP_PEG_IN: &'static str = "peg_in";
const HTTP_PEG_OUT_REQUEST: &'static str = "peg_out_request";
const HTTP_PEG_OUT_FULFILL: &'static str = "peg_out_fulfill";
@@ -209,6 +211,7 @@ impl Opcodes {
Opcodes::PreStx => Self::HTTP_PRE_STX,
Opcodes::TransferStx => Self::HTTP_TRANSFER_STX,
Opcodes::DelegateStx => Self::HTTP_DELEGATE_STX,
Opcodes::PegHandoff => Self::HTTP_PEG_HANDOFF,
Opcodes::PegIn => Self::HTTP_PEG_IN,
Opcodes::PegOutRequest => Self::HTTP_PEG_OUT_REQUEST,
Opcodes::PegOutFulfill => Self::HTTP_PEG_OUT_FULFILL,
@@ -217,6 +220,7 @@

pub fn from_http_str(input: &str) -> Option<Opcodes> {
let opcode = match input {
Self::HTTP_PEG_HANDOFF => Opcodes::PegHandoff,
Self::HTTP_PEG_IN => Opcodes::PegIn,
Self::HTTP_PEG_OUT_REQUEST => Opcodes::PegOutRequest,
Self::HTTP_PEG_OUT_FULFILL => Opcodes::PegOutFulfill,
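The opcode byte `'H'` and the HTTP name `peg_handoff` introduced in this file form a round-trip pair with `from_http_str`. Below is a minimal sanity-check sketch, not part of this diff, using only items visible in the hunks above; it assumes it would live in a test module inside src/chainstate/burn/mod.rs.

#[test]
fn peg_handoff_opcode_and_http_name() {
    // 'H' is the new burnchain opcode byte for peg handoff.
    assert_eq!(Opcodes::PegHandoff as u8, 'H' as u8);
    // The op_type segment accepted by /v2/burn_ops/{burn_height}/{op_type}
    // maps back to the same opcode.
    assert!(matches!(
        Opcodes::from_http_str("peg_handoff"),
        Some(Opcodes::PegHandoff)
    ));
}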