From e5ab7e96978bcfd9645f3ad17adae50b8a90b353 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:45:18 -0400 Subject: [PATCH 001/107] refactor: put PeerHost and PeerAddress into stacks-common --- stacks-common/src/types/net.rs | 389 +++++++++++++++++++++++++++++++++ 1 file changed, 389 insertions(+) create mode 100644 stacks-common/src/types/net.rs diff --git a/stacks-common/src/types/net.rs b/stacks-common/src/types/net.rs new file mode 100644 index 0000000000..890db4b3f0 --- /dev/null +++ b/stacks-common/src/types/net.rs @@ -0,0 +1,389 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fmt; +use std::hash::Hash; +use std::hash::Hasher; +use std::str::FromStr; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; +use std::net::SocketAddr; + +use serde::de::Deserialize; +use serde::ser::Serialize; + +use crate::util::hash::to_bin; + +use serde::de::Error as de_Error; + +#[derive(Debug)] +pub enum Error { + DecodeError(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::DecodeError(msg) => write!(f, "{}", &msg), + } + } +} + +impl std::error::Error for Error { + fn cause(&self) -> Option<&dyn std::error::Error> { + match self { + Error::DecodeError(_) => None, + } + } +} + +/// A container for an IPv4 or IPv6 address. +/// Rules: +/// -- If this is an IPv6 address, the octets are in network byte order +/// -- If this is an IPv4 address, the octets must encode an IPv6-to-IPv4-mapped address +pub struct PeerAddress(pub [u8; 16]); +impl_array_newtype!(PeerAddress, u8, 16); +impl_array_hexstring_fmt!(PeerAddress); +impl_byte_array_newtype!(PeerAddress, u8, 16); +impl_byte_array_message_codec!(PeerAddress, 16); + +impl Serialize for PeerAddress { + fn serialize(&self, s: S) -> Result { + let inst = format!("{}", self.to_socketaddr(0).ip()); + s.serialize_str(inst.as_str()) + } +} + +impl<'de> Deserialize<'de> for PeerAddress { + fn deserialize>(d: D) -> Result { + let inst = String::deserialize(d)?; + let ip = inst.parse::().map_err(de_Error::custom)?; + + Ok(PeerAddress::from_ip(&ip)) + } +} + +impl PeerAddress { + pub fn from_slice(bytes: &[u8]) -> Option { + if bytes.len() != 16 { + return None; + } + + let mut bytes16 = [0u8; 16]; + bytes16.copy_from_slice(&bytes[0..16]); + Some(PeerAddress(bytes16)) + } + + /// Is this an IPv4 address? + pub fn is_ipv4(&self) -> bool { + self.ipv4_octets().is_some() + } + + /// Get the octet representation of this peer address as an IPv4 address. + /// The last 4 bytes of the list contain the IPv4 address. + /// This method returns None if the bytes don't encode a valid IPv4-mapped address (i.e. 
::ffff:0:0/96) + pub fn ipv4_octets(&self) -> Option<[u8; 4]> { + if self.0[0..12] + != [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, + ] + { + return None; + } + let mut ret = [0u8; 4]; + ret.copy_from_slice(&self.0[12..16]); + Some(ret) + } + + /// Return the bit representation of this peer address as an IPv4 address, in network byte + /// order. Return None if this is not an IPv4 address. + pub fn ipv4_bits(&self) -> Option { + let octets_opt = self.ipv4_octets(); + if octets_opt.is_none() { + return None; + } + + let octets = octets_opt.unwrap(); + Some( + ((octets[0] as u32) << 24) + | ((octets[1] as u32) << 16) + | ((octets[2] as u32) << 8) + | (octets[3] as u32), + ) + } + + /// Convert to SocketAddr + pub fn to_socketaddr(&self, port: u16) -> SocketAddr { + if self.is_ipv4() { + SocketAddr::new( + IpAddr::V4(Ipv4Addr::new( + self.0[12], self.0[13], self.0[14], self.0[15], + )), + port, + ) + } else { + let addr_words: [u16; 8] = [ + ((self.0[0] as u16) << 8) | (self.0[1] as u16), + ((self.0[2] as u16) << 8) | (self.0[3] as u16), + ((self.0[4] as u16) << 8) | (self.0[5] as u16), + ((self.0[6] as u16) << 8) | (self.0[7] as u16), + ((self.0[8] as u16) << 8) | (self.0[9] as u16), + ((self.0[10] as u16) << 8) | (self.0[11] as u16), + ((self.0[12] as u16) << 8) | (self.0[13] as u16), + ((self.0[14] as u16) << 8) | (self.0[15] as u16), + ]; + + SocketAddr::new( + IpAddr::V6(Ipv6Addr::new( + addr_words[0], + addr_words[1], + addr_words[2], + addr_words[3], + addr_words[4], + addr_words[5], + addr_words[6], + addr_words[7], + )), + port, + ) + } + } + + /// Convert from socket address + pub fn from_socketaddr(addr: &SocketAddr) -> PeerAddress { + PeerAddress::from_ip(&addr.ip()) + } + + /// Convert from IP address + pub fn from_ip(addr: &IpAddr) -> PeerAddress { + match addr { + IpAddr::V4(ref addr) => { + let octets = addr.octets(); + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, + octets[0], octets[1], octets[2], octets[3], + ]) + } + IpAddr::V6(ref addr) => { + let words = addr.segments(); + PeerAddress([ + (words[0] >> 8) as u8, + (words[0] & 0xff) as u8, + (words[1] >> 8) as u8, + (words[1] & 0xff) as u8, + (words[2] >> 8) as u8, + (words[2] & 0xff) as u8, + (words[3] >> 8) as u8, + (words[3] & 0xff) as u8, + (words[4] >> 8) as u8, + (words[4] & 0xff) as u8, + (words[5] >> 8) as u8, + (words[5] & 0xff) as u8, + (words[6] >> 8) as u8, + (words[6] & 0xff) as u8, + (words[7] >> 8) as u8, + (words[7] & 0xff) as u8, + ]) + } + } + } + + /// Convert from ipv4 octets + pub fn from_ipv4(o1: u8, o2: u8, o3: u8, o4: u8) -> PeerAddress { + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, o1, o2, o3, o4, + ]) + } + + /// Is this the any-network address? i.e. 0.0.0.0 (v4) or :: (v6)? + pub fn is_anynet(&self) -> bool { + self.0 == [0x00; 16] || self == &PeerAddress::from_ipv4(0, 0, 0, 0) + } + + /// Is this a private IP address? 
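// A standalone sketch (not from this patch) of the rule the next method applies:
// the RFC 1918 IPv4 blocks (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16), and for
// IPv6 any address whose first octet is >= 0xfc, count as "private" here.
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn looks_private(ip: &IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => {
            let o = v4.octets();
            // 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16
            o[0] == 10
                || (o[0] == 172 && (16..=31).contains(&o[1]))
                || (o[0] == 192 && o[1] == 168)
        }
        // mirrors the `self.0[0] >= 0xfc` branch of the method below
        IpAddr::V6(v6) => v6.octets()[0] >= 0xfc,
    }
}

fn main() {
    assert!(looks_private(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))));
    assert!(!looks_private(&IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8))));
    assert!(looks_private(&IpAddr::V6(Ipv6Addr::new(0xfc00, 0, 0, 0, 0, 0, 0, 1))));
}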
+ pub fn is_in_private_range(&self) -> bool { + if self.is_ipv4() { + // 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16 + self.0[12] == 10 + || (self.0[12] == 172 && self.0[13] >= 16 && self.0[13] <= 31) + || (self.0[12] == 192 && self.0[13] == 168) + } else { + self.0[0] >= 0xfc + } + } + + pub fn to_bin(&self) -> String { + to_bin(&self.0) + } +} + +/// Peer address variants for the Host: header +#[derive(Clone, PartialEq)] +pub enum PeerHost { + DNS(String, u16), + IP(PeerAddress, u16), +} + +impl fmt::Display for PeerHost { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + PeerHost::DNS(ref s, ref p) => write!(f, "{}:{}", s, p), + PeerHost::IP(ref a, ref p) => write!(f, "{}", a.to_socketaddr(*p)), + } + } +} + +impl fmt::Debug for PeerHost { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + PeerHost::DNS(ref s, ref p) => write!(f, "PeerHost::DNS({},{})", s, p), + PeerHost::IP(ref a, ref p) => write!(f, "PeerHost::IP({:?},{})", a, p), + } + } +} + +impl Hash for PeerHost { + fn hash(&self, state: &mut H) { + match *self { + PeerHost::DNS(ref name, ref port) => { + "DNS".hash(state); + name.hash(state); + port.hash(state); + } + PeerHost::IP(ref addrbytes, ref port) => { + "IP".hash(state); + addrbytes.hash(state); + port.hash(state); + } + } + } +} + +impl FromStr for PeerHost { + type Err = Error; + + fn from_str(header: &str) -> Result { + // we're looser than the RFC allows for DNS names -- anything that doesn't parse to an IP + // address will be parsed to a DNS name. + // try as IP:port + match header.parse::() { + Ok(socketaddr) => Ok(PeerHost::IP( + PeerAddress::from_socketaddr(&socketaddr), + socketaddr.port(), + )), + Err(_) => { + // maybe missing :port + let hostport = format!("{}:80", header); + match hostport.parse::() { + Ok(socketaddr) => Ok(PeerHost::IP( + PeerAddress::from_socketaddr(&socketaddr), + socketaddr.port(), + )), + Err(_) => { + // try as DNS-name:port + let host; + let port; + let parts: Vec<&str> = header.split(":").collect(); + if parts.len() == 0 { + return Err(Error::DecodeError( + "Failed to parse PeerHost: no parts".to_string(), + )); + } else if parts.len() == 1 { + // no port + host = Some(parts[0].to_string()); + port = Some(80); + } else { + let np = parts.len(); + if parts[np - 1].chars().all(char::is_numeric) { + // ends in :port + let host_str = parts[0..np - 1].join(":"); + if host_str.len() == 0 { + return Err(Error::DecodeError("Empty host".to_string())); + } + host = Some(host_str); + + let port_res = parts[np - 1].parse::(); + port = match port_res { + Ok(p) => Some(p), + Err(_) => { + return Err(Error::DecodeError( + "Failed to parse PeerHost: invalid port".to_string(), + )); + } + }; + } else { + // only host + host = Some(header.to_string()); + port = Some(80); + } + } + + match (host, port) { + (Some(h), Some(p)) => Ok(PeerHost::DNS(h, p)), + (_, _) => Err(Error::DecodeError( + "Failed to parse PeerHost: failed to extract host and/or port" + .to_string(), + )), // I don't think this is reachable + } + } + } + } + } + } +} + +impl PeerHost { + pub fn hostname(&self) -> String { + match *self { + PeerHost::DNS(ref s, _) => s.clone(), + PeerHost::IP(ref a, ref p) => format!("{}", a.to_socketaddr(*p).ip()), + } + } + + pub fn port(&self) -> u16 { + match *self { + PeerHost::DNS(_, ref p) => *p, + PeerHost::IP(_, ref p) => *p, + } + } + + pub fn from_host_port(host: String, port: u16) -> PeerHost { + // try as IP, and fall back to DNS + match host.parse::() { + Ok(addr) => 
PeerHost::IP(PeerAddress::from_ip(&addr), port), + Err(_) => PeerHost::DNS(host, port), + } + } + + pub fn from_socketaddr(socketaddr: &SocketAddr) -> PeerHost { + PeerHost::IP(PeerAddress::from_socketaddr(socketaddr), socketaddr.port()) + } + + pub fn to_host_port(&self) -> (String, u16) { + match *self { + PeerHost::DNS(ref s, ref p) => (s.clone(), *p), + PeerHost::IP(ref i, ref p) => (format!("{}", i.to_socketaddr(0).ip()), *p), + } + } +} + +impl From for PeerHost { + fn from(addr: SocketAddr) -> PeerHost { + PeerHost::from_socketaddr(&addr) + } +} From e3f61b88f0ae213b1bd049f2ff36dfc27d6771f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:46:18 -0400 Subject: [PATCH 002/107] chore: expose net module --- stacks-common/src/types/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 35bb97d860..6893d863d3 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -19,6 +19,7 @@ use crate::util::hash::Hash160; use std::cmp::Ordering; pub mod chainstate; +pub mod net; /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); From ab329b69435519e2d4762e36bc499921c44518fd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:46:39 -0400 Subject: [PATCH 003/107] feat: get_chunk_size() helper --- stacks-common/src/util/chunked_encoding.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index f8a4117591..65133580c9 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -22,6 +22,8 @@ use std::io::{Read, Write}; use crate::codec::MAX_MESSAGE_LEN; use crate::deps_common::httparse; +/// NOTE: it is imperative that the given Read and Write impls here _never_ fail with EWOULDBLOCK. 
+ #[derive(Debug)] pub enum ChunkedError { DeserializeError(String), @@ -343,6 +345,10 @@ impl HttpChunkedTransferWriterState { corked: false, } } + + pub fn get_chunk_size(&self) -> usize { + self.chunk_size + } } pub struct HttpChunkedTransferWriter<'a, 'state, W: Write> { From a10bf55d9cf645cd51c45af2e0093d3c51ba2da8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:46:51 -0400 Subject: [PATCH 004/107] chore: fix compiler warning --- stacks-common/src/util/hash.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/util/hash.rs b/stacks-common/src/util/hash.rs index 9660210dc9..3f7c165518 100644 --- a/stacks-common/src/util/hash.rs +++ b/stacks-common/src/util/hash.rs @@ -429,7 +429,7 @@ where row_hashes.reserve(nodes[i].len() / 2); for j in 0..(nodes[i].len() / 2) { - let h = MerkleTree::get_node_hash(&nodes[i][(2 * j)], &nodes[i][2 * j + 1]); + let h = MerkleTree::get_node_hash(&nodes[i][2 * j], &nodes[i][2 * j + 1]); row_hashes.push(h); } From 5e68d0d85539d16b958557c414b83e2fdbf4a82a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:47:09 -0400 Subject: [PATCH 005/107] fix: 'hex' not 'sha256d' for hex error message --- stacks-common/src/util/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 02e6501583..1b3c467806 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -66,8 +66,8 @@ pub enum HexError { impl fmt::Display for HexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - HexError::BadLength(n) => write!(f, "bad length {} for sha256d hex string", n), - HexError::BadCharacter(c) => write!(f, "bad character {} in sha256d hex string", c), + HexError::BadLength(n) => write!(f, "bad length {} for hex string", n), + HexError::BadCharacter(c) => write!(f, "bad character {} for hex string", c), } } } From d95340877b6ad053ba6f3574e094fde7ae4691a9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:47:31 -0400 Subject: [PATCH 006/107] fix: compiler warning: use to_owned() --- stackslib/src/chainstate/burn/db/sortdb.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index ba1ee25830..42b058525a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5582,7 +5582,8 @@ impl<'a> SortitionHandleTx<'a> { .first() .as_ref() .expect("FATAL: zero-length list of tied block IDs") - .clone(); + .to_owned(); + let winner_index = *mapping .get(&winner) .expect("FATAL: winning block ID not mapped"); From a6586502e04d034a14da8325f9af63f3f768b64f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:47:44 -0400 Subject: [PATCH 007/107] fix: compiler warning: use to_owned() --- stackslib/src/chainstate/coordinator/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c6b6ca1684..b2d51c180d 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -360,7 +360,7 @@ pub fn setup_states_with_epochs( burnchain_blocks_db, first_sortition.burn_header_hash, registers, - path.clone(), + path.to_owned(), )); } else { others.push(burnchain_blocks_db); From 5b4ca5865ed70a23efccfadf9c64d76dde624057 Mon Sep 17 00:00:00 2001 From: Jude Nelson 
Date: Mon, 2 Oct 2023 15:48:01 -0400 Subject: [PATCH 008/107] refactor: remove all streaming code, since we don't need it anymore --- stackslib/src/chainstate/stacks/db/blocks.rs | 479 ++----------------- 1 file changed, 29 insertions(+), 450 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 06cf1c3d91..e7c4647e52 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -43,6 +43,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::transactions::TransactionNonceMismatch; +use crate::chainstate::stacks::db::ExtendedStacksHeader; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::Error; @@ -60,10 +61,8 @@ use crate::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use crate::core::*; use crate::cost_estimates::EstimatorError; use crate::net::relay::Relayer; -use crate::net::stream::{BlockStreamData, HeaderStreamData, MicroblockStreamData, Streamer}; use crate::net::BlocksInvData; use crate::net::Error as net_error; -use crate::net::ExtendedStacksHeader; use crate::util_lib::db::u64_to_sql; use crate::util_lib::db::Error as db_error; use crate::util_lib::db::{ @@ -1179,6 +1178,15 @@ impl StacksChainState { ) -> Result, Error> { let parent_index_hash = StacksBlockHeader::make_index_block_hash(parent_consensus_hash, parent_block_hash); + Self::load_staging_microblock_indexed(blocks_conn, &parent_index_hash, microblock_hash) + } + + /// Load up a preprocessed microblock given the index block hash of the anchored parent + pub fn load_staging_microblock_indexed( + blocks_conn: &DBConn, + parent_index_hash: &StacksBlockId, + microblock_hash: &BlockHeaderHash, + ) -> Result, Error> { match StacksChainState::load_staging_microblock_info( blocks_conn, &parent_index_hash, @@ -2926,6 +2934,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. + #[cfg(test)] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2957,458 +2966,29 @@ impl StacksChainState { Ok(microblock_info) } - /// Write header data to the fd - fn write_stream_data( - fd: &mut W, - stream: &mut S, - input: &mut R, - count: u64, - ) -> Result { - let mut buf = vec![0u8; count as usize]; - let nr = input.read(&mut buf).map_err(Error::ReadError)?; - fd.write_all(&buf[0..nr]).map_err(Error::WriteError)?; - - stream.add_bytes(nr as u64); - - Ok(nr as u64) - } - - /// Stream header data from one Read to one Write - fn stream_data( - fd: &mut W, - stream: &mut S, - input: &mut R, - count: u64, - ) -> Result { - input - .seek(SeekFrom::Start(stream.offset())) - .map_err(Error::ReadError)?; - - StacksChainState::write_stream_data(fd, stream, input, count) - } - - /// Stream a single header's data from disk - /// If this method returns 0, it's because we're EOF on the header and should begin the next. - /// - /// The data streamed to `fd` is meant to be part of a JSON array. The header data will be - /// encoded as JSON, and a `,` will be written after it if there are more headers to follow. - /// The caller is responsible for writing `[` before writing headers, and writing `]` after all - /// headers have been written. 
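// A sketch (not from this patch) of the framing described above: each header is
// written as JSON, entries are separated by commas, and the caller supplies the
// surrounding '[' and ']'. `Entry` is a stand-in for ExtendedStacksHeader here.
use serde::Serialize;

#[derive(Serialize)]
struct Entry {
    consensus_hash: String,
    parent_block_id: String,
}

fn encode_header_page(entries: &[Entry]) -> serde_json::Result<Vec<u8>> {
    let mut out = vec![b'['];
    for (i, entry) in entries.iter().enumerate() {
        serde_json::to_writer(&mut out, entry)?;
        if i + 1 < entries.len() {
            // more headers follow, so separate them with a comma
            out.push(b',');
        }
    }
    out.push(b']');
    Ok(out)
}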
- /// - /// Returns the number of bytes written - pub fn stream_one_header( - blocks_conn: &DBConn, - block_path: &str, - fd: &mut W, - stream: &mut HeaderStreamData, - count: u64, - ) -> Result { - if stream.header_bytes.is_none() && stream.num_headers > 0 { - let header = - StacksChainState::load_block_header_indexed(block_path, &stream.index_block_hash)? - .ok_or(Error::NoSuchBlockError)?; - - let header_info = - StacksChainState::load_staging_block_info(blocks_conn, &stream.index_block_hash)? - .ok_or(Error::NoSuchBlockError)?; - - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &header_info.parent_consensus_hash, - &header_info.parent_anchored_block_hash, - ); - - let mut header_bytes = vec![]; - let extended_header = ExtendedStacksHeader { - consensus_hash: header_info.consensus_hash, - header: header, - parent_block_id: parent_index_block_hash, - }; - - serde_json::to_writer(&mut header_bytes, &extended_header).map_err(|e| { - Error::NetError(net_error::SerializeError(format!( - "Failed to send as JSON: {:?}", - &e - ))) - })?; - - if stream.num_headers > 1 { - header_bytes.push(',' as u8); - } - - test_debug!( - "header_bytes: {}", - String::from_utf8(header_bytes.clone()).unwrap() - ); - - stream.header_bytes = Some(header_bytes); - stream.offset = 0; - } - - if stream.header_bytes.is_some() { - let header_bytes = stream - .header_bytes - .take() - .expect("Do not have header bytes and did not set them"); - let res = (|| { - if stream.offset >= (header_bytes.len() as u64) { - // EOF - return Ok(0); - } - - let num_bytes = StacksChainState::write_stream_data( - fd, - stream, - &mut &header_bytes[(stream.offset as usize)..], - count, - )?; - test_debug!( - "Stream header hash={} offset={} total_bytes={}, num_bytes={} num_headers={}", - &stream.index_block_hash, - stream.offset, - stream.total_bytes, - num_bytes, - stream.num_headers - ); - Ok(num_bytes) - })(); - stream.header_bytes = Some(header_bytes); - res - } else { - Ok(0) - } - } - - /// Stream multiple headers from disk, moving in reverse order from the chain tip back. - /// The format will be a JSON array. - /// Returns total number of bytes written (will be equal to the number of bytes read). - /// Returns 0 if we run out of headers - pub fn stream_headers( - &self, - fd: &mut W, - stream: &mut HeaderStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = match StacksChainState::stream_one_header( - &self.db(), - &self.blocks_path, - fd, - stream, - to_write, - ) { - Ok(nw) => nw, - Err(Error::DBError(db_error::NotFoundError)) => { - // out of headers - debug!( - "No more header to stream after {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - Err(e) => { - return Err(e); - } - }; - - if nw == 0 { - if stream.num_headers == 0 { - // out of headers - debug!( - "No more header to stream after {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - - // EOF on header; move to the next one (its parent) - let header_info = match StacksChainState::load_staging_block_info( - &self.db(), - &stream.index_block_hash, - )? 
{ - Some(x) => x, - None => { - // out of headers - debug!( - "Out of headers to stream after block {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - }; - - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &header_info.parent_consensus_hash, - &header_info.parent_anchored_block_hash, - ); - - stream.index_block_hash = parent_index_block_hash; - stream.num_headers = stream - .num_headers - .checked_sub(1) - .expect("BUG: streamed more headers than called for"); + /// Read one header for the purposes of streaming. + pub fn read_extended_header( + db: &DBConn, + blocks_path: &str, + index_block_hash: &StacksBlockId, + ) -> Result { + let header = StacksChainState::load_block_header_indexed(blocks_path, index_block_hash)? + .ok_or(Error::NoSuchBlockError)?; - stream.header_bytes = None; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } + let header_info = StacksChainState::load_staging_block_info(db, index_block_hash)? + .ok_or(Error::NoSuchBlockError)?; - debug!( - "Streaming header={}: to_write={}, nw={}", - &stream.index_block_hash, to_write, nw - ); - } - debug!( - "Streamed headers ({} remaining): {} - {} = {}", - stream.num_headers, - count, - to_write, - count - to_write + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &header_info.parent_consensus_hash, + &header_info.parent_anchored_block_hash, ); - Ok(count - to_write) - } - /// Stream a single microblock's data from the staging database. - /// If this method returns 0, it's because we're EOF on the blob. - pub fn stream_one_microblock( - blocks_conn: &DBConn, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let rowid = match stream.rowid { - None => { - // need to get rowid in order to get the blob - match StacksChainState::stream_microblock_get_rowid( - blocks_conn, - &stream.parent_index_block_hash, - &stream.microblock_hash, - )? { - Some(rid) => rid, - None => { - test_debug!("Microblock hash={:?} not in DB", &stream.microblock_hash,); - return Err(Error::NoSuchBlockError); - } - } - } - Some(rid) => rid, + let extended_header = ExtendedStacksHeader { + consensus_hash: header_info.consensus_hash, + header: header, + parent_block_id: parent_index_block_hash, }; - - stream.rowid = Some(rowid); - let mut blob = blocks_conn - .blob_open( - DatabaseName::Main, - "staging_microblocks_data", - "block_data", - rowid, - true, - ) - .map_err(|e| { - match e { - sqlite_error::SqliteFailure(_, _) => { - // blob got moved out of staging - Error::NoSuchBlockError - } - _ => Error::DBError(db_error::SqliteError(e)), - } - })?; - - let num_bytes = StacksChainState::stream_data(fd, stream, &mut blob, count)?; - test_debug!( - "Stream microblock rowid={} hash={} offset={} total_bytes={}, num_bytes={}", - rowid, - &stream.microblock_hash, - stream.offset, - stream.total_bytes, - num_bytes - ); - Ok(num_bytes) - } - - /// Stream multiple microblocks from staging, moving in reverse order from the stream tail to the stream head. - /// Returns total number of bytes written (will be equal to the number of bytes read). 
- /// Returns 0 if we run out of microblocks in the staging db - pub fn stream_microblocks_confirmed( - chainstate: &StacksChainState, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = - StacksChainState::stream_one_microblock(&chainstate.db(), fd, stream, to_write)?; - if nw == 0 { - // EOF on microblock blob; move to the next one (its parent) - let mblock_info = match StacksChainState::load_staging_microblock_info( - &chainstate.db(), - &stream.parent_index_block_hash, - &stream.microblock_hash, - )? { - Some(x) => x, - None => { - // out of mblocks - debug!( - "Out of microblocks to stream after confirmed microblock {}", - &stream.microblock_hash - ); - break; - } - }; - - let rowid = match StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), - &stream.parent_index_block_hash, - &mblock_info.parent_hash, - )? { - Some(rid) => rid, - None => { - // out of mblocks - debug!( - "No rowid found for confirmed stream microblock {}", - &mblock_info.parent_hash - ); - break; - } - }; - - stream.offset = 0; - stream.rowid = Some(rowid); - stream.microblock_hash = mblock_info.parent_hash; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } - debug!( - "Streaming microblock={}: to_write={}, nw={}", - &stream.microblock_hash, to_write, nw - ); - } - debug!( - "Streamed confirmed microblocks: {} - {} = {}", - count, - to_write, - count - to_write - ); - Ok(count - to_write) - } - - /// Stream block data from the chunk store. - pub fn stream_data_from_chunk_store( - blocks_path: &str, - fd: &mut W, - stream: &mut BlockStreamData, - count: u64, - ) -> Result { - let block_path = - StacksChainState::get_index_block_path(blocks_path, &stream.index_block_hash)?; - - // The reason we open a file on each call to stream data is because we don't want to - // exhaust the supply of file descriptors. Maybe a future version of this code will do - // something like cache the set of open files so we don't have to keep re-opening them. - let mut file_fd = fs::OpenOptions::new() - .read(true) - .write(false) - .create(false) - .truncate(false) - .open(&block_path) - .map_err(|e| { - if e.kind() == io::ErrorKind::NotFound { - error!("File not found: {:?}", &block_path); - Error::NoSuchBlockError - } else { - Error::ReadError(e) - } - })?; - - StacksChainState::stream_data(fd, stream, &mut file_fd, count) - } - - /// Stream block data from the chain state. - /// Returns the number of bytes written, and updates `stream` to point to the next point to - /// read. Writes the bytes streamed to `fd`. - pub fn stream_block( - &mut self, - fd: &mut W, - stream: &mut BlockStreamData, - count: u64, - ) -> Result { - StacksChainState::stream_data_from_chunk_store(&self.blocks_path, fd, stream, count) - } - - /// Stream unconfirmed microblocks from the staging DB. Pull only from the staging DB. - /// Returns the number of bytes written, and updates `stream` to point to the next point to - /// read. Wrties the bytes streamed to `fd`. 
- pub fn stream_microblocks_unconfirmed( - chainstate: &StacksChainState, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = - StacksChainState::stream_one_microblock(&chainstate.db(), fd, stream, to_write)?; - if nw == 0 { - // EOF on microblock blob; move to the next one - let next_seq = match stream.seq { - u16::MAX => { - return Err(Error::NoSuchBlockError); - } - x => x + 1, - }; - let next_mblock_hash = match StacksChainState::load_next_descendant_microblock( - &chainstate.db(), - &stream.index_block_hash, - next_seq, - )? { - Some(mblock) => { - test_debug!( - "Switch to {}-{} ({})", - &stream.index_block_hash, - &mblock.block_hash(), - next_seq - ); - mblock.block_hash() - } - None => { - // EOF on stream - break; - } - }; - - let rowid = match StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), - &stream.parent_index_block_hash, - &next_mblock_hash, - )? { - Some(rid) => rid, - None => { - // out of mblocks - break; - } - }; - - stream.offset = 0; - stream.rowid = Some(rowid); - stream.microblock_hash = next_mblock_hash; - stream.seq = next_seq; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } - } - Ok(count - to_write) + Ok(extended_header) } /// Check whether or not there exists a Stacks block at or higher @@ -7160,7 +6740,6 @@ pub mod test { use crate::chainstate::stacks::*; use crate::core::mempool::*; use crate::net::test::*; - use crate::net::ExtendedStacksHeader; use crate::util_lib::db::Error as db_error; use crate::util_lib::db::*; use stacks_common::util::hash::*; From 8750d467fdaae9b2f6bc72e531296c9e103c7339 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:48:25 -0400 Subject: [PATCH 009/107] refactor: put ExtendedStacksHeader into Stacks chainstate module hierarchy --- stackslib/src/chainstate/stacks/db/mod.rs | 73 ++++++++++++++++++++++- 1 file changed, 71 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 22ee6fc560..80bac05476 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -63,7 +63,6 @@ use crate::core::*; use crate::monitoring; use crate::net::atlas::BNS_CHARS_REGEX; use crate::net::Error as net_error; -use crate::net::MemPoolSyncData; use crate::util_lib::db::Error as db_error; use crate::util_lib::db::{ query_count, query_row, tx_begin_immediate, tx_busy_handler, DBConn, DBTx, FromColumn, FromRow, @@ -84,7 +83,7 @@ use clarity::vm::representations::ClarityName; use clarity::vm::representations::ContractName; use clarity::vm::types::TupleData; use stacks_common::util; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{hex_bytes, to_hex}; use crate::chainstate::burn::ConsensusHashExtensions; use crate::chainstate::stacks::address::StacksAddressExtensions; @@ -97,6 +96,11 @@ use crate::util_lib::boot::{boot_code_acc, boot_code_addr, boot_code_id, boot_co use clarity::vm::Value; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash}; +use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; + +use serde::de::Error as de_Error; +use serde::Deserialize; + pub mod accounts; pub mod blocks; pub mod contracts; @@ -202,6 +206,57 @@ pub struct StacksEpochReceipt { pub epoch_transition: bool, } +/// Headers we serve over the network +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 
+pub struct ExtendedStacksHeader { + pub consensus_hash: ConsensusHash, + #[serde( + serialize_with = "ExtendedStacksHeader_StacksBlockHeader_serialize", + deserialize_with = "ExtendedStacksHeader_StacksBlockHeader_deserialize" + )] + pub header: StacksBlockHeader, + pub parent_block_id: StacksBlockId, +} + +/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string +fn ExtendedStacksHeader_StacksBlockHeader_serialize( + header: &StacksBlockHeader, + s: S, +) -> Result { + let bytes = header.serialize_to_vec(); + let header_hex = to_hex(&bytes); + s.serialize_str(&header_hex.as_str()) +} + +/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string +fn ExtendedStacksHeader_StacksBlockHeader_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let header_hex = String::deserialize(d)?; + let header_bytes = hex_bytes(&header_hex).map_err(de_Error::custom)?; + StacksBlockHeader::consensus_deserialize(&mut &header_bytes[..]).map_err(de_Error::custom) +} + +impl StacksMessageCodec for ExtendedStacksHeader { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.header)?; + write_next(fd, &self.parent_block_id)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let ch = read_next(fd)?; + let bh = read_next(fd)?; + let pbid = read_next(fd)?; + Ok(ExtendedStacksHeader { + consensus_hash: ch, + header: bh, + parent_block_id: pbid, + }) + } +} + #[derive(Debug, Clone, PartialEq)] pub struct DBConfig { pub version: String, @@ -1555,6 +1610,20 @@ impl StacksChainState { ) } + /// Re-open the chainstate DB + pub fn reopen_db(&self) -> Result { + let path = PathBuf::from(self.root_path.clone()); + let header_index_root_path = StacksChainState::header_index_root_path(path); + let header_index_root = header_index_root_path + .to_str() + .ok_or_else(|| Error::DBError(db_error::ParseError))? + .to_string(); + + let state_index = + StacksChainState::open_db(self.mainnet, self.chain_id, &header_index_root)?; + Ok(state_index.into_sqlite_conn()) + } + pub fn blocks_path(mut path: PathBuf) -> PathBuf { path.push("blocks"); path From ac0187ea7b2445a9dc03b2282f5ee9e34bedb220 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:48:46 -0400 Subject: [PATCH 010/107] feat: destruct a MARF into its Sqlite connection (i.e. for purposes of re-opening a StacksChainState) --- stackslib/src/chainstate/stacks/index/marf.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 7a7cbfd73c..976dd69c2e 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1545,4 +1545,9 @@ impl MARF { pub fn get_root_hash_at(&mut self, block_hash: &T) -> Result { self.storage.connection().get_root_hash_at(block_hash) } + + /// Convert to the inner sqlite connection + pub fn into_sqlite_conn(self) -> Connection { + self.storage.into_sqlite_conn() + } } From ba30d82308489f1656302cafa7d6e921e2091ead Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:49:16 -0400 Subject: [PATCH 011/107] feat: destruct TrieFileStorage into its inner connection (i.e. 
for purposes of reopening it) --- stackslib/src/chainstate/stacks/index/storage.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 4bb57db524..7f0ba077d6 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -875,7 +875,7 @@ impl TrieRAM { for j in 0..node_data.len() { let next_node = &mut self.data[node_data[j] as usize].0; if !next_node.is_leaf() { - let mut ptrs = next_node.ptrs_mut(); + let ptrs = next_node.ptrs_mut(); let num_children = ptrs.len(); for k in 0..num_children { if ptrs[k].id != TrieNodeID::Empty as u8 && !is_backptr(ptrs[k].id) { @@ -1381,6 +1381,10 @@ impl TrieFileStorage { tx_begin_immediate(&mut self.db) } + pub fn into_sqlite_conn(self) -> Connection { + self.db + } + fn open_opts( db_path: &str, readonly: bool, From 9edec34c6e4b79a3763438ee52856b0c23cb2e76 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:49:40 -0400 Subject: [PATCH 012/107] feat: helper to create a test microblock with a given number of txs --- stackslib/src/chainstate/stacks/mod.rs | 42 ++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 4ed4169d4a..e9839626c2 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1345,4 +1345,46 @@ pub mod test { txs: txs_anchored, } } + + pub fn make_codec_test_microblock(num_txs: usize) -> StacksMicroblock { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + ); + let all_txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::OffChainOnly, + &TransactionPostConditionMode::Allow, + ); + + let txs_mblock: Vec<_> = all_txs.into_iter().take(num_txs).collect(); + let txid_vecs = txs_mblock + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + + let mut header = StacksMicroblockHeader { + version: 6, + sequence: 1, + prev_block: BlockHeaderHash([0x11; 32]), + tx_merkle_root, + signature: MessageSignature::empty(), + }; + + header.sign(&privk).unwrap(); + StacksMicroblock { + header: header, + txs: txs_mblock, + } + } } From f9b3f01c517f2a8be91114cb91d569d314820b94 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:49:59 -0400 Subject: [PATCH 013/107] fix: use to_owned() --- stackslib/src/chainstate/stacks/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 77d5162b6d..687265164d 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -256,7 +256,7 @@ impl TestMinerTrace { for miner_id in p.miner_node_map.keys() { if let Some(ref test_name) = p.miner_node_map.get(miner_id) { if !all_test_names.contains(test_name) { - all_test_names.insert(test_name.clone()); + all_test_names.insert(test_name.to_owned()); } } } From 2bd674f879388f8ee4f6a048e575dd560836fafc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 
15:50:14 -0400 Subject: [PATCH 014/107] fix: use to_owned() --- stackslib/src/clarity_vm/database/marf.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index a1bb96cb9f..e98a3bcb48 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -88,7 +88,7 @@ impl MarfedKV { ) -> InterpreterResult { let marf = MarfedKV::setup_db(path_str, false, marf_opts)?; let chain_tip = match miner_tip { - Some(ref miner_tip) => *miner_tip.clone(), + Some(miner_tip) => miner_tip.to_owned(), None => StacksBlockId::sentinel(), }; @@ -102,7 +102,7 @@ impl MarfedKV { ) -> InterpreterResult { let marf = MarfedKV::setup_db(path_str, true, marf_opts)?; let chain_tip = match miner_tip { - Some(ref miner_tip) => *miner_tip.clone(), + Some(miner_tip) => miner_tip.to_owned(), None => StacksBlockId::sentinel(), }; From 6c98cd60562df3b6d29853207fcaba8b14bf5bbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:50:30 -0400 Subject: [PATCH 015/107] refactor: put MemPoolSyncData into the mempool module, and remove transaction streaming code --- stackslib/src/core/mempool.rs | 334 ++++++++++++++++++++-------------- 1 file changed, 202 insertions(+), 132 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index c6b155f1a4..8b04f6de0b 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -18,6 +18,7 @@ use std::cmp::{self, Ordering}; use std::collections::{HashMap, HashSet, VecDeque}; use std::fs; use std::hash::Hasher; +use std::io; use std::io::{Read, Write}; use std::ops::Deref; use std::ops::DerefMut; @@ -50,7 +51,7 @@ use crate::core::StacksEpochId; use crate::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use crate::core::FIRST_STACKS_BLOCK_HASH; use crate::monitoring::increment_stx_mempool_gc; -use crate::net::stream::TxStreamData; +use crate::net::Error as net_error; use crate::util_lib::db::query_int; use crate::util_lib::db::query_row_columns; use crate::util_lib::db::query_rows; @@ -69,8 +70,6 @@ use stacks_common::util::hash::to_hex; use stacks_common::util::hash::Sha512Trunc256Sum; use std::time::Instant; -use crate::net::MemPoolSyncData; - use crate::util_lib::bloom::{BloomCounter, BloomFilter, BloomNodeHasher}; use crate::clarity_vm::clarity::ClarityConnection; @@ -90,6 +89,9 @@ use crate::monitoring; use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; use crate::util_lib::db::table_exists; +use stacks_common::codec::{read_next, write_next, MAX_MESSAGE_LEN}; +use stacks_common::util::retry::{BoundReader, RetryReader}; + // maximum number of confirmations a transaction can have before it's garbage-collected pub const MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; pub const MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; @@ -150,6 +152,167 @@ impl StacksMessageCodec for TxTag { } } +define_u8_enum!(MemPoolSyncDataID { + BloomFilter = 0x01, + TxTags = 0x02 +}); + +#[derive(Debug, Clone, PartialEq)] +pub enum MemPoolSyncData { + BloomFilter(BloomFilter), + TxTags([u8; 32], Vec), +} + +impl StacksMessageCodec for MemPoolSyncData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + match *self { + MemPoolSyncData::BloomFilter(ref bloom_filter) => { + write_next(fd, &MemPoolSyncDataID::BloomFilter.to_u8())?; + write_next(fd, bloom_filter)?; + } + MemPoolSyncData::TxTags(ref seed, ref tags) => { + write_next(fd, &MemPoolSyncDataID::TxTags.to_u8())?; + 
write_next(fd, seed)?; + write_next(fd, tags)?; + } + } + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let data_id: u8 = read_next(fd)?; + match MemPoolSyncDataID::from_u8(data_id).ok_or(codec_error::DeserializeError(format!( + "Unrecognized MemPoolSyncDataID {}", + &data_id + )))? { + MemPoolSyncDataID::BloomFilter => { + let bloom_filter: BloomFilter = read_next(fd)?; + Ok(MemPoolSyncData::BloomFilter(bloom_filter)) + } + MemPoolSyncDataID::TxTags => { + let seed: [u8; 32] = read_next(fd)?; + let txtags: Vec = read_next(fd)?; + Ok(MemPoolSyncData::TxTags(seed, txtags)) + } + } + } +} + +/// Read the trailing page ID from a transaction stream +fn parse_mempool_query_page_id( + pos: usize, + retry_reader: &mut RetryReader<'_, R>, +) -> Result, net_error> { + // possibly end-of-transactions, in which case, the last 32 bytes should be + // a page ID. Expect end-of-stream after this. + retry_reader.set_position(pos); + let next_page: Txid = match read_next(retry_reader) { + Ok(txid) => txid, + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if pos == retry_reader.position() { + // this is fine -- the node didn't get another page + return Ok(None); + } else { + // partial data -- corrupt stream + test_debug!("Unexpected EOF: {} != {}", pos, retry_reader.position()); + return Err(e.into()); + } + } + _ => { + return Err(e.into()); + } + }, + e => { + return Err(e.into()); + } + }, + }; + + test_debug!("Read page_id {:?}", &next_page); + Ok(Some(next_page)) +} + +/// Decode a transaction stream, returned from /v2/mempool/query. +/// The wire format is a list of transactions (no SIP-003 length prefix), followed by an +/// optional 32-byte page ID. Obtain both the transactions and page ID, if it exists. +pub fn decode_tx_stream( + fd: &mut R, +) -> Result<(Vec, Option), net_error> { + // The wire format is `tx, tx, tx, tx, .., tx, txid`. + // The last 32 bytes are the page ID for the next mempool query. + // NOTE: there will be no length prefix on this. + let mut txs: Vec = vec![]; + let mut bound_reader = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + let mut retry_reader = RetryReader::new(&mut bound_reader); + let mut page_id = None; + let mut expect_eof = false; + + loop { + let pos = retry_reader.position(); + let next_msg: Result = read_next(&mut retry_reader); + match next_msg { + Ok(tx) => { + if expect_eof { + // this should have failed + test_debug!("Expected EOF; got transaction {}", tx.txid()); + return Err(net_error::ExpectedEndOfStream); + } + + test_debug!("Read transaction {}", tx.txid()); + txs.push(tx); + Ok(()) + } + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if expect_eof { + if pos != retry_reader.position() { + // read partial data. The stream is corrupt. + test_debug!( + "Expected EOF; stream advanced from {} to {}", + pos, + retry_reader.position() + ); + return Err(net_error::ExpectedEndOfStream); + } + } else { + // couldn't read a full transaction. This is possibly a page ID, whose + // 32 bytes decode to the prefix of a well-formed transaction. 
+ test_debug!("Try to read page ID trailer after ReadError"); + page_id = parse_mempool_query_page_id(pos, &mut retry_reader)?; + } + break; + } + _ => Err(e), + }, + codec_error::DeserializeError(_msg) => { + if expect_eof { + // this should have failed due to EOF + test_debug!("Expected EOF; got DeserializeError '{}'", &_msg); + return Err(net_error::ExpectedEndOfStream); + } + + // failed to parse a transaction. This is possibly a page ID. + test_debug!("Try to read page ID trailer after ReadError"); + page_id = parse_mempool_query_page_id(pos, &mut retry_reader)?; + + // do one more pass to make sure we're actually end-of-stream. + // otherwise, the stream itself was corrupt, since any 32 bytes is a valid + // txid and the presence of more bytes means that we simply got a bad tx + // that we couldn't decode. + expect_eof = true; + Ok(()) + } + _ => Err(e), + }, + }?; + } + + Ok((txs, page_id)) +} + pub struct MemPoolAdmitter { cur_block: BlockHeaderHash, cur_consensus_hash: ConsensusHash, @@ -1212,6 +1375,21 @@ impl MemPoolDB { }) } + pub fn reopen(&self, readwrite: bool) -> Result { + if let Err(e) = fs::metadata(&self.path) { + return Err(db_error::IOError(e)); + } + + let open_flags = if readwrite { + OpenFlags::SQLITE_OPEN_READ_WRITE + } else { + OpenFlags::SQLITE_OPEN_READ_ONLY + }; + + let conn = sqlite_open(&self.path, open_flags, true)?; + Ok(conn) + } + /// Open the mempool db within the chainstate directory. /// The chainstate must be instantiated already. pub fn open( @@ -2445,6 +2623,24 @@ impl MemPoolDB { query_row(&self.conn(), sql, args) } + pub fn find_next_missing_transactions( + &self, + data: &MemPoolSyncData, + height: u64, + last_randomized_txid: &Txid, + max_txs: u64, + max_run: u64, + ) -> Result<(Vec, Option, u64), db_error> { + Self::static_find_next_missing_transactions( + self.conn(), + data, + height, + last_randomized_txid, + max_txs, + max_run, + ) + } + /// Get the next batch of transactions from our mempool that are *not* represented in the given /// MemPoolSyncData. Transactions are ordered lexicographically by randomized_txids.hashed_txid, since this allows us /// to use the txid as a cursor while ensuring that each node returns txids in a deterministic random order @@ -2452,8 +2648,8 @@ impl MemPoolDB { /// a requesting node will still have a good chance of getting something useful). /// Also, return the next value to pass for `last_randomized_txid` to load the next page. /// Also, return the number of rows considered. - pub fn find_next_missing_transactions( - &self, + pub fn static_find_next_missing_transactions( + conn: &DBConn, data: &MemPoolSyncData, height: u64, last_randomized_txid: &Txid, @@ -2483,7 +2679,7 @@ impl MemPoolDB { } } - let mut stmt = self.conn().prepare(sql)?; + let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; let mut num_rows_visited = 0; let mut next_page = None; @@ -2528,130 +2724,4 @@ impl MemPoolDB { Ok((ret, next_page, num_rows_visited)) } - - /// Stream transaction data. - /// Send back one transaction at a time. 
- pub fn stream_txs( - &self, - fd: &mut W, - query: &mut TxStreamData, - count: u64, - ) -> Result { - let mut num_written = 0; - while num_written < count { - // write out bufferred tx - let start = query.tx_buf_ptr; - let end = cmp::min(query.tx_buf.len(), ((start as u64) + count) as usize); - fd.write_all(&query.tx_buf[start..end]) - .map_err(ChainstateError::WriteError)?; - - let nw = end.saturating_sub(start) as u64; - - query.tx_buf_ptr = end; - num_written += nw; - - if query.tx_buf_ptr >= query.tx_buf.len() { - if query.corked { - // we're done - test_debug!( - "Finished streaming txs; last page was {:?}", - &query.last_randomized_txid - ); - break; - } - - if query.num_txs >= query.max_txs { - // no more space in this stream - debug!( - "No more space in this query after {:?}. Corking tx stream.", - &query.last_randomized_txid - ); - - // send the next page ID - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - - query - .last_randomized_txid - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - continue; - } - - // load next - let remaining = query.max_txs.saturating_sub(query.num_txs); - let (next_txs, next_last_randomized_txid_opt, num_rows_visited) = self - .find_next_missing_transactions( - &query.tx_query, - query.height, - &query.last_randomized_txid, - 1, - remaining, - )?; - - debug!( - "Streaming mempool propagation stepped"; - "rows_visited" => num_rows_visited, - "last_rand_txid" => %query.last_randomized_txid, - "num_txs" => query.num_txs, - "max_txs" => query.max_txs - ); - - query.num_txs += num_rows_visited; - if next_txs.len() > 0 { - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - - for next_tx in next_txs.iter() { - next_tx - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - } - if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { - query.last_randomized_txid = next_last_randomized_txid; - } else { - test_debug!( - "No more txs after {}", - &next_txs - .last() - .map(|tx| tx.txid()) - .unwrap_or(Txid([0u8; 32])) - ); - break; - } - } else if let Some(next_txid) = next_last_randomized_txid_opt { - test_debug!( - "No rows returned for {}; cork tx stream with next page {}", - &query.last_randomized_txid, - &next_txid - ); - - // no rows found - query.last_randomized_txid = next_txid; - - // send the next page ID - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - - query - .last_randomized_txid - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - } else if next_last_randomized_txid_opt.is_none() { - // no more transactions - test_debug!( - "No more txs to send after {:?}; corking stream", - &query.last_randomized_txid - ); - - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - } - } - } - Ok(num_written) - } } From fcd364e0efc90e3505ca85c128b5fcd4065c3e60 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:50:54 -0400 Subject: [PATCH 016/107] refactor: remove dead unit tests --- stackslib/src/core/tests/mod.rs | 351 +------------------------------- 1 file changed, 2 insertions(+), 349 deletions(-) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index ea713481cd..c4ce530edd 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -40,15 +40,14 @@ use crate::chainstate::stacks::{ C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::core::mempool::db_get_all_nonces; +use 
crate::core::mempool::decode_tx_stream; +use crate::core::mempool::MemPoolSyncData; use crate::core::mempool::MemPoolWalkSettings; use crate::core::mempool::TxTag; use crate::core::mempool::{BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS}; use crate::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use crate::core::FIRST_STACKS_BLOCK_HASH; -use crate::net::stream::StreamCursor; use crate::net::Error as NetError; -use crate::net::HttpResponseType; -use crate::net::MemPoolSyncData; use crate::util_lib::bloom::test::setup_bloom_counter; use crate::util_lib::bloom::*; use crate::util_lib::db::{tx_begin_immediate, DBConn, FromRow}; @@ -2447,352 +2446,6 @@ fn test_find_next_missing_transactions() { assert!(next_page_opt.is_none()); } -#[test] -fn test_stream_txs() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let chainstate_path = chainstate_path(function_name!()); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - let addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - let mut txs = vec![]; - let block_height = 10; - let mut total_len = 0; - - let mut mempool_tx = mempool.tx_begin().unwrap(); - for i in 0..10 { - let pk = StacksPrivateKey::new(); - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - total_len += tx_bytes.len(); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1 + (block_height as u8); 20]), - &BlockHeaderHash([0x2 + (block_height as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - block_height as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - txs.push(tx); - } - mempool_tx.commit().unwrap(); - - let mut buf = vec![]; - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], vec![]), - MAX_BLOOM_COUNTER_TXS.into(), - block_height, - Some(Txid([0u8; 32])), - ); - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - eprintln!("Read {} bytes of tx data", buf.len()); - - // buf decodes to the list of txs we have - let mut decoded_txs = vec![]; - let mut ptr = &buf[..]; - loop { - let tx: StacksTransaction = match read_next::(&mut ptr) { - Ok(tx) => tx, - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - eprintln!("out of transactions"); - break; - } - _ => { - panic!("IO error: {:?}", &e); - } - }, - _ => { - panic!("other error: 
{:?}", &e); - } - }, - }; - decoded_txs.push(tx); - } - - let mut tx_set = HashSet::new(); - for tx in txs.iter() { - tx_set.insert(tx.txid()); - } - - // the order won't be preserved - assert_eq!(tx_set.len(), decoded_txs.len()); - for tx in decoded_txs { - assert!(tx_set.contains(&tx.txid())); - } - - // verify that we can stream through pagination, with an empty tx tags - let mut page_id = Txid([0u8; 32]); - let mut decoded_txs = vec![]; - loop { - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], vec![]), - 1, - block_height, - Some(page_id), - ); - - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - let mut buf = vec![]; - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - // buf decodes to the list of txs we have, plus page ids - let mut ptr = &buf[..]; - test_debug!("Decode {}", to_hex(ptr)); - let (mut next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); - - decoded_txs.append(&mut next_txs); - - // for fun, use a page ID that is actually a well-formed prefix of a transaction - if let Some(ref tx) = decoded_txs.last() { - let mut evil_buf = tx.serialize_to_vec(); - let mut evil_page_id = [0u8; 32]; - evil_page_id.copy_from_slice(&evil_buf[0..32]); - evil_buf.extend_from_slice(&evil_page_id); - - test_debug!("Decode evil buf {}", &to_hex(&evil_buf)); - - let (evil_next_txs, evil_next_page) = - HttpResponseType::decode_tx_stream(&mut &evil_buf[..], None).unwrap(); - - // should still work - assert_eq!(evil_next_txs.len(), 1); - assert_eq!(evil_next_txs[0].txid(), tx.txid()); - assert_eq!(evil_next_page.unwrap().0[0..32], evil_buf[0..32]); - } - - if let Some(next_page) = next_page { - page_id = next_page; - } else { - break; - } - } - - // make sure we got them all - let mut tx_set = HashSet::new(); - for tx in txs.iter() { - tx_set.insert(tx.txid()); - } - - // the order won't be preserved - assert_eq!(tx_set.len(), decoded_txs.len()); - for tx in decoded_txs { - assert!(tx_set.contains(&tx.txid())); - } - - // verify that we can stream through pagination, with a full bloom filter - let mut page_id = Txid([0u8; 32]); - let all_txs_tags: Vec<_> = txs - .iter() - .map(|tx| TxTag::from(&[0u8; 32], &tx.txid())) - .collect(); - loop { - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], all_txs_tags.clone()), - 1, - block_height, - Some(page_id), - ); - - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - let mut buf = vec![]; - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - // buf decodes to an empty list of txs, plus page ID - let mut ptr = &buf[..]; - test_debug!("Decode {}", to_hex(ptr)); - let (next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); - - assert_eq!(next_txs.len(), 0); - - if let Some(next_page) = next_page { - page_id = next_page; - } else { - break; - } - } -} - -#[test] -fn test_decode_tx_stream() { - let addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - let mut txs = vec![]; - for _i in 0..10 { - let pk = StacksPrivateKey::new(); - let mut tx = 
StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - txs.push(tx); - } - - // valid empty tx stream - let empty_stream = [0x11u8; 32]; - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut empty_stream.as_ref(), None).unwrap(); - assert_eq!(next_txs.len(), 0); - assert_eq!(next_page, Some(Txid([0x11; 32]))); - - // valid tx stream with a page id at the end - let mut tx_stream: Vec = vec![]; - for tx in txs.iter() { - tx.consensus_serialize(&mut tx_stream).unwrap(); - } - tx_stream.extend_from_slice(&[0x22; 32]); - - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut &tx_stream[..], None).unwrap(); - assert_eq!(next_txs, txs); - assert_eq!(next_page, Some(Txid([0x22; 32]))); - - // valid tx stream with _no_ page id at the end - let mut partial_stream: Vec = vec![]; - txs[0].consensus_serialize(&mut partial_stream).unwrap(); - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut &partial_stream[..], None).unwrap(); - assert_eq!(next_txs.len(), 1); - assert_eq!(next_txs[0], txs[0]); - assert!(next_page.is_none()); - - // garbage tx stream - let garbage_stream = [0xff; 256]; - let err = HttpResponseType::decode_tx_stream(&mut garbage_stream.as_ref(), None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } - - // tx stream that is too short - let short_stream = [0x33u8; 33]; - let err = HttpResponseType::decode_tx_stream(&mut short_stream.as_ref(), None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } - - // tx stream has a tx, a page ID, and then another tx - let mut interrupted_stream = vec![]; - txs[0].consensus_serialize(&mut interrupted_stream).unwrap(); - interrupted_stream.extend_from_slice(&[0x00u8; 32]); - txs[1].consensus_serialize(&mut interrupted_stream).unwrap(); - - let err = HttpResponseType::decode_tx_stream(&mut &interrupted_stream[..], None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } -} - #[test] fn test_drop_and_blacklist_txs_by_time() { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); From cd716fe1df5f9c40c03f1e88c2fe1d5188955ce4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:51:05 -0400 Subject: [PATCH 017/107] chore: API sync --- stackslib/src/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bbeb297e74..0763814e55 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -73,7 +73,7 @@ use blockstack_lib::codec::StacksMessageCodec; use blockstack_lib::core::*; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::{db::LocalPeer, p2p::PeerNetwork, PeerAddress}; +use blockstack_lib::net::{db::LocalPeer, p2p::PeerNetwork}; use blockstack_lib::types::chainstate::StacksAddress; use blockstack_lib::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksBlockId, @@ -102,6 +102,7 @@ use 
std::fs::{File, OpenOptions}; use std::io::BufReader; use libstackerdb::StackerDBChunkData; +use stacks_common::types::net::PeerAddress; fn main() { let mut argv: Vec = env::args().collect(); From 0a97fac68725ffed43982bb109c8306d48b7d4fa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:51:16 -0400 Subject: [PATCH 018/107] chore: API sync --- stackslib/src/monitoring/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index bbc05f3064..c136218774 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -24,7 +24,7 @@ use crate::util_lib::db::Error as DatabaseError; use crate::{ burnchains::Txid, core::MemPoolDB, - net::{Error as net_error, HttpRequestType}, + net::{httpcore::StacksHttpRequest, Error as net_error}, util::get_epoch_time_secs, util_lib::db::{tx_busy_handler, DBConn}, }; @@ -49,11 +49,11 @@ pub fn increment_rpc_calls_counter() { } pub fn instrument_http_request_handler( - req: HttpRequestType, + req: StacksHttpRequest, handler: F, ) -> Result where - F: FnOnce(HttpRequestType) -> Result, + F: FnOnce(StacksHttpRequest) -> Result, { #[cfg(feature = "monitoring_prom")] increment_rpc_calls_counter(); From 6ac7ae4e3842133e9ee5eb4b41cc39219b54ad46 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:51:25 -0400 Subject: [PATCH 019/107] chore: remove dead import --- stackslib/src/net/asn.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index 9a7612a292..3bcd91def9 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -19,7 +19,6 @@ use std::io::BufRead; use std::io::BufReader; use crate::net::Error as net_error; -use crate::net::PeerAddress; use regex::Captures; use regex::Regex; From 49b45941911f7f8e80200e78a8970709b8a38f95 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:51:45 -0400 Subject: [PATCH 020/107] chore: cargo fmt --- stackslib/src/net/atlas/db.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 4270caa00f..6c773b291e 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -660,10 +660,7 @@ impl AtlasDB { Ok(rows) } - pub fn find_attachment( - &mut self, - content_hash: &Hash160, - ) -> Result, db_error> { + pub fn find_attachment(&self, content_hash: &Hash160) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); From 07cf31430a3de80cc3b286ed6b5f67abf833673f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:52:08 -0400 Subject: [PATCH 021/107] refactor: use new StacksHttpRequest HTTP request constructors --- stackslib/src/net/atlas/download.rs | 78 +++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index 9efe57efde..74a02c2415 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -24,20 +24,20 @@ use std::net::{IpAddr, SocketAddr}; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::StacksChainState; use crate::net::atlas::MAX_RETRY_DELAY; +use crate::net::atlas::{GetAttachmentResponse, GetAttachmentsInvResponse}; use crate::net::connection::ConnectionOptions; use crate::net::dns::*; use 
crate::net::p2p::PeerNetwork; use crate::net::server::HttpPeer; use crate::net::Error as net_error; use crate::net::NeighborKey; -use crate::net::{GetAttachmentResponse, GetAttachmentsInvResponse}; -use crate::net::{HttpRequestMetadata, HttpRequestType, HttpResponseType, PeerHost, Requestable}; +use crate::net::Requestable; use crate::types::chainstate::StacksBlockId; use crate::util_lib::db::Error as DBError; use crate::util_lib::strings; use crate::util_lib::strings::UrlString; use clarity::vm::types::QualifiedContractIdentifier; -use stacks_common::util::hash::{Hash160, MerkleHashFunc}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::types::chainstate::BlockHeaderHash; @@ -50,6 +50,13 @@ use std::cmp; use crate::core::mempool::MemPoolDB; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; + +use crate::net::http::HttpRequestContents; + +use serde::ser::Serialize; +use stacks_common::types::net::PeerHost; + #[derive(Debug)] pub struct AttachmentsDownloader { priority_queue: BinaryHeap, @@ -522,7 +529,15 @@ impl AttachmentsBatchStateContext { .peers .get_mut(request.get_url()) .expect("Atlas: unable to retrieve reliability report for peer"); - if let Some(HttpResponseType::GetAttachmentsInv(_, response)) = response { + + let response = if let Some(r) = response { + r + } else { + report.bump_failed_requests(); + continue; + }; + + if let Ok(response) = response.decode_atlas_attachments_inv_response() { let peer_url = request.get_url().clone(); match self.inventories.entry(request.key()) { Entry::Occupied(responses) => { @@ -558,7 +573,15 @@ impl AttachmentsBatchStateContext { .peers .get_mut(request.get_url()) .expect("Atlas: unable to retrieve reliability report for peer"); - if let Some(HttpResponseType::GetAttachment(_, response)) = response { + + let response = if let Some(r) = response { + r + } else { + report.bump_failed_requests(); + continue; + }; + + if let Ok(response) = response.decode_atlas_get_attachment() { self.attachments.insert(response.attachment); report.bump_successful_requests(); } else { @@ -929,14 +952,13 @@ impl BatchedRequestsState } Some(response) => { let peer_url = request.get_url().clone(); - - if let HttpResponseType::NotFound(_, _) = response { + if response.preamble().status_code == 404 { state.faulty_peers.insert(event_id, peer_url); continue; } debug!( - "Atlas: Request {} (event_id: {}) received response {:?}", - request, event_id, response + "Atlas: Request {} (event_id: {}) received HTTP 200", + request, event_id ); state.succeeded.insert(request, Some(response)); } @@ -990,7 +1012,7 @@ struct BatchedRequestsInitializedState { #[derive(Debug, Default)] pub struct BatchedRequestsResult { pub remaining: HashMap, - pub succeeded: HashMap>, + pub succeeded: HashMap>, pub errors: HashMap, pub faulty_peers: HashMap, } @@ -1062,16 +1084,27 @@ impl Requestable for AttachmentsInventoryRequest { &self.url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { - let mut pages_indexes = HashSet::new(); + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { + let mut page_indexes = HashSet::new(); for page in self.pages.iter() { - pages_indexes.insert(*page); + page_indexes.insert(*page); } - HttpRequestType::GetAttachmentsInv( - HttpRequestMetadata::from_host(peer_host, self.canonical_stacks_tip_height), - self.index_block_hash, - pages_indexes, + let page_list: Vec = page_indexes + .into_iter() + .map(|i| 
format!("{}", &i)) + .collect(); + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + "/v2/attachments/inv".into(), + HttpRequestContents::new() + .query_arg( + "index_block_hash".into(), + format!("{}", &self.index_block_hash), + ) + .query_arg("page_indexes".into(), page_list[..].join(",")), ) + .expect("FATAL: failed to create an HTTP request for infallible data") } } @@ -1127,11 +1160,14 @@ impl Requestable for AttachmentRequest { url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { - HttpRequestType::GetAttachment( - HttpRequestMetadata::from_host(peer_host, self.canonical_stacks_tip_height), - self.content_hash, + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".to_string(), + format!("/v2/attachments/{}", &self.content_hash), + HttpRequestContents::new(), ) + .expect("FATAL: failed to create an HTTP request for infallible data") } } From 898aeba9371d6036395a53201dbe76f53ed70c5a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:52:35 -0400 Subject: [PATCH 022/107] refactor: put Atlas HTTP response types into the Atlas system --- stackslib/src/net/atlas/mod.rs | 39 +++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c620115181..6d6e42f26c 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -29,13 +29,16 @@ use crate::chainstate::burn::ConsensusHash; use crate::types::chainstate::StacksBlockId; use crate::util_lib::boot::boot_code_id; use clarity::vm::types::{QualifiedContractIdentifier, SequenceData, TupleData, Value}; -use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc}; +use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, MerkleHashFunc}; use crate::types::chainstate::BlockHeaderHash; pub use self::db::AtlasDB; pub use self::download::AttachmentsDownloader; +use serde::de::{Deserialize, Error as de_Error}; +use serde::ser::Serialize; + /// Implements AtlasDB and associated API. Stores information about attachments and attachment /// instances. 
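As a standalone usage sketch of the constructor pattern above: the function name, peer host, block ID, and page indexes below are made up for illustration, while new_for_peer, query_arg, and from_host_port are the calls this patch actually uses.

use stacks_common::types::chainstate::StacksBlockId;
use stacks_common::types::net::PeerHost;

use crate::net::http::HttpRequestContents;
use crate::net::httpcore::StacksHttpRequest;

// Hypothetical helper mirroring AttachmentsInventoryRequest::make_request_type():
// a GET to /v2/attachments/inv with the target block and the wanted page
// indexes passed as query arguments.
fn example_attachments_inv_request() -> StacksHttpRequest {
    // placeholder peer and block ID, for illustration only
    let peer_host = PeerHost::from_host_port("127.0.0.1".to_string(), 20443);
    let index_block_hash = StacksBlockId([0x11; 32]);

    StacksHttpRequest::new_for_peer(
        peer_host,
        "GET".into(),
        "/v2/attachments/inv".into(),
        HttpRequestContents::new()
            .query_arg(
                "index_block_hash".into(),
                format!("{}", &index_block_hash),
            )
            .query_arg("page_indexes".into(), "0,1,2".into()),
    )
    .expect("FATAL: failed to create an HTTP request for infallible data")
}

The request parameters now travel as query arguments on a plain path, rather than as fields of a typed HttpRequestType variant, which is the substance of this refactor.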
pub mod db; @@ -59,6 +62,40 @@ const MAX_UNINSTANTIATED_ATTACHMENTS_MIN: u32 = 50_000; const UNINSTANTIATED_ATTACHMENTS_EXPIRE_AFTER_MIN: u32 = 86_400; const UNRESOLVED_ATTACHMENT_INSTANCES_EXPIRE_AFTER_MIN: u32 = 172_800; +#[derive(Debug, Clone, PartialEq)] +pub struct GetAttachmentResponse { + pub attachment: Attachment, +} + +impl Serialize for GetAttachmentResponse { + fn serialize(&self, s: S) -> Result { + let hex_encoded = to_hex(&self.attachment.content[..]); + s.serialize_str(hex_encoded.as_str()) + } +} + +impl<'de> Deserialize<'de> for GetAttachmentResponse { + fn deserialize>(d: D) -> Result { + let payload = String::deserialize(d)?; + let hex_encoded = payload.parse::().map_err(de_Error::custom)?; + let bytes = hex_bytes(&hex_encoded).map_err(de_Error::custom)?; + let attachment = Attachment::new(bytes); + Ok(GetAttachmentResponse { attachment }) + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GetAttachmentsInvResponse { + pub block_id: StacksBlockId, + pub pages: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AttachmentPage { + pub index: u32, + pub inventory: Vec, +} + #[derive(Debug, Clone)] pub struct AtlasConfig { pub contracts: HashSet, From 046d386432141997107b1920805671090d269500 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:52:59 -0400 Subject: [PATCH 023/107] refactor: use new StacksHttpRequest and StacksHttpResposne types --- stackslib/src/net/atlas/tests.rs | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index efb5a397aa..ca06c343c1 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -23,23 +23,26 @@ use crate::burnchains::Txid; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::StacksChainState; use crate::net::connection::ConnectionOptions; -use crate::net::{ - AttachmentPage, GetAttachmentsInvResponse, HttpResponseMetadata, HttpResponseType, HttpVersion, - PeerHost, Requestable, -}; +use crate::net::Requestable; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::u64_to_sql; use crate::util_lib::strings::UrlString; use clarity::vm::types::QualifiedContractIdentifier; use stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::Hash160; +use crate::net::http::{HttpResponsePayload, HttpResponsePreamble, HttpVersion}; +use crate::net::httpcore::StacksHttpResponse; + +use crate::net::atlas::GetAttachmentsInvResponse; + use super::download::{ AttachmentRequest, AttachmentsBatch, AttachmentsBatchStateContext, AttachmentsInventoryRequest, BatchedRequestsResult, ReliabilityReport, }; -use super::{AtlasConfig, AtlasDB, Attachment, AttachmentInstance}; +use super::{AtlasConfig, AtlasDB, Attachment, AttachmentInstance, AttachmentPage}; fn new_attachment_from(content: &str) -> Attachment { Attachment { @@ -131,8 +134,7 @@ fn new_attachments_inventory_request( } } -fn new_attachments_inventory_response(pages: Vec<(u32, Vec)>) -> HttpResponseType { - let md = HttpResponseMetadata::new(HttpVersion::Http11, 1, None, true, None); +fn new_attachments_inventory_response(pages: Vec<(u32, Vec)>) -> StacksHttpResponse { let pages = pages .into_iter() .map(|(index, inventory)| AttachmentPage { index, inventory }) @@ -141,7 +143,14 @@ fn new_attachments_inventory_response(pages: Vec<(u32, 
Vec)>) -> HttpRespons block_id: StacksBlockId([0u8; 32]), pages, }; - HttpResponseType::GetAttachmentsInv(md, response) + + let response_json = serde_json::to_value(&response).unwrap(); + let body = HttpResponsePayload::try_from_json(response_json).unwrap(); + + StacksHttpResponse::new( + HttpResponsePreamble::raw_ok_json(HttpVersion::Http11, false), + body, + ) } #[test] From e663ebd94b53d767c23beb42b0ada0c964c3a421 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:53:14 -0400 Subject: [PATCH 024/107] chore: clean up compiler warnings --- stackslib/src/net/chat.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 721b58d456..6e07864433 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -54,13 +54,13 @@ use crate::net::GetBlocksInv; use crate::net::GetPoxInv; use crate::net::Neighbor; use crate::net::NeighborKey; -use crate::net::PeerAddress; use crate::net::StacksMessage; use crate::net::StacksP2P; use crate::net::GETPOXINV_MAX_BITLEN; use crate::net::*; use crate::util_lib::db::DBConn; use crate::util_lib::db::Error as db_error; +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; use stacks_common::util::log; @@ -718,13 +718,13 @@ impl ConversationP2P { chain_view: &BurnchainView, ) -> bool { let bhh = match chain_view.last_burn_block_hashes.get(&block_height) { - Some(ref bhh) => bhh.clone(), + Some(bhh) => bhh, None => { // not present; can't prove disagreement (assume the remote peer is just stale) return false; } }; - if *bhh != *their_burn_header_hash { + if bhh != their_burn_header_hash { test_debug!( "Burn header hash mismatch in preamble: {} != {}", bhh, From 4efec5d5ae72b5da34649d2c7f4e64d3a92342a9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:53:24 -0400 Subject: [PATCH 025/107] refactor: MemPoolSyncData codec lives in the mempool modules --- stackslib/src/net/codec.rs | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index 32f304e4d6..4ea752b196 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -717,41 +717,6 @@ impl StacksMessageCodec for NatPunchData { } } -impl StacksMessageCodec for MemPoolSyncData { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - match *self { - MemPoolSyncData::BloomFilter(ref bloom_filter) => { - write_next(fd, &MemPoolSyncDataID::BloomFilter.to_u8())?; - write_next(fd, bloom_filter)?; - } - MemPoolSyncData::TxTags(ref seed, ref tags) => { - write_next(fd, &MemPoolSyncDataID::TxTags.to_u8())?; - write_next(fd, seed)?; - write_next(fd, tags)?; - } - } - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let data_id: u8 = read_next(fd)?; - match MemPoolSyncDataID::from_u8(data_id).ok_or(codec_error::DeserializeError(format!( - "Unrecognized MemPoolSyncDataID {}", - &data_id - )))? 
{ - MemPoolSyncDataID::BloomFilter => { - let bloom_filter: BloomFilter = read_next(fd)?; - Ok(MemPoolSyncData::BloomFilter(bloom_filter)) - } - MemPoolSyncDataID::TxTags => { - let seed: [u8; 32] = read_next(fd)?; - let txtags: Vec = read_next(fd)?; - Ok(MemPoolSyncData::TxTags(seed, txtags)) - } - } - } -} - fn contract_id_consensus_serialize( fd: &mut W, cid: &QualifiedContractIdentifier, From 760eb25f243d2745575149e3a53cac369b9a9726 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:53:43 -0400 Subject: [PATCH 026/107] feat: add the ability to flush data to a reply handle's pipe's read endpoint, without destroying the write endpoint. This lets the caller send data to the read endpoint as it's being generated, so the read endpoint can consume it concurrently. --- stackslib/src/net/connection.rs | 44 ++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 7ecffa9575..3900149bc8 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -34,18 +34,10 @@ use std::sync::mpsc::TrySendError; use mio; use mio::net as mio_net; -use crate::codec::StacksMessageCodec; use crate::codec::MAX_MESSAGE_LEN; -use crate::core::mempool::MAX_BLOOM_COUNTER_TXS; -use crate::net::codec::*; use crate::net::Error as net_error; -use crate::net::HttpRequestPreamble; -use crate::net::HttpResponsePreamble; use crate::net::MessageSequence; -use crate::net::PeerAddress; -use crate::net::Preamble; use crate::net::ProtocolFamily; -use crate::net::RelayData; use crate::net::StacksHttp; use crate::net::StacksP2P; @@ -59,8 +51,7 @@ use crate::net::neighbors::{ use clarity::vm::{costs::ExecutionCost, types::BOUND_VALUE_SERIALIZATION_HEX}; -use crate::chainstate::burn::ConsensusHash; - +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; use stacks_common::util::log; @@ -91,7 +82,7 @@ impl ReceiverNotify
<P>
{ /// Send this message to the waiting receiver, consuming this notification handle. /// May fail silently. - pub fn send(self, msg: P::Message) -> () { + pub fn send(self, msg: P::Message) { let msg_name = msg.get_message_name().to_string(); let msg_id = msg.request_id(); match self.receiver_input.send(msg) { @@ -243,25 +234,37 @@ impl NetworkReplyHandle
<P>
{ } } - /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe. - /// Only call this once you're done sending -- this is just to move the data along. - /// Return true if we're done sending; false if we need to call this again. - pub fn try_flush(&mut self) -> Result { + /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe if + /// `drop_on_success` is true. Returns `true` if we drained the write end, `false` if not. + pub fn try_flush_ex(&mut self, drop_on_success: bool) -> Result { + let mut ret = false; let fd_opt = match self.request_pipe_write.take() { Some(mut fd) => { - let res = fd.try_flush().map_err(net_error::WriteError)?; - if res { - // all data flushed! + ret = fd.try_flush().map_err(net_error::WriteError)?; + if ret && drop_on_success { + // all data flushed, and we won't send more. None } else { - // still have data to send + // still have data to send, or we will send more. Some(fd) } } None => None, }; self.request_pipe_write = fd_opt; - Ok(self.request_pipe_write.is_none()) + Ok(ret) + } + + /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe. + /// Only call this once you're done sending -- this is just to move the data along. + /// Return true if we're done sending; false if we need to call this again. + pub fn try_flush(&mut self) -> Result { + self.try_flush_ex(true) + } + + /// Get a mutable reference to the inner pipe, if we have it + pub fn inner_pipe_out(&mut self) -> Option<&mut PipeWrite> { + self.request_pipe_write.as_mut() } } @@ -1059,6 +1062,7 @@ impl ConnectionOutbox
<P>
{ let _nr_input = match self.pending_message_fd { Some(ref mut message_fd) => { // consume from message-writer until we're out of data + // TODO: make this configurable let mut buf = [0u8; 8192]; let nr_input = match message_fd.read(&mut buf) { Ok(0) => { From 8bf6c586f0388a74aa1cba67712e0481b13d1aff Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:54:46 -0400 Subject: [PATCH 027/107] refactor: expose getter for peer stackerdbs --- stackslib/src/net/db.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 90bbf9c969..a11627a638 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -62,7 +62,6 @@ use crate::net::asn::ASEntry4; use crate::net::Neighbor; use crate::net::NeighborAddress; use crate::net::NeighborKey; -use crate::net::PeerAddress; use crate::net::ServiceFlags; use crate::burnchains::PrivateKey; @@ -72,16 +71,12 @@ use crate::core::NETWORK_P2P_PORT; use crate::util_lib::strings::UrlString; +use stacks_common::types::net::PeerAddress; + pub const PEERDB_VERSION: &'static str = "2"; const NUM_SLOTS: usize = 8; -impl PeerAddress { - pub fn to_bin(&self) -> String { - to_bin(&self.0) - } -} - impl FromColumn for PeerAddress { fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { let addrbytes_bin: String = row.get_unwrap(column_name); @@ -1360,7 +1355,7 @@ impl PeerDB { } /// Get a peer's advertized stacker DBs - fn static_get_peer_stacker_dbs( + pub fn static_get_peer_stacker_dbs( conn: &Connection, neighbor: &Neighbor, ) -> Result, db_error> { @@ -1834,7 +1829,7 @@ mod test { use super::*; use crate::net::Neighbor; use crate::net::NeighborKey; - use crate::net::PeerAddress; + use stacks_common::types::net::PeerAddress; use stacks_common::types::chainstate::StacksAddress; From 1bcc75db5e1e8e8c3a1778b6c2afc5928bf233cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:55:09 -0400 Subject: [PATCH 028/107] chore: API sync --- stackslib/src/net/dns.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index f342cf12ef..ac99ee5d41 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -29,7 +29,6 @@ use crate::net::asn::ASEntry4; use crate::net::Error as net_error; use crate::net::Neighbor; use crate::net::NeighborKey; -use crate::net::PeerAddress; use crate::net::codec::*; use crate::net::*; @@ -43,6 +42,7 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; From 0273bbdd79e775604d77444d0518222bc7a13c6b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:55:25 -0400 Subject: [PATCH 029/107] refactor: use new StacksHttpRequest and StacksHttpResponse types for block downloader --- stackslib/src/net/download.rs | 224 +++++++++++++++------------------- 1 file changed, 98 insertions(+), 126 deletions(-) diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 1695b11f0b..b1deee2746 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -42,6 +42,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as chainstate_error; use crate::chainstate::stacks::StacksBlockHeader; +use 
crate::core::mempool::MemPoolDB; use crate::core::EMPTY_MICROBLOCK_PARENT_HASH; use crate::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use crate::core::FIRST_STACKS_BLOCK_HASH; @@ -53,6 +54,8 @@ use crate::net::connection::ReplyHandleHttp; use crate::net::db::PeerDB; use crate::net::db::*; use crate::net::dns::*; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::inv::InvState; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::net::p2p::PeerNetwork; @@ -62,13 +65,13 @@ use crate::net::Error as net_error; use crate::net::GetBlocksInv; use crate::net::Neighbor; use crate::net::NeighborKey; -use crate::net::PeerAddress; use crate::net::StacksMessage; use crate::net::StacksP2P; use crate::net::*; use crate::types::chainstate::StacksBlockId; use crate::util_lib::db::DBConn; use crate::util_lib::db::Error as db_error; +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; @@ -147,6 +150,28 @@ impl BlockRequestKey { canonical_stacks_tip_height, } } + + /// Make a request for a block + fn make_getblock_request(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + format!("/v2/blocks/{}", &self.index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to create HTTP request for infallible data") + } + + /// Make a request for a stream of confirmed microblocks + fn make_confirmed_microblocks_request(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + format!("/v2/microblocks/confirmed/{}", &self.index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to create HTTP request for infallible data") + } } impl Requestable for BlockRequestKey { @@ -154,20 +179,11 @@ impl Requestable for BlockRequestKey { &self.data_url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { match self.kind { - BlockRequestKeyKind::Block => HttpRequestType::GetBlock( - HttpRequestMetadata::from_host(peer_host, Some(self.canonical_stacks_tip_height)), - self.index_block_hash, - ), + BlockRequestKeyKind::Block => self.make_getblock_request(peer_host), BlockRequestKeyKind::ConfirmedMicroblockStream => { - HttpRequestType::GetMicroblocksConfirmed( - HttpRequestMetadata::from_host( - peer_host, - Some(self.canonical_stacks_tip_height), - ), - self.index_block_hash, - ) + self.make_confirmed_microblocks_request(peer_host) } } } @@ -499,47 +515,44 @@ impl BlockDownloader { debug!("Event {} ({:?}, {:?} for block {}) is still waiting for a response", event_id, &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); pending_block_requests.insert(block_key, event_id); } - Some(http_response) => match http_response { - HttpResponseType::Block(_md, block) => { - if StacksBlockHeader::make_index_block_hash( - &block_key.consensus_hash, - &block.block_hash(), - ) != block_key.index_block_hash - { - info!("Invalid block from {:?} ({:?}): did not ask for block {}/{}", &block_key.neighbor, &block_key.data_url, block_key.consensus_hash, block.block_hash()); + Some(http_response) => { + match StacksHttpResponse::decode_block(http_response) { + Ok(block) => { + if StacksBlockHeader::make_index_block_hash( + &block_key.consensus_hash, + &block.block_hash(), + ) != 
block_key.index_block_hash + { + info!("Invalid block from {:?} ({:?}): did not ask for block {}/{}", &block_key.neighbor, &block_key.data_url, block_key.consensus_hash, block.block_hash()); + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } else { + // got the block + debug!( + "Got block {}: {}/{}", + &block_key.sortition_height, + &block_key.consensus_hash, + block.block_hash() + ); + self.blocks.insert(block_key, block); + } + } + Err(net_error::NotFoundError) => { + // remote peer didn't have the block + info!("Remote neighbor {:?} ({:?}) does not actually have block {} indexed at {} ({})", &block_key.neighbor, &block_key.data_url, block_key.sortition_height, &block_key.index_block_hash, &block_key.consensus_hash); + + // the fact that we asked this peer means that it's block inv indicated + // it was present, so the absence is the mark of a broken peer + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } + Err(e) => { + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); - } else { - // got the block - debug!( - "Got block {}: {}/{}", - &block_key.sortition_height, - &block_key.consensus_hash, - block.block_hash() - ); - self.blocks.insert(block_key, block); } } - // TODO: redirect? - HttpResponseType::NotFound(_, _) => { - // remote peer didn't have the block - info!("Remote neighbor {:?} ({:?}) does not actually have block {} indexed at {} ({})", &block_key.neighbor, &block_key.data_url, block_key.sortition_height, &block_key.index_block_hash, &block_key.consensus_hash); - - // the fact that we asked this peer means that it's block inv indicated - // it was present, so the absence is the mark of a broken peer - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - _ => { - // wrong message response - info!( - "Got bad HTTP response from {:?}: {:?}", - &block_key.data_url, &http_response - ); - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - }, + } } } } @@ -633,45 +646,45 @@ impl BlockDownloader { debug!("Event {} ({:?}, {:?} for microblocks built by {:?}) is still waiting for a response", event_id, &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); pending_microblock_requests.insert(rh_block_key, event_id); } - Some(http_response) => match http_response { - HttpResponseType::Microblocks(_md, microblocks) => { - if microblocks.len() == 0 { - // we wouldn't have asked for a 0-length stream - info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + Some(http_response) => { + match StacksHttpResponse::decode_microblocks(http_response) { + Ok(microblocks) => { + if microblocks.len() == 0 { + // we wouldn't have asked for a 0-length stream + info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } else { + // have microblocks (but we don't know yet if they're well-formed) + debug!( + "Got (tentative) microblocks {}: {}/{}-{}", + block_key.sortition_height, + &block_key.consensus_hash, + &block_key.index_block_hash, + microblocks[0].block_hash() + ); + self.microblocks.insert(block_key, microblocks); + } + } + 
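Viewed on its own, the decode step above reduces to the sketch below; decode_block and the NotFoundError mapping are taken from this diff, while the wrapper function and its Option return type are illustrative.

use crate::chainstate::stacks::StacksBlock;
use crate::net::httpcore::StacksHttpResponse;
use crate::net::Error as net_error;

// Hypothetical helper: Ok(Some(block)) on success, Ok(None) when the peer
// answered 404, and any other decode failure is surfaced to the caller
// (the downloader above treats that case as a broken peer).
fn example_decode_block_response(
    response: StacksHttpResponse,
) -> Result<Option<StacksBlock>, net_error> {
    match StacksHttpResponse::decode_block(response) {
        Ok(block) => Ok(Some(block)),
        Err(net_error::NotFoundError) => Ok(None),
        Err(e) => Err(e),
    }
}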
Err(net_error::NotFoundError) => { + // remote peer didn't have the microblock, even though their blockinv said + // they did. + info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); + + // the fact that we asked this peer means that it's block inv indicated + // it was present, so the absence is the mark of a broken peer. + // HOWEVER, there has been some bugs recently about nodes reporting + // invalid microblock streams as present, even though they are + // truly absent. Don't punish these peers with a ban; just don't + // talk to them for a while. + } + Err(e) => { + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); - } else { - // have microblocks (but we don't know yet if they're well-formed) - debug!( - "Got (tentative) microblocks {}: {}/{}-{}", - block_key.sortition_height, - &block_key.consensus_hash, - &block_key.index_block_hash, - microblocks[0].block_hash() - ); - self.microblocks.insert(block_key, microblocks); } } - // TODO: redirect? - HttpResponseType::NotFound(_, _) => { - // remote peer didn't have the microblock, even though their blockinv said - // they did. - info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); - - // the fact that we asked this peer means that it's block inv indicated - // it was present, so the absence is the mark of a broken peer. - // HOWEVER, there has been some bugs recently about nodes reporting - // invalid microblock streams as present, even though they are - // truly absent. Don't punish these peers with a ban; just don't - // talk to them for a while. - } - _ => { - // wrong message response - info!("Got bad HTTP response from {:?}", &block_key.data_url); - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - }, + } } } } @@ -1917,47 +1930,6 @@ impl PeerNetwork { }) } - /// Send a (non-blocking) HTTP request to a remote peer. - /// Returns the event ID on success. - pub fn connect_or_send_http_request( - &mut self, - data_url: UrlString, - addr: SocketAddr, - request: HttpRequestType, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result { - PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { - PeerNetwork::with_http(network, |ref mut network, ref mut http| { - match http.connect_http( - network_state, - network, - data_url.clone(), - addr.clone(), - Some(request.clone()), - ) { - Ok(event_id) => Ok(event_id), - Err(net_error::AlreadyConnected(event_id, _)) => { - match http.get_conversation_and_socket(event_id) { - (Some(ref mut convo), Some(ref mut socket)) => { - convo.send_request(request)?; - HttpPeer::saturate_http_socket(socket, convo, mempool, chainstate)?; - Ok(event_id) - } - (_, _) => { - debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); - Err(net_error::PeerNotConnected) - } - } - } - Err(e) => { - return Err(e); - } - } - }) - }) - } - /// Start a request, given the list of request keys to consider. Use the given request_factory to /// create the HTTP request. Pops requests off the front of request_keys, and returns once it successfully /// sends out a request via the HTTP peer. 
Returns the event ID in the http peer that's From 4e41c2311eb87dcd481a3ff5310efdb2530b6d08 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 15:55:47 -0400 Subject: [PATCH 030/107] refactor: bye-byte http.rs! --- stackslib/src/net/http.rs | 6765 ------------------------------------- 1 file changed, 6765 deletions(-) delete mode 100644 stackslib/src/net/http.rs diff --git a/stackslib/src/net/http.rs b/stackslib/src/net/http.rs deleted file mode 100644 index c55586356f..0000000000 --- a/stackslib/src/net/http.rs +++ /dev/null @@ -1,6765 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::fmt; -use std::io; -use std::io::prelude::*; -use std::io::{Read, Write}; -use std::mem; -use std::net::SocketAddr; -use std::str; -use std::str::FromStr; -use std::time::SystemTime; - -use clarity::vm::representations::MAX_STRING_LEN; -use percent_encoding::percent_decode_str; -use regex::{Captures, Regex}; -use serde::{Deserialize, Serialize}; -use serde_json; -use time; -use url::{form_urlencoded, Url}; - -use libstackerdb::STACKERDB_MAX_CHUNK_SIZE; - -use crate::burnchains::{Address, Txid}; -use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::stacks::{ - StacksBlock, StacksMicroblock, StacksPublicKey, StacksTransaction, -}; -use crate::net::atlas::Attachment; -use crate::net::ClientError; -use crate::net::Error as net_error; -use crate::net::Error::ClarityError; -use crate::net::ExtendedStacksHeader; -use crate::net::HttpContentType; -use crate::net::HttpRequestMetadata; -use crate::net::HttpRequestPreamble; -use crate::net::HttpRequestType; -use crate::net::HttpResponseMetadata; -use crate::net::HttpResponsePreamble; -use crate::net::HttpResponseType; -use crate::net::HttpVersion; -use crate::net::MemPoolSyncData; -use crate::net::MessageSequence; -use crate::net::NeighborAddress; -use crate::net::PeerAddress; -use crate::net::PeerHost; -use crate::net::ProtocolFamily; -use crate::net::StackerDBChunkData; -use crate::net::StacksHttpMessage; -use crate::net::StacksHttpPreamble; -use crate::net::UnconfirmedTransactionResponse; -use crate::net::UnconfirmedTransactionStatus; -use crate::net::HTTP_PREAMBLE_MAX_ENCODED_SIZE; -use crate::net::HTTP_PREAMBLE_MAX_NUM_HEADERS; -use crate::net::HTTP_REQUEST_ID_RESERVED; -use crate::net::MAX_HEADERS; -use crate::net::MAX_MICROBLOCKS_UNCONFIRMED; -use crate::net::{CallReadOnlyRequestBody, TipRequest}; -use crate::net::{GetAttachmentResponse, GetAttachmentsInvResponse, PostTransactionRequestBody}; -use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; -use clarity::vm::types::{StandardPrincipalData, TraitIdentifier}; -use clarity::vm::{ - representations::{ - CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, - 
}, - types::{PrincipalData, QualifiedContractIdentifier, BOUND_VALUE_SERIALIZATION_HEX}, - ClarityName, ContractName, Value, -}; -use stacks_common::util::hash::hex_bytes; -use stacks_common::util::hash::to_hex; -use stacks_common::util::hash::Hash160; -use stacks_common::util::log; -use stacks_common::util::retry::BoundReader; -use stacks_common::util::retry::RetryReader; - -use stacks_common::deps_common::httparse; - -use stacks_common::util::chunked_encoding::*; - -use crate::chainstate::stacks::StacksBlockHeader; -use crate::chainstate::stacks::TransactionPayload; -use crate::codec::{ - read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, - MAX_PAYLOAD_LEN, -}; -use crate::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; - -use super::FeeRateEstimateRequestBody; - -lazy_static! { - static ref PATH_GETINFO: Regex = Regex::new(r#"^/v2/info$"#).unwrap(); - static ref PATH_GETPOXINFO: Regex = Regex::new(r#"^/v2/pox$"#).unwrap(); - static ref PATH_GETNEIGHBORS: Regex = Regex::new(r#"^/v2/neighbors$"#).unwrap(); - static ref PATH_GETHEADERS: Regex = Regex::new(r#"^/v2/headers/([0-9]+)$"#).unwrap(); - static ref PATH_GETBLOCK: Regex = Regex::new(r#"^/v2/blocks/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_INDEXED: Regex = - Regex::new(r#"^/v2/microblocks/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_CONFIRMED: Regex = - Regex::new(r#"^/v2/microblocks/confirmed/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_UNCONFIRMED: Regex = - Regex::new(r#"^/v2/microblocks/unconfirmed/([0-9a-f]{64})/([0-9]{1,5})$"#).unwrap(); - static ref PATH_GETTRANSACTION_UNCONFIRMED: Regex = - Regex::new(r#"^/v2/transactions/unconfirmed/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_POSTTRANSACTION: Regex = Regex::new(r#"^/v2/transactions$"#).unwrap(); - static ref PATH_POST_FEE_RATE_ESIMATE: Regex = Regex::new(r#"^/v2/fees/transaction$"#).unwrap(); - static ref PATH_POSTBLOCK: Regex = Regex::new(r#"^/v2/blocks/upload/([0-9a-f]{40})$"#).unwrap(); - static ref PATH_POSTMICROBLOCK: Regex = Regex::new(r#"^/v2/microblocks$"#).unwrap(); - static ref PATH_GET_ACCOUNT: Regex = Regex::new(&format!( - "^/v2/accounts/(?P{})$", - *PRINCIPAL_DATA_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_DATA_VAR: Regex = Regex::new(&format!( - "^/v2/data_var/(?P

{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONSTANT_VAL: Regex = Regex::new(&format!( - "^/v2/constant_val/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_MAP_ENTRY: Regex = Regex::new(&format!( - "^/v2/map_entry/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_POST_CALL_READ_ONLY: Regex = Regex::new(&format!( - "^/v2/contracts/call-read/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONTRACT_SRC: Regex = Regex::new(&format!( - "^/v2/contracts/source/(?P
{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_IS_TRAIT_IMPLEMENTED: Regex = Regex::new(&format!( - "^/v2/traits/(?P
{})/(?P{})/(?P{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONTRACT_ABI: Regex = Regex::new(&format!( - "^/v2/contracts/interface/(?P
{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_TRANSFER_COST: Regex = Regex::new("^/v2/fees/transfer$").unwrap(); - static ref PATH_GET_ATTACHMENTS_INV: Regex = Regex::new("^/v2/attachments/inv$").unwrap(); - static ref PATH_GET_ATTACHMENT: Regex = - Regex::new(r#"^/v2/attachments/([0-9a-f]{40})$"#).unwrap(); - static ref PATH_POST_MEMPOOL_QUERY: Regex = - Regex::new(r#"^/v2/mempool/query$"#).unwrap(); - static ref PATH_GET_STACKERDB_METADATA: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_GET_STACKERDB_CHUNK: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_GET_STACKERDB_VERSIONED_CHUNK: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)/(?P[0-9]+)$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_POST_STACKERDB_CHUNK: Regex = - Regex::new(&format!( - r#"/v2/stackerdb/(?P
{})/(?P{})/chunks$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_OPTIONS_WILDCARD: Regex = Regex::new("^/v2/.{0,4096}$").unwrap(); -} - -/// HTTP headers that we really care about -#[derive(Debug, Clone, PartialEq)] -pub(crate) enum HttpReservedHeader { - ContentLength(u32), - ContentType(HttpContentType), - XRequestID(u32), - Host(PeerHost), - CanonicalStacksTipHeight(u64), -} - -/// Stacks block accepted struct -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct StacksBlockAcceptedData { - pub stacks_block_id: StacksBlockId, - pub accepted: bool, -} - -impl FromStr for PeerHost { - type Err = net_error; - - fn from_str(header: &str) -> Result { - // we're looser than the RFC allows for DNS names -- anything that doesn't parse to an IP - // address will be parsed to a DNS name. - // try as IP:port - match header.parse::() { - Ok(socketaddr) => Ok(PeerHost::IP( - PeerAddress::from_socketaddr(&socketaddr), - socketaddr.port(), - )), - Err(_) => { - // maybe missing :port - let hostport = format!("{}:80", header); - match hostport.parse::() { - Ok(socketaddr) => Ok(PeerHost::IP( - PeerAddress::from_socketaddr(&socketaddr), - socketaddr.port(), - )), - Err(_) => { - // try as DNS-name:port - let host; - let port; - let parts: Vec<&str> = header.split(":").collect(); - if parts.len() == 0 { - return Err(net_error::DeserializeError( - "Failed to parse PeerHost: no parts".to_string(), - )); - } else if parts.len() == 1 { - // no port - host = Some(parts[0].to_string()); - port = Some(80); - } else { - let np = parts.len(); - if parts[np - 1].chars().all(char::is_numeric) { - // ends in :port - let host_str = parts[0..np - 1].join(":"); - if host_str.len() == 0 { - return Err(net_error::DeserializeError( - "Empty host".to_string(), - )); - } - host = Some(host_str); - - let port_res = parts[np - 1].parse::(); - port = match port_res { - Ok(p) => Some(p), - Err(_) => { - return Err(net_error::DeserializeError( - "Failed to parse PeerHost: invalid port".to_string(), - )); - } - }; - } else { - // only host - host = Some(header.to_string()); - port = Some(80); - } - } - - match (host, port) { - (Some(h), Some(p)) => Ok(PeerHost::DNS(h, p)), - (_, _) => Err(net_error::DeserializeError( - "Failed to parse PeerHost: failed to extract host and/or port" - .to_string(), - )), // I don't think this is reachable - } - } - } - } - } - } -} - -impl HttpReservedHeader { - pub fn is_reserved(header: &str) -> bool { - let hdr = header.to_string(); - match hdr.as_str() { - "content-length" - | "content-type" - | "x-request-id" - | "host" - | "x-canonical-stacks-tip-height" => true, - _ => false, - } - } - - pub fn try_from_str(header: &str, value: &str) -> Option { - let hdr = header.to_string().to_lowercase(); - match hdr.as_str() { - "content-length" => match value.parse::() { - Ok(cl) => Some(HttpReservedHeader::ContentLength(cl)), - Err(_) => None, - }, - "content-type" => match value.parse::() { - Ok(ct) => Some(HttpReservedHeader::ContentType(ct)), - Err(_) => None, - }, - "x-request-id" => match value.parse::() { - Ok(rid) => Some(HttpReservedHeader::XRequestID(rid)), - Err(_) => None, - }, - "host" => match value.parse::() { - Ok(ph) => Some(HttpReservedHeader::Host(ph)), - Err(_) => None, - }, - "x-canonical-stacks-tip-height" => match value.parse::() { - Ok(h) => Some(HttpReservedHeader::CanonicalStacksTipHeight(h)), - Err(_) => None, - }, - _ => None, - } - } -} - -impl HttpRequestPreamble { - pub fn new( - version: HttpVersion, 
- verb: String, - path: String, - hostname: String, - port: u16, - keep_alive: bool, - ) -> HttpRequestPreamble { - HttpRequestPreamble { - version: version, - verb: verb, - path: path, - host: PeerHost::from_host_port(hostname, port), - content_type: None, - content_length: None, - keep_alive: keep_alive, - headers: HashMap::new(), - } - } - - pub fn new_serialized( - fd: &mut W, - version: &HttpVersion, - verb: &str, - path: &str, - host: &PeerHost, - keep_alive: bool, - content_length: Option, - content_type: Option<&HttpContentType>, - mut write_headers: F, - ) -> Result<(), codec_error> - where - F: FnMut(&mut W) -> Result<(), codec_error>, - { - // "$verb $path HTTP/1.${version}\r\n" - fd.write_all(verb.as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(" ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(path.as_bytes()) - .map_err(codec_error::WriteError)?; - - match *version { - HttpVersion::Http10 => { - fd.write_all(" HTTP/1.0\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - HttpVersion::Http11 => { - fd.write_all(" HTTP/1.1\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - - // "User-Agent: $agent\r\nHost: $host\r\n" - fd.write_all("User-Agent: stacks/2.0\r\nHost: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", host).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - - // content-type - match content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(c.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => {} - } - - // content-length - match content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => {} - } - - match *version { - HttpVersion::Http10 => { - if keep_alive { - fd.write_all("Connection: keep-alive\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - HttpVersion::Http11 => { - if !keep_alive { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - } - - // headers - write_headers(fd)?; - - // end-of-headers - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - Ok(()) - } - - #[cfg(test)] - pub fn from_headers( - version: HttpVersion, - verb: String, - path: String, - hostname: String, - port: u16, - keep_alive: bool, - mut keys: Vec, - values: Vec, - ) -> HttpRequestPreamble { - assert_eq!(keys.len(), values.len()); - let mut req = HttpRequestPreamble::new(version, verb, path, hostname, port, keep_alive); - - for (k, v) in keys.drain(..).zip(values) { - req.add_header(k, v); - } - req - } - - pub fn add_header(&mut self, key: String, value: String) -> () { - let hdr = key.to_lowercase(); - if HttpReservedHeader::is_reserved(&hdr) { - match HttpReservedHeader::try_from_str(&hdr, &value) { - Some(h) => match h { - HttpReservedHeader::Host(ph) => { - self.host = ph; - return; - } - HttpReservedHeader::ContentType(ct) => { - self.content_type = Some(ct); - return; - } - _ => {} // can just fall through and insert - }, - None => { - return; - } - } - } - - self.headers.insert(hdr, value); - } - - /// Content-Length for this request. 
- /// If there is no valid Content-Length header, then - /// the Content-Length is 0 - pub fn get_content_length(&self) -> u32 { - self.content_length.unwrap_or(0) - } - - /// Set the content-length for this request - pub fn set_content_length(&mut self, len: u32) -> () { - self.content_length = Some(len); - } - - /// Set the content-type for this request - pub fn set_content_type(&mut self, content_type: HttpContentType) -> () { - self.content_type = Some(content_type) - } -} - -fn empty_headers(_fd: &mut W) -> Result<(), codec_error> { - Ok(()) -} - -fn stacks_height_headers( - fd: &mut W, - md: &HttpRequestMetadata, -) -> Result<(), codec_error> { - match md.canonical_stacks_tip_height { - Some(height) => { - fd.write_all(format!("X-Canonical-Stacks-Tip-Height: {}\r\n", height).as_bytes()) - .map_err(codec_error::WriteError)?; - } - _ => {} - } - Ok(()) -} - -fn keep_alive_headers(fd: &mut W, md: &HttpResponseMetadata) -> Result<(), codec_error> { - match md.client_version { - HttpVersion::Http10 => { - // client expects explicit keep-alive - if md.client_keep_alive { - fd.write_all("Connection: keep-alive\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } else { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - HttpVersion::Http11 => { - // only need "connection: close" if we're explicitly _not_ doing keep-alive - if !md.client_keep_alive { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - } - match md.canonical_stacks_tip_height { - Some(height) => { - fd.write_all(format!("X-Canonical-Stacks-Tip-Height: {}\r\n", height).as_bytes()) - .map_err(codec_error::WriteError)?; - } - _ => {} - } - Ok(()) -} - -fn write_headers( - fd: &mut W, - headers: &HashMap, -) -> Result<(), codec_error> { - for (ref key, ref value) in headers.iter() { - fd.write_all(key.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(": ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(value.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - Ok(()) -} - -fn default_accept_header() -> String { - format!( - "Accept: {}, {}, {}", - HttpContentType::Bytes, - HttpContentType::JSON, - HttpContentType::Text - ) -} - -/// Read from a stream until we see '\r\n\r\n', with the purpose of reading an HTTP preamble. -/// It's gonna be important here that R does some bufferring, since this reads byte by byte. -/// EOF if we read 0 bytes. 
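As a rough illustration of the terminator this function looks for, the helper below (a hypothetical name, not part of this patch) finds the end of an HTTP head in an in-memory buffer the same way: by locating the first "\r\n\r\n".

// Returns the length of the preamble, including the terminating blank line,
// or None if the head has not been fully buffered yet.
fn example_preamble_len(buf: &[u8]) -> Option<usize> {
    buf.windows(4)
        .position(|window| window == b"\r\n\r\n")
        .map(|start| start + 4)
}

// e.g. example_preamble_len(b"GET /v2/info HTTP/1.1\r\nHost: 127.0.0.1:20443\r\n\r\nbody")
// returns Some(48): everything up to and including the blank line.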
-fn read_to_crlf2(fd: &mut R) -> Result, codec_error> { - let mut ret = Vec::with_capacity(HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize); - while ret.len() < HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - let mut b = [0u8]; - fd.read_exact(&mut b).map_err(codec_error::ReadError)?; - ret.push(b[0]); - - if ret.len() > 4 { - let last_4 = &ret[(ret.len() - 4)..ret.len()]; - - // '\r\n\r\n' is [0x0d, 0x0a, 0x0d, 0x0a] - if last_4 == &[0x0d, 0x0a, 0x0d, 0x0a] { - break; - } - } - } - Ok(ret) -} - -impl StacksMessageCodec for HttpRequestPreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - HttpRequestPreamble::new_serialized( - fd, - &self.version, - &self.verb, - &self.path, - &self.host, - self.keep_alive, - self.content_length.clone(), - self.content_type.as_ref(), - |ref mut fd| write_headers(fd, &self.headers), - ) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // realistically, there won't be more than HTTP_PREAMBLE_MAX_NUM_HEADERS headers - let mut headers = [httparse::EMPTY_HEADER; HTTP_PREAMBLE_MAX_NUM_HEADERS]; - let mut req = httparse::Request::new(&mut headers); - - let buf_read = read_to_crlf2(fd)?; - - // consume request - match req.parse(&buf_read).map_err(|e| { - codec_error::DeserializeError(format!("Failed to parse HTTP request: {:?}", &e)) - })? { - httparse::Status::Partial => { - // partial - return Err(codec_error::UnderflowError( - "Not enough bytes to form a HTTP request preamble".to_string(), - )); - } - httparse::Status::Complete(_) => { - // consumed all headers. body_offset points to the start of the request body - let version = match req - .version - .ok_or(codec_error::DeserializeError("No HTTP version".to_string()))? - { - 0 => HttpVersion::Http10, - 1 => HttpVersion::Http11, - _ => { - return Err(codec_error::DeserializeError( - "Invalid HTTP version".to_string(), - )); - } - }; - - let verb = req - .method - .ok_or(codec_error::DeserializeError("No HTTP method".to_string()))? - .to_string(); - let path = req - .path - .ok_or(codec_error::DeserializeError("No HTTP path".to_string()))? 
- .to_string(); - - let mut peerhost = None; - let mut content_type = None; - let mut content_length = None; - let mut keep_alive = match version { - HttpVersion::Http10 => false, - HttpVersion::Http11 => true, - }; - - let mut headers: HashMap = HashMap::new(); - let mut all_headers: HashSet = HashSet::new(); - - for i in 0..req.headers.len() { - let value = String::from_utf8(req.headers[i].value.to_vec()).map_err(|_e| { - codec_error::DeserializeError( - "Invalid HTTP header value: not utf-8".to_string(), - ) - })?; - if !value.is_ascii() { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is not ASCII-US" - ))); - } - if value.len() > HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is too big" - ))); - } - - let key = req.headers[i].name.to_string().to_lowercase(); - if headers.contains_key(&key) || all_headers.contains(&key) { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: duplicate header \"{}\"", - key - ))); - } - all_headers.insert(key.clone()); - - if key == "host" { - peerhost = match value.parse::() { - Ok(ph) => Some(ph), - Err(_) => None, - }; - } else if key == "content-type" { - // parse - let ctype = value.to_lowercase().parse::()?; - content_type = Some(ctype); - } else if key == "content-length" { - // parse - content_length = match value.parse::() { - Ok(len) => Some(len), - Err(_) => None, - } - } else if key == "connection" { - // parse - if value.to_lowercase() == "close" { - keep_alive = false; - } else if value.to_lowercase() == "keep-alive" { - keep_alive = true; - } else { - return Err(codec_error::DeserializeError( - "Inavlid HTTP request: invalid Connection: header".to_string(), - )); - } - } else { - headers.insert(key, value); - } - } - - if peerhost.is_none() { - return Err(codec_error::DeserializeError( - "Missing Host header".to_string(), - )); - }; - - Ok(HttpRequestPreamble { - version: version, - verb: verb, - path: path, - host: peerhost.unwrap(), - content_type: content_type, - content_length: content_length, - keep_alive: keep_alive, - headers: headers, - }) - } - } - } -} - -impl HttpResponsePreamble { - pub fn new( - status_code: u16, - reason: String, - content_length_opt: Option, - content_type: HttpContentType, - keep_alive: bool, - request_id: u32, - ) -> HttpResponsePreamble { - HttpResponsePreamble { - status_code: status_code, - reason: reason, - keep_alive: keep_alive, - content_length: content_length_opt, - content_type: content_type, - request_id: request_id, - headers: HashMap::new(), - } - } - - pub fn ok_JSON_from_md( - fd: &mut W, - md: &HttpResponseMetadata, - ) -> Result<(), codec_error> { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - ) - } - - pub fn new_serialized( - fd: &mut W, - status_code: u16, - reason: &str, - content_length: Option, - content_type: &HttpContentType, - request_id: u32, - mut write_headers: F, - ) -> Result<(), codec_error> - where - F: FnMut(&mut W) -> Result<(), codec_error>, - { - fd.write_all("HTTP/1.1 ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{} {}\r\n", status_code, reason).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("Server: stacks/2.0\r\nDate: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(rfc7231_now().as_bytes()) - 
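// A minimal sketch of the httparse pass that consensus_deserialize() above
// builds on: parse the preamble bytes, then walk the header list, matching
// names case-insensitively and requiring UTF-8 values. Assumes only the
// httparse crate; error handling is reduced to plain strings.
fn sketch_parse_request(buf: &[u8]) -> Result<(String, String), String> {
    let mut headers = [httparse::EMPTY_HEADER; 32];
    let mut req = httparse::Request::new(&mut headers);
    match req.parse(buf).map_err(|e| format!("failed to parse: {:?}", e))? {
        httparse::Status::Partial => Err("not enough bytes for a preamble".to_string()),
        httparse::Status::Complete(_body_offset) => {
            let verb = req.method.ok_or("no HTTP method")?.to_string();
            let host = req
                .headers
                .iter()
                .find(|h| h.name.eq_ignore_ascii_case("host"))
                .and_then(|h| std::str::from_utf8(h.value).ok())
                .ok_or("missing Host header")?
                .to_string();
            Ok((verb, host))
        }
    }
}

// e.g. sketch_parse_request(b"GET /v2/info HTTP/1.1\r\nHost: localhost\r\n\r\n")
//        == Ok(("GET".to_string(), "localhost".to_string()))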
.map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Origin: *".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Headers: origin, content-type".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Methods: POST, GET, OPTIONS".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nContent-Type: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(content_type.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - - match content_length { - Some(len) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", len).as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => { - fd.write_all("Transfer-Encoding: chunked".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - - fd.write_all("\r\nX-Request-Id: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}\r\n", request_id).as_bytes()) - .map_err(codec_error::WriteError)?; - - write_headers(fd)?; - - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - Ok(()) - } - - pub fn new_error( - status_code: u16, - request_id: u32, - error_message: Option, - ) -> HttpResponsePreamble { - HttpResponsePreamble { - status_code: status_code, - keep_alive: true, - reason: HttpResponseType::error_reason(status_code).to_string(), - content_length: Some(error_message.unwrap_or("".to_string()).len() as u32), - content_type: HttpContentType::Text, - request_id: request_id, - headers: HashMap::new(), - } - } - - #[cfg(test)] - pub fn from_headers( - status_code: u16, - reason: String, - keep_alive: bool, - content_length: Option, - content_type: HttpContentType, - request_id: u32, - mut keys: Vec, - values: Vec, - ) -> HttpResponsePreamble { - assert_eq!(keys.len(), values.len()); - let mut res = HttpResponsePreamble::new( - status_code, - reason, - content_length, - content_type, - keep_alive, - request_id, - ); - - for (k, v) in keys.drain(..).zip(values) { - res.add_header(k, v); - } - res.set_request_id(request_id); - res - } - - pub fn add_header(&mut self, key: String, value: String) -> () { - let hdr = key.to_lowercase(); - if HttpReservedHeader::is_reserved(&hdr) { - match HttpReservedHeader::try_from_str(&hdr, &value) { - Some(h) => match h { - HttpReservedHeader::XRequestID(rid) => { - self.request_id = rid; - return; - } - HttpReservedHeader::ContentLength(cl) => { - self.content_length = Some(cl); - return; - } - HttpReservedHeader::ContentType(ct) => { - self.content_type = ct; - return; - } - _ => {} // can just fall through and insert - }, - None => { - return; - } - } - } - - self.headers.insert(hdr, value); - } - - pub fn set_request_id(&mut self, request_id: u32) -> () { - self.request_id = request_id; - } - - pub fn add_CORS_headers(&mut self) -> () { - self.headers - .insert("Access-Control-Allow-Origin".to_string(), "*".to_string()); - } - - // do we have Transfer-Encoding: chunked? 
- pub fn is_chunked(&self) -> bool { - self.content_length.is_none() - } -} - -/// Get an RFC 7231 date that represents the current time -fn rfc7231_now() -> String { - let now = time::PrimitiveDateTime::from(SystemTime::now()); - now.format("%a, %b %-d %-Y %-H:%M:%S GMT") -} - -impl StacksMessageCodec for HttpResponsePreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - HttpResponsePreamble::new_serialized( - fd, - self.status_code, - &self.reason, - self.content_length, - &self.content_type, - self.request_id, - |ref mut fd| write_headers(fd, &self.headers), - ) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // realistically, there won't be more than HTTP_PREAMBLE_MAX_NUM_HEADERS headers - let mut headers = [httparse::EMPTY_HEADER; HTTP_PREAMBLE_MAX_NUM_HEADERS]; - let mut resp = httparse::Response::new(&mut headers); - - let buf_read = read_to_crlf2(fd)?; - - // consume response - match resp.parse(&buf_read).map_err(|e| { - codec_error::DeserializeError(format!("Failed to parse HTTP response: {:?}", &e)) - })? { - httparse::Status::Partial => { - // try again - return Err(codec_error::UnderflowError( - "Not enough bytes to form a HTTP response preamble".to_string(), - )); - } - httparse::Status::Complete(_) => { - // consumed all headers. body_offset points to the start of the response body - let _ = resp - .version - .ok_or(codec_error::DeserializeError("No HTTP version".to_string()))?; - let status_code = resp.code.ok_or(codec_error::DeserializeError( - "No HTTP status code".to_string(), - ))?; - let reason = resp - .reason - .ok_or(codec_error::DeserializeError( - "No HTTP status reason".to_string(), - ))? - .to_string(); - - let mut headers: HashMap = HashMap::new(); - let mut all_headers: HashSet = HashSet::new(); - - let mut content_type = None; - let mut content_length = None; - let mut request_id = None; - let mut chunked_encoding = false; - let mut keep_alive = true; - - for i in 0..resp.headers.len() { - let value = - String::from_utf8(resp.headers[i].value.to_vec()).map_err(|_e| { - codec_error::DeserializeError( - "Invalid HTTP header value: not utf-8".to_string(), - ) - })?; - if !value.is_ascii() { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is not ASCII-US" - ))); - } - if value.len() > HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is too big" - ))); - } - - let key = resp.headers[i].name.to_string().to_lowercase(); - if headers.contains_key(&key) || all_headers.contains(&key) { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: duplicate header \"{}\"", - key - ))); - } - all_headers.insert(key.clone()); - - if key == "content-type" { - let ctype = value.to_lowercase().parse::()?; - content_type = Some(ctype); - } else if key == "content-length" { - let len = value.parse::().map_err(|_e| { - codec_error::DeserializeError( - "Invalid Content-Length header value".to_string(), - ) - })?; - content_length = Some(len); - } else if key == "x-request-id" { - match value.parse::() { - Ok(i) => { - request_id = Some(i); - } - Err(_) => {} - } - } else if key == "connection" { - // parse - if value.to_lowercase() == "close" { - keep_alive = false; - } else if value.to_lowercase() == "keep-alive" { - keep_alive = true; - } else { - return Err(codec_error::DeserializeError( - "Inavlid HTTP request: invalid Connection: header".to_string(), - )); - } - } else if key == 
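// RFC 7231's IMF-fixdate looks like "Sun, 06 Nov 1994 08:49:37 GMT" (zero-padded
// day before month), whereas the format string in rfc7231_now() above emits the
// month first. A sketch of the canonical layout, assuming the chrono crate
// rather than the `time` crate used here:
fn imf_fixdate_now() -> String {
    chrono::Utc::now().format("%a, %d %b %Y %H:%M:%S GMT").to_string()
}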
"transfer-encoding" { - if value.to_lowercase() == "chunked" { - chunked_encoding = true; - } else { - return Err(codec_error::DeserializeError(format!( - "Unsupported transfer-encoding '{}'", - value - ))); - } - } else { - headers.insert(key, value); - } - } - - if content_length.is_some() && chunked_encoding { - return Err(codec_error::DeserializeError( - "Invalid HTTP response: incompatible transfer-encoding and content-length" - .to_string(), - )); - } - - if content_type.is_none() || (content_length.is_none() && !chunked_encoding) { - return Err(codec_error::DeserializeError( - "Invalid HTTP response: missing Content-Type, Content-Length".to_string(), - )); - } - - Ok(HttpResponsePreamble { - status_code: status_code, - reason: reason, - keep_alive: keep_alive, - content_type: content_type.unwrap(), - content_length: content_length, - request_id: request_id.unwrap_or(HTTP_REQUEST_ID_RESERVED), - headers: headers, - }) - } - } - } -} - -impl HttpRequestType { - fn try_parse( - protocol: &mut StacksHttp, - verb: &str, - regex: &Regex, - preamble: &HttpRequestPreamble, - path: &str, - query: Option<&str>, - fd: &mut R, - parser: F, - ) -> Result, net_error> - where - F: Fn( - &mut StacksHttp, - &HttpRequestPreamble, - &Captures, - Option<&str>, - &mut R, - ) -> Result, - { - if preamble.verb == verb { - if let Some(ref captures) = regex.captures(path) { - let payload = parser(protocol, preamble, captures, query, fd)?; - return Ok(Some(payload)); - } - } - - Ok(None) - } - - pub fn parse( - protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - // TODO: make this static somehow - let REQUEST_METHODS: &[( - &str, - &Regex, - &dyn Fn( - &mut StacksHttp, - &HttpRequestPreamble, - &Captures, - Option<&str>, - &mut R, - ) -> Result, - )] = &[ - ("GET", &PATH_GETINFO, &HttpRequestType::parse_getinfo), - ("GET", &PATH_GETPOXINFO, &HttpRequestType::parse_getpoxinfo), - ( - "GET", - &PATH_GETNEIGHBORS, - &HttpRequestType::parse_getneighbors, - ), - ("GET", &PATH_GETHEADERS, &HttpRequestType::parse_getheaders), - ("GET", &PATH_GETBLOCK, &HttpRequestType::parse_getblock), - ( - "GET", - &PATH_GETMICROBLOCKS_INDEXED, - &HttpRequestType::parse_getmicroblocks_indexed, - ), - ( - "GET", - &PATH_GETMICROBLOCKS_CONFIRMED, - &HttpRequestType::parse_getmicroblocks_confirmed, - ), - ( - "GET", - &PATH_GETMICROBLOCKS_UNCONFIRMED, - &HttpRequestType::parse_getmicroblocks_unconfirmed, - ), - ( - "GET", - &PATH_GETTRANSACTION_UNCONFIRMED, - &HttpRequestType::parse_gettransaction_unconfirmed, - ), - ( - "POST", - &PATH_POST_FEE_RATE_ESIMATE, - &HttpRequestType::parse_post_fee_rate_estimate, - ), - ( - "POST", - &PATH_POSTTRANSACTION, - &HttpRequestType::parse_posttransaction, - ), - ("POST", &PATH_POSTBLOCK, &HttpRequestType::parse_postblock), - ( - "POST", - &PATH_POSTMICROBLOCK, - &HttpRequestType::parse_postmicroblock, - ), - ( - "GET", - &PATH_GET_ACCOUNT, - &HttpRequestType::parse_get_account, - ), - ( - "GET", - &PATH_GET_DATA_VAR, - &HttpRequestType::parse_get_data_var, - ), - ( - "GET", - &PATH_GET_CONSTANT_VAL, - &HttpRequestType::parse_get_constant_val, - ), - ( - "POST", - &PATH_GET_MAP_ENTRY, - &HttpRequestType::parse_get_map_entry, - ), - ( - "GET", - &PATH_GET_TRANSFER_COST, - &HttpRequestType::parse_get_transfer_cost, - ), - ( - "GET", - &PATH_GET_CONTRACT_SRC, - &HttpRequestType::parse_get_contract_source, - ), - ( - "GET", - &PATH_GET_IS_TRAIT_IMPLEMENTED, - &HttpRequestType::parse_get_is_trait_implemented, - ), - ( - "GET", - &PATH_GET_CONTRACT_ABI, - 
&HttpRequestType::parse_get_contract_abi, - ), - ( - "POST", - &PATH_POST_CALL_READ_ONLY, - &HttpRequestType::parse_call_read_only, - ), - ( - "OPTIONS", - &PATH_OPTIONS_WILDCARD, - &HttpRequestType::parse_options_preflight, - ), - ( - "GET", - &PATH_GET_ATTACHMENT, - &HttpRequestType::parse_get_attachment, - ), - ( - "GET", - &PATH_GET_ATTACHMENTS_INV, - &HttpRequestType::parse_get_attachments_inv, - ), - ( - "POST", - &PATH_POST_MEMPOOL_QUERY, - &HttpRequestType::parse_post_mempool_query, - ), - ( - "GET", - &PATH_GET_STACKERDB_METADATA, - &HttpRequestType::parse_get_stackerdb_metadata, - ), - ( - "GET", - &PATH_GET_STACKERDB_CHUNK, - &HttpRequestType::parse_get_stackerdb_chunk, - ), - ( - "GET", - &PATH_GET_STACKERDB_VERSIONED_CHUNK, - &HttpRequestType::parse_get_stackerdb_versioned_chunk, - ), - ( - "POST", - &PATH_POST_STACKERDB_CHUNK, - &HttpRequestType::parse_post_stackerdb_chunk, - ), - ]; - - // use url::Url to parse path and query string - // Url will refuse to parse just a path, so create a dummy URL - let local_url = format!("http://local{}", &preamble.path); - let url = Url::parse(&local_url).map_err(|_e| { - net_error::DeserializeError("Http request path could not be parsed".to_string()) - })?; - - let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { - net_error::DeserializeError( - "Http request path could not be parsed as UTF-8".to_string(), - ) - })?; - - for (verb, regex, parser) in REQUEST_METHODS.iter() { - match HttpRequestType::try_parse( - protocol, - verb, - regex, - preamble, - &decoded_path, - url.query(), - fd, - parser, - )? { - Some(request) => { - let query = if let Some(q) = url.query() { - format!("?{}", q) - } else { - "".to_string() - }; - info!("Handle HTTPRequest"; "verb" => %verb, "peer_addr" => %protocol.peer_addr, "path" => %decoded_path, "query" => %query); - return Ok(request); - } - None => { - continue; - } - } - } - - let _path = preamble.path.clone(); - test_debug!("Failed to parse '{}'", &_path); - Err(net_error::ClientError(ClientError::NotFound( - preamble.path.clone(), - ))) - } - - fn parse_getinfo( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetInfo".to_string(), - )); - } - Ok(HttpRequestType::GetInfo( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - fn parse_getpoxinfo( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetPoxInfo".to_string(), - )); - } - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetPoxInfo( - HttpRequestMetadata::from_preamble(preamble), - tip, - )) - } - - fn parse_getneighbors( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetNeighbors".to_string(), - )); - } - - Ok(HttpRequestType::GetNeighbors( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - fn parse_get_transfer_cost( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - 
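// A sketch of the dummy-base-URL trick used by parse() above to split a raw
// request path from its query string, assuming the url and percent-encoding
// crates. Url will not parse a bare path, so it is grafted onto a throwaway
// "http://local" base first.
fn split_path_and_query(raw_path: &str) -> Result<(String, Option<String>), String> {
    let url = url::Url::parse(&format!("http://local{}", raw_path))
        .map_err(|e| format!("could not parse path: {}", e))?;
    let decoded_path = percent_encoding::percent_decode_str(url.path())
        .decode_utf8()
        .map_err(|e| format!("path is not UTF-8: {}", e))?
        .to_string();
    Ok((decoded_path, url.query().map(|q| q.to_string())))
}

// e.g. split_path_and_query("/v2/headers/100?tip=latest")
//        == Ok(("/v2/headers/100".to_string(), Some("tip=latest".to_string())))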
_query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetTransferCost".to_string(), - )); - } - - Ok(HttpRequestType::GetTransferCost( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - /// Check whether the given option query string sets proof=0 (setting proof to false). - /// Defaults to true. - fn get_proof_query(query: Option<&str>) -> bool { - let no_proof = if let Some(query_string) = query { - form_urlencoded::parse(query_string.as_bytes()) - .find(|(key, _v)| key == "proof") - .map(|(_k, value)| value == "0") - .unwrap_or(false) - } else { - false - }; - - !no_proof - } - - /// get the chain tip optional query argument (`tip`) - /// Take the first value we can parse. - fn get_chain_tip_query(query: Option<&str>) -> TipRequest { - match query { - Some(query_string) => { - for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { - if key != "tip" { - continue; - } - - if value == "latest" { - return TipRequest::UseLatestUnconfirmedTip; - } - if let Ok(tip) = StacksBlockId::from_hex(&value) { - return TipRequest::SpecificTip(tip); - } - } - return TipRequest::UseLatestAnchoredTip; - } - None => { - return TipRequest::UseLatestAnchoredTip; - } - } - } - - /// get the mempool page ID optional query argument (`page_id`) - /// Take the first value we can parse. - fn get_mempool_page_id_query(query: Option<&str>) -> Option { - match query { - Some(query_string) => { - for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { - if key != "page_id" { - continue; - } - if let Ok(page_id) = Txid::from_hex(&value) { - return Some(page_id); - } - } - return None; - } - None => { - return None; - } - } - } - - fn parse_get_account( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetAccount".to_string(), - )); - } - - let principal = PrincipalData::parse(&captures["principal"]).map_err(|_e| { - net_error::DeserializeError("Failed to parse account principal".into()) - })?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetAccount( - HttpRequestMetadata::from_preamble(preamble), - principal, - tip, - with_proof, - )) - } - - fn parse_get_data_var( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if content_len != 0 { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetDataVar ({})", - content_len - ))); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let var_name = ClarityName::try_from(captures["varname"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse data var name".into()))?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - 
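// A sketch of the query-string handling in get_proof_query() and
// get_chain_tip_query() above: `proof=0` disables proofs, and the first usable
// `tip=` value wins. Assumes the form_urlencoded crate; the tip is kept as a
// raw string here instead of being decoded into a StacksBlockId.
fn parse_proof_and_tip(query: Option<&str>) -> (bool, Option<String>) {
    let mut with_proof = true;
    let mut tip = None;
    if let Some(query_string) = query {
        for (key, value) in form_urlencoded::parse(query_string.as_bytes()) {
            if key == "proof" && value == "0" {
                with_proof = false;
            } else if key == "tip" && tip.is_none() {
                tip = Some(value.to_string());
            }
        }
    }
    (with_proof, tip)
}

// e.g. parse_proof_and_tip(Some("proof=0&tip=latest"))
//        == (false, Some("latest".to_string()))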
Ok(HttpRequestType::GetDataVar( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - var_name, - tip, - with_proof, - )) - } - - fn parse_get_constant_val( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if content_len != 0 { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetConstantVal ({})", - content_len - ))); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let const_name = - ClarityName::try_from(captures["constname"].to_string()).map_err(|_e| { - net_error::DeserializeError("Failed to parse constant value name".into()) - })?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetConstantVal( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - const_name, - tip, - )) - } - - fn parse_get_map_entry( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < (BOUND_VALUE_SERIALIZATION_HEX)) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetMapEntry ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".into(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let map_name = ClarityName::try_from(captures["map"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse map name".into()))?; - - let value_hex: String = serde_json::from_reader(fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse JSON body".into()))?; - - let value = Value::try_deserialize_hex_untyped(&value_hex) - .map_err(|_e| net_error::DeserializeError("Failed to deserialize key value".into()))?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetMapEntry( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - map_name, - value, - tip, - with_proof, - )) - } - - fn parse_call_read_only( - protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < protocol.maximum_call_argument_size) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for CallReadOnly ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected 
application/json".to_string(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let func_name = ClarityName::try_from(captures["function"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - let body: CallReadOnlyRequestBody = serde_json::from_reader(fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse JSON body".into()))?; - - let sender = PrincipalData::parse(&body.sender) - .map_err(|_e| net_error::DeserializeError("Failed to parse sender principal".into()))?; - - let sponsor = if let Some(sponsor) = body.sponsor { - Some(PrincipalData::parse(&sponsor).map_err(|_e| { - net_error::DeserializeError("Failed to parse sponsor principal".into()) - })?) - } else { - None - }; - - let arguments = body - .arguments - .into_iter() - .map(|hex| Value::try_deserialize_hex_untyped(&hex).ok()) - .collect::>>() - .ok_or_else(|| { - net_error::DeserializeError("Failed to deserialize argument value".into()) - })?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::CallReadOnlyFunction( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - sender, - sponsor, - func_name, - arguments, - tip, - )) - } - - fn parse_get_contract_arguments( - preamble: &HttpRequestPreamble, - captures: &Captures, - ) -> Result<(HttpRequestMetadata, StacksAddress, ContractName), net_error> { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - Ok(( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - )) - } - - fn parse_get_contract_abi( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let tip = HttpRequestType::get_chain_tip_query(query); - HttpRequestType::parse_get_contract_arguments(preamble, captures).map( - |(preamble, addr, name)| HttpRequestType::GetContractABI(preamble, addr, name, tip), - ) - } - - fn parse_get_contract_source( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - HttpRequestType::parse_get_contract_arguments(preamble, captures).map( - |(preamble, addr, name)| { - HttpRequestType::GetContractSrc(preamble, addr, name, tip, with_proof) - }, - ) - } - - fn parse_get_is_trait_implemented( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let tip = HttpRequestType::get_chain_tip_query(query); - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - 
)); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let trait_name = ClarityName::try_from(captures["traitName"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse trait name".into()))?; - let trait_contract_addr = StacksAddress::from_string(&captures["traitContractAddr"]) - .ok_or_else(|| net_error::DeserializeError("Failed to parse contract address".into()))? - .into(); - let trait_contract_name = ContractName::try_from(captures["traitContractName"].to_string()) - .map_err(|_e| { - net_error::DeserializeError("Failed to parse trait contract name".into()) - })?; - let trait_id = TraitIdentifier::new(trait_contract_addr, trait_contract_name, trait_name); - - Ok(HttpRequestType::GetIsTraitImplemented( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - trait_id, - tip, - )) - } - - fn parse_getheaders( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetBlock".to_string(), - )); - } - - let quantity_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to reward cycle group".to_string(), - ))? - .as_str(); - - let quantity: u64 = quantity_str - .parse() - .map_err(|_| net_error::DeserializeError("Failed to parse reward cycle".to_string()))?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetHeaders( - HttpRequestMetadata::from_preamble(preamble), - quantity, - tip, - )) - } - - fn parse_getblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetBlock".to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to block hash group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str) - .map_err(|_e| net_error::DeserializeError("Failed to parse block hash".to_string()))?; - - Ok(HttpRequestType::GetBlock( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_indexed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicroblocksIndexed" - .to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? 
- .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksIndexed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_confirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocks".to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksConfirmed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_unconfirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocksUnconfirmed" - .to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? - .as_str(); - - let min_seq_str = captures - .get(2) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock minimum sequence group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - let min_seq = min_seq_str.parse::().map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock minimum sequence".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksUnconfirmed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - min_seq, - )) - } - - fn parse_gettransaction_unconfirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocksUnconfirmed" - .to_string(), - )); - } - - let txid_hex = regex - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to txid group".to_string(), - ))? 
- .as_str(); - - if txid_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid txid: expected 64 bytes".to_string(), - )); - } - - let txid = Txid::from_hex(&txid_hex) - .map_err(|_e| net_error::DeserializeError("Failed to decode txid hex".to_string()))?; - - Ok(HttpRequestType::GetTransactionUnconfirmed( - HttpRequestMetadata::from_preamble(preamble), - txid, - )) - } - - fn parse_post_fee_rate_estimate( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < MAX_PAYLOAD_LEN) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for FeeRateEstimate ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".to_string(), - )); - } - - let bound_fd = BoundReader::from_reader(fd, content_len as u64); - - let body: FeeRateEstimateRequestBody = serde_json::from_reader(bound_fd).map_err(|e| { - net_error::DeserializeError(format!("Failed to parse JSON body: {}", e)) - })?; - - let payload_hex = if body.transaction_payload.starts_with("0x") { - &body.transaction_payload[2..] - } else { - &body.transaction_payload - }; - - let payload_data = hex_bytes(payload_hex).map_err(|_e| { - net_error::DeserializeError("Bad hex string supplied for transaction payload".into()) - })?; - - let payload = TransactionPayload::consensus_deserialize(&mut payload_data.as_slice()) - .map_err(|e| { - net_error::DeserializeError(format!( - "Failed to deserialize transaction payload: {}", - e - )) - })?; - - let estimated_len = - std::cmp::max(body.estimated_len.unwrap_or(0), payload_data.len() as u64); - - Ok(HttpRequestType::FeeRateEstimate( - HttpRequestMetadata::from_preamble(preamble), - payload, - estimated_len, - )) - } - - fn parse_posttransaction( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostTransaction" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostTransaction body is too big".to_string(), - )); - } - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for transaction".to_string(), - )); - } - Some(HttpContentType::Bytes) => { - HttpRequestType::parse_posttransaction_octets(preamble, &mut bound_fd) - } - Some(HttpContentType::JSON) => { - HttpRequestType::parse_posttransaction_json(preamble, &mut bound_fd) - } - _ => { - return Err(net_error::DeserializeError( - "Wrong Content-Type for transaction; expected application/json".to_string(), - )); - } - } - } - - fn parse_posttransaction_octets( - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - let tx = StacksTransaction::consensus_deserialize(fd).map_err(|e| { - if let codec_error::DeserializeError(msg) = e { - net_error::ClientError(ClientError::Message(format!( - "Failed to deserialize posted transaction: {}", - msg - ))) - } else { - e.into() - } - })?; - Ok(HttpRequestType::PostTransaction( - 
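// A sketch of the payload normalization in parse_post_fee_rate_estimate()
// above: accept the transaction payload hex with or without a leading "0x",
// then decode it to bytes. The tiny decoder below is a stand-in for the
// codebase's hex_bytes() helper and assumes ASCII input.
fn decode_payload_hex(payload: &str) -> Result<Vec<u8>, String> {
    let hex = payload.strip_prefix("0x").unwrap_or(payload);
    if hex.len() % 2 != 0 {
        return Err("odd-length hex string".to_string());
    }
    (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).map_err(|e| e.to_string()))
        .collect()
}

// e.g. decode_payload_hex("0x00ff") == Ok(vec![0x00, 0xff])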
HttpRequestMetadata::from_preamble(preamble), - tx, - None, - )) - } - - fn parse_posttransaction_json( - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let body: PostTransactionRequestBody = serde_json::from_reader(&mut bound_fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse body".into()))?; - - let tx = { - let tx_bytes = hex_bytes(&body.tx) - .map_err(|_e| net_error::DeserializeError("Failed to parse tx".into()))?; - StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|e| { - if let codec_error::DeserializeError(msg) = e { - net_error::ClientError(ClientError::Message(format!( - "Failed to deserialize posted transaction: {}", - msg - ))) - } else { - e.into() - } - }) - }?; - - let attachment = match body.attachment { - None => None, - Some(attachment_content) => { - let content = hex_bytes(&attachment_content).map_err(|_e| { - net_error::DeserializeError("Failed to parse attachment".into()) - })?; - Some(Attachment::new(content)) - } - }; - - Ok(HttpRequestType::PostTransaction( - HttpRequestMetadata::from_preamble(preamble), - tx, - attachment, - )) - } - - fn parse_postblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostBlock body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for Stacks block".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for Stacks block; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let consensus_hash_str = regex - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match consensus hash in path group".to_string(), - ))? 
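// A sketch of the JSON body accepted by parse_posttransaction_json() above: a
// hex-encoded transaction plus an optional hex-encoded attachment. Assumes
// serde/serde_json with the derive feature; field names mirror
// PostTransactionRequestBody.
#[derive(serde::Deserialize)]
struct PostTxBody {
    tx: String,
    attachment: Option<String>,
}

fn parse_post_tx_body(body: &str) -> Result<PostTxBody, serde_json::Error> {
    serde_json::from_str(body)
}

// e.g. parse_post_tx_body(r#"{"tx":"8080000000","attachment":null}"#) is Ok(..)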
- .as_str(); - - let consensus_hash: ConsensusHash = - ConsensusHash::from_hex(consensus_hash_str).map_err(|_| { - net_error::DeserializeError("Failed to parse consensus hash".to_string()) - })?; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let stacks_block = StacksBlock::consensus_deserialize(&mut bound_fd)?; - - Ok(HttpRequestType::PostBlock( - HttpRequestMetadata::from_preamble(preamble), - consensus_hash, - stacks_block, - )) - } - - fn parse_postmicroblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostMicroblock" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostMicroblock body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for microblock".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for microblock; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - - let mb = StacksMicroblock::consensus_deserialize(&mut bound_fd)?; - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::PostMicroblock( - HttpRequestMetadata::from_preamble(preamble), - mb, - tip, - )) - } - - fn parse_get_attachment( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - let hex_content_hash = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to attachment hash group".to_string(), - ))? 
- .as_str(); - - let content_hash = Hash160::from_hex(&hex_content_hash).map_err(|_| { - net_error::DeserializeError("Failed to construct hash160 from inputs".to_string()) - })?; - - Ok(HttpRequestType::GetAttachment( - HttpRequestMetadata::from_preamble(preamble), - content_hash, - )) - } - - fn parse_get_attachments_inv( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let (index_block_hash, pages_indexes) = match query { - None => { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting index_block_hash and pages_indexes" - .to_string(), - )); - } - Some(query) => { - let mut index_block_hash = None; - let mut pages_indexes = HashSet::new(); - - for (key, value) in form_urlencoded::parse(query.as_bytes()) { - if key == "index_block_hash" { - index_block_hash = match StacksBlockId::from_hex(&value) { - Ok(index_block_hash) => Some(index_block_hash), - _ => None, - }; - } else if key == "pages_indexes" { - if let Ok(pages_indexes_value) = value.parse::() { - for entry in pages_indexes_value.split(",") { - if let Ok(page_index) = entry.parse::() { - pages_indexes.insert(page_index); - } - } - } - } - } - - let index_block_hash = match index_block_hash { - None => { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting index_block_hash".to_string(), - )); - } - Some(index_block_hash) => index_block_hash, - }; - - if pages_indexes.is_empty() { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting pages_indexes".to_string(), - )); - } - - (index_block_hash, pages_indexes) - } - }; - - Ok(HttpRequestType::GetAttachmentsInv( - HttpRequestMetadata::from_preamble(preamble), - index_block_hash, - pages_indexes, - )) - } - - fn parse_post_mempool_query( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-empty body".to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: MemPoolQuery body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for MemPoolQuery".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for MemPoolQuery; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let mempool_query = MemPoolSyncData::consensus_deserialize(&mut bound_fd)?; - let page_id_opt = HttpRequestType::get_mempool_page_id_query(query); - - Ok(HttpRequestType::MemPoolQuery( - HttpRequestMetadata::from_preamble(preamble), - mempool_query, - page_id_opt, - )) - } - - fn parse_get_stackerdb_metadata( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: 
expected 0-length body".to_string(), - )); - } - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBMetadata(preamble, contract_id) - }, - ) - } - - fn parse_get_stackerdb_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let slot_id: u32 = regex - .name("slot_id") - .ok_or(net_error::DeserializeError( - "Failed to match slot ID".to_string(), - ))? - .as_str() - .parse() - .map_err(|_| net_error::DeserializeError("Failed to decode slot ID".to_string()))?; - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBChunk(preamble, contract_id, slot_id, None) - }, - ) - } - - fn parse_get_stackerdb_versioned_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let slot_id: u32 = regex - .name("slot_id") - .ok_or(net_error::DeserializeError( - "Failed to match slot ID".to_string(), - ))? - .as_str() - .parse() - .map_err(|_| net_error::DeserializeError("Failed to decode slot ID".to_string()))?; - - let version: u32 = regex - .name("slot_version") - .ok_or(net_error::DeserializeError( - "Failed to match slot version".to_string(), - ))? 
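// A sketch of the named-capture extraction used by the StackerDB chunk parsers
// here, assuming the regex crate. The pattern is illustrative only; the real
// PATH_GET_STACKERDB_* regexes are defined elsewhere in this file.
fn parse_slot_and_version(path: &str) -> Option<(u32, Option<u32>)> {
    // compiled on every call for brevity
    let re = regex::Regex::new(
        r"^/v2/stackerdb/[^/]+/[^/]+/(?P<slot_id>[0-9]+)(/(?P<slot_version>[0-9]+))?$",
    )
    .ok()?;
    let caps = re.captures(path)?;
    let slot_id = caps.name("slot_id")?.as_str().parse::<u32>().ok()?;
    let slot_version = caps
        .name("slot_version")
        .and_then(|m| m.as_str().parse::<u32>().ok());
    Some((slot_id, slot_version))
}

// e.g. parse_slot_and_version("/v2/stackerdb/SPADDR/my-db/7/3") == Some((7, Some(3)))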
- .as_str() - .parse() - .map_err(|_| { - net_error::DeserializeError("Failed to decode slot version".to_string()) - })?; - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBChunk(preamble, contract_id, slot_id, Some(version)) - }, - ) - } - - fn parse_post_stackerdb_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostStackerDBChunk" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostStackerDBChunk body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/json - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for stackerdb chunk".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::JSON { - return Err(net_error::DeserializeError( - "Wrong Content-Type for stackerdb; expected application/json".to_string(), - )); - } - } - }; - - let contract_addr = StacksAddress::from_string(®ex["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(regex["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - let contract_id = QualifiedContractIdentifier::new(contract_addr.into(), contract_name); - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let chunk_data: StackerDBChunkData = - serde_json::from_reader(&mut bound_fd).map_err(|_e| { - net_error::DeserializeError("Failed to parse StackerDB chunk body".into()) - })?; - - Ok(HttpRequestType::PostStackerDBChunk( - HttpRequestMetadata::from_preamble(preamble), - contract_id, - chunk_data, - )) - } - - fn parse_options_preflight( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - Ok(HttpRequestType::OptionsPreflight( - HttpRequestMetadata::from_preamble(preamble), - preamble.path.to_string(), - )) - } - - pub fn metadata(&self) -> &HttpRequestMetadata { - match *self { - HttpRequestType::GetInfo(ref md) => md, - HttpRequestType::GetPoxInfo(ref md, ..) => md, - HttpRequestType::GetNeighbors(ref md) => md, - HttpRequestType::GetHeaders(ref md, ..) => md, - HttpRequestType::GetBlock(ref md, _) => md, - HttpRequestType::GetMicroblocksIndexed(ref md, _) => md, - HttpRequestType::GetMicroblocksConfirmed(ref md, _) => md, - HttpRequestType::GetMicroblocksUnconfirmed(ref md, _, _) => md, - HttpRequestType::GetTransactionUnconfirmed(ref md, _) => md, - HttpRequestType::PostTransaction(ref md, _, _) => md, - HttpRequestType::PostBlock(ref md, ..) => md, - HttpRequestType::PostMicroblock(ref md, ..) => md, - HttpRequestType::GetAccount(ref md, ..) => md, - HttpRequestType::GetDataVar(ref md, ..) => md, - HttpRequestType::GetConstantVal(ref md, ..) => md, - HttpRequestType::GetMapEntry(ref md, ..) => md, - HttpRequestType::GetTransferCost(ref md) => md, - HttpRequestType::GetContractABI(ref md, ..) => md, - HttpRequestType::GetContractSrc(ref md, ..) 
=> md, - HttpRequestType::GetIsTraitImplemented(ref md, ..) => md, - HttpRequestType::CallReadOnlyFunction(ref md, ..) => md, - HttpRequestType::OptionsPreflight(ref md, ..) => md, - HttpRequestType::GetAttachmentsInv(ref md, ..) => md, - HttpRequestType::GetAttachment(ref md, ..) => md, - HttpRequestType::MemPoolQuery(ref md, ..) => md, - HttpRequestType::FeeRateEstimate(ref md, _, _) => md, - HttpRequestType::GetStackerDBMetadata(ref md, ..) => md, - HttpRequestType::GetStackerDBChunk(ref md, ..) => md, - HttpRequestType::PostStackerDBChunk(ref md, ..) => md, - HttpRequestType::ClientError(ref md, ..) => md, - } - } - - pub fn metadata_mut(&mut self) -> &mut HttpRequestMetadata { - match *self { - HttpRequestType::GetInfo(ref mut md) => md, - HttpRequestType::GetPoxInfo(ref mut md, ..) => md, - HttpRequestType::GetNeighbors(ref mut md) => md, - HttpRequestType::GetHeaders(ref mut md, ..) => md, - HttpRequestType::GetBlock(ref mut md, _) => md, - HttpRequestType::GetMicroblocksIndexed(ref mut md, _) => md, - HttpRequestType::GetMicroblocksConfirmed(ref mut md, _) => md, - HttpRequestType::GetMicroblocksUnconfirmed(ref mut md, _, _) => md, - HttpRequestType::GetTransactionUnconfirmed(ref mut md, _) => md, - HttpRequestType::PostTransaction(ref mut md, _, _) => md, - HttpRequestType::PostBlock(ref mut md, ..) => md, - HttpRequestType::PostMicroblock(ref mut md, ..) => md, - HttpRequestType::GetAccount(ref mut md, ..) => md, - HttpRequestType::GetDataVar(ref mut md, ..) => md, - HttpRequestType::GetConstantVal(ref mut md, ..) => md, - HttpRequestType::GetMapEntry(ref mut md, ..) => md, - HttpRequestType::GetTransferCost(ref mut md) => md, - HttpRequestType::GetContractABI(ref mut md, ..) => md, - HttpRequestType::GetContractSrc(ref mut md, ..) => md, - HttpRequestType::GetIsTraitImplemented(ref mut md, ..) => md, - HttpRequestType::CallReadOnlyFunction(ref mut md, ..) => md, - HttpRequestType::OptionsPreflight(ref mut md, ..) => md, - HttpRequestType::GetAttachmentsInv(ref mut md, ..) => md, - HttpRequestType::GetAttachment(ref mut md, ..) => md, - HttpRequestType::MemPoolQuery(ref mut md, ..) => md, - HttpRequestType::FeeRateEstimate(ref mut md, _, _) => md, - HttpRequestType::GetStackerDBMetadata(ref mut md, ..) => md, - HttpRequestType::GetStackerDBChunk(ref mut md, ..) => md, - HttpRequestType::PostStackerDBChunk(ref mut md, ..) => md, - HttpRequestType::ClientError(ref mut md, ..) 
=> md, - } - } - - fn make_tip_query_string(tip_req: &TipRequest, with_proof: bool) -> String { - match tip_req { - TipRequest::UseLatestUnconfirmedTip => { - format!("?tip=latest{}", if with_proof { "" } else { "&proof=0" }) - } - TipRequest::SpecificTip(tip) => { - format!("?tip={}{}", tip, if with_proof { "" } else { "&proof=0" }) - } - TipRequest::UseLatestAnchoredTip => { - if !with_proof { - format!("?proof=0") - } else { - "".to_string() - } - } - } - } - - pub fn request_path(&self) -> String { - match self { - HttpRequestType::GetInfo(_md) => "/v2/info".to_string(), - HttpRequestType::GetPoxInfo(_md, tip_req) => format!( - "/v2/pox{}", - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetNeighbors(_md) => "/v2/neighbors".to_string(), - HttpRequestType::GetHeaders(_md, quantity, tip_req) => format!( - "/v2/headers/{}{}", - quantity, - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetBlock(_md, block_hash) => { - format!("/v2/blocks/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksIndexed(_md, block_hash) => { - format!("/v2/microblocks/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksConfirmed(_md, block_hash) => { - format!("/v2/microblocks/confirmed/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksUnconfirmed(_md, block_hash, min_seq) => format!( - "/v2/microblocks/unconfirmed/{}/{}", - block_hash.to_hex(), - min_seq - ), - HttpRequestType::GetTransactionUnconfirmed(_md, txid) => { - format!("/v2/transactions/unconfirmed/{}", txid) - } - HttpRequestType::PostTransaction(_md, ..) => "/v2/transactions".to_string(), - HttpRequestType::PostBlock(_md, ch, ..) => format!("/v2/blocks/upload/{}", &ch), - HttpRequestType::PostMicroblock(_md, _, tip_req) => format!( - "/v2/microblocks{}", - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetAccount(_md, principal, tip_req, with_proof) => { - format!( - "/v2/accounts/{}{}", - &principal.to_string(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof,) - ) - } - HttpRequestType::GetDataVar( - _md, - contract_addr, - contract_name, - var_name, - tip_req, - with_proof, - ) => format!( - "/v2/data_var/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - var_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - HttpRequestType::GetConstantVal( - _md, - contract_addr, - contract_name, - const_name, - tip_req, - ) => format!( - "/v2/constant_val/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - const_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetMapEntry( - _md, - contract_addr, - contract_name, - map_name, - _key, - tip_req, - with_proof, - ) => format!( - "/v2/map_entry/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - map_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - HttpRequestType::GetTransferCost(_md) => "/v2/fees/transfer".into(), - HttpRequestType::GetContractABI(_, contract_addr, contract_name, tip_req) => format!( - "/v2/contracts/interface/{}/{}{}", - contract_addr, - contract_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true,) - ), - HttpRequestType::GetContractSrc( - _, - contract_addr, - contract_name, - tip_req, - with_proof, - ) => format!( - "/v2/contracts/source/{}/{}{}", - contract_addr, - contract_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - 
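// A sketch of the `?tip=...` / `&proof=0` query construction performed by
// make_tip_query_string() above, with a local stand-in for the TipRequest enum
// (the real SpecificTip variant carries a StacksBlockId, not a String).
enum TipReq {
    UseLatestAnchoredTip,
    UseLatestUnconfirmedTip,
    SpecificTip(String),
}

fn tip_query_string(tip_req: &TipReq, with_proof: bool) -> String {
    let proof = if with_proof { "" } else { "&proof=0" };
    match tip_req {
        TipReq::UseLatestUnconfirmedTip => format!("?tip=latest{}", proof),
        TipReq::SpecificTip(tip) => format!("?tip={}{}", tip, proof),
        TipReq::UseLatestAnchoredTip => {
            if with_proof {
                "".to_string()
            } else {
                "?proof=0".to_string()
            }
        }
    }
}

// e.g. tip_query_string(&TipReq::UseLatestUnconfirmedTip, false) == "?tip=latest&proof=0"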
HttpRequestType::GetIsTraitImplemented( - _, - contract_addr, - contract_name, - trait_id, - tip_req, - ) => format!( - "/v2/traits/{}/{}/{}/{}/{}{}", - contract_addr, - contract_name.as_str(), - trait_id.name.to_string(), - StacksAddress::from(trait_id.clone().contract_identifier.issuer), - trait_id.contract_identifier.name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::CallReadOnlyFunction( - _, - contract_addr, - contract_name, - _, - _, - func_name, - _, - tip_req, - ) => format!( - "/v2/contracts/call-read/{}/{}/{}{}", - contract_addr, - contract_name.as_str(), - func_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::OptionsPreflight(_md, path) => path.to_string(), - HttpRequestType::GetAttachmentsInv(_md, index_block_hash, pages_indexes) => { - let pages_query = match pages_indexes.len() { - 0 => format!(""), - _n => { - let mut indexes = pages_indexes - .iter() - .map(|i| format!("{}", i)) - .collect::>(); - indexes.sort(); - format!("&pages_indexes={}", indexes.join(",")) - } - }; - let index_block_hash = format!("index_block_hash={}", index_block_hash); - format!("/v2/attachments/inv?{}{}", index_block_hash, pages_query,) - } - HttpRequestType::GetAttachment(_, content_hash) => { - format!("/v2/attachments/{}", to_hex(&content_hash.0[..])) - } - HttpRequestType::MemPoolQuery(_, _, page_id_opt) => match page_id_opt { - Some(page_id) => { - format!("/v2/mempool/query?page_id={}", page_id) - } - None => "/v2/mempool/query".to_string(), - }, - HttpRequestType::GetStackerDBMetadata(_, contract_id) => format!( - "/v2/stackerdb/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name - ), - HttpRequestType::GetStackerDBChunk(_, contract_id, slot_id, slot_version_opt) => { - if let Some(version) = slot_version_opt { - format!( - "/v2/stackerdb/{}/{}/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name, - slot_id, - version - ) - } else { - format!( - "/v2/stackerdb/{}/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name, - slot_id - ) - } - } - HttpRequestType::PostStackerDBChunk(_, contract_id, ..) => { - format!( - "/v2/stackerdb/{}/{}/chunks", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name - ) - } - HttpRequestType::FeeRateEstimate(_, _, _) => self.get_path().to_string(), - HttpRequestType::ClientError(_md, e) => match e { - ClientError::NotFound(path) => path.to_string(), - _ => "error path unknown".into(), - }, - } - } - - pub fn get_path(&self) -> &'static str { - match self { - HttpRequestType::GetInfo(..) => "/v2/info", - HttpRequestType::GetPoxInfo(..) => "/v2/pox", - HttpRequestType::GetNeighbors(..) => "/v2/neighbors", - HttpRequestType::GetHeaders(..) => "/v2/headers/:height", - HttpRequestType::GetBlock(..) => "/v2/blocks/:hash", - HttpRequestType::GetMicroblocksIndexed(..) => "/v2/microblocks/:hash", - HttpRequestType::GetMicroblocksConfirmed(..) => "/v2/microblocks/confirmed/:hash", - HttpRequestType::GetMicroblocksUnconfirmed(..) => { - "/v2/microblocks/unconfirmed/:hash/:seq" - } - HttpRequestType::GetTransactionUnconfirmed(..) => "/v2/transactions/unconfirmed/:txid", - HttpRequestType::PostTransaction(..) => "/v2/transactions", - HttpRequestType::PostBlock(..) => "/v2/blocks/upload/:block", - HttpRequestType::PostMicroblock(..) => "/v2/microblocks", - HttpRequestType::GetAccount(..) => "/v2/accounts/:principal", - HttpRequestType::GetDataVar(..) 
=> "/v2/data_var/:principal/:contract_name/:var_name", - HttpRequestType::GetConstantVal(..) => { - "/v2/constant_val/:principal/:contract_name/:const_name" - } - HttpRequestType::GetMapEntry(..) => "/v2/map_entry/:principal/:contract_name/:map_name", - HttpRequestType::GetTransferCost(..) => "/v2/fees/transfer", - HttpRequestType::GetContractABI(..) => { - "/v2/contracts/interface/:principal/:contract_name" - } - HttpRequestType::GetContractSrc(..) => "/v2/contracts/source/:principal/:contract_name", - HttpRequestType::CallReadOnlyFunction(..) => { - "/v2/contracts/call-read/:principal/:contract_name/:func_name" - } - HttpRequestType::GetAttachmentsInv(..) => "/v2/attachments/inv", - HttpRequestType::GetAttachment(..) => "/v2/attachments/:hash", - HttpRequestType::GetIsTraitImplemented(..) => "/v2/traits/:principal/:contract_name", - HttpRequestType::MemPoolQuery(..) => "/v2/mempool/query", - HttpRequestType::FeeRateEstimate(_, _, _) => "/v2/fees/transaction", - HttpRequestType::GetStackerDBMetadata(..) => "/v2/stackerdb/:principal/:contract_name", - HttpRequestType::GetStackerDBChunk(..) => { - "/v2/stackerdb/:principal/:contract_name/:slot_id(/:slot_version)?" - } - HttpRequestType::PostStackerDBChunk(..) => { - "/v2/stackerdb/:principal/:contract_name/chunks" - } - HttpRequestType::OptionsPreflight(..) | HttpRequestType::ClientError(..) => "/", - } - } - - pub fn send(&self, _protocol: &mut StacksHttp, fd: &mut W) -> Result<(), net_error> { - match self { - HttpRequestType::PostTransaction(md, tx, attachment) => { - let mut tx_bytes = vec![]; - write_next(&mut tx_bytes, tx)?; - let tx_hex = to_hex(&tx_bytes[..]); - - let (content_type, request_body_bytes) = match attachment { - None => { - // Transaction does not include an attachment: HttpContentType::Bytes (more compressed) - (Some(&HttpContentType::Bytes), tx_bytes) - } - Some(attachment) => { - // Transaction is including an attachment: HttpContentType::JSON - let request_body = PostTransactionRequestBody { - tx: tx_hex, - attachment: Some(to_hex(&attachment.content[..])), - }; - - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, &request_body).map_err( - |e| { - net_error::SerializeError(format!( - "Failed to serialize read-only call to JSON: {:?}", - &e - )) - }, - )?; - (Some(&HttpContentType::JSON), request_body_bytes) - } - }; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - content_type, - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::PostBlock(md, _ch, block) => { - let mut block_bytes = vec![]; - write_next(&mut block_bytes, block)?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(block_bytes.len() as u32), - Some(&HttpContentType::Bytes), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&block_bytes).map_err(net_error::WriteError)?; - } - HttpRequestType::PostMicroblock(md, mb, ..) 
=> { - let mut mb_bytes = vec![]; - write_next(&mut mb_bytes, mb)?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(mb_bytes.len() as u32), - Some(&HttpContentType::Bytes), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&mb_bytes).map_err(net_error::WriteError)?; - } - HttpRequestType::GetMapEntry( - md, - _contract_addr, - _contract_name, - _map_name, - key, - .., - ) => { - let mut request_bytes = vec![]; - key.serialize_write(&mut request_bytes) - .map_err(net_error::WriteError)?; - let request_json = format!("\"{}\"", to_hex(&request_bytes)); - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_json.as_bytes().len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_json.as_bytes()) - .map_err(net_error::WriteError)?; - } - HttpRequestType::CallReadOnlyFunction( - md, - _contract_addr, - _contract_name, - sender, - sponsor, - _func_name, - func_args, - .., - ) => { - let mut args = vec![]; - for arg in func_args.iter() { - let mut arg_bytes = vec![]; - arg.serialize_write(&mut arg_bytes) - .map_err(net_error::WriteError)?; - args.push(to_hex(&arg_bytes)); - } - - let request_body = CallReadOnlyRequestBody { - sender: sender.to_string(), - sponsor: sponsor.as_ref().map(|sp| sp.to_string()), - arguments: args, - }; - - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, &request_body).map_err(|e| { - net_error::SerializeError(format!( - "Failed to serialize read-only call to JSON: {:?}", - &e - )) - })?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::MemPoolQuery(md, query, ..) 
=> { - let request_body_bytes = query.serialize_to_vec(); - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::Bytes), - empty_headers, - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::PostStackerDBChunk(md, _, request) => { - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, request).map_err(|e| { - net_error::SerializeError(format!( - "Failed to serialize StackerDB POST chunk to JSON: {:?}", - &e - )) - })?; - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - other_type => { - let md = other_type.metadata(); - let request_path = other_type.request_path(); - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "GET", - &request_path, - &md.peer, - md.keep_alive, - None, - None, - |fd| stacks_height_headers(fd, md), - )?; - } - } - Ok(()) - } -} - -impl HttpResponseType { - fn try_parse( - protocol: &mut StacksHttp, - regex: &Regex, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - request_path: &str, - fd: &mut R, - len_hint: Option, - parser: F, - ) -> Result, net_error> - where - F: Fn( - &mut StacksHttp, - HttpVersion, - &HttpResponsePreamble, - &mut R, - Option, - ) -> Result, - { - if regex.is_match(request_path) { - let payload = parser(protocol, request_version, preamble, fd, len_hint)?; - Ok(Some(payload)) - } else { - Ok(None) - } - } - - fn parse_error( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - ) -> Result { - if preamble.status_code < 400 || preamble.status_code > 599 { - return Err(net_error::DeserializeError( - "Inavlid response: not an error".to_string(), - )); - } - - if preamble.content_type != HttpContentType::Text - && preamble.content_type != HttpContentType::JSON - { - return Err(net_error::DeserializeError(format!( - "Invalid error response: expected text/plain or application/json, got {:?}", - &preamble.content_type - ))); - } - - let mut error_text = String::new(); - fd.read_to_string(&mut error_text) - .map_err(net_error::ReadError)?; - - let md = HttpResponseMetadata::from_preamble(request_version, preamble); - let resp = match preamble.status_code { - 400 => HttpResponseType::BadRequest(md, error_text), - 401 => HttpResponseType::Unauthorized(md, error_text), - 402 => HttpResponseType::PaymentRequired(md, error_text), - 403 => HttpResponseType::Forbidden(md, error_text), - 404 => HttpResponseType::NotFound(md, error_text), - 500 => HttpResponseType::ServerError(md, error_text), - 503 => HttpResponseType::ServiceUnavailable(md, error_text), - _ => HttpResponseType::Error(md, preamble.status_code, error_text), - }; - Ok(resp) - } - - /// Parse a SIP-003 bytestream. 
The first 4 bytes are a big-endian length prefix - fn parse_bytestream( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result { - // content-type has to be Bytes - if preamble.content_type != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/octet-stream".to_string(), - )); - } - - let item: T = if preamble.is_chunked() && len_hint.is_none() { - let mut chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - read_next(&mut chunked_fd)? - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(content_length) = content_length_opt { - if (content_length as u64) > max_len { - return Err(net_error::DeserializeError( - "Invalid Content-Length header: too long".to_string(), - )); - } - - let mut bound_fd = BoundReader::from_reader(fd, content_length as u64); - read_next(&mut bound_fd)? - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - Ok(item) - } - - fn parse_json( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result { - // content-type has to be JSON - if preamble.content_type != HttpContentType::JSON { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".to_string(), - )); - } - - let item_result: Result = if preamble.is_chunked() - && len_hint.is_none() - { - let chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - serde_json::from_reader(chunked_fd) - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(content_length) = content_length_opt { - if (content_length as u64) > max_len { - return Err(net_error::DeserializeError( - "Invalid Content-Length header: too long".to_string(), - )); - } - let bound_fd = BoundReader::from_reader(fd, content_length as u64); - serde_json::from_reader(bound_fd) - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - item_result.map_err(|e| { - if e.is_eof() { - net_error::UnderflowError(format!("Not enough bytes to parse JSON")) - } else { - net_error::DeserializeError(format!("Failed to parse JSON: {:?}", &e)) - } - }) - } - - fn parse_raw_bytes( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - expected_content_type: HttpContentType, - ) -> Result, net_error> { - if preamble.content_type != expected_content_type { - return Err(net_error::DeserializeError(format!( - "Invalid content-type: expected {}", - expected_content_type - ))); - } - let buf = if preamble.is_chunked() && len_hint.is_none() { - let mut chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - let mut buf = vec![]; - chunked_fd - .read_to_end(&mut buf) - .map_err(net_error::ReadError)?; - buf - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(len) = content_length_opt { - let mut buf = 
vec![0u8; len as usize]; - fd.read_exact(&mut buf).map_err(net_error::ReadError)?; - buf - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - Ok(buf) - } - - fn parse_text( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result, net_error> { - Self::parse_raw_bytes(preamble, fd, len_hint, max_len, HttpContentType::Text) - } - - fn parse_bytes( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result, net_error> { - Self::parse_raw_bytes(preamble, fd, len_hint, max_len, HttpContentType::Bytes) - } - - // len_hint is given by the StacksHttp protocol implementation - pub fn parse( - protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - request_path: String, - fd: &mut R, - len_hint: Option, - ) -> Result { - if preamble.status_code >= 400 { - return HttpResponseType::parse_error(protocol, request_version, preamble, fd); - } - - // TODO: make this static somehow - let RESPONSE_METHODS: &[( - &Regex, - &dyn Fn( - &mut StacksHttp, - HttpVersion, - &HttpResponsePreamble, - &mut R, - Option, - ) -> Result, - )] = &[ - (&PATH_GETINFO, &HttpResponseType::parse_peerinfo), - (&PATH_GETPOXINFO, &HttpResponseType::parse_poxinfo), - (&PATH_GETNEIGHBORS, &HttpResponseType::parse_neighbors), - (&PATH_GETHEADERS, &HttpResponseType::parse_headers), - (&PATH_GETBLOCK, &HttpResponseType::parse_block), - (&PATH_GET_DATA_VAR, &HttpResponseType::parse_get_data_var), - ( - &PATH_GET_CONSTANT_VAL, - &HttpResponseType::parse_get_constant_val, - ), - (&PATH_GET_MAP_ENTRY, &HttpResponseType::parse_get_map_entry), - ( - &PATH_GETMICROBLOCKS_INDEXED, - &HttpResponseType::parse_microblocks, - ), - ( - &PATH_GETMICROBLOCKS_CONFIRMED, - &HttpResponseType::parse_microblocks, - ), - ( - &PATH_GETMICROBLOCKS_UNCONFIRMED, - &HttpResponseType::parse_microblocks_unconfirmed, - ), - ( - &PATH_GETTRANSACTION_UNCONFIRMED, - &HttpResponseType::parse_transaction_unconfirmed, - ), - (&PATH_POSTTRANSACTION, &HttpResponseType::parse_txid), - ( - &PATH_POSTBLOCK, - &HttpResponseType::parse_stacks_block_accepted, - ), - ( - &PATH_POSTMICROBLOCK, - &HttpResponseType::parse_microblock_hash, - ), - (&PATH_GET_ACCOUNT, &HttpResponseType::parse_get_account), - ( - &PATH_GET_CONTRACT_SRC, - &HttpResponseType::parse_get_contract_src, - ), - ( - &PATH_GET_IS_TRAIT_IMPLEMENTED, - &HttpResponseType::parse_get_is_trait_implemented, - ), - ( - &PATH_GET_CONTRACT_ABI, - &HttpResponseType::parse_get_contract_abi, - ), - ( - &PATH_POST_CALL_READ_ONLY, - &HttpResponseType::parse_call_read_only, - ), - ( - &PATH_GET_ATTACHMENT, - &HttpResponseType::parse_get_attachment, - ), - ( - &PATH_GET_ATTACHMENTS_INV, - &HttpResponseType::parse_get_attachments_inv, - ), - ( - &PATH_POST_MEMPOOL_QUERY, - &HttpResponseType::parse_post_mempool_query, - ), - ( - &PATH_GET_STACKERDB_METADATA, - &HttpResponseType::parse_get_stackerdb_metadata, - ), - ( - &PATH_GET_STACKERDB_CHUNK, - &HttpResponseType::parse_get_stackerdb_chunk, - ), - ( - &PATH_GET_STACKERDB_VERSIONED_CHUNK, - &HttpResponseType::parse_get_stackerdb_chunk, - ), - ( - &PATH_POST_STACKERDB_CHUNK, - &HttpResponseType::parse_stackerdb_chunk_response, - ), - ]; - - // use url::Url to parse path and query string - // Url will refuse to parse just a path, so create a dummy URL - let local_url = format!("http://local{}", 
&request_path); - let url = Url::parse(&local_url).map_err(|_e| { - net_error::DeserializeError("Http request path could not be parsed".to_string()) - })?; - - let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { - net_error::DeserializeError( - "Http response path could not be parsed as UTF-8".to_string(), - ) - })?; - - for (regex, parser) in RESPONSE_METHODS.iter() { - match HttpResponseType::try_parse( - protocol, - regex, - request_version, - preamble, - &decoded_path.to_string(), - fd, - len_hint, - parser, - ) { - Ok(Some(request)) => { - return Ok(request); - } - Ok(None) => { - continue; - } - Err(e) => { - test_debug!("Failed to parse {}: {:?}", &request_path, &e); - return Err(e); - } - } - } - - test_debug!( - "Failed to match request path '{}' to a handler", - &request_path - ); - return Err(net_error::DeserializeError( - "Http response could not be parsed".to_string(), - )); - } - - fn parse_peerinfo( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let peer_info = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::PeerInfo( - HttpResponseMetadata::from_preamble(request_version, preamble), - peer_info, - )) - } - - fn parse_poxinfo( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let pox_info = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::PoxInfo( - HttpResponseMetadata::from_preamble(request_version, preamble), - pox_info, - )) - } - - fn parse_neighbors( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let neighbors_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Neighbors( - HttpResponseMetadata::from_preamble(request_version, preamble), - neighbors_data, - )) - } - - fn parse_headers( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let headers: Vec = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Headers( - HttpResponseMetadata::from_preamble(request_version, preamble), - headers, - )) - } - - fn parse_block( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let block: StacksBlock = - HttpResponseType::parse_bytestream(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Block( - HttpResponseMetadata::from_preamble(request_version, preamble), - block, - )) - } - - fn parse_microblocks( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let microblocks: Vec = - HttpResponseType::parse_bytestream(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Microblocks( - HttpResponseMetadata::from_preamble(request_version, preamble), - microblocks, - )) - } - - fn parse_get_account( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let account_entry = - HttpResponseType::parse_json(preamble, fd, len_hint, 
MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetAccount( - HttpResponseMetadata::from_preamble(request_version, preamble), - account_entry, - )) - } - - fn parse_get_data_var( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let data_var = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetDataVar( - HttpResponseMetadata::from_preamble(request_version, preamble), - data_var, - )) - } - - fn parse_get_constant_val( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let constant_val = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetConstantVal( - HttpResponseMetadata::from_preamble(request_version, preamble), - constant_val, - )) - } - - fn parse_get_map_entry( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let map_entry = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetMapEntry( - HttpResponseMetadata::from_preamble(request_version, preamble), - map_entry, - )) - } - - fn parse_get_contract_src( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let src_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetContractSrc( - HttpResponseMetadata::from_preamble(request_version, preamble), - src_data, - )) - } - - fn parse_get_is_trait_implemented( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let src_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetIsTraitImplemented( - HttpResponseMetadata::from_preamble(request_version, preamble), - src_data, - )) - } - - fn parse_get_contract_abi( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let abi = HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetContractABI( - HttpResponseMetadata::from_preamble(request_version, preamble), - abi, - )) - } - - fn parse_call_read_only( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let call_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::CallReadOnlyFunction( - HttpResponseMetadata::from_preamble(request_version, preamble), - call_data, - )) - } - - fn parse_microblocks_unconfirmed( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - // NOTE: there will be no length prefix on this, but we won't ever get more than - // MAX_MICROBLOCKS_UNCONFIRMED microblocks - let mut microblocks = vec![]; - let max_len = len_hint.unwrap_or(MAX_MESSAGE_LEN as usize) as u64; - let mut bound_reader = BoundReader::from_reader(fd, max_len); - loop { - let mblock: StacksMicroblock = match read_next(&mut bound_reader) { - Ok(mblock) => 
Ok(mblock), - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - // end of stream -- this is fine - break; - } - _ => Err(e), - }, - _ => Err(e), - }, - }?; - - microblocks.push(mblock); - if microblocks.len() == MAX_MICROBLOCKS_UNCONFIRMED { - break; - } - } - Ok(HttpResponseType::Microblocks( - HttpResponseMetadata::from_preamble(request_version, preamble), - microblocks, - )) - } - - fn parse_transaction_unconfirmed( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let unconfirmed_status: UnconfirmedTransactionResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - // tx payload must decode to a transaction - let tx_bytes = hex_bytes(&unconfirmed_status.tx).map_err(|_| { - net_error::DeserializeError("Unconfirmed transaction is not hex-encoded".to_string()) - })?; - let _ = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|_| { - net_error::DeserializeError( - "Unconfirmed transaction is not a well-formed Stacks transaction".to_string(), - ) - })?; - - Ok(HttpResponseType::UnconfirmedTransaction( - HttpResponseMetadata::from_preamble(request_version, preamble), - unconfirmed_status, - )) - } - - fn parse_txid( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let txid_hex: String = HttpResponseType::parse_json(preamble, fd, len_hint, 66)?; - if txid_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid txid: expected 64 bytes".to_string(), - )); - } - - let txid = Txid::from_hex(&txid_hex) - .map_err(|_e| net_error::DeserializeError("Failed to decode txid hex".to_string()))?; - Ok(HttpResponseType::TransactionID( - HttpResponseMetadata::from_preamble(request_version, preamble), - txid, - )) - } - - fn parse_get_attachment( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let res: GetAttachmentResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - Ok(HttpResponseType::GetAttachment( - HttpResponseMetadata::from_preamble(request_version, preamble), - res, - )) - } - - fn parse_get_attachments_inv( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let res: GetAttachmentsInvResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - Ok(HttpResponseType::GetAttachmentsInv( - HttpResponseMetadata::from_preamble(request_version, preamble), - res, - )) - } - - fn parse_stacks_block_accepted( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let stacks_block_accepted: StacksBlockAcceptedData = - HttpResponseType::parse_json(preamble, fd, len_hint, 128)?; - Ok(HttpResponseType::StacksBlockAccepted( - HttpResponseMetadata::from_preamble(request_version, preamble), - stacks_block_accepted.stacks_block_id, - stacks_block_accepted.accepted, - )) - } - - fn parse_microblock_hash( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let mblock_hex: String = HttpResponseType::parse_json(preamble, fd, 
len_hint, 66)?; - if mblock_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid microblock hash: expected 64 bytes".to_string(), - )); - } - - let mblock_hash = BlockHeaderHash::from_hex(&mblock_hex).map_err(|_e| { - net_error::DeserializeError("Failed to decode microblock hash hex".to_string()) - })?; - Ok(HttpResponseType::MicroblockHash( - HttpResponseMetadata::from_preamble(request_version, preamble), - mblock_hash, - )) - } - - /// Read the trailing page ID from a transaction stream - fn parse_mempool_query_page_id( - pos: usize, - retry_reader: &mut RetryReader<'_, R>, - ) -> Result, net_error> { - // possibly end-of-transactions, in which case, the last 32 bytes should be - // a page ID. Expect end-of-stream after this. - retry_reader.set_position(pos); - let next_page: Txid = match read_next(retry_reader) { - Ok(txid) => txid, - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - if pos == retry_reader.position() { - // this is fine -- the node didn't get another page - return Ok(None); - } else { - // partial data -- corrupt stream - test_debug!("Unexpected EOF: {} != {}", pos, retry_reader.position()); - return Err(e.into()); - } - } - _ => { - return Err(e.into()); - } - }, - e => { - return Err(e.into()); - } - }, - }; - - test_debug!("Read page_id {:?}", &next_page); - Ok(Some(next_page)) - } - - /// Decode a transaction stream, returned from /v2/mempool/query. - /// The wire format is a list of transactions (no SIP-003 length prefix), followed by an - /// optional 32-byte page ID. Obtain both the transactions and page ID, if it exists. - pub fn decode_tx_stream( - fd: &mut R, - len_hint: Option, - ) -> Result<(Vec, Option), net_error> { - // The wire format is `tx, tx, tx, tx, .., tx, txid`. - // The last 32 bytes are the page ID for the next mempool query. - // NOTE: there will be no length prefix on this. - let mut txs: Vec = vec![]; - let max_len = len_hint.unwrap_or(MAX_MESSAGE_LEN as usize) as u64; - let mut bound_reader = BoundReader::from_reader(fd, max_len); - let mut retry_reader = RetryReader::new(&mut bound_reader); - let mut page_id = None; - let mut expect_eof = false; - - loop { - let pos = retry_reader.position(); - let next_msg: Result = read_next(&mut retry_reader); - match next_msg { - Ok(tx) => { - if expect_eof { - // this should have failed - test_debug!("Expected EOF; got transaction {}", tx.txid()); - return Err(net_error::ExpectedEndOfStream); - } - - test_debug!("Read transaction {}", tx.txid()); - txs.push(tx); - Ok(()) - } - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - if expect_eof { - if pos != retry_reader.position() { - // read partial data. The stream is corrupt. - test_debug!( - "Expected EOF; stream advanced from {} to {}", - pos, - retry_reader.position() - ); - return Err(net_error::ExpectedEndOfStream); - } - } else { - // couldn't read a full transaction. This is possibly a page ID, whose - // 32 bytes decode to the prefix of a well-formed transaction. - test_debug!("Try to read page ID trailer after ReadError"); - page_id = HttpResponseType::parse_mempool_query_page_id( - pos, - &mut retry_reader, - )?; - } - break; - } - _ => Err(e), - }, - codec_error::DeserializeError(_msg) => { - if expect_eof { - // this should have failed due to EOF - test_debug!("Expected EOF; got DeserializeError '{}'", &_msg); - return Err(net_error::ExpectedEndOfStream); - } - - // failed to parse a transaction. 
This is possibly a page ID. - test_debug!("Try to read page ID trailer after ReadError"); - page_id = - HttpResponseType::parse_mempool_query_page_id(pos, &mut retry_reader)?; - - // do one more pass to make sure we're actually end-of-stream. - // otherwise, the stream itself was corrupt, since any 32 bytes is a valid - // txid and the presence of more bytes means that we simply got a bad tx - // that we couldn't decode. - expect_eof = true; - Ok(()) - } - _ => Err(e), - }, - }?; - } - - Ok((txs, page_id)) - } - - fn parse_post_mempool_query( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let (txs, page_id) = HttpResponseType::decode_tx_stream(fd, len_hint)?; - Ok(HttpResponseType::MemPoolTxs( - HttpResponseMetadata::from_preamble(request_version, preamble), - page_id, - txs, - )) - } - - fn parse_get_stackerdb_metadata( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let slot_metadata = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::StackerDBMetadata( - HttpResponseMetadata::from_preamble(request_version, preamble), - slot_metadata, - )) - } - - fn parse_get_stackerdb_chunk( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let chunk = - HttpResponseType::parse_bytes(preamble, fd, len_hint, STACKERDB_MAX_CHUNK_SIZE as u64)?; - Ok(HttpResponseType::StackerDBChunk( - HttpResponseMetadata::from_preamble(request_version, preamble), - chunk, - )) - } - - fn parse_stackerdb_chunk_response( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let slot_ack = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::StackerDBChunkAck( - HttpResponseMetadata::from_preamble(request_version, preamble), - slot_ack, - )) - } - - fn error_reason(code: u16) -> &'static str { - match code { - 400 => "Bad Request", - 401 => "Unauthorized", - 402 => "Payment Required", - 403 => "Forbidden", - 404 => "Not Found", - 500 => "Internal Server Error", - 503 => "Service Temporarily Unavailable", - _ => "Error", - } - } - - fn error_response( - &self, - fd: &mut W, - code: u16, - message: &str, - ) -> Result<(), net_error> { - let md = self.metadata(); - HttpResponsePreamble::new_serialized( - fd, - code, - HttpResponseType::error_reason(code), - Some(message.len() as u32), - &HttpContentType::Text, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - fd.write_all(message.as_bytes()) - .map_err(net_error::WriteError)?; - Ok(()) - } - - pub fn metadata(&self) -> &HttpResponseMetadata { - match *self { - HttpResponseType::PeerInfo(ref md, _) => md, - HttpResponseType::PoxInfo(ref md, _) => md, - HttpResponseType::Neighbors(ref md, _) => md, - HttpResponseType::HeaderStream(ref md) => md, - HttpResponseType::Headers(ref md, _) => md, - HttpResponseType::Block(ref md, _) => md, - HttpResponseType::BlockStream(ref md) => md, - HttpResponseType::Microblocks(ref md, _) => md, - HttpResponseType::MicroblockStream(ref md) => md, - HttpResponseType::TransactionID(ref md, _) => md, - HttpResponseType::StacksBlockAccepted(ref md, ..) 
=> md, - HttpResponseType::MicroblockHash(ref md, _) => md, - HttpResponseType::TokenTransferCost(ref md, _) => md, - HttpResponseType::GetDataVar(ref md, _) => md, - HttpResponseType::GetConstantVal(ref md, _) => md, - HttpResponseType::GetMapEntry(ref md, _) => md, - HttpResponseType::GetAccount(ref md, _) => md, - HttpResponseType::GetContractABI(ref md, _) => md, - HttpResponseType::GetContractSrc(ref md, _) => md, - HttpResponseType::GetIsTraitImplemented(ref md, _) => md, - HttpResponseType::CallReadOnlyFunction(ref md, _) => md, - HttpResponseType::UnconfirmedTransaction(ref md, _) => md, - HttpResponseType::GetAttachment(ref md, _) => md, - HttpResponseType::GetAttachmentsInv(ref md, _) => md, - HttpResponseType::MemPoolTxStream(ref md) => md, - HttpResponseType::MemPoolTxs(ref md, ..) => md, - HttpResponseType::OptionsPreflight(ref md) => md, - HttpResponseType::TransactionFeeEstimation(ref md, _) => md, - HttpResponseType::StackerDBMetadata(ref md, ..) => md, - HttpResponseType::StackerDBChunk(ref md, ..) => md, - HttpResponseType::StackerDBChunkAck(ref md, ..) => md, - // errors - HttpResponseType::BadRequestJSON(ref md, _) => md, - HttpResponseType::BadRequest(ref md, _) => md, - HttpResponseType::Unauthorized(ref md, _) => md, - HttpResponseType::PaymentRequired(ref md, _) => md, - HttpResponseType::Forbidden(ref md, _) => md, - HttpResponseType::NotFound(ref md, _) => md, - HttpResponseType::ServerError(ref md, _) => md, - HttpResponseType::ServiceUnavailable(ref md, _) => md, - HttpResponseType::Error(ref md, _, _) => md, - } - } - - fn send_bytestream( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - message: &T, - ) -> Result<(), codec_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - write_next(fd, message) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - write_next(&mut encoder, message)?; - encoder.flush().map_err(codec_error::WriteError)?; - Ok(()) - } - } - - fn send_text( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - text: &[u8], - ) -> Result<(), net_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - fd.write_all(text).map_err(net_error::WriteError) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - encoder.write_all(text).map_err(net_error::WriteError)?; - encoder.flush().map_err(net_error::WriteError)?; - Ok(()) - } - } - - fn send_json( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - message: &T, - ) -> Result<(), net_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - serde_json::to_writer(fd, message) - .map_err(|e| net_error::SerializeError(format!("Failed to send as JSON: {:?}", &e))) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - serde_json::to_writer(&mut encoder, message).map_err(|e| { - net_error::SerializeError(format!("Failed to send as chunk-encoded JSON: 
{:?}", &e)) - })?; - encoder.flush().map_err(net_error::WriteError)?; - Ok(()) - } - } - - pub fn send(&self, protocol: &mut StacksHttp, fd: &mut W) -> Result<(), net_error> { - match *self { - HttpResponseType::GetAccount(ref md, ref account_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, account_data)?; - } - HttpResponseType::TransactionFeeEstimation(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetContractABI(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetContractSrc(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetIsTraitImplemented(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::TokenTransferCost(ref md, ref cost) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, cost)?; - } - HttpResponseType::CallReadOnlyFunction(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetDataVar(ref md, ref var_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, var_data)?; - } - HttpResponseType::GetConstantVal(ref md, ref constant_val) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, constant_val)?; - } - HttpResponseType::GetMapEntry(ref md, ref map_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, map_data)?; - } - HttpResponseType::PeerInfo(ref md, ref peer_info) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, peer_info)?; - } - HttpResponseType::PoxInfo(ref md, ref pox_info) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, pox_info)?; - } - HttpResponseType::Neighbors(ref md, ref neighbor_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, neighbor_data)?; - } - HttpResponseType::GetAttachment(ref md, ref zonefile_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, zonefile_data)?; - } - HttpResponseType::GetAttachmentsInv(ref md, ref zonefile_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, zonefile_data)?; - } - HttpResponseType::Headers(ref md, ref headers) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, headers)?; - } - HttpResponseType::HeaderStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the headers data itself. 
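
// A minimal sketch (std-only) of the "explicit Content-Length vs. chunked"
// split that `send_bytestream`, `send_text`, and `send_json` above implement.
// `write_chunked` and `send_body` are illustrative helpers standing in for
// `HttpChunkedTransferWriter`; they are not the types this code actually uses.
use std::io::{self, Write};

// Emit `body` with HTTP/1.1 chunked transfer coding: each chunk is a hex
// length, CRLF, the chunk bytes, CRLF; the stream ends with a zero-length chunk.
fn write_chunked<W: Write>(fd: &mut W, body: &[u8], chunk_size: usize) -> io::Result<()> {
    for chunk in body.chunks(chunk_size.max(1)) {
        write!(fd, "{:x}\r\n", chunk.len())?;
        fd.write_all(chunk)?;
        fd.write_all(b"\r\n")?;
    }
    fd.write_all(b"0\r\n\r\n")
}

fn send_body<W: Write>(fd: &mut W, body: &[u8], content_length: Option<usize>) -> io::Result<()> {
    match content_length {
        // Content-Length was advertised in the preamble: write the bytes as-is.
        Some(_) => fd.write_all(body),
        // No Content-Length: fall back to chunk-encoding the body.
        None => write_chunked(fd, body, 8192),
    }
}

fn main() -> io::Result<()> {
    let mut out = Vec::new();
    send_body(&mut out, b"hello world", None)?;
    assert_eq!(&out[..], &b"b\r\nhello world\r\n0\r\n\r\n"[..]);
    Ok(())
}
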
- HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::Block(ref md, ref block) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_bytestream(protocol, md, fd, block)?; - } - HttpResponseType::BlockStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the block data itself. - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::Microblocks(ref md, ref microblocks) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_bytestream(protocol, md, fd, microblocks)?; - } - HttpResponseType::MicroblockStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the microblock data itself. - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::TransactionID(ref md, ref txid) => { - let txid_bytes = txid.to_hex(); - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &txid_bytes)?; - } - HttpResponseType::StacksBlockAccepted(ref md, ref stacks_block_id, ref accepted) => { - let accepted_data = StacksBlockAcceptedData { - stacks_block_id: stacks_block_id.clone(), - accepted: *accepted, - }; - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &accepted_data)?; - } - HttpResponseType::MicroblockHash(ref md, ref mblock_hash) => { - let mblock_bytes = mblock_hash.to_hex(); - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &mblock_bytes)?; - } - HttpResponseType::UnconfirmedTransaction(ref md, ref unconfirmed_status) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, unconfirmed_status)?; - } - HttpResponseType::MemPoolTxStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the tx data itself. 
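
// A minimal sketch (std-only) of the SIP-003-style length-prefixed framing
// referenced above (`parse_bytestream` notes a 4-byte big-endian length
// prefix), which is also how the mempool arm below writes the transaction list
// before the trailing page-ID with `write_next(fd, txs)` / `write_next(fd, txid)`.
// `write_u32_vec`/`read_u32_vec` are illustrative helpers over plain u32s, not
// the real codec.
use std::io::{self, Read, Write};

fn write_u32_vec<W: Write>(fd: &mut W, items: &[u32]) -> io::Result<()> {
    // length prefix: number of elements, big-endian u32
    fd.write_all(&(items.len() as u32).to_be_bytes())?;
    for item in items {
        fd.write_all(&item.to_be_bytes())?;
    }
    Ok(())
}

fn read_u32_vec<R: Read>(fd: &mut R) -> io::Result<Vec<u32>> {
    let mut len_buf = [0u8; 4];
    fd.read_exact(&mut len_buf)?;
    let len = u32::from_be_bytes(len_buf) as usize;
    let mut items = Vec::with_capacity(len.min(1024));
    for _ in 0..len {
        let mut item_buf = [0u8; 4];
        fd.read_exact(&mut item_buf)?;
        items.push(u32::from_be_bytes(item_buf));
    }
    Ok(items)
}

fn main() -> io::Result<()> {
    let mut wire = Vec::new();
    write_u32_vec(&mut wire, &[1, 2, 3])?;
    // 4-byte count (0x00000003) + three 4-byte items = 16 bytes
    assert_eq!(wire.len(), 16);
    assert_eq!(read_u32_vec(&mut &wire[..])?, vec![1, 2, 3]);
    Ok(())
}
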
- HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::MemPoolTxs(ref md, ref page_id, ref txs) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - match page_id { - Some(txid) => { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - write_next(fd, txs)?; - write_next(fd, txid)?; - Ok(()) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = - HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = - HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - write_next(&mut encoder, txs)?; - write_next(&mut encoder, txid)?; - encoder.flush().map_err(codec_error::WriteError)?; - Ok(()) - } - } - None => HttpResponseType::send_bytestream(protocol, md, fd, txs), - }?; - } - HttpResponseType::StackerDBMetadata(ref md, ref slot_metadata) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, slot_metadata)?; - } - HttpResponseType::StackerDBChunk(ref md, ref chunk, ..) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_text(protocol, md, fd, chunk)?; - } - HttpResponseType::StackerDBChunkAck(ref md, ref ack_data) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, ack_data)?; - } - HttpResponseType::OptionsPreflight(ref md) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Text, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_text(protocol, md, fd, "".as_bytes())?; - } - HttpResponseType::BadRequestJSON(ref md, ref data) => { - HttpResponsePreamble::new_serialized( - fd, - 400, - HttpResponseType::error_reason(400), - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::BadRequest(_, ref msg) => self.error_response(fd, 400, msg)?, - HttpResponseType::Unauthorized(_, ref msg) => self.error_response(fd, 401, msg)?, - HttpResponseType::PaymentRequired(_, ref msg) => self.error_response(fd, 402, msg)?, - HttpResponseType::Forbidden(_, ref msg) => self.error_response(fd, 403, msg)?, - HttpResponseType::NotFound(_, ref msg) => self.error_response(fd, 404, msg)?, - HttpResponseType::ServerError(_, ref msg) => self.error_response(fd, 500, msg)?, - HttpResponseType::ServiceUnavailable(_, ref msg) => { - self.error_response(fd, 503, msg)? - } - HttpResponseType::Error(_, ref error_code, ref msg) => { - self.error_response(fd, *error_code, msg)? 
- } - }; - Ok(()) - } -} - -impl StacksMessageCodec for StacksHttpPreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - match *self { - StacksHttpPreamble::Request(ref req) => req.consensus_serialize(fd), - StacksHttpPreamble::Response(ref res) => res.consensus_serialize(fd), - } - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let mut retry_fd = RetryReader::new(fd); - - // the byte stream can decode to a http request or a http response, but not both. - match HttpRequestPreamble::consensus_deserialize(&mut retry_fd) { - Ok(request) => Ok(StacksHttpPreamble::Request(request)), - Err(e_request) => { - // maybe a http response? - retry_fd.set_position(0); - match HttpResponsePreamble::consensus_deserialize(&mut retry_fd) { - Ok(response) => Ok(StacksHttpPreamble::Response(response)), - Err(e) => { - // underflow? - match (e_request, e) { - (codec_error::ReadError(ref ioe1), codec_error::ReadError(ref ioe2)) => { - if ioe1.kind() == io::ErrorKind::UnexpectedEof && ioe2.kind() == io::ErrorKind::UnexpectedEof { - // out of bytes - Err(codec_error::UnderflowError("Not enough bytes to form a HTTP request or response".to_string())) - } - else { - Err(codec_error::DeserializeError(format!("Neither a HTTP request ({:?}) or HTTP response ({:?})", ioe1, ioe2))) - } - }, - (e_req, e_res) => Err(codec_error::DeserializeError(format!("Failed to decode HTTP request or HTTP response (request error: {:?}; response error: {:?})", &e_req, &e_res))) - } - } - } - } - } - } -} - -impl MessageSequence for StacksHttpMessage { - fn request_id(&self) -> u32 { - // there is at most one in-flight HTTP request, as far as a Connection
is concerned - HTTP_REQUEST_ID_RESERVED - } - - fn get_message_name(&self) -> &'static str { - match *self { - StacksHttpMessage::Request(ref req) => match req { - HttpRequestType::GetInfo(_) => "HTTP(GetInfo)", - HttpRequestType::GetPoxInfo(_, _) => "HTTP(GetPoxInfo)", - HttpRequestType::GetNeighbors(_) => "HTTP(GetNeighbors)", - HttpRequestType::GetHeaders(..) => "HTTP(GetHeaders)", - HttpRequestType::GetBlock(_, _) => "HTTP(GetBlock)", - HttpRequestType::GetMicroblocksIndexed(_, _) => "HTTP(GetMicroblocksIndexed)", - HttpRequestType::GetMicroblocksConfirmed(_, _) => "HTTP(GetMicroblocksConfirmed)", - HttpRequestType::GetMicroblocksUnconfirmed(_, _, _) => { - "HTTP(GetMicroblocksUnconfirmed)" - } - HttpRequestType::GetTransactionUnconfirmed(_, _) => { - "HTTP(GetTransactionUnconfirmed)" - } - HttpRequestType::PostTransaction(_, _, _) => "HTTP(PostTransaction)", - HttpRequestType::PostBlock(..) => "HTTP(PostBlock)", - HttpRequestType::PostMicroblock(..) => "HTTP(PostMicroblock)", - HttpRequestType::GetAccount(..) => "HTTP(GetAccount)", - HttpRequestType::GetDataVar(..) => "HTTP(GetDataVar)", - HttpRequestType::GetConstantVal(..) => "HTTP(GetConstantVal)", - HttpRequestType::GetMapEntry(..) => "HTTP(GetMapEntry)", - HttpRequestType::GetTransferCost(_) => "HTTP(GetTransferCost)", - HttpRequestType::GetContractABI(..) => "HTTP(GetContractABI)", - HttpRequestType::GetContractSrc(..) => "HTTP(GetContractSrc)", - HttpRequestType::GetIsTraitImplemented(..) => "HTTP(GetIsTraitImplemented)", - HttpRequestType::CallReadOnlyFunction(..) => "HTTP(CallReadOnlyFunction)", - HttpRequestType::GetAttachment(..) => "HTTP(GetAttachment)", - HttpRequestType::GetAttachmentsInv(..) => "HTTP(GetAttachmentsInv)", - HttpRequestType::MemPoolQuery(..) => "HTTP(MemPoolQuery)", - HttpRequestType::OptionsPreflight(..) => "HTTP(OptionsPreflight)", - HttpRequestType::GetStackerDBMetadata(..) => "HTTP(GetStackerDBMetadata)", - HttpRequestType::GetStackerDBChunk(..) => "HTTP(GetStackerDBChunk)", - HttpRequestType::PostStackerDBChunk(..) => "HTTP(PostStackerDBChunk)", - HttpRequestType::ClientError(..) => "HTTP(ClientError)", - HttpRequestType::FeeRateEstimate(_, _, _) => "HTTP(FeeRateEstimate)", - }, - StacksHttpMessage::Response(ref res) => match res { - HttpResponseType::TokenTransferCost(_, _) => "HTTP(TokenTransferCost)", - HttpResponseType::GetDataVar(_, _) => "HTTP(GetDataVar)", - HttpResponseType::GetConstantVal(..) => "HTTP(GetConstantVal)", - HttpResponseType::GetMapEntry(_, _) => "HTTP(GetMapEntry)", - HttpResponseType::GetAccount(_, _) => "HTTP(GetAccount)", - HttpResponseType::GetContractABI(..) => "HTTP(GetContractABI)", - HttpResponseType::GetContractSrc(..) => "HTTP(GetContractSrc)", - HttpResponseType::GetIsTraitImplemented(..) => "HTTP(GetIsTraitImplemented)", - HttpResponseType::CallReadOnlyFunction(..) => "HTTP(CallReadOnlyFunction)", - HttpResponseType::GetAttachment(_, _) => "HTTP(GetAttachment)", - HttpResponseType::GetAttachmentsInv(_, _) => "HTTP(GetAttachmentsInv)", - HttpResponseType::PeerInfo(_, _) => "HTTP(PeerInfo)", - HttpResponseType::PoxInfo(_, _) => "HTTP(PeerInfo)", - HttpResponseType::Neighbors(_, _) => "HTTP(Neighbors)", - HttpResponseType::Headers(..) => "HTTP(Headers)", - HttpResponseType::HeaderStream(..) 
=> "HTTP(HeaderStream)", - HttpResponseType::Block(_, _) => "HTTP(Block)", - HttpResponseType::BlockStream(_) => "HTTP(BlockStream)", - HttpResponseType::Microblocks(_, _) => "HTTP(Microblocks)", - HttpResponseType::MicroblockStream(_) => "HTTP(MicroblockStream)", - HttpResponseType::TransactionID(_, _) => "HTTP(Transaction)", - HttpResponseType::StacksBlockAccepted(..) => "HTTP(StacksBlockAccepted)", - HttpResponseType::MicroblockHash(_, _) => "HTTP(MicroblockHash)", - HttpResponseType::UnconfirmedTransaction(_, _) => "HTTP(UnconfirmedTransaction)", - HttpResponseType::MemPoolTxStream(..) => "HTTP(MemPoolTxStream)", - HttpResponseType::MemPoolTxs(..) => "HTTP(MemPoolTxs)", - HttpResponseType::StackerDBMetadata(..) => "HTTP(StackerDBMetadata)", - HttpResponseType::StackerDBChunk(..) => "HTTP(StackerDBChunk)", - HttpResponseType::StackerDBChunkAck(..) => "HTTP(StackerDBChunkAck)", - HttpResponseType::OptionsPreflight(_) => "HTTP(OptionsPreflight)", - HttpResponseType::BadRequestJSON(..) | HttpResponseType::BadRequest(..) => { - "HTTP(400)" - } - HttpResponseType::Unauthorized(_, _) => "HTTP(401)", - HttpResponseType::PaymentRequired(_, _) => "HTTP(402)", - HttpResponseType::Forbidden(_, _) => "HTTP(403)", - HttpResponseType::NotFound(_, _) => "HTTP(404)", - HttpResponseType::ServerError(_, _) => "HTTP(500)", - HttpResponseType::ServiceUnavailable(_, _) => "HTTP(503)", - HttpResponseType::Error(_, _, _) => "HTTP(other)", - HttpResponseType::TransactionFeeEstimation(_, _) => { - "HTTP(TransactionFeeEstimation)" - } - }, - } - } -} - -/// A partially-decoded, streamed HTTP message (response) being received. -/// Internally used by StacksHttp to keep track of chunk-decoding state. -#[derive(Debug, Clone, PartialEq)] -struct HttpRecvStream { - state: HttpChunkedTransferReaderState, - data: Vec, - total_consumed: usize, // number of *encoded* bytes consumed -} - -impl HttpRecvStream { - pub fn new(max_size: u64) -> HttpRecvStream { - HttpRecvStream { - state: HttpChunkedTransferReaderState::new(max_size), - data: vec![], - total_consumed: 0, - } - } - - /// Feed data into our chunked transfer reader state. If we finish reading a stream, return - /// the decoded bytes (as Some(Vec) and the total number of encoded bytes consumed). - /// Always returns the number of bytes consumed. - pub fn consume_data( - &mut self, - fd: &mut R, - ) -> Result<(Option<(Vec, usize)>, usize), net_error> { - let mut consumed = 0; - let mut blocked = false; - while !blocked { - let mut decoded_buf = vec![0u8; 8192]; - let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { - Ok((0, num_consumed)) => { - trace!( - "consume_data blocked on 0 decoded bytes ({} consumed)", - num_consumed - ); - blocked = true; - (0, num_consumed) - } - Ok((num_read, num_consumed)) => (num_read, num_consumed), - Err(e) => match e.kind() { - io::ErrorKind::WouldBlock | io::ErrorKind::TimedOut => { - trace!("consume_data blocked on read error"); - blocked = true; - (0, 0) - } - _ => { - return Err(net_error::ReadError(e)); - } - }, - }; - - consumed += consumed_pass; - if read_pass > 0 { - self.data.extend_from_slice(&decoded_buf[0..read_pass]); - } - } - - self.total_consumed += consumed; - - // did we get a message? 
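
// A minimal sketch (std-only) of the accumulate-then-take pattern used by
// HttpRecvStream::consume_data above: buffer decoded bytes across partial
// (possibly non-blocking) reads, and once the decoder reports end-of-stream,
// hand out the whole buffer and reset for the next message. `SimpleRecv` is an
// illustrative stand-in, not the real chunk-decoding state machine, and it
// counts decoded bytes fed to it (the real code tracks *encoded* bytes consumed).
use std::mem;

struct SimpleRecv {
    data: Vec<u8>,
    total_consumed: usize,
}

impl SimpleRecv {
    fn new() -> SimpleRecv {
        SimpleRecv { data: Vec::new(), total_consumed: 0 }
    }

    // Feed one batch of already-decoded bytes; `eof` means the decoder saw the
    // terminating chunk. Returns Some((message, total bytes fed)) only once a
    // complete message is available.
    fn consume(&mut self, decoded: &[u8], eof: bool) -> Option<(Vec<u8>, usize)> {
        self.data.extend_from_slice(decoded);
        self.total_consumed += decoded.len();
        if eof {
            let message = mem::replace(&mut self.data, Vec::new());
            let consumed = self.total_consumed;
            self.total_consumed = 0;
            Some((message, consumed))
        } else {
            None
        }
    }
}

fn main() {
    let mut recv = SimpleRecv::new();
    assert!(recv.consume(b"par", false).is_none()); // blocked mid-message
    let (msg, consumed) = recv.consume(b"tial", true).unwrap();
    assert_eq!(&msg[..], b"partial");
    assert_eq!(consumed, 7);
    // state was reset, ready for the next in-flight reply
    assert!(recv.consume(b"", true).is_some());
}
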
- if self.state.is_eof() { - // reset - let message_data = mem::replace(&mut self.data, vec![]); - let total_consumed = self.total_consumed; - - self.state = HttpChunkedTransferReaderState::new(self.state.max_size); - self.total_consumed = 0; - - Ok((Some((message_data, total_consumed)), consumed)) - } else { - Ok((None, consumed)) - } - } -} - -/// Information about an in-flight request -#[derive(Debug, Clone, PartialEq)] -struct HttpReplyData { - request_id: u32, - stream: HttpRecvStream, -} - -/// Stacks HTTP implementation, for bufferring up data. -/// One of these exists per Connection. -/// There can be at most one HTTP request in-flight (i.e. we don't do pipelining) -#[derive(Debug, Clone, PartialEq)] -pub struct StacksHttp { - /// Address of peer - peer_addr: SocketAddr, - /// Version of client - request_version: Option, - /// Path we requested - request_path: Option, - /// Incoming reply - reply: Option, - /// Size of HTTP chunks to write - chunk_size: usize, - /// Maximum size of call arguments - pub maximum_call_argument_size: u32, -} - -impl StacksHttp { - pub fn new(peer_addr: SocketAddr) -> StacksHttp { - StacksHttp { - peer_addr, - reply: None, - request_version: None, - request_path: None, - chunk_size: 8192, - maximum_call_argument_size: 20 * BOUND_VALUE_SERIALIZATION_HEX, - } - } - - pub fn set_chunk_size(&mut self, size: usize) -> () { - self.chunk_size = size; - } - - pub fn num_pending(&self) -> usize { - if self.reply.is_some() { - 1 - } else { - 0 - } - } - - pub fn has_pending_reply(&self) -> bool { - self.reply.is_some() - } - - pub fn set_pending(&mut self, preamble: &HttpResponsePreamble) -> bool { - if self.reply.is_some() { - // already pending - return false; - } - self.reply = Some(HttpReplyData { - request_id: preamble.request_id, - stream: HttpRecvStream::new(MAX_MESSAGE_LEN as u64), - }); - true - } - - pub fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), net_error> { - // if we already have a pending message, then this preamble cannot be processed (indicates an un-compliant client) - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // request path must have been set - if self.request_path.is_none() { - return Err(net_error::DeserializeError( - "Possible bug: did not set the request path".to_string(), - )); - } - - if http_response_preamble.is_chunked() { - // will stream this. Make sure we're not doing so already (no collisions - // allowed on in-flight request IDs!) - if self.has_pending_reply() { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - - // mark as pending -- we can stream this - if !self.set_pending(http_response_preamble) { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - } - } - _ => {} - } - Ok(()) - } - - pub fn begin_request(&mut self, client_version: HttpVersion, request_path: String) -> () { - self.request_version = Some(client_version); - self.request_path = Some(request_path); - } - - pub fn reset(&mut self) -> () { - self.request_version = None; - self.request_path = None; - self.reply = None; - } - - /// Used for processing chunk-encoded streams. - /// Given the preamble and a Read, stream the bytes into a chunk-decoder. Return the decoded - /// bytes if we decode an entire stream. Always return the number of bytes consumed. 
- /// Returns Ok((Some(request path, decoded bytes we got, total number of encoded bytes), number of bytes gotten in this call)) - pub fn consume_data( - &mut self, - preamble: &HttpResponsePreamble, - fd: &mut R, - ) -> Result<(Option<(HttpVersion, String, Vec, usize)>, usize), net_error> { - assert!(preamble.is_chunked()); - assert!(self.reply.is_some()); - assert!(self.request_path.is_some()); - assert!(self.request_version.is_some()); - - let mut finished = false; - let res = match self.reply { - Some(ref mut reply) => { - assert_eq!(reply.request_id, preamble.request_id); - match reply.stream.consume_data(fd) { - Ok(res) => { - match res { - (None, sz) => Ok((None, sz)), - (Some((byte_vec, bytes_total)), sz) => { - // done receiving - finished = true; - Ok(( - Some(( - self.request_version.clone().unwrap(), - self.request_path.clone().unwrap(), - byte_vec, - bytes_total, - )), - sz, - )) - } - } - } - Err(e) => { - // broken stream - finished = true; - Err(e) - } - } - } - None => { - unreachable!(); - } - }; - - if finished { - // if we fetch the whole message, or encounter an error, then we're done -- we can free - // up this stream. - self.reset(); - } - res - } - - /// Given a HTTP request, serialize it out - #[cfg(test)] - pub fn serialize_request(req: &HttpRequestType) -> Result, net_error> { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let mut ret = vec![]; - req.send(&mut http, &mut ret)?; - Ok(ret) - } - - /// Given a fully-formed single HTTP response, parse it (used by clients). - #[cfg(test)] - pub fn parse_response( - request_path: &str, - response_buf: &[u8], - ) -> Result { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.reset(); - http.begin_request(HttpVersion::Http11, request_path.to_string()); - - let (preamble, message_offset) = http.read_preamble(response_buf)?; - let is_chunked = match preamble { - StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), - _ => { - return Err(net_error::DeserializeError( - "Invalid HTTP message: did not get a Response preamble".to_string(), - )); - } - }; - - let mut message_bytes = &response_buf[message_offset..]; - - if is_chunked { - match http.stream_payload(&preamble, &mut message_bytes) { - Ok((Some((message, _)), _)) => Ok(message), - Ok((None, _)) => Err(net_error::UnderflowError( - "Not enough bytes to form a streamed HTTP response".to_string(), - )), - Err(e) => Err(e), - } - } else { - let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; - Ok(message) - } - } -} - -impl ProtocolFamily for StacksHttp { - type Preamble = StacksHttpPreamble; - type Message = StacksHttpMessage; - - /// how big can a preamble get? - fn preamble_size_hint(&mut self) -> usize { - HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize - } - - /// how big is this message? Might not know if we're dealing with chunked encoding. 
- fn payload_len(&mut self, preamble: &StacksHttpPreamble) -> Option { - match *preamble { - StacksHttpPreamble::Request(ref http_request_preamble) => { - Some(http_request_preamble.get_content_length() as usize) - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - match http_response_preamble.content_length { - Some(len) => Some(len as usize), - None => None, - } - } - } - } - - /// StacksHttpMessage deals with HttpRequestPreambles and HttpResponsePreambles - fn read_preamble(&mut self, buf: &[u8]) -> Result<(StacksHttpPreamble, usize), net_error> { - let mut cursor = io::Cursor::new(buf); - - let preamble = { - let mut rd = BoundReader::from_reader(&mut cursor, 4096); - let preamble: StacksHttpPreamble = read_next(&mut rd)?; - preamble - }; - - let preamble_len = cursor.position() as usize; - - self.set_preamble(&preamble)?; - - Ok((preamble, preamble_len)) - } - - /// Stream a payload of unknown length. Only gets called if payload_len() returns None. - /// Returns the message if we get enough data to form one. - /// Always returns the number of bytes consumed. - fn stream_payload( - &mut self, - preamble: &StacksHttpPreamble, - fd: &mut R, - ) -> Result<(Option<(StacksHttpMessage, usize)>, usize), net_error> { - assert!(self.payload_len(preamble).is_none()); - match preamble { - StacksHttpPreamble::Request(_) => { - // HTTP requests can't be chunk-encoded, so this should never be reached - unreachable!() - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(http_response_preamble.is_chunked()); - assert!(self.request_path.is_some()); - - // message of unknown length. Buffer up and maybe we can parse it. - let (message_bytes_opt, num_read) = - self.consume_data(http_response_preamble, fd).map_err(|e| { - self.reset(); - e - })?; - - match message_bytes_opt { - Some((request_version, request_path, message_bytes, total_bytes_consumed)) => { - // can parse! - test_debug!( - "read http response payload of {} bytes (just buffered {}) for {}", - message_bytes.len(), - num_read, - &request_path - ); - - // we now know the content-length, so pass it into the parser. - let len_hint = message_bytes.len(); - let parse_res = HttpResponseType::parse( - self, - request_version, - http_response_preamble, - request_path, - &mut &message_bytes[..], - Some(len_hint), - ); - - // done parsing - self.reset(); - match parse_res { - Ok(data_response) => Ok(( - Some(( - StacksHttpMessage::Response(data_response), - total_bytes_consumed, - )), - num_read, - )), - Err(e) => { - info!("Failed to parse HTTP response: {:?}", &e); - Err(e) - } - } - } - None => { - // need more data - trace!( - "did not read http response payload, but buffered {}", - num_read - ); - Ok((None, num_read)) - } - } - } - } - } - - /// Parse a payload of known length. - /// Only gets called if payload_len() returns Some(...) 
- fn read_payload( - &mut self, - preamble: &StacksHttpPreamble, - buf: &[u8], - ) -> Result<(StacksHttpMessage, usize), net_error> { - match preamble { - StacksHttpPreamble::Request(ref http_request_preamble) => { - // all requests have a known length - let len = http_request_preamble.get_content_length() as usize; - assert!(len <= buf.len(), "{} > {}", len, buf.len()); - - trace!("read http request payload of {} bytes", len); - - let mut cursor = io::Cursor::new(buf); - match HttpRequestType::parse(self, http_request_preamble, &mut cursor) { - Ok(data_request) => Ok(( - StacksHttpMessage::Request(data_request), - cursor.position() as usize, - )), - Err(e) => { - info!("Failed to parse HTTP request: {:?}", &e); - if let net_error::ClientError(client_err) = e { - let req = HttpRequestType::ClientError( - HttpRequestMetadata::from_preamble(http_request_preamble), - client_err, - ); - // consume any remaining HTTP request content by returning bytes read = len - Ok((StacksHttpMessage::Request(req), len)) - } else { - Err(e) - } - } - } - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(!http_response_preamble.is_chunked()); - assert!(self.request_path.is_some()); - assert!(self.request_version.is_some()); - - let request_path = self.request_path.take().unwrap(); - let request_version = self.request_version.take().unwrap(); - - // message of known length - test_debug!( - "read http response payload of {} bytes for {}", - buf.len(), - &request_path - ); - - let mut cursor = io::Cursor::new(buf); - match HttpResponseType::parse( - self, - request_version, - http_response_preamble, - request_path, - &mut cursor, - None, - ) { - Ok(data_response) => Ok(( - StacksHttpMessage::Response(data_response), - cursor.position() as usize, - )), - Err(e) => Err(e), - } - } - } - } - - fn verify_payload_bytes( - &mut self, - _key: &StacksPublicKey, - _preamble: &StacksHttpPreamble, - _bytes: &[u8], - ) -> Result<(), net_error> { - // not defined for HTTP messages, but maybe we could add a signature header at some point - // in the future if needed. 
- Ok(()) - } - - fn write_message( - &mut self, - fd: &mut W, - message: &StacksHttpMessage, - ) -> Result<(), net_error> { - match *message { - StacksHttpMessage::Request(ref req) => { - if self.request_path.is_some() { - test_debug!("Have pending request already"); - return Err(net_error::InProgress); - } - req.send(self, fd)?; - - self.reset(); - self.begin_request(req.metadata().version, req.request_path()); - Ok(()) - } - StacksHttpMessage::Response(ref resp) => resp.send(self, fd), - } - } -} - -#[cfg(test)] -mod test { - use std::error::Error; - - use rand; - use rand::RngCore; - - use crate::burnchains::Txid; - use crate::chainstate::stacks::db::blocks::test::make_sample_microblock_stream; - use crate::chainstate::stacks::test::make_codec_test_block; - use crate::chainstate::stacks::StacksBlock; - use crate::chainstate::stacks::StacksMicroblock; - use crate::chainstate::stacks::StacksPrivateKey; - use crate::chainstate::stacks::StacksTransaction; - use crate::chainstate::stacks::TokenTransferMemo; - use crate::chainstate::stacks::TransactionAuth; - use crate::chainstate::stacks::TransactionPayload; - use crate::chainstate::stacks::TransactionPostConditionMode; - use crate::chainstate::stacks::TransactionVersion; - use crate::net::codec::test::check_codec_and_corruption; - use crate::net::test::*; - use crate::net::RPCNeighbor; - use crate::net::RPCNeighborsInfo; - use stacks_common::util::hash::to_hex; - use stacks_common::util::hash::Hash160; - use stacks_common::util::hash::MerkleTree; - use stacks_common::util::hash::Sha512Trunc256Sum; - - use stacks_common::types::chainstate::StacksAddress; - - use super::*; - - #[test] - fn test_parse_reserved_header() { - let tests = vec![ - ( - "Content-Length", - "123", - Some(HttpReservedHeader::ContentLength(123)), - ), - ( - "Content-Type", - "text/plain", - Some(HttpReservedHeader::ContentType(HttpContentType::Text)), - ), - ( - "Content-Type", - "application/octet-stream", - Some(HttpReservedHeader::ContentType(HttpContentType::Bytes)), - ), - ( - "Content-Type", - "application/json", - Some(HttpReservedHeader::ContentType(HttpContentType::JSON)), - ), - ( - "X-Request-Id", - "123", - Some(HttpReservedHeader::XRequestID(123)), - ), - ( - "Host", - "foo:123", - Some(HttpReservedHeader::Host(PeerHost::DNS( - "foo".to_string(), - 123, - ))), - ), - ( - "Host", - "1.2.3.4:123", - Some(HttpReservedHeader::Host(PeerHost::IP( - PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x01, 0x02, 0x03, 0x04, - ]), - 123, - ))), - ), - // errors - ("Content-Length", "-1", None), - ("Content-Length", "asdf", None), - ("Content-Length", "4294967296", None), - ("Content-Type", "blargh", None), - ("X-Request-Id", "-1", None), - ("X-Request-Id", "asdf", None), - ("X-Request-Id", "4294967296", None), - ("Unrecognized", "header", None), - ]; - - for (key, value, expected_result) in tests { - let result = HttpReservedHeader::try_from_str(key, value); - assert_eq!(result, expected_result); - } - } - - #[test] - fn test_parse_http_request_preamble_ok() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, true, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], 
vec!["Bar".to_string()])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("GET /foo HTTP/1.1\r\nConnection: close\r\nHost: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, false, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, false, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\nConnection: close\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, false, vec!["foo".to_string()], vec!["Bar".to_string()])) - ]; - - for (data, request) in tests.iter() { - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(req.is_ok(), "{:?}", &req); - assert_eq!(req.unwrap(), *request); - - let sreq = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sreq.is_ok(), "{:?}", &sreq); - assert_eq!( - sreq.unwrap(), - StacksHttpPreamble::Request((*request).clone()) - ); - } - } - - #[test] - fn test_parse_http_request_options() { - let data = "OPTIONS /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n"; - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - let preamble = HttpRequestPreamble::from_headers( - HttpVersion::Http11, - "OPTIONS".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - true, - vec![], - vec![], - ); - assert_eq!(req.unwrap(), preamble); - } - - #[test] - fn test_parse_http_request_preamble_case_ok() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\nhOsT: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, true, vec![], vec![])), - ("GET /foo HTTP/1.1\r\ncOnNeCtIoN: cLoSe\r\nhOsT: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, false, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nhOsT: core.blockstack.org\r\nCOnNeCtIoN: kEeP-aLiVE\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], vec!["Bar".to_string()])), - ]; - - for (data, request) in tests.iter() { - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(req.is_ok(), "{:?}", &req); - assert_eq!(req.unwrap(), *request); - - let sreq = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sreq.is_ok(), "{:?}", &sreq); - assert_eq!( - sreq.unwrap(), - StacksHttpPreamble::Request((*request).clone()) - ); - } - } - - #[test] - fn test_parse_http_request_preamble_err() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\n", "failed to fill whole buffer"), - ("GET /foo HTTP/1.1\r\n\r\n", "Missing Host header"), - ( - "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", - "Missing Host header", - ), - ("GET /foo HTTP/\r\n\r\n", "Failed to parse HTTP request"), - ("GET /foo HTTP/1.1\r\nHost:", "failed to 
fill whole buffer"), - ( - "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", - "duplicate header", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", - "header value is not ASCII-US", - ), - ( - "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", - "Missing Host header", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:8080\r\nConnection: foo\r\n\r\n", - "invalid Connection: header", - ), - ]; - - for (data, errstr) in tests.iter() { - let res = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}'", errstr); - assert!(res.is_err(), "{:?}", &res); - assert!( - res.as_ref().unwrap_err().to_string().find(errstr).is_some(), - "{:?}", - &res - ); - } - } - - #[test] - fn test_parse_stacks_http_preamble_request_err() { - let tests = vec![ - ( - "GET /foo HTTP/1.1\r\n", - "Not enough bytes to form a HTTP request or response", - ), - ( - "GET /foo HTTP/1.1\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost:", - "Not enough bytes to form a HTTP request or response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:8080\r\nConnection: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ]; - - for (data, errstr) in tests.iter() { - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}'", errstr); - assert!(sres.is_err(), "{:?}", &sres); - assert!( - sres.as_ref() - .unwrap_err() - .to_string() - .find(errstr) - .is_some(), - "{:?}", - &sres - ); - } - } - - #[test] - fn test_http_request_preamble_headers() { - let mut req = HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - true, - ); - let req_11 = HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - false, - ); - let req_10 = HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - false, - ); - - req.add_header("foo".to_string(), "bar".to_string()); - - assert_eq!(req.content_type, None); - req.set_content_type(HttpContentType::JSON); - assert_eq!(req.content_type, Some(HttpContentType::JSON)); - - req.add_header( - "content-type".to_string(), - "application/octet-stream".to_string(), - ); - assert_eq!(req.content_type, Some(HttpContentType::Bytes)); - - let mut bytes = vec![]; - req.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - - test_debug!("headers:\n{}", txt); - - assert!(txt.find("HTTP/1.1").is_some(), "HTTP version is missing"); - assert!( - txt.find("User-Agent: stacks/2.0\r\n").is_some(), - "User-Agnet header is missing" - ); - assert!( - txt.find("Host: localhost:6270\r\n").is_some(), - "Host header is missing" - ); - assert!(txt.find("foo: bar\r\n").is_some(), "foo header is missing"); - assert!( - 
txt.find("Content-Type: application/octet-stream\r\n") - .is_some(), - "content-type is missing" - ); - assert!(txt.find("Connection: ").is_none()); // not sent if keep_alive is true (for HTTP/1.1) - - let mut bytes_10 = vec![]; - req_10.consensus_serialize(&mut bytes_10).unwrap(); - let txt_10 = String::from_utf8(bytes_10).unwrap(); - - assert!(txt_10.find("HTTP/1.0").is_some(), "HTTP version is missing"); - - let mut bytes_11 = vec![]; - req_11.consensus_serialize(&mut bytes_11).unwrap(); - let txt_11 = String::from_utf8(bytes_11).unwrap(); - - assert!(txt_11.find("HTTP/1.1").is_some(), "HTTP version is wrong"); - assert!( - txt_11.find("Connection: close").is_some(), - "Explicit Connection: close is missing" - ); - } - - #[test] - fn test_parse_http_response_preamble_ok() { - let tests = vec![ - ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), true, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), true, Some(456), HttpContentType::JSON, 0, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nX-Request-Id: 123\r\nFoo: Bar\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), true, Some(456), HttpContentType::JSON, 123, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 200 Ok\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nConnection: close\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), false, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nConnection: close\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), false, Some(456), HttpContentType::JSON, 0, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Length: 456\r\nX-Request-Id: 123\r\nFoo: Bar\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), false, Some(456), HttpContentType::JSON, 123, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 200 Ok\r\nConnection: close\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), false, None, HttpContentType::Bytes, 0, vec![], vec![])), - ]; - - for (data, response) in tests.iter() { - test_debug!("Try parsing:\n{}\n", data); - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(res.is_ok(), "{:?}", &res); - assert_eq!(res.unwrap(), *response); - - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sres.is_ok(), "{:?}", &sres); - assert_eq!( - sres.unwrap(), - StacksHttpPreamble::Response((*response).clone()) - ); - } - } - - #[test] - fn test_parse_http_response_case_ok() { - let tests = vec![ - 
("HTTP/1.1 200 OK\r\ncOnTeNt-TyPe: aPpLiCaTiOn/oCtEt-StReAm\r\ncOnTeNt-LeNgTh: 123\r\nx-ReQuEsT-iD: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), true, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnTeNt-tYpE: aPpLiCaTiOn/OcTeT-sTrEaM\r\ntRaNsFeR-eNcOdInG: cHuNkEd\r\nX-rEqUeSt-Id: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnNeCtIoN: cLoSe\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), false, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnNeCtIoN: kEeP-AlIvE\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ]; - - for (data, response) in tests.iter() { - test_debug!("Try parsing:\n{}\n", data); - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(res.is_ok(), "{:?}", &res); - assert_eq!(res.unwrap(), *response); - - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sres.is_ok(), "{:?}", &sres); - assert_eq!( - sres.unwrap(), - StacksHttpPreamble::Response((*response).clone()) - ); - } - } - - #[test] - fn test_http_response_preamble_headers() { - let mut res = HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(123), - HttpContentType::JSON, - true, - 123, - ); - assert_eq!(res.request_id, 123); - - res.set_request_id(456); - assert_eq!(res.request_id, 456); - - res.add_header("foo".to_string(), "bar".to_string()); - res.add_CORS_headers(); - - let mut bytes = vec![]; - res.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - assert!( - txt.find("Server: stacks/2.0\r\n").is_some(), - "Server header is missing" - ); - assert!( - txt.find("Content-Length: 123\r\n").is_some(), - "Content-Length is missing" - ); - assert!( - txt.find("Content-Type: application/json\r\n").is_some(), - "Content-Type is missing" - ); - assert!(txt.find("Date: ").is_some(), "Date header is missing"); - assert!(txt.find("foo: bar\r\n").is_some(), "foo header is missing"); - assert!( - txt.find("X-Request-Id: 456\r\n").is_some(), - "X-Request-Id is missing" - ); - assert!( - txt.find("Access-Control-Allow-Origin: *\r\n").is_some(), - "CORS header is missing" - ); - assert!( - txt.find("Access-Control-Allow-Headers: origin, content-type\r\n") - .is_some(), - "CORS header is missing" - ); - assert!( - txt.find("Access-Control-Allow-Methods: POST, GET, OPTIONS\r\n") - .is_some(), - "CORS header is missing" - ); - assert!(txt.find("Connection: ").is_none()); // not sent if keep_alive is true - } - - #[test] - fn test_parse_http_response_preamble_err() { - let tests = vec![ - ("HTTP/1.1 200", - "failed to fill whole buffer"), - ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", - "header value is not ASCII-US"), - ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", - "duplicate header"), - ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", - "Unsupported HTTP content type"), - ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", - "Invalid Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "missing Content-Type, Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", - "missing Content-Type, 
Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", - "incompatible transfer-encoding and content-length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", - "invalid Connection: header"), - ]; - - for (data, errstr) in tests.iter() { - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}', got: {:?}", errstr, &res); - assert!(res.is_err(), "{:?}", &res); - assert!(res.unwrap_err().to_string().find(errstr).is_some()); - } - } - - #[test] - fn test_parse_stacks_http_preamble_response_err() { - let tests = vec![ - ("HTTP/1.1 200", - "Not enough bytes to form a HTTP request or response"), - ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ]; - - for (data, errstr) in tests.iter() { - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}', got: {:?}", errstr, &sres); - assert!(sres.is_err(), "{:?}", &sres); - assert!( - sres.as_ref() - .unwrap_err() - .to_string() - .find(errstr) - .is_some(), - "{:?}", - &sres - ); - } - } - - fn make_test_transaction() -> StacksTransaction { - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); - let addr = auth.origin().address_testnet(); - let recv_addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - - let mut tx_stx_transfer = StacksTransaction::new( - TransactionVersion::Testnet, - auth.clone(), - TransactionPayload::TokenTransfer( - recv_addr.clone().into(), - 123, - TokenTransferMemo([0u8; 34]), - ), - ); - tx_stx_transfer.chain_id = 0x80000000; - tx_stx_transfer.post_condition_mode = TransactionPostConditionMode::Allow; - tx_stx_transfer.set_tx_fee(0); - tx_stx_transfer - } - - #[test] - fn test_http_parse_host_header_value() { - let hosts = vec![ - "1.2.3.4", - "1.2.3.4:5678", - "[1:203:405:607:809:a0b:c0d:e0f]", - "[1:203:405:607:809:a0b:c0d:e0f]:12345", - "www.foo.com", - "www.foo.com:12345", - // invalid IP addresses will be parsed to DNS names - "1.2.3.4.5", - "[1:203:405:607:809:a0b:c0d:e0f:1011]", - // these won't parse at all, since the port is invalid - "1.2.3.4:1234567", - "1.2.3.4.5:1234567", - "[1:203:405:607:809:a0b:c0d:e0f]:1234567", - "[1:203:405:607:809:a0b:c0d:e0f:1011]:1234567", - "www.foo.com:1234567", - ":", - ":123", - ]; - - let peerhosts = vec![ - Some(PeerHost::IP( - PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 1, 2, 3, 4]), - 80, - )), - 
Some(PeerHost::IP( - PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 1, 2, 3, 4]), - 5678, - )), - Some(PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 80, - )), - Some(PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 12345, - )), - Some(PeerHost::DNS("www.foo.com".to_string(), 80)), - Some(PeerHost::DNS("www.foo.com".to_string(), 12345)), - Some(PeerHost::DNS("1.2.3.4.5".to_string(), 80)), - Some(PeerHost::DNS( - "[1:203:405:607:809:a0b:c0d:e0f:1011]".to_string(), - 80, - )), - None, - None, - None, - None, - None, - None, - None, - ]; - - for (host, expected_host) in hosts.iter().zip(peerhosts.iter()) { - let peerhost = match host.parse::() { - Ok(ph) => Some(ph), - Err(_) => None, - }; - - match (peerhost, expected_host) { - (Some(ref ph), Some(ref expected_ph)) => assert_eq!(*ph, *expected_ph), - (None, None) => {} - (Some(ph), None) => { - eprintln!( - "Parsed {} successfully to {:?}, but expected error", - host, ph - ); - assert!(false); - } - (None, Some(expected_ph)) => { - eprintln!("Failed to parse {} successfully", host); - assert!(false); - } - } - } - } - - #[test] - fn test_http_request_type_codec() { - let http_request_metadata_ip = HttpRequestMetadata { - version: HttpVersion::Http11, - peer: PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 12345, - ), - keep_alive: true, - canonical_stacks_tip_height: None, - }; - let http_request_metadata_dns = HttpRequestMetadata { - version: HttpVersion::Http11, - peer: PeerHost::DNS("www.foo.com".to_string(), 80), - keep_alive: true, - canonical_stacks_tip_height: None, - }; - - let tests = vec![ - HttpRequestType::GetNeighbors(http_request_metadata_ip.clone()), - HttpRequestType::GetBlock(http_request_metadata_dns.clone(), StacksBlockId([2u8; 32])), - HttpRequestType::GetMicroblocksIndexed( - http_request_metadata_ip.clone(), - StacksBlockId([3u8; 32]), - ), - HttpRequestType::PostTransaction( - http_request_metadata_dns.clone(), - make_test_transaction(), - None, - ), - HttpRequestType::OptionsPreflight(http_request_metadata_ip.clone(), "/".to_string()), - ]; - - let mut tx_body = vec![]; - make_test_transaction() - .consensus_serialize(&mut tx_body) - .unwrap(); - - let mut post_transaction_preamble = HttpRequestPreamble::new( - HttpVersion::Http11, - "POST".to_string(), - "/v2/transactions".to_string(), - http_request_metadata_dns.peer.hostname(), - http_request_metadata_dns.peer.port(), - http_request_metadata_dns.keep_alive, - ); - post_transaction_preamble.set_content_type(HttpContentType::Bytes); - post_transaction_preamble.set_content_length(tx_body.len() as u32); - - // all of these should parse - let expected_http_preambles = vec![ - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/v2/neighbors".to_string(), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - format!("/v2/blocks/{}", StacksBlockId([2u8; 32]).to_hex()), - http_request_metadata_dns.peer.hostname(), - http_request_metadata_dns.peer.port(), - http_request_metadata_dns.keep_alive, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - format!("/v2/microblocks/{}", StacksBlockId([3u8; 32]).to_hex()), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - 
post_transaction_preamble, - HttpRequestPreamble::new( - HttpVersion::Http11, - "OPTIONS".to_string(), - format!("/"), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - ]; - - let expected_http_bodies = vec![vec![], vec![], vec![], tx_body]; - - for (test, (expected_http_preamble, expected_http_body)) in tests.iter().zip( - expected_http_preambles - .iter() - .zip(expected_http_bodies.iter()), - ) { - let mut expected_bytes = vec![]; - expected_http_preamble - .consensus_serialize(&mut expected_bytes) - .unwrap(); - - test_debug!( - "Expected preamble:\n{}", - str::from_utf8(&expected_bytes).unwrap() - ); - - if expected_http_preamble.content_type.is_none() - || expected_http_preamble.content_type != Some(HttpContentType::Bytes) - { - test_debug!( - "Expected http body:\n{}", - str::from_utf8(&expected_http_body).unwrap() - ); - } else { - test_debug!("Expected http body (hex):\n{}", to_hex(&expected_http_body)); - } - - expected_bytes.append(&mut expected_http_body.clone()); - - let mut bytes = vec![]; - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.write_message(&mut bytes, &StacksHttpMessage::Request(test.clone())) - .unwrap(); - - assert_eq!(bytes, expected_bytes); - } - } - - #[test] - fn test_http_request_type_codec_err() { - let bad_content_lengths = vec![ - "GET /v2/neighbors HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "GET /v2/blocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "GET /v2/microblocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 0\r\n\r\n", - ]; - for bad_content_length in bad_content_lengths { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let (preamble, offset) = http.read_preamble(bad_content_length.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &bad_content_length.as_bytes()[offset..]); - - assert!(e.is_err(), "{:?}", &e); - assert!( - e.as_ref() - .unwrap_err() - .to_string() - .find("-length body for") - .is_some(), - "{:?}", - &e - ); - } - - let bad_content_types = vec![ - "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - ]; - for bad_content_type in bad_content_types { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let (preamble, offset) = http.read_preamble(bad_content_type.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &bad_content_type.as_bytes()[offset..]); - assert!(e.is_err()); - assert!(e.unwrap_err().to_string().find("Content-Type").is_some()); - } - } - - #[test] - fn test_http_response_type_codec() { - let test_neighbors_info = RPCNeighborsInfo { - bootstrap: vec![], - sample: vec![ - RPCNeighbor { - network_id: 1, - peer_version: 2, - addrbytes: PeerAddress([ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, - ]), - port: 12345, - public_key_hash: Hash160::from_bytes( - &hex_bytes("1111111111111111111111111111111111111111").unwrap(), - ) - .unwrap(), - authenticated: true, - stackerdbs: Some(vec![]), - }, - RPCNeighbor { - network_id: 3, - peer_version: 4, - addrbytes: PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x01, 0x02, 0x03, 0x04, - ]), - port: 23456, - public_key_hash: Hash160::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222").unwrap(), - ) - .unwrap(), - authenticated: false, - stackerdbs: Some(vec![]), - }, - ], - inbound: vec![], - outbound: vec![], - }; - - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let test_block_info = make_codec_test_block(5); - let test_microblock_info = - make_sample_microblock_stream(&privk, &test_block_info.block_hash()); - - let mut test_block_info_bytes = vec![]; - test_block_info - .consensus_serialize(&mut test_block_info_bytes) - .unwrap(); - - let mut test_microblock_info_bytes = vec![]; - test_microblock_info - .consensus_serialize(&mut test_microblock_info_bytes) - .unwrap(); - - let tests = vec![ - // length is known - ( - HttpResponseType::Neighbors( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), - true, - None, - ), - test_neighbors_info.clone(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Block( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(test_block_info_bytes.len() as u32), - true, - None, - ), - test_block_info.clone(), - ), - format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), - ), - ( - HttpResponseType::Microblocks( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(test_microblock_info_bytes.len() as u32), - true, - None, - ), - test_microblock_info.clone(), - ), - format!( - "/v2/microblocks/{}", - test_microblock_info[0].block_hash().to_hex() - ), - ), - ( - HttpResponseType::TransactionID( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some((Txid([0x1; 32]).to_hex().len() + 2) as u32), - true, - None, - ), - Txid([0x1; 32]), - ), - "/v2/transactions".to_string(), - ), - // length is unknown - ( - HttpResponseType::Neighbors( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_neighbors_info.clone(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Block( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_block_info.clone(), - ), - format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), - ), - ( - HttpResponseType::Microblocks( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_microblock_info.clone(), - ), - format!( - "/v2/microblocks/{}", - test_microblock_info[0].block_hash().to_hex() - ), - ), - ( - HttpResponseType::TransactionID( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - Txid([0x1; 32]), - ), - "/v2/transactions".to_string(), - ), - // errors without error messages - ( - HttpResponseType::BadRequest( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Unauthorized( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::PaymentRequired( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Forbidden( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::NotFound( - 
HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServerError( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServiceUnavailable( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Error( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - 502, - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - // errors with specific messages - ( - HttpResponseType::BadRequest( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Unauthorized( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::PaymentRequired( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Forbidden( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::NotFound( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServerError( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServiceUnavailable( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Error( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - 502, - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ]; - - let expected_http_preambles = vec![ - // length is known - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), - HttpContentType::JSON, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(test_block_info_bytes.len() as u32), - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(test_microblock_info_bytes.len() as u32), - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some((Txid([0x1; 32]).to_hex().len() + 2) as u32), - HttpContentType::JSON, - true, - 123, - ), - // length is unknown - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::JSON, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::JSON, - true, - 123, - ), - // errors - HttpResponsePreamble::new_error(400, 123, None), - HttpResponsePreamble::new_error(401, 123, None), - HttpResponsePreamble::new_error(402, 123, None), - HttpResponsePreamble::new_error(403, 123, None), - HttpResponsePreamble::new_error(404, 123, None), - 
HttpResponsePreamble::new_error(500, 123, None), - HttpResponsePreamble::new_error(503, 123, None), - // generic error - HttpResponsePreamble::new_error(502, 123, None), - // errors with messages - HttpResponsePreamble::new_error(400, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(401, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(402, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(403, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(404, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(500, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(503, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(502, 123, Some("foo".to_string())), - ]; - - let expected_http_bodies = vec![ - // with content-length - serde_json::to_string(&test_neighbors_info) - .unwrap() - .as_bytes() - .to_vec(), - test_block_info_bytes.clone(), - test_microblock_info_bytes.clone(), - Txid([0x1; 32]).to_hex().as_bytes().to_vec(), - // with transfer-encoding: chunked - serde_json::to_string(&test_neighbors_info) - .unwrap() - .as_bytes() - .to_vec(), - test_block_info_bytes, - test_microblock_info_bytes, - Txid([0x1; 32]).to_hex().as_bytes().to_vec(), - // errors - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - // errors with messages - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - ]; - - for ((test, request_path), (expected_http_preamble, _expected_http_body)) in - tests.iter().zip( - expected_http_preambles - .iter() - .zip(expected_http_bodies.iter()), - ) - { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let mut bytes = vec![]; - test_debug!("write body:\n{:?}\n", test); - - http.begin_request(HttpVersion::Http11, request_path.to_string()); - http.write_message(&mut bytes, &StacksHttpMessage::Response((*test).clone())) - .unwrap(); - - let (mut preamble, offset) = match http.read_preamble(&bytes) { - Ok((p, o)) => (p, o), - Err(e) => { - test_debug!("first 4096 bytes:\n{:?}\n", &bytes[0..].to_vec()); - test_debug!("error: {:?}", &e); - assert!(false); - unreachable!(); - } - }; - - test_debug!("read preamble of {} bytes\n{:?}\n", offset, preamble); - - test_debug!("read http body\n{:?}\n", &bytes[offset..].to_vec()); - - let (message, _total_len) = if expected_http_preamble.is_chunked() { - let (msg_opt, len) = http - .stream_payload(&preamble, &mut &bytes[offset..]) - .unwrap(); - (msg_opt.unwrap().0, len) - } else { - http.read_payload(&preamble, &bytes[offset..]).unwrap() - }; - - test_debug!("got message\n{:?}\n", &message); - - // check everything in the parsed preamble except for the extra headers - match preamble { - StacksHttpPreamble::Response(ref mut req) => { - assert_eq!(req.headers.len(), 5); - assert!(req.headers.get("access-control-allow-headers").is_some()); - assert!(req.headers.get("access-control-allow-methods").is_some()); - assert!(req.headers.get("access-control-allow-origin").is_some()); - assert!(req.headers.get("server").is_some()); - assert!(req.headers.get("date").is_some()); - req.headers.clear(); - } - StacksHttpPreamble::Request(_) => { - panic!("parsed a request"); - } - } - - assert_eq!( - preamble, - StacksHttpPreamble::Response((*expected_http_preamble).clone()) - ); - assert_eq!(message, 
StacksHttpMessage::Response((*test).clone())); - assert_eq!(http.num_pending(), 0); - } - } - - #[test] - fn test_http_response_type_codec_err() { - let request_paths = vec![ - "/v2/blocks/1111111111111111111111111111111111111111111111111111111111111111", - "/v2/transactions", - "/v2/neighbors", - "/v2/neighbors", - "/v2/neighbors", - ]; - let bad_request_payloads = vec![ - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 2\r\n\r\nab", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 4\r\n\r\n\"ab\"", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\n{", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\na", - "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/octet-stream\r\nContent-length: 2\r\n\r\n{}", - ]; - let expected_bad_request_payload_errors = vec![ - "Invalid content-type", - "Invalid txid:", - "Not enough bytes", - "Failed to parse", - "expected text/plain", - ]; - for (test, (expected_error, request_path)) in bad_request_payloads.iter().zip( - expected_bad_request_payload_errors - .iter() - .zip(request_paths), - ) { - test_debug!( - "Expect failure:\n{}\nExpected error: '{}'", - test, - expected_error - ); - - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.begin_request(HttpVersion::Http11, request_path.to_string()); - - let (preamble, offset) = http.read_preamble(test.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &test.as_bytes()[offset..]); - let errstr = format!("{:?}", &e); - assert!(e.is_err()); - assert!( - e.unwrap_err().to_string().find(expected_error).is_some(), - "{}", - errstr - ); - } - } - - #[test] - fn test_http_headers_too_big() { - let bad_header_value = std::iter::repeat("A") - .take(HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize) - .collect::(); - let bad_request_preamble = format!( - "GET /v2/neighbors HTTP/1.1\r\nHost: localhost:1234\r\nBad-Header: {}\r\n\r\n", - &bad_header_value - ); - let bad_response_preamble = format!("HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-ID: 123\r\nContent-Type: text/plain\r\nContent-Length: 64\r\nBad-Header: {}\r\n\r\n", &bad_header_value); - - let request_err = - HttpRequestPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let response_err = - HttpResponsePreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - let protocol_request_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let protocol_response_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - eprintln!("request_err: {:?}", &request_err); - eprintln!("response_err: {:?}", &response_err); - - eprintln!("protocol_request_err: {:?}", &protocol_request_err); - eprintln!("protocol_response_err: {:?}", &protocol_response_err); - - assert!(request_err - .to_string() - .find("Not enough bytes to form a HTTP request preamble") - .is_some()); - assert!(response_err - .to_string() - .find("Not enough bytes to form a HTTP response preamble") - .is_some()); - assert!(protocol_request_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - assert!(protocol_response_err - .to_string() - 
.find("Failed to decode HTTP request or HTTP response") - .is_some()); - } - - #[test] - fn test_http_headers_too_many() { - let mut too_many_headers_list = vec![]; - for i in 0..HTTP_PREAMBLE_MAX_NUM_HEADERS { - too_many_headers_list.push(format!("H{}: {}\r\n", i + 1, i + 1)); - } - let too_many_headers = too_many_headers_list.join(""); - let bad_request_preamble = format!( - "GET /v2/neighbors HTTP/1.1\r\nHost: localhost:1234\r\n{}\r\n", - &too_many_headers - ); - let bad_response_preamble = format!("HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-ID: 123\r\nContent-Type: text/plain\r\nContent-Length: 64\r\n{}\r\n", &too_many_headers); - - let request_err = - HttpRequestPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let response_err = - HttpResponsePreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - let protocol_request_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let protocol_response_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - eprintln!("request_err: {:?}", &request_err); - eprintln!("response_err: {:?}", &response_err); - - eprintln!("protocol_request_err: {:?}", &protocol_request_err); - eprintln!("protocol_response_err: {:?}", &protocol_response_err); - - assert!(request_err - .to_string() - .find("Failed to parse HTTP request: TooManyHeaders") - .is_some()); - assert!(response_err - .to_string() - .find("Failed to parse HTTP response: TooManyHeaders") - .is_some()); - assert!(protocol_request_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - assert!(protocol_response_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - } - - #[test] - fn test_http_duplicate_concurrent_streamed_response_fails() { - // do not permit multiple in-flight chunk-encoded HTTP responses with the same request ID. 
- let valid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n37\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; - let invalid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n10\r\nxxxxxxxxxxxxxxxx\r\n0\r\n\r\n"; - let invalid_chunked_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n38\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; - - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(valid_neighbors_response.as_bytes()) - .unwrap(); - assert_eq!(http.num_pending(), 1); - - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // finish reading the body - let msg = http - .stream_payload( - &preamble, - &mut &valid_neighbors_response.as_bytes()[offset..], - ) - .unwrap(); - match msg { - ( - Some(( - StacksHttpMessage::Response(HttpResponseType::Neighbors(_, neighbors_data)), - _, - )), - _, - ) => assert_eq!( - neighbors_data, - RPCNeighborsInfo { - bootstrap: vec![], - sample: vec![], - inbound: vec![], - outbound: vec![] - } - ), - _ => { - error!("Got {:?}", &msg); - assert!(false); - } - } - assert_eq!(http.num_pending(), 0); - - // can read the preamble again, but only once - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(invalid_neighbors_response.as_bytes()) - .unwrap(); - assert_eq!(http.num_pending(), 1); - - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // reading a corrupt body unlocks the ability to read the preamble again - let res = http.stream_payload( - &preamble, - &mut &invalid_neighbors_response.as_bytes()[offset..], - ); - assert!(res.unwrap_err().to_string().find("JSON").is_some()); - assert_eq!(http.num_pending(), 0); - - // can read the premable again, but only once - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(invalid_chunked_response.as_bytes()) - .unwrap(); - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // reading a corrupt chunk stream unlocks the ability to read the preamble again - let res = http.stream_payload( - &preamble, - &mut &invalid_chunked_response.as_bytes()[offset..], - ); - assert!(res - .unwrap_err() - .to_string() - .find("Invalid chunk trailer") - .is_some()); - assert_eq!(http.num_pending(), 0); - } - - #[test] - fn test_http_request_version_keep_alive() { - let requests = vec![ - HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - true, - ), - HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - false, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - 
"/v2/info".to_string(), - "localhost".to_string(), - 8080, - true, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - false, - ), - ]; - - // (have 'connection' header?, have 'keep-alive' value?) - let requests_connection_expected = - vec![(true, true), (false, false), (false, false), (true, false)]; - - for (r, (has_connection, is_keep_alive)) in - requests.iter().zip(requests_connection_expected.iter()) - { - let mut bytes = vec![]; - r.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - - eprintln!( - "has_connection: {}, is_keep_alive: {}\n{}", - *has_connection, *is_keep_alive, &txt - ); - if *has_connection { - if *is_keep_alive { - assert!(txt.find("Connection: keep-alive\r\n").is_some()); - } else { - assert!(txt.find("Connection: close\r\n").is_some()); - } - } else { - assert!(txt.find("Connection: ").is_none()); - } - } - } - - #[test] - fn test_http_response_version_keep_alive() { - // (version, explicit keep-alive?) - let responses_args = vec![ - (HttpVersion::Http10, true), - (HttpVersion::Http10, false), - (HttpVersion::Http11, true), - (HttpVersion::Http11, false), - ]; - - let mut responses = vec![]; - for res in responses_args.iter() { - let mut bytes = vec![]; - let md = HttpResponseMetadata::new(res.0.clone(), 123, None, res.1, None); - HttpResponsePreamble::new_serialized( - &mut bytes, - 200, - "OK", - None, - &HttpContentType::JSON, - 123, - |ref mut fd| keep_alive_headers(fd, &md), - ) - .unwrap(); - responses.push(String::from_utf8(bytes).unwrap()); - } - - for (response, (version, sent_keep_alive)) in responses.iter().zip(responses_args.iter()) { - test_debug!( - "version: {:?}, sent keep-alive: {}, response:\n{}", - version, - sent_keep_alive, - response - ); - match version { - HttpVersion::Http10 => { - // be explicit about Connection: with http/1.0 clients - if *sent_keep_alive { - assert!(response.find("Connection: keep-alive\r\n").is_some()); - } else { - assert!(response.find("Connection: close\r\n").is_some()); - } - } - HttpVersion::Http11 => { - if *sent_keep_alive { - // we don't send connection: keep-alive if the client is 1.1 and it didn't - // send its own connection:

is concerned + HTTP_REQUEST_ID_RESERVED + } + + fn get_message_name(&self) -> &'static str { + "StachsHttpMessage" + } +} + +/// A partially-decoded, streamed HTTP message (response) being received. +/// Internally used by StacksHttp to keep track of chunk-decoding state. +#[derive(Debug, Clone, PartialEq)] +struct StacksHttpRecvStream { + state: HttpChunkedTransferReaderState, + data: Vec, + total_consumed: usize, // number of *encoded* bytes consumed +} + +impl StacksHttpRecvStream { + pub fn new(max_size: u64) -> StacksHttpRecvStream { + StacksHttpRecvStream { + state: HttpChunkedTransferReaderState::new(max_size), + data: vec![], + total_consumed: 0, + } + } + + /// Feed data into our chunked transfer reader state. If we finish reading a stream, return + /// the decoded bytes (as Some(Vec) and the total number of encoded bytes consumed). + /// Always returns the number of bytes consumed. + pub fn consume_data( + &mut self, + fd: &mut R, + ) -> Result<(Option<(Vec, usize)>, usize), NetError> { + let mut consumed = 0; + let mut blocked = false; + while !blocked { + let mut decoded_buf = vec![0u8; CHUNK_BUF_LEN]; + let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { + Ok((0, num_consumed)) => { + trace!( + "consume_data blocked on 0 decoded bytes ({} consumed)", + num_consumed + ); + blocked = true; + (0, num_consumed) + } + Ok((num_read, num_consumed)) => (num_read, num_consumed), + Err(e) => match e.kind() { + io::ErrorKind::WouldBlock | io::ErrorKind::TimedOut => { + trace!("consume_data blocked on read error"); + blocked = true; + (0, 0) + } + _ => { + return Err(NetError::ReadError(e)); + } + }, + }; + + consumed += consumed_pass; + if read_pass > 0 { + self.data.extend_from_slice(&decoded_buf[0..read_pass]); + } + } + + self.total_consumed += consumed; + + // did we get a message? + if self.state.is_eof() { + // reset + let message_data = mem::replace(&mut self.data, vec![]); + let total_consumed = self.total_consumed; + + self.state = HttpChunkedTransferReaderState::new(self.state.max_size); + self.total_consumed = 0; + + Ok((Some((message_data, total_consumed)), consumed)) + } else { + Ok((None, consumed)) + } + } +} + +/// Information about an in-flight request +#[derive(Debug, Clone, PartialEq)] +struct StacksHttpReplyData { + request_id: u32, + stream: StacksHttpRecvStream, +} + +/// Stacks HTTP state machine implementation, for bufferring up data. +/// One of these exists per Connection. +/// There can be at most one HTTP request in-flight (i.e. we don't do pipelining). +/// +/// This state machine gets used for both clients and servers. A client issues an HTTP request, +/// and must receive a follow-up HTTP reply (or the state machine errors out). A server receives +/// an HTTP request, and sends an HTTP reply. +#[derive(Clone)] +pub struct StacksHttp { + /// Address of peer + peer_addr: SocketAddr, + /// offset body after '\r\n\r\n' if known + body_start: Option, + /// number of preamble bytes seen so far + num_preamble_bytes: usize, + /// last 4 bytes of the preamble we've seen, just in case the \r\n\r\n straddles two calls to + /// read_preamble() + last_four_preamble_bytes: [u8; 4], + /// Incoming reply state + reply: Option, + /// Size of HTTP chunks to write + chunk_size: usize, + /// Which request handler is active. + /// This is only used if this state-machine is used by a client to issue a request and then + /// parse a reply. 
If instead this state-machine is used by the server to parse a request and + /// send a reply, it will be unused. + request_handler_index: Option, + /// HTTP request handlers (verb, regex, request-handler, response-handler) + request_handlers: Vec<(String, Regex, Box)>, + /// Maximum size of call arguments + pub maximum_call_argument_size: u32, + /// Maximum execution budget of a read-only call + pub read_only_call_limit: ExecutionCost, +} + +impl StacksHttp { + pub fn new(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { + let mut http = StacksHttp { + peer_addr, + body_start: None, + num_preamble_bytes: 0, + last_four_preamble_bytes: [0u8; 4], + reply: None, + chunk_size: 8192, + request_handler_index: None, + request_handlers: vec![], + maximum_call_argument_size: conn_opts.maximum_call_argument_size, + read_only_call_limit: conn_opts.read_only_call_limit.clone(), + }; + http.register_rpc_methods(); + http + } + + /// Register an API RPC endpoint + pub fn register_rpc_endpoint( + &mut self, + handler: Handler, + ) { + self.request_handlers.push(( + handler.verb().to_string(), + handler.path_regex(), + Box::new(handler), + )); + } + + /// Find the HTTP request handler to use to process the reply, given the request path. + /// Returns the index into the list of handlers + fn find_response_handler(&self, request_verb: &str, request_path: &str) -> Option { + for (i, (verb, regex, _)) in self.request_handlers.iter().enumerate() { + if request_verb != verb { + continue; + } + let _captures = if let Some(caps) = regex.captures(request_path) { + caps + } else { + continue; + }; + + return Some(i); + } + None + } + + /// Force the state machine to expect a response + #[cfg(test)] + pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { + let handler_index = self + .find_response_handler(request_verb, request_path) + .expect(&format!( + "FATAL: could not find handler for '{}' '{}'", + request_verb, request_path + )); + self.request_handler_index = Some(handler_index); + } + + /// Try to parse an inbound HTTP request using a given handler, preamble, and body + #[cfg(test)] + pub fn handle_try_parse_request( + &self, + handler: &mut dyn RPCRequestHandler, + preamble: &HttpRequestPreamble, + body: &[u8], + ) -> Result { + let (decoded_path, query) = decode_request_path(&preamble.path_and_query_str)?; + let captures = if let Some(caps) = handler.path_regex().captures(&decoded_path) { + caps + } else { + return Err(NetError::NotFoundError); + }; + + let payload = match handler.try_parse_request( + preamble, + &captures, + if query.len() > 0 { Some(&query) } else { None }, + body, + ) { + Ok(p) => p, + Err(e) => { + handler.restart(); + return Err(e.into()); + } + }; + + let request = StacksHttpRequest::new(preamble.clone(), payload); + Ok(request) + } + + /// Try to parse an inbound HTTP request, given its decoded HTTP preamble. + /// The body will be in the `fd`. + /// Returns the parsed HTTP request if successful. + pub fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + body: &[u8], + ) -> Result { + let (decoded_path, query) = decode_request_path(&preamble.path_and_query_str)?; + test_debug!("decoded_path: '{}', query: '{}'", &decoded_path, &query); + + // NOTE: This loop starts out like `find_response_handler()`, but `captures`'s lifetime is + // bound to `regex` so we can't just return it from `find_response_handler()`. Thus, it's + // duplicated here. 
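+        // Illustrative example of the dispatch below (the exact regex is hypothetical):
+        // a handler registered via `register_rpc_endpoint()` with verb "GET" and a path
+        // regex like `^/v2/neighbors$` matches a request whose decoded path is
+        // "/v2/neighbors"; its regex `captures` and the raw query string (if any) are
+        // then handed to that handler's `try_parse_request()`.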
+ for (verb, regex, request) in self.request_handlers.iter_mut() { + if &preamble.verb != verb { + continue; + } + let captures = if let Some(caps) = regex.captures(&decoded_path) { + caps + } else { + continue; + }; + + let payload = match request.try_parse_request( + preamble, + &captures, + if query.len() > 0 { Some(&query) } else { None }, + body, + ) { + Ok(p) => p, + Err(e) => { + request.restart(); + return Err(e.into()); + } + }; + + info!("Handle StacksHttpRequest"; "verb" => %verb, "peer_addr" => %self.peer_addr, "path" => %decoded_path, "query" => %query); + let request = StacksHttpRequest::new(preamble.clone(), payload); + return Ok(request); + } + + test_debug!("Failed to parse '{}'", &preamble.path_and_query_str); + Err(NetError::NotFoundError) + } + + /// Parse out an HTTP response error message + pub fn try_parse_error_response( + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + if preamble.status_code < 400 || preamble.status_code > 599 { + return Err(NetError::DeserializeError( + "Inavlid response: not an error".to_string(), + )); + } + + let payload = if preamble.content_type == HttpContentType::Text { + let mut error_text = String::new(); + let mut ioc = io::Cursor::new(body); + let mut bound_fd = BoundReader::from_reader(&mut ioc, MAX_MESSAGE_LEN as u64); + bound_fd + .read_to_string(&mut error_text) + .map_err(NetError::ReadError)?; + + HttpResponsePayload::Text(error_text) + } else if preamble.content_type == HttpContentType::JSON { + let mut ioc = io::Cursor::new(body); + let mut bound_fd = BoundReader::from_reader(&mut ioc, MAX_MESSAGE_LEN as u64); + let json_val = serde_json::from_reader(&mut bound_fd).map_err(|_| { + NetError::DeserializeError("Failed to decode JSON value".to_string()) + })?; + + HttpResponsePayload::JSON(json_val) + } else { + return Err(NetError::DeserializeError(format!( + "Invalid error response: expected text/plain or application/json, got {:?}", + &preamble.content_type + ))); + }; + + Ok(StacksHttpResponse::new(preamble.clone(), payload)) + } + + /// Try to parse an inbound HTTP response, given its decoded HTTP preamble, and the HTTP + /// version and request path that had originally sent. The body will be read from `fd`. + pub fn try_parse_response( + &mut self, + request_handler_index: usize, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + if preamble.status_code >= 400 { + return Self::try_parse_error_response(preamble, body); + } + + let (_, _, parser) = self + .request_handlers + .get(request_handler_index) + .expect("FATAL: tried to use nonexistent response handler"); + let payload = parser.try_parse_response(preamble, body)?; + let response = StacksHttpResponse::new(preamble.clone(), payload); + return Ok(response); + } + + /// Handle an HTTP request by generating an HTTP response. + /// Returns Ok((preamble, contents)) on success. Note that this could be an HTTP error + /// message. + /// Returns Err(..) on failure to decode or generate the response. 
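+    /// Illustrative behavior (the path shown is hypothetical): a request for an
+    /// unregistered endpoint such as `GET /v2/does-not-exist` is not surfaced as
+    /// `Err(..)`; instead an `HttpNotFound` reply is rendered into the
+    /// `(HttpResponsePreamble, HttpResponseContents)` pair and returned as `Ok(..)`.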
+ pub fn try_handle_request( + &mut self, + request: StacksHttpRequest, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let (decoded_path, _) = decode_request_path(&request.preamble().path_and_query_str)?; + let response_handler_index = + if let Some(i) = self.find_response_handler(&request.preamble().verb, &decoded_path) { + i + } else { + // method not found + return StacksHttpResponse::new_error( + &request.preamble, + &HttpNotFound::new(format!( + "No such API endpoint '{} {}'", + &request.preamble().verb, + &decoded_path + )), + ) + .try_into_contents(); + }; + + let (_, _, request_handler) = self + .request_handlers + .get_mut(response_handler_index) + .expect("FATAL: request points to a nonexistent handler"); + let request_preamble = request.preamble.clone(); + let request_result = + request_handler.try_handle_request(request.preamble, request.contents, node); + request_handler.restart(); + + let (response_preamble, response_contents) = match request_result { + Ok((rp, rc)) => (rp, rc), + Err(NetError::Http(e)) => { + return StacksHttpResponse::new_error(&request_preamble, &*e.into_http_error()) + .try_into_contents() + } + Err(e) => { + warn!("Irrecoverable error when handling request"; "path" => %request_preamble.path_and_query_str, "error" => %e); + return Err(e); + } + }; + Ok((response_preamble, response_contents)) + } + + #[cfg(test)] + pub fn num_pending(&self) -> usize { + if self.reply.is_some() { + 1 + } else { + 0 + } + } + + /// Set up the pending response + /// Called indirectly from ProtocolFamily::read_preamble() when handling an HTTP response + /// Used for dealing with streaming data + fn set_pending(&mut self, preamble: &HttpResponsePreamble) { + self.reply = Some(StacksHttpReplyData { + request_id: preamble + .get_request_id() + .unwrap_or(HTTP_REQUEST_ID_RESERVED), + stream: StacksHttpRecvStream::new(MAX_MESSAGE_LEN as u64), + }); + } + + /// Set the preamble. This is only relevant for receiving an HTTP response to a request that we + /// already sent. It gets called from ProtocolFamily::read_preamble(). + /// + /// This method will set up this state machine to consume the message associated with this + /// premable, if the response is chunked. + fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { + match preamble { + StacksHttpPreamble::Response(ref http_response_preamble) => { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); + } + + self.set_pending(http_response_preamble); + } + } + _ => {} + } + Ok(()) + } + + /// Clear any pending response state -- i.e. due to a failed request. + fn reset(&mut self) -> () { + self.request_handler_index = None; + self.reply = None; + } + + /// Used for processing chunk-encoded streams. + /// Given the preamble and a Read, stream the bytes into a chunk-decoder. Return the decoded + /// bytes if we decode an entire stream. Always return the number of bytes consumed. + /// Returns Ok((Some(decoded bytes we got, total number of encoded bytes), number of bytes gotten in this call)) if we're done decoding. 
+ /// Returns Ok((None, number of bytes gotten in this call)) if there's more to decode. + pub fn consume_data( + &mut self, + preamble: &HttpResponsePreamble, + fd: &mut R, + ) -> Result<(Option<(Vec, usize)>, usize), NetError> { + assert!(preamble.is_chunked()); + assert!(self.reply.is_some()); + + if let Some(reply) = self.reply.as_mut() { + match reply.stream.consume_data(fd) { + Ok(res) => match res { + (None, sz) => Ok((None, sz)), + (Some((byte_vec, bytes_total)), sz) => { + // done receiving + self.reply = None; + Ok((Some((byte_vec, bytes_total)), sz)) + } + }, + Err(e) => { + // broken stream + self.reset(); + Err(e) + } + } + } else { + unreachable!(); + } + } + + /// Calculate the search window for \r\n\r\n + fn body_start_search_window(&self, i: usize, buf: &[u8]) -> [u8; 4] { + let window = match i { + 0 => [ + self.last_four_preamble_bytes[0], + self.last_four_preamble_bytes[1], + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + ], + 1 => [ + self.last_four_preamble_bytes[1], + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + buf[0], + ], + 2 => [ + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + buf[0], + buf[1], + ], + 3 => [self.last_four_preamble_bytes[3], buf[0], buf[1], buf[2]], + _ => [buf[i - 4], buf[i - 3], buf[i - 2], buf[i - 1]], + }; + window + } + + /// Given a fully-formed single HTTP response, parse it (used by clients). + #[cfg(test)] + pub fn parse_response( + verb: &str, + request_path: &str, + response_buf: &[u8], + ) -> Result { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + + let response_handler_index = + http.find_response_handler(verb, request_path) + .ok_or(NetError::SendError(format!( + "No such handler for '{} {}'", + verb, request_path + )))?; + http.request_handler_index = Some(response_handler_index); + + let (preamble, message_offset) = http.read_preamble(response_buf)?; + let is_chunked = match preamble { + StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), + _ => { + return Err(NetError::DeserializeError( + "Invalid HTTP message: did not get a Response preamble".to_string(), + )); + } + }; + + let mut message_bytes = &response_buf[message_offset..]; + + if is_chunked { + match http.stream_payload(&preamble, &mut message_bytes) { + Ok((Some((message, _)), _)) => Ok(message), + Ok((None, _)) => Err(NetError::UnderflowError( + "Not enough bytes to form a streamed HTTP response".to_string(), + )), + Err(e) => Err(e), + } + } else { + let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; + Ok(message) + } + } +} + +impl ProtocolFamily for StacksHttp { + type Preamble = StacksHttpPreamble; + type Message = StacksHttpMessage; + + /// how big can a preamble get? + fn preamble_size_hint(&mut self) -> usize { + HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize + } + + /// how big is this message? Might not know if we're dealing with chunked encoding. + fn payload_len(&mut self, preamble: &StacksHttpPreamble) -> Option { + match *preamble { + StacksHttpPreamble::Request(ref http_request_preamble) => { + Some(http_request_preamble.get_content_length() as usize) + } + StacksHttpPreamble::Response(ref http_response_preamble) => { + match http_response_preamble.content_length { + Some(len) => Some(len as usize), + None => None, + } + } + } + } + + /// Read the next HTTP preamble (be it a request or a response), and return the preamble and + /// the number of bytes consumed while reading it. 
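+    /// Illustrative example (the bytes shown are hypothetical): if one call delivers
+    /// `b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r"` and the next call starts with
+    /// `b"\n"`, the four-byte search window built from `last_four_preamble_bytes` plus
+    /// the new buffer still matches `[13, 10, 13, 10]`, so the end of the headers is
+    /// found even though the `\r\n\r\n` straddles the two calls.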
+ fn read_preamble(&mut self, buf: &[u8]) -> Result<(StacksHttpPreamble, usize), NetError> { + // does this contain end-of-headers marker, including the last four bytes of preamble we + // saw? + if self.body_start.is_none() { + for i in 0..=buf.len() { + let window = self.body_start_search_window(i, buf); + if window == [13, 10, 13, 10] { + self.body_start = Some(self.num_preamble_bytes + i); + } + } + } + if self.body_start.is_none() { + // haven't found the body yet, so update `last_four_preamble_bytes` + // and report underflow + let len = buf.len(); + let last_four_preamble_bytes = self.body_start_search_window(len, buf); + self.num_preamble_bytes += len; + self.last_four_preamble_bytes = last_four_preamble_bytes; + return Err(NetError::UnderflowError( + "Not enough bytes to form HTTP preamble".into(), + )); + } + + let mut cursor = io::Cursor::new(buf); + + let preamble = { + let mut rd = + BoundReader::from_reader(&mut cursor, HTTP_PREAMBLE_MAX_ENCODED_SIZE as u64); + let preamble: StacksHttpPreamble = read_next(&mut rd)?; + preamble + }; + + let preamble_len = cursor.position() as usize; + self.set_preamble(&preamble)?; + + Ok((preamble, preamble_len)) + } + + /// Stream a payload of unknown length. Only gets called if payload_len() returns None. + /// + /// Returns Ok((Some((message, num-bytes-consumed)), num-bytes-read)) if we read enough data to + /// form a message. `num-bytes-consumed` is the number of bytes required to parse the message, + /// and `num-bytes-read` is the number of bytes read in this call. + /// + /// Returns Ok((None, num-bytes-read)) if we consumed data (i.e. `num-bytes-read` bytes), but + /// did not yet have enough of the message to parse it. The caller should try again. + /// + /// Returns Error on irrecoverable error. + fn stream_payload( + &mut self, + preamble: &StacksHttpPreamble, + fd: &mut R, + ) -> Result<(Option<(StacksHttpMessage, usize)>, usize), NetError> { + assert!(self.payload_len(preamble).is_none()); + match preamble { + StacksHttpPreamble::Request(_) => { + // HTTP requests can't be chunk-encoded, so this should never be reached + unreachable!() + } + StacksHttpPreamble::Response(ref http_response_preamble) => { + assert!(http_response_preamble.is_chunked()); + + // sanity check -- if we're receiving a response, then we must have earlier issued + // a request. Thus, we must already know which response handler to use. + // Otherwise, someone sent us malforemd data. + if self.request_handler_index.is_none() { + self.reset(); + return Err(NetError::DeserializeError( + "Unsolicited HTTP response".to_string(), + )); + } + + // message of unknown length. Buffer up and maybe we can parse it. + let (message_bytes_opt, num_read) = + self.consume_data(http_response_preamble, fd).map_err(|e| { + self.reset(); + e + })?; + + match message_bytes_opt { + Some((message_bytes, total_bytes_consumed)) => { + // can parse! + test_debug!( + "read http response payload of {} bytes (just buffered {})", + message_bytes.len(), + num_read, + ); + + // we now know the content-length, so pass it into the parser. 
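+                        // `message_bytes` is the fully de-chunked body, so its length is
+                        // known here even though a chunked reply carries no Content-Length.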
+ let handler_index = + self.request_handler_index + .ok_or(NetError::DeserializeError( + "Unknown HTTP response handler".to_string(), + ))?; + + let parse_res = self.try_parse_response( + handler_index, + http_response_preamble, + &message_bytes[..], + ); + + // done parsing + self.reset(); + match parse_res { + Ok(data_response) => Ok(( + Some(( + StacksHttpMessage::Response(data_response), + total_bytes_consumed, + )), + num_read, + )), + Err(e) => { + info!("Failed to parse HTTP response: {:?}", &e); + Err(e) + } + } + } + None => { + // need more data + trace!( + "did not read http response payload, but buffered {}", + num_read + ); + Ok((None, num_read)) + } + } + } + } + } + + /// Parse a payload of known length. + /// Only gets called if payload_len() returns Some(...). + /// + /// Return Ok(message, num-bytes-consumed) if we decoded a message. The message will + /// have consumed `num-bytes-consumed` bytes. + /// + /// Return Err(..) if we failed to decode the message. + fn read_payload( + &mut self, + preamble: &StacksHttpPreamble, + buf: &[u8], + ) -> Result<(StacksHttpMessage, usize), NetError> { + match preamble { + StacksHttpPreamble::Request(ref http_request_preamble) => { + // all requests have a known length + let len = http_request_preamble.get_content_length() as usize; + assert!(len <= buf.len(), "{} > {}", len, buf.len()); + + trace!("read http request payload of {} bytes", len); + + match self.try_parse_request(http_request_preamble, &buf[0..len]) { + Ok(data_request) => Ok((StacksHttpMessage::Request(data_request), len)), + Err(e) => { + match e { + NetError::Http(http_error) => { + // convert into a response + let resp = StacksHttpResponse::new_error( + http_request_preamble, + &*http_error.into_http_error(), + ); + self.reset(); + return Ok(( + StacksHttpMessage::Error( + http_request_preamble.path_and_query_str.clone(), + resp, + ), + len, + )); + } + _ => { + info!("Failed to parse HTTP request: {:?}", &e); + self.reset(); + Err(e) + } + } + } + } + } + StacksHttpPreamble::Response(ref http_response_preamble) => { + assert!(!http_response_preamble.is_chunked()); + + // message of known length + test_debug!("read http response payload of {} bytes", buf.len(),); + + // sanity check -- if we're receiving a response, then we must have earlier issued + // a request. Thus, we must already know which response handler to use. + // Otherwise, someone sent us malformed data. + let handler_index = if let Some(i) = self.request_handler_index.as_ref() { + *i + } else { + self.reset(); + return Err(NetError::DeserializeError( + "Unsolicited HTTP response".to_string(), + )); + }; + + let res = self.try_parse_response(handler_index, http_response_preamble, buf); + self.reset(); + res.map(|data_response| (StacksHttpMessage::Response(data_response), buf.len())) + } + } + } + + fn verify_payload_bytes( + &mut self, + _key: &StacksPublicKey, + _preamble: &StacksHttpPreamble, + _bytes: &[u8], + ) -> Result<(), NetError> { + // not defined for HTTP messages, but maybe we could add a signature header at some point + // in the future if needed. + Ok(()) + } + + /// Write out a message to `fd`. + /// + /// NOTE: If we're sending a StacksHttpMessage::Request(..), then the next preamble and payload + /// received _must be_ a StacksHttpMessage::Response(..) in response to the request. + /// If it is not, then that decode will fail. 
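+    /// Illustrative client-side sequence: `write_message(..)` with a
+    /// `StacksHttpMessage::Request(..)` records which response handler to use; the
+    /// follow-up `read_preamble()` plus `read_payload()` or `stream_payload()` then
+    /// decode the reply with that handler. Writing a second request before the first
+    /// reply has been fully decoded fails with `NetError::InProgress`.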
+ fn write_message( + &mut self, + fd: &mut W, + message: &StacksHttpMessage, + ) -> Result<(), NetError> { + match *message { + StacksHttpMessage::Request(ref req) => { + // client cannot send more than one request in parallel + if self.request_handler_index.is_some() { + test_debug!("Have pending request already"); + return Err(NetError::InProgress); + } + + // find the response handler we'll use + let (decoded_path, _) = decode_request_path(&req.preamble().path_and_query_str)?; + let handler_index = self + .find_response_handler(&req.preamble().verb, &decoded_path) + .ok_or(NetError::SendError(format!( + "No response handler found for `{} {}`", + &req.preamble().verb, + &decoded_path + )))?; + + req.send(fd)?; + + // remember this so we'll know how to decode the response. + // The next preamble and message we'll read _must be_ a response! + self.request_handler_index = Some(handler_index); + Ok(()) + } + StacksHttpMessage::Response(ref resp) => resp.send(fd), + StacksHttpMessage::Error(_, ref resp) => resp.send(fd), + } + } +} + +impl PeerNetwork { + /// Send a (non-blocking) HTTP request to a remote peer. + /// Returns the event ID on success. + pub fn connect_or_send_http_request( + &mut self, + data_url: UrlString, + addr: SocketAddr, + request: StacksHttpRequest, + mempool: &MemPoolDB, + chainstate: &mut StacksChainState, + ) -> Result { + PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { + PeerNetwork::with_http(network, |ref mut network, ref mut http| { + match http.connect_http( + network_state, + network, + data_url.clone(), + addr.clone(), + Some(request.clone()), + ) { + Ok(event_id) => Ok(event_id), + Err(NetError::AlreadyConnected(event_id, _)) => { + match http.get_conversation_and_socket(event_id) { + (Some(ref mut convo), Some(ref mut socket)) => { + convo.send_request(request)?; + HttpPeer::saturate_http_socket(socket, convo, mempool, chainstate)?; + Ok(event_id) + } + (_, _) => { + debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); + Err(NetError::PeerNotConnected) + } + } + } + Err(e) => { + return Err(e); + } + } + }) + }) + } +} + +/// Given a raw path, decode it (i.e. 
if it's url-encoded) +/// Return the (decoded-path, query-string) on success +pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { + let local_url = format!("http://local{}", path); + let url = Url::parse(&local_url).map_err(|_e| { + NetError::DeserializeError("Http request path could not be parsed".to_string()) + })?; + + let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { + NetError::DeserializeError("Http request path could not be parsed as UTF-8".to_string()) + })?; + + let query_str = url.query(); + Ok(( + decoded_path.to_string(), + query_str.unwrap_or("").to_string(), + )) +} From c6d48233675263bfd7834b0ef6ccd02c4b6cdbd3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:06:54 -0400 Subject: [PATCH 052/107] refactor: consolidate all testing for application-level concerns for Stacks HTTP support in one place --- stackslib/src/net/tests/httpcore.rs | 1054 +++++++++++++++++++++++++++ 1 file changed, 1054 insertions(+) create mode 100644 stackslib/src/net/tests/httpcore.rs diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs new file mode 100644 index 0000000000..6a629dd632 --- /dev/null +++ b/stackslib/src/net/tests/httpcore.rs @@ -0,0 +1,1054 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::io::Write; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::str; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::db::blocks::test::make_sample_microblock_stream; +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::{ + StacksTransaction, TokenTransferMemo, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionVersion, +}; +use crate::net::api::getneighbors::RPCNeighbor; +use crate::net::api::getneighbors::RPCNeighborsInfo; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{ + http_error_from_code_and_text, http_reason, HttpContentType, HttpErrorResponse, + HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePreamble, + HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::httpcore::{StacksHttp, StacksHttpMessage, StacksHttpPreamble}; +use crate::net::rpc::ConversationHttp; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::chunked_encoding::HttpChunkedTransferWriter; +use stacks_common::util::chunked_encoding::HttpChunkedTransferWriterState; +use stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::{to_hex, Hash160}; + +#[test] +fn test_parse_stacks_http_preamble_request_err() { + let tests = vec![ + ( + "GET /foo HTTP/1.1\r\n", + "Not enough bytes to form a HTTP request or response", + ), + ( + "GET /foo HTTP/1.1\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost:", + "Not enough bytes to form a HTTP request or response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: localhost:8080\r\nConnection: foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ]; + + for (data, errstr) in tests.iter() { + let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); + test_debug!("Expect '{}'", errstr); + assert!(sres.is_err(), "{:?}", &sres); + assert!( + sres.as_ref() + .unwrap_err() + .to_string() + .find(errstr) + .is_some(), + "{:?}", + &sres + ); + } +} + +#[test] +fn test_parse_stacks_http_preamble_response_err() { + let tests = vec![ + ("HTTP/1.1 200", + "Not enough bytes to form a HTTP request or response"), + ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Length: 
foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ]; + + for (data, errstr) in tests.iter() { + let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); + test_debug!("Expect '{}', got: {:?}", errstr, &sres); + assert!(sres.is_err(), "{:?}", &sres); + assert!( + sres.as_ref() + .unwrap_err() + .to_string() + .find(errstr) + .is_some(), + "{:?}", + &sres + ); + } +} + +fn make_test_transaction() -> StacksTransaction { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let addr = auth.origin().address_testnet(); + let recv_addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + + let mut tx_stx_transfer = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::TokenTransfer( + recv_addr.clone().into(), + 123, + TokenTransferMemo([0u8; 34]), + ), + ); + tx_stx_transfer.chain_id = 0x80000000; + tx_stx_transfer.post_condition_mode = TransactionPostConditionMode::Allow; + tx_stx_transfer.set_tx_fee(0); + tx_stx_transfer +} + +#[test] +fn test_http_request_type_codec() { + let convo = ConversationHttp::new( + "127.0.0.1:12345".parse().unwrap(), + None, + PeerHost::DNS("localhost".to_string(), 12345), + &ConnectionOptions::default(), + 100, + ); + let tx = make_test_transaction(); + let tx_body = tx.serialize_to_vec(); + + let fixtures = vec![ + ( + StacksHttpRequest::new_getneighbors(convo.get_peer_host()), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/neighbors".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getinfo(convo.get_peer_host(), Some(1234)), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/info".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getinfo(convo.get_peer_host(), None), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/info".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getpoxinfo(convo.get_peer_host(), TipRequest::UseLatestUnconfirmedTip), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/pox?tip=latest".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getpoxinfo(convo.get_peer_host(), TipRequest::UseLatestAnchoredTip), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/pox".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getheaders(convo.get_peer_host(), 2100, TipRequest::SpecificTip(StacksBlockId([0x80; 32]))), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/headers/2100?tip=8080808080808080808080808080808080808080808080808080808080808080".to_string(), + 
"localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getblock(convo.get_peer_host(), StacksBlockId([2u8; 32])), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + format!("/v2/blocks/{}", StacksBlockId([2u8; 32]).to_hex()), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getmicroblocks_indexed(convo.get_peer_host(), StacksBlockId([3u8; 32])), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + format!("/v2/microblocks/{}", StacksBlockId([3u8; 32]).to_hex()), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_post_transaction(convo.get_peer_host(), tx.clone()), + HttpRequestPreamble::new( + HttpVersion::Http11, + "POST".to_string(), + "/v2/transactions".to_string(), + "localhost".to_string(), + 12345, + true, + ) + .with_content_type(HttpContentType::Bytes) + .with_content_length(tx.serialize_to_vec().len() as u32), + tx_body + ) + ]; + + for (mut test, mut expected_http_preamble, expected_http_body) in fixtures.into_iter() { + if test.preamble().get_request_id().is_none() { + test.preamble_mut().set_request_id(123); + } + expected_http_preamble.set_request_id(test.preamble().get_request_id().unwrap_or(0)); + if let Some(h) = test.preamble().get_canonical_stacks_tip_height() { + expected_http_preamble.set_canonical_stacks_tip_height(Some(h)); + } + + let mut expected_bytes = vec![]; + expected_http_preamble + .consensus_serialize(&mut expected_bytes) + .unwrap(); + + test_debug!( + "Expected preamble:\n{}", + str::from_utf8(&expected_bytes).unwrap() + ); + + if expected_http_body.len() > 0 { + expected_http_preamble.set_content_type(HttpContentType::Bytes); + expected_http_preamble.set_content_length(expected_http_body.len() as u32) + } + + if expected_http_preamble.content_type.is_none() + || expected_http_preamble.content_type != Some(HttpContentType::Bytes) + { + test_debug!( + "Expected http body:\n{}", + str::from_utf8(&expected_http_body).unwrap() + ); + } else { + test_debug!("Expected http body (hex):\n{}", to_hex(&expected_http_body)); + } + + expected_bytes.append(&mut expected_http_body.clone()); + + let mut bytes = vec![]; + let mut http = StacksHttp::new( + "127.0.0.1:12345".parse().unwrap(), + &ConnectionOptions::default(), + ); + http.write_message(&mut bytes, &StacksHttpMessage::Request(test.clone())) + .unwrap(); + + assert_eq!(bytes, expected_bytes); + } +} + +#[test] +fn test_http_request_type_codec_err() { + let bad_content_lengths = vec![ + "GET /v2/neighbors HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/info HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/pox HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/headers/2100 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/blocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/microblocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 0\r\n\r\n", + ]; + for bad_content_length in bad_content_lengths { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), 
+ &ConnectionOptions::default(), + ); + let (preamble, offset) = http.read_preamble(bad_content_length.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, &bad_content_length.as_bytes()[offset..]); + + if let Ok(http_error) = e { + debug!("Got HTTP error: {:?}", &http_error); + + let error_str = format!("{:?}", &http_error); + assert!(error_str.find("-length body").is_some()); + assert!(error_str.find("status_code: 400").is_some()); + } else { + panic!("Expected error"); + } + } + + let bad_content_types = vec![ + "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + ]; + for bad_content_type in bad_content_types { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + let (preamble, offset) = http.read_preamble(bad_content_type.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, &bad_content_type.as_bytes()[offset..]); + + if let Ok(http_error) = e { + debug!("Got HTTP error: {:?}", &http_error); + + let error_str = format!("{:?}", &http_error); + assert!(error_str.find("Missing Content-Type").is_some()); + assert!(error_str.find("status_code: 400").is_some()); + } else { + panic!("Expected error"); + } + } +} + +#[test] +fn test_http_response_type_codec() { + let test_neighbors_info = RPCNeighborsInfo { + bootstrap: vec![], + sample: vec![ + RPCNeighbor { + network_id: 1, + peer_version: 2, + addrbytes: PeerAddress([ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, + 0x0d, 0x0e, 0x0f, + ]), + port: 12345, + public_key_hash: Hash160::from_bytes( + &hex_bytes("1111111111111111111111111111111111111111").unwrap(), + ) + .unwrap(), + authenticated: true, + stackerdbs: Some(vec![]), + }, + RPCNeighbor { + network_id: 3, + peer_version: 4, + addrbytes: PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, + 0x02, 0x03, 0x04, + ]), + port: 23456, + public_key_hash: Hash160::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + authenticated: false, + stackerdbs: Some(vec![]), + }, + ], + inbound: vec![], + outbound: vec![], + }; + + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let test_block_info = make_codec_test_block(5); + let test_microblock_info = make_sample_microblock_stream(&privk, &test_block_info.block_hash()); + + let mut test_block_info_bytes = vec![]; + test_block_info + .consensus_serialize(&mut test_block_info_bytes) + .unwrap(); + + let mut test_microblock_info_bytes = vec![]; + test_microblock_info + .consensus_serialize(&mut test_microblock_info_bytes) + .unwrap(); + + let tests = vec![ + // length is known + ( + StacksHttpResponse::new_getneighbors(test_neighbors_info.clone(), true), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_getblock(test_block_info.clone(), true), + "GET".to_string(), + format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), + ), + ( + StacksHttpResponse::new_getmicroblocks_indexed(test_microblock_info.clone(), true), + "GET".to_string(), + format!( + "/v2/microblocks/{}", + test_microblock_info[0].block_hash().to_hex() + ), + ), + ( + StacksHttpResponse::new_posttransaction(Txid([0x01; 32]), true), + "POST".to_string(), + "/v2/transactions".to_string(), + ), + // length is unknown + ( + StacksHttpResponse::new_getneighbors(test_neighbors_info.clone(), false), + "GET".to_string(), 
+ "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_getblock(test_block_info.clone(), false), + "GET".to_string(), + format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), + ), + ( + StacksHttpResponse::new_getmicroblocks_indexed(test_microblock_info.clone(), false), + "GET".to_string(), + format!( + "/v2/microblocks/{}", + test_microblock_info[0].block_hash().to_hex() + ), + ), + ( + StacksHttpResponse::new_posttransaction(Txid([0x01; 32]), false), + "POST".to_string(), + "/v2/transactions".to_string(), + ), + // errors without error messages + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(400, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(401, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(402, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(403, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(404, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(500, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(503, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(502, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + // errors with specific messages + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(400, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(401, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(402, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(403, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(404, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(500, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(503, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(502, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ]; + let expected_http_preambles = vec![ + // length is known + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), + HttpContentType::JSON, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some(test_block_info.serialize_to_vec().len() as u32), + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + 
Some(test_microblock_info_bytes.len() as u32), + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some((Txid([0x01; 32]).to_hex().len() + 2) as u32), + HttpContentType::JSON, + true, + ), + // length is unknown + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::JSON, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::JSON, + true, + ), + // errors + HttpResponsePreamble::error_text(400, http_reason(400), ""), + HttpResponsePreamble::error_text(401, http_reason(401), ""), + HttpResponsePreamble::error_text(402, http_reason(402), ""), + HttpResponsePreamble::error_text(403, http_reason(403), ""), + HttpResponsePreamble::error_text(404, http_reason(404), ""), + HttpResponsePreamble::error_text(500, http_reason(500), ""), + HttpResponsePreamble::error_text(503, http_reason(503), ""), + // generic error + HttpResponsePreamble::error_text(502, http_reason(502), ""), + // errors with messages + HttpResponsePreamble::error_text(400, http_reason(400), "foo"), + HttpResponsePreamble::error_text(401, http_reason(401), "foo"), + HttpResponsePreamble::error_text(402, http_reason(402), "foo"), + HttpResponsePreamble::error_text(403, http_reason(403), "foo"), + HttpResponsePreamble::error_text(404, http_reason(404), "foo"), + HttpResponsePreamble::error_text(500, http_reason(500), "foo"), + HttpResponsePreamble::error_text(503, http_reason(503), "foo"), + // generic error + HttpResponsePreamble::error_text(502, http_reason(502), "foo"), + ]; + + let expected_http_bodies = vec![ + // with content-length + serde_json::to_string(&test_neighbors_info) + .unwrap() + .as_bytes() + .to_vec(), + test_block_info.serialize_to_vec(), + test_microblock_info_bytes.clone(), + Txid([0x1; 32]).to_hex().as_bytes().to_vec(), + // with transfer-encoding: chunked + serde_json::to_string(&test_neighbors_info) + .unwrap() + .as_bytes() + .to_vec(), + test_block_info.serialize_to_vec(), + test_microblock_info_bytes.clone(), + Txid([0x1; 32]).to_hex().as_bytes().to_vec(), + // errors + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + // errors with messages + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + ]; + + for ((test, request_verb, request_path), (expected_http_preamble, _expected_http_body)) in + tests.iter().zip( + expected_http_preambles + .iter() + .zip(expected_http_bodies.iter()), + ) + { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + let mut bytes = vec![]; + test_debug!("write body:\n{:?}\n", test); + + http.write_message(&mut bytes, &StacksHttpMessage::Response((*test).clone())) + .unwrap(); + + http.set_response_handler(request_verb, request_path); + let (mut preamble, offset) = match http.read_preamble(&bytes) { + Ok((p, o)) => (p, o), + Err(e) => { + test_debug!("first 4096 bytes:\n{:?}\n", &bytes[0..].to_vec()); + test_debug!("error: {:?}", &e); + assert!(false); + unreachable!(); + } + }; + + 
test_debug!( + "{} {}: read preamble of {} bytes\n{:?}\n", + request_verb, + request_path, + offset, + preamble + ); + + let (mut message, _total_len) = if expected_http_preamble.is_chunked() { + let (msg_opt, len) = http + .stream_payload(&preamble, &mut &bytes[offset..]) + .unwrap(); + (msg_opt.unwrap().0, len) + } else { + http.read_payload(&preamble, &bytes[offset..]).unwrap() + }; + + test_debug!("got message\n{:?}\n", &message); + + // check everything in the parsed preamble except for the extra headers + match preamble { + StacksHttpPreamble::Response(ref mut req) => { + assert_eq!(req.headers.len(), 5); + assert!(req.headers.get("access-control-allow-headers").is_some()); + assert!(req.headers.get("access-control-allow-methods").is_some()); + assert!(req.headers.get("access-control-allow-origin").is_some()); + assert!(req.headers.get("server").is_some()); + assert!(req.headers.get("date").is_some()); + req.headers.clear(); + } + StacksHttpPreamble::Request(_) => { + panic!("parsed a request"); + } + } + + assert_eq!( + preamble, + StacksHttpPreamble::Response((*expected_http_preamble).clone()) + ); + + // note that message's headers contain cors headers and the like, which we don't synthesize + // here + match message { + StacksHttpMessage::Response(ref mut response) => response.clear_headers(), + _ => { + panic!("Not an HTTP response"); + } + } + assert_eq!(message, StacksHttpMessage::Response((*test).clone())); + assert_eq!(http.num_pending(), 0); + } +} + +#[test] +fn test_http_response_type_codec_err() { + let request_paths = vec![ + ( + "GET", + "/v2/blocks/1111111111111111111111111111111111111111111111111111111111111111", + ), + ("POST", "/v2/transactions"), + ("GET", "/v2/neighbors"), + ("GET", "/v2/neighbors"), + ("GET", "/v2/neighbors"), + ]; + let bad_request_payloads = vec![ + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 2\r\n\r\nab", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 4\r\n\r\n\"ab\"", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\n{", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\na", + "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/octet-stream\r\nContent-length: 2\r\n\r\n{}", + ]; + let expected_bad_request_payload_errors = vec![ + "Invalid content-type", + "bad length 2 for hex string", + "Not enough bytes", + "Failed to parse", + "expected text/plain", + ]; + for (test, (expected_error, (request_verb, request_path))) in bad_request_payloads.iter().zip( + expected_bad_request_payload_errors + .iter() + .zip(request_paths), + ) { + test_debug!( + "Expect failure:\n{}\nExpected error: '{}'", + test, + expected_error + ); + + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + http.set_response_handler(request_verb, request_path); + + let (preamble, offset) = http.read_preamble(test.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, &test.as_bytes()[offset..]); + let errstr = format!("{:?}", &e); + assert!(e.is_err()); + assert!( + e.unwrap_err().to_string().find(expected_error).is_some(), + "{}", + errstr + ); + } +} + +#[test] +fn test_http_duplicate_concurrent_streamed_response_fails() { + // do not permit multiple in-flight chunk-encoded HTTP 
responses with the same request ID. + let valid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n37\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; + let invalid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n10\r\nxxxxxxxxxxxxxxxx\r\n0\r\n\r\n"; + let invalid_chunked_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n38\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; + + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(valid_neighbors_response.as_bytes()) + .unwrap(); + assert_eq!(http.num_pending(), 1); + + // can't do this twice + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // finish reading the body + let msg = http + .stream_payload( + &preamble, + &mut &valid_neighbors_response.as_bytes()[offset..], + ) + .unwrap(); + match msg { + (Some((StacksHttpMessage::Response(response), _)), _) => assert_eq!( + response.decode_rpc_neighbors().unwrap(), + RPCNeighborsInfo { + bootstrap: vec![], + sample: vec![], + inbound: vec![], + outbound: vec![] + } + ), + _ => { + error!("Got {:?}", &msg); + assert!(false); + } + } + assert_eq!(http.num_pending(), 0); + + // can read the preamble again, but only once + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(invalid_neighbors_response.as_bytes()) + .unwrap(); + assert_eq!(http.num_pending(), 1); + + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // reading a corrupt body unlocks the ability to read the preamble again + let res = http.stream_payload( + &preamble, + &mut &invalid_neighbors_response.as_bytes()[offset..], + ); + assert!(res.unwrap_err().to_string().find("JSON").is_some()); + assert_eq!(http.num_pending(), 0); + + // can read the premable again, but only once + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(invalid_chunked_response.as_bytes()) + .unwrap(); + + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // reading a corrupt chunk stream unlocks the ability to read the preamble again + let res = http.stream_payload( + &preamble, + &mut &invalid_chunked_response.as_bytes()[offset..], + ); + assert!(res + .unwrap_err() + .to_string() + .find("Invalid chunk trailer") + .is_some()); + assert_eq!(http.num_pending(), 0); +} + +#[test] +fn test_http_parse_proof_tip_query() { + let query_txt = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .tip_request(); + match tip_req { + 
TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" + ) + .unwrap() + ), + _ => panic!(), + } + + // last parseable tip is taken + let query_txt_dup = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_dup)) + .tip_request(); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1" + ) + .unwrap() + ), + _ => panic!(), + } + + // last parseable tip is taken + let query_txt_dup = "tip=bad&tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_dup)) + .tip_request(); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1" + ) + .unwrap() + ), + _ => panic!(), + } + + // tip can be skipped + let query_txt_bad = "tip=bad"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_bad)) + .tip_request(); + assert_eq!(tip_req, TipRequest::UseLatestAnchoredTip); + + // tip can be skipped + let query_txt_none = "tip="; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_none)) + .tip_request(); + assert_eq!(tip_req, TipRequest::UseLatestAnchoredTip); +} + +#[test] +fn test_http_parse_proof_request_query() { + let query_txt = ""; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=0"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); + + let query_txt = "proof=0&proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); + + let query_txt = "proof=1&proof=0"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=oops"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=oops&proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); +} From 254b5a1bff9c40f48d3c9132c795429f05d925ad Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:07:41 -0400 Subject: [PATCH 053/107] refactor: put call-read-only RPC endpoint into its own file --- stackslib/src/net/api/callreadonly.rs | 390 ++++++++++++++++++++ stackslib/src/net/api/tests/callreadonly.rs | 285 ++++++++++++++ 2 files changed, 675 insertions(+) create mode 100644 stackslib/src/net/api/callreadonly.rs create mode 100644 stackslib/src/net/api/tests/callreadonly.rs diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs new file mode 100644 index 0000000000..6d385d9218 --- /dev/null +++ b/stackslib/src/net/api/callreadonly.rs @@ -0,0 +1,390 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public 
benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPayload, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::analysis::CheckErrors; +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::errors::Error as ClarityRuntimeError; +use clarity::vm::errors::Error::Unchecked; +use clarity::vm::errors::InterpreterError; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; +use clarity::vm::SymbolicExpression; +use clarity::vm::Value; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Clone, Serialize, Deserialize)] +pub struct CallReadOnlyRequestBody { + pub sender: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub sponsor: Option, + pub arguments: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CallReadOnlyResponse { + pub okay: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub cause: Option, +} + +#[derive(Clone)] +pub struct RPCCallReadOnlyRequestHandler { + 
maximum_call_argument_size: u32, + read_only_call_limit: ExecutionCost, + + /// Runtime fields + pub contract_identifier: Option, + pub function: Option, + pub sender: Option, + pub sponsor: Option, + pub arguments: Option>, +} + +impl RPCCallReadOnlyRequestHandler { + pub fn new(maximum_call_argument_size: u32, read_only_call_limit: ExecutionCost) -> Self { + Self { + maximum_call_argument_size, + read_only_call_limit, + contract_identifier: None, + function: None, + sender: None, + sponsor: None, + arguments: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCCallReadOnlyRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/call-read/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < self.maximum_call_argument_size) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for CallReadOnly ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let function = request::get_clarity_name(captures, "function")?; + let body: CallReadOnlyRequestBody = serde_json::from_slice(body) + .map_err(|_e| Error::DecodeError("Failed to parse JSON body".into()))?; + + let sender = PrincipalData::parse(&body.sender) + .map_err(|_e| Error::DecodeError("Failed to parse sender principal".into()))?; + + let sponsor = if let Some(sponsor) = body.sponsor { + Some( + PrincipalData::parse(&sponsor) + .map_err(|_e| Error::DecodeError("Failed to parse sponsor principal".into()))?, + ) + } else { + None + }; + + // arguments must be valid Clarity values + let arguments = body + .arguments + .into_iter() + .map(|hex| Value::try_deserialize_hex_untyped(&hex).ok()) + .collect::>>() + .ok_or_else(|| Error::DecodeError("Failed to deserialize argument value".into()))?; + + self.contract_identifier = Some(contract_identifier); + self.function = Some(function); + self.sender = Some(sender); + self.sponsor = sponsor; + self.arguments = Some(arguments); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.function = None; + self.sender = None; + self.sponsor = None; + self.arguments = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("Missing `contract_identifier`".into()))?; + let function = self + .function + .take() + .ok_or(NetError::SendError("Missing `function`".into()))?; + let sender = self + .sender + .take() + .ok_or(NetError::SendError("Missing `sender`".into()))?; + let sponsor = self.sponsor.clone(); + let arguments = self + .arguments + .take() + .ok_or(NetError::SendError("Missing `arguments`".into()))?; + + // run the read-only call + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let args: Vec<_> = arguments + .iter() + .map(|x| SymbolicExpression::atom_value(x.clone())) + .collect(); + + let mainnet = chainstate.mainnet; + let chain_id = chainstate.chain_id; + let mut cost_limit = self.read_only_call_limit.clone(); + cost_limit.write_length = 0; + cost_limit.write_count = 0; + + 
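+                    // Zeroing the write budget above is what enforces read-only execution:
+                    // any attempted state write exhausts the (zero) allowance and surfaces as
+                    // CostBalanceExceeded with write_count > 0, which is mapped to the
+                    // "NotReadOnly" cause when the response is built below.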
chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + let cost_track = clarity_tx + .with_clarity_db_readonly(|clarity_db| { + LimitedCostTracker::new_mid_block( + mainnet, chain_id, cost_limit, clarity_db, epoch, + ) + }) + .map_err(|_| { + ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) + })?; + + let clarity_version = clarity_tx + .with_analysis_db_readonly(|analysis_db| { + analysis_db.get_clarity_version(&contract_identifier) + }) + .map_err(|_| { + ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( + "{}", + &contract_identifier + ))) + })?; + + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + clarity_version, + sender, + sponsor, + cost_track, + |env| { + // we want to execute any function as long as no actual writes are made as + // opposed to be limited to purely calling `define-read-only` functions, + // so use `read_only = false`. This broadens the number of functions that + // can be called, and also circumvents limitations on `define-read-only` + // functions that can not use `contrac-call?`, even when calling other + // read-only functions + env.execute_contract( + &contract_identifier, + function.as_str(), + &args, + false, + ) + }, + ) + }) + }); + + // decode the response + let data_resp = match data_resp { + Ok(Some(Ok(data))) => CallReadOnlyResponse { + okay: true, + result: Some(format!("0x{}", data.serialize_to_hex())), + cause: None, + }, + Ok(Some(Err(e))) => match e { + Unchecked(CheckErrors::CostBalanceExceeded(actual_cost, _)) + if actual_cost.write_count > 0 => + { + CallReadOnlyResponse { + okay: false, + result: None, + cause: Some("NotReadOnly".to_string()), + } + } + _ => CallReadOnlyResponse { + okay: false, + result: None, + cause: Some(e.to_string()), + }, + }, + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCCallReadOnlyRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let map_entry: CallReadOnlyResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(map_entry)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request to run a read-only function + pub fn new_callreadonlyfunction( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + sender: PrincipalData, + sponsor: Option, + function_name: ClarityName, + function_args: Vec, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!( + "/v2/contracts/call-read/{}/{}/{}", + &contract_addr, &contract_name, &function_name + ), + HttpRequestContents::new().for_tip(tip_req).payload_json( + serde_json::to_value(CallReadOnlyRequestBody { + sender: sender.to_string(), + sponsor: sponsor.map(|s| s.to_string()), + arguments: function_args.into_iter().map(|v| v.to_string()).collect(), + }) + .expect("FATAL: failed to encode infallible data"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_call_readonly_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: CallReadOnlyResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/callreadonly.rs b/stackslib/src/net/api/tests/callreadonly.rs new file mode 100644 index 0000000000..59cf9c9db7 --- /dev/null +++ b/stackslib/src/net/api/tests/callreadonly.rs @@ -0,0 +1,285 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
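For anyone exercising the new endpoint by hand, the POST body follows the `CallReadOnlyRequestBody` shape defined above: a `sender` principal, an optional `sponsor`, and `arguments` given as hex-encoded consensus-serialized Clarity values. A minimal sketch of producing such a body with plain serde; `CallBody` is a local stand-in that mirrors the field names from the patch rather than importing the real struct, and it assumes `serde` (with the derive feature) and `serde_json` as dependencies. In-tree callers would normally just use `StacksHttpRequest::new_callreadonlyfunction`.

```rust
use serde::Serialize;

// Local mirror of the CallReadOnlyRequestBody fields above, for illustration only.
#[derive(Serialize)]
struct CallBody {
    sender: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    sponsor: Option<String>,
    arguments: Vec<String>,
}

fn main() {
    let body = CallBody {
        sender: "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R".into(),
        sponsor: None,
        // each argument is the hex-encoded consensus serialization of a Clarity value (here: u1)
        arguments: vec!["0100000000000000000000000000000001".into()],
    };
    // POSTed to /v2/contracts/call-read/{address}/{contract}/{function}
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```

Note that the handler rejects zero-length bodies and bodies at or above `maximum_call_argument_size`, and it requires `Content-Type: application/json`.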
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = + callreadonly::RPCCallReadOnlyRequestHandler::new(4096, BLOCK_LIMIT_MAINNET_21); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.function, Some("ro-test".into())); + assert_eq!( + handler.sender, + Some(PrincipalData::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap()) + ); + assert_eq!(handler.sponsor, None); + assert_eq!(handler.arguments, Some(vec![])); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + // restart clears the handler state + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.function.is_none()); + assert!(handler.sender.is_none()); + assert!(handler.sponsor.is_none()); + assert!(handler.arguments.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query confirmed tip + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-confirmed".try_into().unwrap(), + vec![], + 
TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query unconfirmed tip + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent function + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "does-not-exist".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent contract + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent tip + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-confirmed".try_into().unwrap(), + vec![], + TipRequest::SpecificTip(StacksBlockId([0x11; 32])), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // confirmed tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(resp.okay); + assert!(resp.result.is_some()); + assert!(resp.cause.is_none()); + + // u1 + assert_eq!(resp.result.unwrap(), "0x0100000000000000000000000000000001"); + + // unconfirmed tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(resp.okay); + assert!(resp.result.is_some()); + assert!(resp.cause.is_none()); + + // (ok 1) + assert_eq!( + resp.result.unwrap(), + "0x070000000000000000000000000000000001" + ); + + // non-existent function + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(!resp.okay); + assert!(resp.result.is_none()); + assert!(resp.cause.is_some()); + + assert!(resp.cause.unwrap().find("UndefinedFunction").is_some()); + + // non-existent function + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + 
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(!resp.okay); + assert!(resp.result.is_none()); + assert!(resp.cause.is_some()); + + assert!(resp.cause.unwrap().find("NoSuchContract").is_some()); + + // non-existent tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, payload) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 7acdfb849bf1bb0490f6fd75a0340dc923acb295 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:08:02 -0400 Subject: [PATCH 054/107] refactor: put get-account RPC handler into its own file --- stackslib/src/net/api/getaccount.rs | 270 ++++++++++++++++++++++ stackslib/src/net/api/tests/getaccount.rs | 217 +++++++++++++++++ 2 files changed, 487 insertions(+) create mode 100644 stackslib/src/net/api/getaccount.rs create mode 100644 stackslib/src/net/api/tests/getaccount.rs diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs new file mode 100644 index 0000000000..654babbf06 --- /dev/null +++ b/stackslib/src/net/api/getaccount.rs @@ -0,0 +1,270 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
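The `result` strings asserted in the call-read tests above (`0x0100…01` for `u1`, `0x0700…01` for `(ok 1)`) are hex-encoded consensus serializations of Clarity values, so a client can decode them with the same `try_deserialize_hex_untyped` helper the handler uses for arguments. A small sketch, assuming the `clarity` crate as a dependency (the function name `decode_result` is mine):

```rust
use clarity::vm::Value;

/// Turn the `result` field of a call-read response back into a Clarity value.
fn decode_result(hex_result: &str) -> Result<Value, String> {
    // the endpoint prefixes results with "0x"; strip it before deserializing
    let hex = hex_result.strip_prefix("0x").unwrap_or(hex_result);
    Value::try_deserialize_hex_untyped(hex).map_err(|e| e.to_string())
}

fn main() {
    // `u1`, as returned for the confirmed-tip call in the test above
    let value = decode_result("0x0100000000000000000000000000000001").unwrap();
    println!("{}", value); // prints: u1
}
```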
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityVersion; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AccountEntryResponse { + pub balance: String, + pub locked: String, + pub unlock_height: u64, + pub nonce: u64, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub balance_proof: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub nonce_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetAccountRequestHandler { + pub account: Option, +} +impl RPCGetAccountRequestHandler { + pub fn new() -> Self { + Self { account: None } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAccountRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/accounts/(?P{})$", + *PRINCIPAL_DATA_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let account = if let Some(value) = captures.name("principal") { + PrincipalData::parse(value.into()) + .map_err(|_e| Error::DecodeError("Failed to parse `principal` field".to_string()))? 
+ } else { + return Err(Error::DecodeError( + "Missing in request path: `principal`".into(), + )); + }; + + self.account = Some(account); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetAccountRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.account = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let account = self + .account + .take() + .ok_or(NetError::SendError("Missing `account`".into()))?; + let with_proof = contents.get_with_proof(); + + let account_opt_res = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_account_balance(&account); + let burn_block_height = + clarity_db.get_current_burnchain_block_height() as u64; + let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height(); + let (balance, balance_proof) = if with_proof { + clarity_db + .get_with_proof::(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) + } else { + clarity_db + .get::(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) + }; + + let key = ClarityDatabase::make_key_for_account_nonce(&account); + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; + + let unlocked = balance.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + + let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); + let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); + + AccountEntryResponse { + balance, + locked, + unlock_height, + nonce, + balance_proof, + nonce_proof, + } + }) + }) + }); + + let account = if let Ok(Some(account)) = account_opt_res { + account + } else { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Chain tip '{}' not found", &tip)), + ) + .try_into_contents() + .map_err(NetError::from); + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&account)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAccountRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let account: AccountEntryResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(account)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for an account + pub fn new_getaccount( + host: PeerHost, + principal: PrincipalData, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/accounts/{}", &principal), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_account_entry_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: AccountEntryResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getaccount.rs b/stackslib/src/net/api/tests/getaccount.rs new file mode 100644 index 0000000000..34e6d81835 --- /dev/null +++ b/stackslib/src/net/api/tests/getaccount.rs @@ -0,0 +1,217 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
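The `balance` and `locked` fields produced above are the big-endian bytes of a `u128`, hex-encoded with a `0x` prefix (see the `to_be_bytes()` calls), so parsing them back on the client side is a one-liner. A small sketch; the helper name is mine, and the amounts are the microSTX values asserted in the tests that follow:

```rust
/// Parse a `balance`/`locked` field from the /v2/accounts response back into microSTX.
fn parse_stx_amount(hex_amount: &str) -> Result<u128, std::num::ParseIntError> {
    u128::from_str_radix(hex_amount.trim_start_matches("0x"), 16)
}

fn main() {
    // 0x3b9aca00 == 1_000_000_000 microSTX, the funded test account's balance
    assert_eq!(
        parse_stx_amount("0x0000000000000000000000003b9aca00").unwrap(),
        1_000_000_000
    );
    // a missing account reports a zero balance rather than an error
    assert_eq!(
        parse_stx_amount("0x00000000000000000000000000000000").unwrap(),
        0
    );
}
```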
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + false, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getaccount::RPCGetAccountRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.account, + Some(PrincipalData::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap()) + ); + + assert_eq!(&preamble, request.preamble()); + + // reset works + handler.restart(); + assert!(handler.account.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing account + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + false, + ); + requests.push(request); + + // query existing account with proof + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query nonexistant + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST165ZBV86V4NJ0V73F52YZGBMJ0FZAQ1BM43C553") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing account with unconfirmed state + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + 
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9aca00"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 2); + assert!(resp.balance_proof.is_none()); + assert!(resp.nonce_proof.is_none()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9aca00"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 2); + assert!(resp.balance_proof.is_some()); + assert!(resp.nonce_proof.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x00000000000000000000000000000000"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 0); + assert_eq!(resp.balance_proof, Some("".to_string())); + assert_eq!(resp.nonce_proof, Some("".to_string())); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9ac985"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 4); + assert!(resp.balance_proof.is_some()); + assert!(resp.nonce_proof.is_some()); +} From 2da1bc9f7b046e8840ab70d678a85bc9cba3ee87 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:08:30 -0400 Subject: [PATCH 055/107] refactor: put get-attachment RPC handler into its own file --- stackslib/src/net/api/getattachment.rs | 175 +++++++++++++++++++ stackslib/src/net/api/tests/getattachment.rs | 124 +++++++++++++ 2 files changed, 299 insertions(+) create mode 100644 stackslib/src/net/api/getattachment.rs create mode 100644 stackslib/src/net/api/tests/getattachment.rs diff --git a/stackslib/src/net/api/getattachment.rs b/stackslib/src/net/api/getattachment.rs new file mode 100644 index 0000000000..fb9e5cecdd --- /dev/null +++ b/stackslib/src/net/api/getattachment.rs @@ -0,0 +1,175 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::collections::HashSet; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, HttpServerError, +}; + +use crate::net::atlas::{AttachmentPage, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; + +use crate::net::atlas::GetAttachmentResponse; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Hash160; + +use url::form_urlencoded; + +#[derive(Clone)] +pub struct RPCGetAttachmentRequestHandler { + pub attachment_hash: Option, +} + +impl RPCGetAttachmentRequestHandler { + pub fn new() -> Self { + Self { + attachment_hash: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAttachmentRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/attachments/(?P[0-9a-f]{40})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let attachment_hash_str = captures + .name("attachment_hash") + .ok_or(Error::DecodeError( + "Failed to match path to attachment_hash group".to_string(), + ))? 
+ .as_str(); + + self.attachment_hash = Some( + Hash160::from_hex(attachment_hash_str) + .map_err(|_| Error::DecodeError("Failed to decode `attachment_hash`".into()))?, + ); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetAttachmentRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.attachment_hash = None; + } + + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let attachment_hash = self + .attachment_hash + .take() + .ok_or(NetError::SendError("Missing `attachment_hash`".into()))?; + + let attachment_res = node.with_node_state( + |network, _sortdb, _chainstate, _mempool, _rpc_args| match network + .get_atlasdb() + .find_attachment(&attachment_hash) + { + Ok(Some(attachment)) => Ok(GetAttachmentResponse { attachment }), + _ => { + let msg = format!("Unable to find attachment"); + warn!("{}", msg); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )) + } + }, + ); + let attachment = match attachment_res { + Ok(attachment) => attachment, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&attachment)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAttachmentRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pages: GetAttachmentResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pages)?) + } +} + +impl StacksHttpRequest { + /// Make a new request for an attachment + pub fn new_getattachment(host: PeerHost, attachment_id: Hash160) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/attachments/{}", &attachment_id), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_atlas_get_attachment(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetAttachmentResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getattachment.rs b/stackslib/src/net/api/tests/getattachment.rs new file mode 100644 index 0000000000..80159ca5f0 --- /dev/null +++ b/stackslib/src/net/api/tests/getattachment.rs @@ -0,0 +1,124 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; +use stacks_common::util::hash::Hash160; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::Attachment; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getattachment(addr.into(), Hash160([0x11; 20])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getattachment::RPCGetAttachmentRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.attachment_hash, Some(Hash160([0x11; 20]))); + + assert_eq!(&preamble, request.preamble()); + + // restart works + handler.restart(); + assert!(handler.attachment_hash.is_none()); +} + +#[test] +fn test_try_make_response() { + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + let attachment_hash = attachment.hash(); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing attachment + let request = StacksHttpRequest::new_getattachment(addr.into(), attachment_hash.clone()); + requests.push(request); + + // query non-existant + let request = StacksHttpRequest::new_getattachment(addr.into(), Hash160([0x22; 20])); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_atlas_get_attachment().unwrap(); + assert_eq!(resp.attachment, attachment); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 5c0c7cc1ea5b3743900366fd0154bf25b89b9357 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:08:55 -0400 Subject: [PATCH 056/107] refactor: put get-attachments-inventory RPC handler into its own file --- stackslib/src/net/api/getattachmentsinv.rs | 269 ++++++++++++++++++ 
.../src/net/api/tests/getattachmentsinv.rs | 164 +++++++++++ 2 files changed, 433 insertions(+) create mode 100644 stackslib/src/net/api/getattachmentsinv.rs create mode 100644 stackslib/src/net/api/tests/getattachmentsinv.rs diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs new file mode 100644 index 0000000000..50cdf80f9c --- /dev/null +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -0,0 +1,269 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::collections::HashSet; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, HttpServerError, +}; + +use crate::net::atlas::{AttachmentPage, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; + +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; + +use crate::net::atlas::GetAttachmentsInvResponse; + +use url::form_urlencoded; + +#[derive(Clone)] +pub struct RPCGetAttachmentsInvRequestHandler { + pub index_block_hash: Option, + pub page_indexes: Option>, +} + +impl RPCGetAttachmentsInvRequestHandler { + pub fn new() -> Self { + Self { + index_block_hash: None, + page_indexes: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAttachmentsInvRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new("^/v2/attachments/inv$").unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
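+    /// Expects a query string of the form
+    /// `index_block_hash=<block id hex>&pages_indexes=<comma-separated page numbers>`.
+    /// Unparseable page numbers are silently dropped; the surviving indexes are
+    /// deduplicated and sorted before being stored on the handler.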
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let query_str = if let Some(qs) = query { + qs + } else { + return Err(Error::DecodeError( + "Invalid Http request: expecting index_block_hash and pages_indexes".to_string(), + )); + }; + + let mut index_block_hash = None; + let mut page_indexes = HashSet::new(); + + // expect index_block_hash= and page_indexes= + for (key, value) in form_urlencoded::parse(query_str.as_bytes()) { + if key == "index_block_hash" { + index_block_hash = StacksBlockId::from_hex(&value).ok(); + } else if key == "pages_indexes" { + if let Ok(pages_indexes_value) = value.parse::() { + for entry in pages_indexes_value.split(",") { + if let Ok(page_index) = entry.parse::() { + page_indexes.insert(page_index); + } + } + } + } + } + + let index_block_hash = if let Some(ibh) = index_block_hash { + ibh + } else { + return Err(Error::DecodeError( + "Invalid Http request: expecting index_block_hash".to_string(), + )); + }; + + if page_indexes.is_empty() { + return Err(Error::DecodeError( + "Invalid Http request: expecting pages_indexes".to_string(), + )); + } + + let mut page_index_list: Vec = page_indexes.into_iter().collect(); + page_index_list.sort(); + + self.index_block_hash = Some(index_block_hash); + self.page_indexes = Some(page_index_list); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetAttachmentsInvRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.index_block_hash = None; + self.page_indexes = None; + } + + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let index_block_hash = self + .index_block_hash + .take() + .ok_or(NetError::SendError("Missing `index_block_hash`".into()))?; + let page_indexes = self + .page_indexes + .take() + .ok_or(NetError::SendError("Missing `page_indexes`".into()))?; + + // We are receiving a list of page indexes with a chain tip hash. + // The amount of pages_indexes is capped by MAX_ATTACHMENT_INV_PAGES_PER_REQUEST (8) + // Pages sizes are controlled by the constant ATTACHMENTS_INV_PAGE_SIZE (8), which + // means that a `GET v2/attachments/inv` request can be requesting for a 64 bit vector + // at once. + // Since clients can be asking for non-consecutive pages indexes (1, 5_000, 10_000, ...), + // we will be handling each page index separately. + // We could also add the notion of "budget" so that a client could only get a limited number + // of pages when they are spanning over many blocks. 
+ if page_indexes.len() > MAX_ATTACHMENT_INV_PAGES_PER_REQUEST { + let msg = format!( + "Number of attachment inv pages is limited by {} per request", + MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + ); + warn!("{}", msg); + return StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + if page_indexes.len() == 0 { + let msg = format!("Page indexes missing"); + warn!("{}", msg); + return StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + + let mut pages = vec![]; + + for page_index in page_indexes.iter() { + let page_res = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + match network + .get_atlasdb() + .get_attachments_available_at_page_index(*page_index, &index_block_hash) + { + Ok(inventory) => Ok(AttachmentPage { + inventory, + index: *page_index, + }), + Err(e) => { + let msg = format!("Unable to read Atlas DB - {}", e); + warn!("{}", msg); + Err(msg) + } + } + }); + + match page_res { + Ok(page) => { + pages.push(page); + } + Err(msg) => { + return StacksHttpResponse::new_error(&preamble, &HttpNotFound::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + } + } + + let content = GetAttachmentsInvResponse { + block_id: index_block_hash.clone(), + pages, + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&content)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAttachmentsInvRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pages: GetAttachmentsInvResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pages)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for attachment inventory page + pub fn new_getattachmentsinv( + host: PeerHost, + index_block_hash: StacksBlockId, + page_indexes: HashSet, + ) -> StacksHttpRequest { + let page_list: Vec = page_indexes.into_iter().map(|i| format!("{}", i)).collect(); + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/attachments/inv".into(), + HttpRequestContents::new() + .query_arg("index_block_hash".into(), format!("{}", &index_block_hash)) + .query_arg("pages_indexes".into(), page_list[..].join(",")), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_atlas_attachments_inv_response( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetAttachmentsInvResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getattachmentsinv.rs b/stackslib/src/net/api/tests/getattachmentsinv.rs new file mode 100644 index 0000000000..2b0e12df15 --- /dev/null +++ b/stackslib/src/net/api/tests/getattachmentsinv.rs @@ -0,0 +1,164 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
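On the request side, `new_getattachmentsinv` above flattens the page-index set into a single comma-separated `pages_indexes` query argument, and `try_parse_request` walks it back apart with `form_urlencoded`. A standalone sketch of that round trip, using the same `url` crate the handler already imports (variable names are mine; the block id is a dummy 64-character hex string):

```rust
use std::collections::HashSet;
use url::form_urlencoded;

fn main() {
    // Client side (as in new_getattachmentsinv): join the page indexes with commas.
    let pages: HashSet<u32> = [1u32, 3, 5].into_iter().collect();
    let page_list: Vec<String> = pages.iter().map(|i| i.to_string()).collect();
    let query = format!(
        "index_block_hash={}&pages_indexes={}",
        "11".repeat(32),
        page_list.join(",")
    );

    // Handler side: the same parsing shape as try_parse_request above.
    let mut parsed: HashSet<u32> = HashSet::new();
    for (key, value) in form_urlencoded::parse(query.as_bytes()) {
        if key == "pages_indexes" {
            for entry in value.split(',') {
                if let Ok(page_index) = entry.parse::<u32>() {
                    parsed.insert(page_index);
                }
            }
        }
    }
    assert_eq!(parsed, pages);
}
```

Keeping the indexes in a `HashSet` on both sides is what makes duplicate page numbers harmless; the handler additionally sorts them and caps the count at `MAX_ATTACHMENT_INV_PAGES_PER_REQUEST`.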
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use std::collections::HashSet; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::Attachment; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use super::test_rpc; +use super::TestRPC; + +use serde_json; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let mut pages = HashSet::new(); + for i in 0..10 { + pages.insert(i); + } + + let request = + StacksHttpRequest::new_getattachmentsinv(addr.into(), StacksBlockId([0x11; 32]), pages); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.index_block_hash, Some(StacksBlockId([0x11; 32]))); + assert_eq!( + handler.page_indexes, + Some(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.index_block_hash.is_none()); + assert!(handler.page_indexes.is_none()); +} + +#[test] +fn test_try_make_response() { + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + let attachment_hash = attachment.hash(); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + + let mut requests = vec![]; + let mut pages = HashSet::new(); + pages.insert(1); + + // query existing attachment + let request = StacksHttpRequest::new_getattachmentsinv( + addr.into(), + stacks_chain_tip.clone(), + pages.clone(), + ); + requests.push(request); + + // query non-existant block + let request = StacksHttpRequest::new_getattachmentsinv( + addr.into(), + StacksBlockId([0x11; 32]), + pages.clone(), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_atlas_attachments_inv_response().unwrap(); + + // there should be a bit set in the inventory vector + assert_eq!(resp.block_id, stacks_chain_tip); + assert_eq!(resp.pages.len(), 1); + assert_eq!(resp.pages[0].index, 1); + 
assert!(resp.pages[0].inventory.iter().find(|&&x| x == 1).is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_atlas_attachments_inv_response().unwrap(); + + // this is a HTTP 200, but no bits are set + assert_eq!(resp.block_id, StacksBlockId([0x11; 32])); + assert_eq!(resp.pages.len(), 1); + assert_eq!(resp.pages[0].index, 1); + assert!(resp.pages[0].inventory.iter().find(|&&x| x == 1).is_none()); +} From 0c302fe599ebba5ad765aced07799e0b9f890f3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:09:17 -0400 Subject: [PATCH 057/107] refactor: put get-block RPC handler into its own file --- stackslib/src/net/api/getblock.rs | 318 ++++++++++++++++++++++++ stackslib/src/net/api/tests/getblock.rs | 204 +++++++++++++++ 2 files changed, 522 insertions(+) create mode 100644 stackslib/src/net/api/getblock.rs create mode 100644 stackslib/src/net/api/tests/getblock.rs diff --git a/stackslib/src/net/api/getblock.rs b/stackslib/src/net/api/getblock.rs new file mode 100644 index 0000000000..ea280197e1 --- /dev/null +++ b/stackslib/src/net/api/getblock.rs @@ -0,0 +1,318 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
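// Illustrative sketch (not from the patch): the GET /v2/blocks/{block_id} handler below
// routes requests with a regex whose named capture group must match the name that
// try_parse_request() later reads back via captures.name("block_id"). A standalone
// example of that pattern with the `regex` crate:
use regex::Regex;

fn parse_block_id_path(path: &str) -> Option<String> {
    // 64 lowercase hex characters, i.e. a hex-encoded 32-byte index block hash
    let re = Regex::new(r"^/v2/blocks/(?P<block_id>[0-9a-f]{64})$").unwrap();
    let caps = re.captures(path)?;
    Some(caps.name("block_id")?.as_str().to_string())
}

fn main() {
    let path = format!("/v2/blocks/{}", "11".repeat(32));
    assert_eq!(parse_block_id_path(&path).unwrap().len(), 64);
    assert!(parse_block_id_path("/v2/blocks/not-a-block-id").is_none());
}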
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{httpcore::StacksHttp, Error as NetError, TipRequest}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlock; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCBlocksRequestHandler { + pub block_id: Option, +} + +impl RPCBlocksRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct StacksBlockStream { + /// index block hash of the block to download + pub index_block_hash: StacksBlockId, + /// offset into whatever is being read (the blob, or the file in the chunk store) + pub offset: u64, + /// total number of bytes read. + pub total_bytes: u64, + + /// connection to the underlying chainstate + blocks_path: String, +} + +impl StacksBlockStream { + pub fn new(chainstate: &StacksChainState, block: &StacksBlockId) -> Result { + let _ = StacksChainState::load_staging_block_info(chainstate.db(), block)? + .ok_or(ChainError::NoSuchBlockError)?; + + let blocks_path = chainstate.blocks_path.clone(); + + Ok(StacksBlockStream { + index_block_hash: block.clone(), + offset: 0, + total_bytes: 0, + blocks_path, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCBlocksRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/blocks/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id_str = captures + .name("block_id") + .ok_or(Error::DecodeError( + "Failed to match path to block ID group".to_string(), + ))? 
+ .as_str(); + + let block_id = StacksBlockId::from_hex(block_id_str) + .map_err(|_| Error::DecodeError("Invalid path: unparseable block ID".to_string()))?; + self.block_id = Some(block_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCBlocksRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("Missing `block_id`".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksBlockStream::new(chainstate, &block_id) + }); + + // start loading up the block + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCBlocksRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksBlockStream { + #[cfg(test)] + fn hint_chunk_size(&self) -> usize { + // make this hurt + 32 + } + + #[cfg(not(test))] + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + let block_path = + StacksChainState::get_index_block_path(&self.blocks_path, &self.index_block_hash) + .map_err(|e| { + let msg = format!( + "Failed to load block path for {}: {:?}", + &self.index_block_hash, &e + ); + warn!("{}", &msg); + msg + })?; + + // The reason we open a file on each call to stream data is because we don't want to + // exhaust the supply of file descriptors. Maybe a future version of this code will do + // something like cache the set of open files so we don't have to keep re-opening them. 
+ let mut file_fd = fs::OpenOptions::new() + .read(true) + .write(false) + .create(false) + .truncate(false) + .open(&block_path) + .map_err(|e| { + if e.kind() == io::ErrorKind::NotFound { + let msg = format!("Blook file not found for {}", &self.index_block_hash); + warn!("{}", &msg); + msg + } else { + let msg = format!("Failed to open block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + } + })?; + + file_fd.seek(SeekFrom::Start(self.offset)).map_err(|e| { + let msg = format!("Failed to read block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + })?; + + let mut buf = vec![0u8; self.hint_chunk_size()]; + let num_read = file_fd.read(&mut buf).map_err(|e| { + let msg = format!("Failed to read block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + })?; + + buf.truncate(num_read); + + self.offset += num_read as u64; + self.total_bytes += num_read as u64; + + Ok(buf) + } +} + +impl StacksHttpRequest { + pub fn new_getblock(host: PeerHost, index_block_hash: StacksBlockId) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/blocks/{}", &index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + #[cfg(test)] + pub fn new_getblock(block: StacksBlock, with_content_length: bool) -> StacksHttpResponse { + let value = block.serialize_to_vec(); + let length = value.len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::Bytes, + true, + ); + let body = HttpResponsePayload::Bytes(value); + StacksHttpResponse::new(preamble, body) + } + + /// Decode an HTTP response into a block. + /// If it fails, return Self::Error(..) + pub fn decode_block(self) -> Result { + let contents = self.get_http_payload_ok()?; + + // contents will be raw bytes + let block_bytes: Vec = contents.try_into()?; + let block = StacksBlock::consensus_deserialize(&mut &block_bytes[..])?; + + Ok(block) + } +} diff --git a/stackslib/src/net/api/tests/getblock.rs b/stackslib/src/net/api/tests/getblock.rs new file mode 100644 index 0000000000..5ffa67afc0 --- /dev/null +++ b/stackslib/src/net/api/tests/getblock.rs @@ -0,0 +1,204 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
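// Illustrative sketch (not from the patch): StacksBlockStream::generate_next_chunk above
// re-opens the block file on every call so the node never holds file descriptors open
// across chunks. A stripped-down, std-only version of the same seek/read/truncate
// pattern; callers drain it until an empty chunk comes back, exactly as
// test_stream_blocks does below.
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};

struct FileChunker {
    path: String,
    offset: u64,
    chunk_size: usize,
}

impl FileChunker {
    fn next_chunk(&mut self) -> io::Result<Vec<u8>> {
        // Re-opened on every call, as in the handler above.
        let mut f = File::open(&self.path)?;
        f.seek(SeekFrom::Start(self.offset))?;
        let mut buf = vec![0u8; self.chunk_size];
        let n = f.read(&mut buf)?;
        // An empty Vec signals end-of-stream to the caller.
        buf.truncate(n);
        self.offset += n as u64;
        Ok(buf)
    }
}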
+ +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use crate::util_lib::db::DBConn; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::api::getblock::StacksBlockStream; +use crate::net::http::HttpChunkGenerator; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getblock(addr.into(), StacksBlockId([0x11; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getblock::RPCBlocksRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.block_id, Some(StacksBlockId([0x11; 32]))); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing block + let request = StacksHttpRequest::new_getblock(addr.into(), stacks_chain_tip.clone()); + requests.push(request); + + // query non-existant block + let request = StacksHttpRequest::new_getblock(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the block + let response = responses.remove(0); + let resp = response.decode_block().unwrap(); + + assert_eq!( + StacksBlockHeader::make_index_block_hash(&consensus_hash, &resp.block_hash()), + stacks_chain_tip + ); + + // no block + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); 
+} + +#[test] +fn test_stream_blocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_16k_block(&privk); + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let index_block_header = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); + + // can't stream a non-existant block + assert!(StacksBlockStream::new(&chainstate, &index_block_header).is_err()); + + // store block to staging + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + 1, + 2, + ); + + // should succeed now + let mut stream = StacksBlockStream::new(&chainstate, &index_block_header).unwrap(); + + // stream it back + let mut all_block_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + all_block_bytes.len() + ); + all_block_bytes.append(&mut next_bytes); + } + + // should decode back into the block + let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); + assert_eq!(staging_block, block); + + // accept it + set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); + + // can still stream it + let mut stream = StacksBlockStream::new(&chainstate, &index_block_header).unwrap(); + + // stream from chunk store + let mut all_block_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + test_debug!( + "Got {} more bytes from chunkstore; add to {} total", + next_bytes.len(), + all_block_bytes.len() + ); + all_block_bytes.append(&mut next_bytes); + } + + // should decode back into the block + let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); + assert_eq!(staging_block, block); +} From 765960a9fd6e0355b60af5df6a3b0f101782359c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:09:40 -0400 Subject: [PATCH 058/107] refactor: put get-constant-value RPC handler into its own file --- stackslib/src/net/api/getconstantval.rs | 242 ++++++++++++++++++ stackslib/src/net/api/tests/getconstantval.rs | 197 ++++++++++++++ 2 files changed, 439 insertions(+) create mode 100644 stackslib/src/net/api/getconstantval.rs create mode 100644 stackslib/src/net/api/tests/getconstantval.rs diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs new file mode 100644 index 0000000000..fc36e3664d --- /dev/null +++ b/stackslib/src/net/api/getconstantval.rs @@ -0,0 +1,242 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ConstantValResponse { + pub data: String, +} + +#[derive(Clone)] +pub struct RPCGetConstantValRequestHandler { + pub constname: Option, + pub contract_identifier: Option, +} + +impl RPCGetConstantValRequestHandler { + pub fn new() -> Self { + Self { + constname: None, + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetConstantValRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/constant_val/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let constname = request::get_clarity_name(captures, "constname")?; + + self.contract_identifier = Some(contract_identifier); + self.constname = Some(constname); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetConstantValRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.constname = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let constant_name = self + .constname + .take() + .ok_or(NetError::SendError("`constname` not set".to_string()))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let contract = clarity_db.get_contract(&contract_identifier).ok()?; + + let cst = contract + .contract_context + .lookup_variable(constant_name.as_str())? + .serialize_to_hex(); + + let data = format!("0x{cst}"); + Some(ConstantValResponse { data }) + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Constant not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetConstantValRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let constant_val: ConstantValResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(constant_val)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a constant val + pub fn new_getconstantval( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + constant_name: ClarityName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/constant_val/{}/{}/{}", + &contract_addr, &contract_name, &constant_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_constant_val_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ConstantValResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getconstantval.rs b/stackslib/src/net/api/tests/getconstantval.rs new file mode 100644 index 0000000000..58b191b219 --- /dev/null +++ b/stackslib/src/net/api/tests/getconstantval.rs @@ -0,0 +1,197 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
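// Illustrative sketch (not from the patch): the handler above returns the constant as
// "0x" followed by the hex of its consensus-serialized Clarity value. Undoing the hex
// layer only needs std; turning the bytes back into a Clarity value is left to clarity's
// own deserializer and is not shown here.
fn decode_0x_hex(data: &str) -> Result<Vec<u8>, String> {
    let hex = data.strip_prefix("0x").ok_or("missing 0x prefix")?;
    if hex.len() % 2 != 0 {
        return Err("odd-length hex string".into());
    }
    (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).map_err(|e| e.to_string()))
        .collect()
}

fn main() {
    // The serialized constant value asserted in the test below.
    let bytes = decode_0x_hex("0x000000000000000000000000000000007b").unwrap();
    assert_eq!(bytes.len(), 17);
    assert_eq!(*bytes.last().unwrap(), 0x7b);
}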
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-const".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getconstantval::RPCGetConstantValRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.constname, Some("test-const".into())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.constname.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "cst".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "cst-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant data + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query non-existant 
contract + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "cst".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_constant_val_response().unwrap(); + + assert_eq!(resp.data, "0x000000000000000000000000000000007b"); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_constant_val_response().unwrap(); + + assert_eq!(resp.data, "0x00000000000000000000000000000001c8"); + + // no such data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 3d1cbbc667b4fcc5dbfd5a8d2ff17e80cc074891 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:10:01 -0400 Subject: [PATCH 059/107] refactor: put get-contract-abi RPC handler into its own file --- stackslib/src/net/api/getcontractabi.rs | 224 ++++++++++++++++++ stackslib/src/net/api/tests/getcontractabi.rs | 168 +++++++++++++ 2 files changed, 392 insertions(+) create mode 100644 stackslib/src/net/api/getcontractabi.rs create mode 100644 stackslib/src/net/api/tests/getcontractabi.rs diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs new file mode 100644 index 0000000000..841ca2ebc6 --- /dev/null +++ b/stackslib/src/net/api/getcontractabi.rs @@ -0,0 +1,224 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
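// Illustrative sketch (not from the patch): path_regex() below interpolates clarity's
// STANDARD_PRINCIPAL_REGEX_STRING and CONTRACT_NAME_REGEX_STRING into its pattern; the
// character classes here are deliberately loose approximations. What matters is that the
// group names line up with request::get_contract_address(captures, "address", "contract").
use regex::Regex;

fn parse_contract_path(path: &str) -> Option<(String, String)> {
    let re = Regex::new(
        r"^/v2/contracts/interface/(?P<address>[0-9A-Z]+)/(?P<contract>[a-zA-Z][a-zA-Z0-9-]*)$",
    )
    .unwrap();
    let caps = re.captures(path)?;
    Some((
        caps.name("address")?.as_str().to_string(),
        caps.name("contract")?.as_str().to_string(),
    ))
}

fn main() {
    let (addr, name) = parse_contract_path(
        "/v2/contracts/interface/ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R/hello-world",
    )
    .unwrap();
    assert!(addr.starts_with("ST"));
    assert_eq!(name, "hello-world");
}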
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::analysis::contract_interface_builder::ContractInterface; +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::clarity_store::make_contract_hash_key; +use clarity::vm::database::clarity_store::ContractCommitment; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Clone)] +pub struct RPCGetContractAbiRequestHandler { + pub contract_identifier: Option, +} + +impl RPCGetContractAbiRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetContractAbiRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/interface/(?P
{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + + self.contract_identifier = Some(contract_identifier); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetContractAbiRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + clarity_tx.with_analysis_db_readonly(|db| { + let contract = db.load_contract(&contract_identifier, &epoch)?; + contract.contract_interface + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No contract interface data found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetContractAbiRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let contract_src: ContractInterface = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(contract_src)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a contract ABI + pub fn new_getcontractabi( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/contracts/interface/{}/{}", + &contract_addr, &contract_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_contract_abi_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ContractInterface = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getcontractabi.rs b/stackslib/src/net/api/tests/getcontractabi.rs new file mode 100644 index 0000000000..3c44a6fe6f --- /dev/null +++ b/stackslib/src/net/api/tests/getcontractabi.rs @@ -0,0 +1,168 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
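// Illustrative sketch (not from the patch): the tests below exercise the handler
// lifecycle: try_parse_request fills the Option fields, try_handle_request take()s them
// exactly once, and restart() clears any leftovers. A stripped-down stand-in for that
// pattern:
struct HandlerSketch {
    contract_identifier: Option<String>,
}

impl HandlerSketch {
    fn try_parse(&mut self, id: &str) {
        self.contract_identifier = Some(id.to_string());
    }
    fn try_handle(&mut self) -> Result<String, &'static str> {
        // take() leaves None behind, so a second call without re-parsing fails.
        self.contract_identifier
            .take()
            .ok_or("`contract_identifier` not set")
    }
    fn restart(&mut self) {
        self.contract_identifier = None;
    }
}

fn main() {
    let mut handler = HandlerSketch {
        contract_identifier: None,
    };
    handler.try_parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world");
    assert!(handler.try_handle().is_ok());
    assert!(handler.try_handle().is_err());
    handler.restart();
    assert!(handler.contract_identifier.is_none());
}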
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getcontractabi::RPCGetContractAbiRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + 
response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_abi_response().unwrap(); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_abi_response().unwrap(); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From e9b07c4f11309fc70e31d526de8e21f25e5f3ec7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:10:26 -0400 Subject: [PATCH 060/107] chore: put get-contract-source RPC handler into its own file --- stackslib/src/net/api/getcontractsrc.rs | 247 ++++++++++++++++++ stackslib/src/net/api/tests/getcontractsrc.rs | 177 +++++++++++++ 2 files changed, 424 insertions(+) create mode 100644 stackslib/src/net/api/getcontractsrc.rs create mode 100644 stackslib/src/net/api/tests/getcontractsrc.rs diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs new file mode 100644 index 0000000000..42bcf0ef5b --- /dev/null +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -0,0 +1,247 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::clarity_store::make_contract_hash_key; +use clarity::vm::database::clarity_store::ContractCommitment; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ContractSrcResponse { + pub source: String, + pub publish_height: u32, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetContractSrcRequestHandler { + pub contract_identifier: Option, +} + +impl RPCGetContractSrcRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetContractSrcRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/source/(?P
{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + self.contract_identifier = Some(contract_identifier); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetContractSrcRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let with_proof = contents.get_with_proof(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let source = db.get_contract_src(&contract_identifier)?; + let contract_commit_key = make_contract_hash_key(&contract_identifier); + let (contract_commit, proof) = if with_proof { + db.get_with_proof::(&contract_commit_key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .expect("BUG: obtained source, but couldn't get contract commit") + } else { + db.get::(&contract_commit_key) + .map(|a| (a, None)) + .expect("BUG: obtained source, but couldn't get contract commit") + }; + + let publish_height = contract_commit.block_height; + Some(ContractSrcResponse { + source, + publish_height, + marf_proof: proof, + }) + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No contract source data found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetContractSrcRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let contract_src: ContractSrcResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(contract_src)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a contract's source code + pub fn new_getcontractsrc( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/contracts/source/{}/{}", &contract_addr, &contract_name), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_contract_src_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ContractSrcResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getcontractsrc.rs b/stackslib/src/net/api/tests/getcontractsrc.rs new file mode 100644 index 0000000000..c038d148c1 --- /dev/null +++ b/stackslib/src/net/api/tests/getcontractsrc.rs @@ -0,0 +1,177 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
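// Illustrative sketch (not from the patch): ContractSrcResponse above renames the MARF
// proof to "proof" on the wire and omits it entirely when no proof was requested. The
// struct below is a stand-in with the same serde attributes and a placeholder source
// string, just to show the resulting JSON.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct SrcResponseSketch {
    source: String,
    publish_height: u32,
    #[serde(rename = "proof", default, skip_serializing_if = "Option::is_none")]
    marf_proof: Option<String>,
}

fn main() {
    let without_proof = SrcResponseSketch {
        source: "(define-constant cst u123)".into(),
        publish_height: 1,
        marf_proof: None,
    };
    // No "proof" key at all when the field is None.
    assert_eq!(
        serde_json::to_string(&without_proof).unwrap(),
        r#"{"source":"(define-constant cst u123)","publish_height":1}"#
    );
}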
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getcontractsrc::RPCGetContractSrcRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + 
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_src_response().unwrap(); + assert_eq!(resp.publish_height, 1); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_src_response().unwrap(); + assert_eq!(resp.publish_height, 2); + assert!(resp.marf_proof.is_some()); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 92f47fde94a68b6f1eda1cbf4abb215a455e1283 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:10:46 -0400 Subject: [PATCH 061/107] refactor: put get-data-var RPC handler into its own file --- stackslib/src/net/api/getdatavar.rs | 257 ++++++++++++++++++++++ stackslib/src/net/api/tests/getdatavar.rs | 204 +++++++++++++++++ 2 files changed, 461 insertions(+) create mode 100644 stackslib/src/net/api/getdatavar.rs create mode 100644 stackslib/src/net/api/tests/getdatavar.rs diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs new file mode 100644 index 0000000000..1ea7065266 --- /dev/null +++ b/stackslib/src/net/api/getdatavar.rs @@ -0,0 +1,257 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
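+
+//! HTTP handler for `GET /v2/data_var/{principal}/{contract_name}/{var_name}`,
+//! which returns the current value of a Clarity data variable, optionally with
+//! a MARF proof (controlled by the `proof` query argument).
+//!
+//! As a rough sketch of the wire format (the hex payloads here are invented
+//! placeholders rather than real Clarity values), the response body
+//! deserializes into `DataVarResponse`:
+//!
+//! ```ignore
+//! let body = r#"{"data":"0x0100000000000000000000000000000001","proof":"0x00"}"#;
+//! let resp: DataVarResponse = serde_json::from_str(body).unwrap();
+//! assert!(resp.data.starts_with("0x"));
+//! assert!(resp.marf_proof.is_some()); // serialized under the JSON key "proof"
+//! ```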
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DataVarResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetDataVarRequestHandler { + pub contract_identifier: Option, + pub varname: Option, +} +impl RPCGetDataVarRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + varname: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetDataVarRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/data_var/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let varname = request::get_clarity_name(captures, "varname")?; + + self.contract_identifier = Some(contract_identifier); + self.varname = Some(varname); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetDataVarRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.varname = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let var_name = self + .varname + .take() + .ok_or(NetError::SendError("`varname` not set".to_string()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let with_proof = contents.get_with_proof(); + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_trip( + &contract_identifier, + StoreType::Variable, + &var_name, + ); + + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db.get(&key).map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(DataVarResponse { data, marf_proof }) + }) + }) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Data var not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetDataVarRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let datavar: DataVarResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(datavar)?) 
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new request for a data var
+    pub fn new_getdatavar(
+        host: PeerHost,
+        contract_addr: StacksAddress,
+        contract_name: ContractName,
+        var_name: ClarityName,
+        tip_req: TipRequest,
+        with_proof: bool,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "GET".into(),
+            format!(
+                "/v2/data_var/{}/{}/{}",
+                &contract_addr, &contract_name, &var_name
+            ),
+            HttpRequestContents::new()
+                .for_tip(tip_req)
+                .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_data_var_response(self) -> Result<DataVarResponse, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let contents_json: serde_json::Value = contents.try_into()?;
+        let resp: DataVarResponse = serde_json::from_value(contents_json)
+            .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?;
+        Ok(resp)
+    }
+}
diff --git a/stackslib/src/net/api/tests/getdatavar.rs b/stackslib/src/net/api/tests/getdatavar.rs
new file mode 100644
index 0000000000..f0a9d2ff0f
--- /dev/null
+++ b/stackslib/src/net/api/tests/getdatavar.rs
@@ -0,0 +1,204 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
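+
+//! Tests for the `GET /v2/data_var/{principal}/{contract_name}/{var_name}` RPC
+//! endpoint.
+//!
+//! A minimal decoding sketch, assuming `response` is a `StacksHttpResponse`
+//! produced by the handler in `getdatavar.rs`; the expected string is just an
+//! example of the `0x`-prefixed hex encoding of the variable's Clarity value:
+//!
+//! ```ignore
+//! let resp = response.decode_data_var_response().unwrap();
+//! assert_eq!(resp.data, "0x0100000000000000000000000000000001");
+//! assert!(resp.marf_proof.is_some());
+//! ```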
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::httpcore::RPCRequestHandler; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-var".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getdatavar::RPCGetDataVarRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.varname, Some("test-var".into())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.varname.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "bar-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant var + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + 
true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0000000000000000000000000000000000"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0100000000000000000000000000000001"); + assert!(resp.marf_proof.is_some()); + + // no such var + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From d0229d9474c915d53db0345dfd2a7fc1c98c84f5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:11:04 -0400 Subject: [PATCH 062/107] refactor: put get-headers RPC endpoint into its own file --- stackslib/src/net/api/getheaders.rs | 336 ++++++++++++++++++ stackslib/src/net/api/tests/getheaders.rs | 414 ++++++++++++++++++++++ 2 files changed, 750 insertions(+) create mode 100644 stackslib/src/net/api/getheaders.rs create mode 100644 stackslib/src/net/api/tests/getheaders.rs diff --git a/stackslib/src/net/api/getheaders.rs b/stackslib/src/net/api/getheaders.rs new file mode 100644 index 0000000000..7918baadf5 --- /dev/null +++ b/stackslib/src/net/api/getheaders.rs @@ -0,0 +1,336 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
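+
+//! HTTP handler for `GET /v2/headers/{quantity}`, which streams a JSON array
+//! of `ExtendedStacksHeader`s, walking backwards from the requested chain tip
+//! via each header's parent block ID. Requests for more than `MAX_HEADERS`
+//! headers are rejected as a bad request (`HttpBadRequest`).
+//!
+//! A minimal client-side sketch (placeholder peer address and quantity;
+//! `response` is assumed to be the node's reply):
+//!
+//! ```ignore
+//! let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 20443);
+//! let request =
+//!     StacksHttpRequest::new_getheaders(addr.into(), 100, TipRequest::UseLatestAnchoredTip);
+//! let headers: Vec<ExtendedStacksHeader> = response.decode_stacks_headers().unwrap();
+//! ```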
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{ + httpcore::{request, StacksHttp}, + Error as NetError, TipRequest, +}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::ExtendedStacksHeader; +use crate::chainstate::stacks::db::StacksChainState; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCHeadersRequestHandler { + pub quantity: Option, +} + +impl RPCHeadersRequestHandler { + pub fn new() -> Self { + Self { quantity: None } + } +} + +#[derive(Debug)] +pub struct StacksHeaderStream { + /// index block hash of the block to download + pub index_block_hash: StacksBlockId, + /// offset into whatever is being read (the blob, or the file in the chunk store) + pub offset: u64, + /// total number of bytes read. + pub total_bytes: u64, + /// number of headers remaining to stream + pub num_headers: u32, + + /// header buffer data + pub end_of_stream: bool, + pub corked: bool, + + /// connection to the underlying chainstate + chainstate_db: DBConn, + blocks_path: String, +} + +impl StacksHeaderStream { + pub fn new( + chainstate: &StacksChainState, + tip: &StacksBlockId, + num_headers_requested: u32, + ) -> Result { + let header_info = StacksChainState::load_staging_block_info(chainstate.db(), tip)? + .ok_or(ChainError::NoSuchBlockError)?; + + let num_headers = if header_info.height < (num_headers_requested as u64) { + header_info.height as u32 + } else { + num_headers_requested + }; + + let db = chainstate.reopen_db()?; + let blocks_path = chainstate.blocks_path.clone(); + + Ok(StacksHeaderStream { + index_block_hash: tip.clone(), + offset: 0, + total_bytes: 0, + num_headers: num_headers, + end_of_stream: false, + corked: false, + chainstate_db: db, + blocks_path, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCHeadersRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/headers/(?P[0-9]+)$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + + let quantity = request::get_u32(captures, "quantity")?; + self.quantity = Some(quantity); + + let contents = HttpRequestContents::new().query_string(query); + + Ok(contents) + } +} + +impl RPCRequestHandler for RPCHeadersRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.quantity = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let quantity = self + .quantity + .take() + .ok_or(NetError::SendError("`quantity` not set".to_string()))?; + if (quantity as usize) > MAX_HEADERS { + return StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Invalid request: requested more than {} headers\n", + MAX_HEADERS + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + + // find requested chain tip + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksHeaderStream::new(chainstate, &tip, quantity) + }); + + // start loading headers + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &tip)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block header: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::JSON, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCHeadersRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let headers: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(headers)?) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksHeaderStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + if self.total_bytes == 0 { + // headers are a JSON array. Start by writing '[', then write each header, and + // then write ']' + test_debug!("Opening header stream"); + self.total_bytes += 1; + return Ok(vec!['[' as u8]); + } + if self.num_headers == 0 { + test_debug!("End of header stream"); + self.end_of_stream = true; + } + if self.total_bytes > 0 && !self.end_of_stream && !self.corked { + // have more data to send. 
+
+            // read next header as JSON
+            match StacksChainState::read_extended_header(
+                &self.chainstate_db,
+                &self.blocks_path,
+                &self.index_block_hash,
+            ) {
+                Ok(extended_header) => {
+                    // serialize
+                    let mut header_bytes = vec![];
+                    serde_json::to_writer(&mut header_bytes, &extended_header).map_err(|e| {
+                        let msg = format!("Failed to encode Stacks header: {:?}", &e);
+                        warn!("{}", &msg);
+                        msg
+                    })?;
+
+                    // advance
+                    self.index_block_hash = extended_header.parent_block_id;
+                    self.num_headers -= 1;
+
+                    if self.num_headers > 0 {
+                        header_bytes.push(',' as u8);
+                    } else {
+                        self.end_of_stream = true;
+                    }
+
+                    self.total_bytes += header_bytes.len() as u64;
+                    return Ok(header_bytes);
+                }
+                Err(ChainError::DBError(DBError::NotFoundError)) => {
+                    // end of headers
+                    test_debug!("Header not found; ending stream");
+                    self.end_of_stream = true;
+                }
+                Err(e) => {
+                    warn!("Header DB error: {:?}", &e);
+                    self.end_of_stream = true;
+                    return Err(format!(
+                        "Failed to read extended header {}: {:?}",
+                        &self.index_block_hash, &e
+                    ));
+                }
+            };
+        }
+        if self.end_of_stream && !self.corked {
+            // sent all the headers we're going to send.
+            test_debug!("Corking header stream");
+            self.corked = true;
+            self.total_bytes += 1;
+            return Ok(vec![']' as u8]);
+        }
+
+        test_debug!("Header stream terminated");
+        // end of stream and corked. we're done!
+        return Ok(vec![]);
+    }
+}
+
+impl StacksHttpRequest {
+    pub fn new_getheaders(host: PeerHost, quantity: u64, tip_req: TipRequest) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "GET".into(),
+            format!("/v2/headers/{}", quantity),
+            HttpRequestContents::new().for_tip(tip_req),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_stacks_headers(self) -> Result<Vec<ExtendedStacksHeader>, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let headers: Vec<ExtendedStacksHeader> = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(headers)
+    }
+}
diff --git a/stackslib/src/net/api/tests/getheaders.rs b/stackslib/src/net/api/tests/getheaders.rs
new file mode 100644
index 0000000000..d3b80c7a03
--- /dev/null
+++ b/stackslib/src/net/api/tests/getheaders.rs
@@ -0,0 +1,414 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
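+
+//! Tests for the `GET /v2/headers/{quantity}` RPC endpoint and for
+//! `StacksHeaderStream`.
+//!
+//! The tests drain a `StacksHeaderStream` by calling
+//! `HttpChunkGenerator::generate_next_chunk` until it returns an empty chunk.
+//! A condensed sketch of that loop, mirroring `stream_headers_to_vec` below
+//! (the `stream` value is assumed to be an already-constructed stream):
+//!
+//! ```ignore
+//! let mut bytes = vec![];
+//! loop {
+//!     let mut chunk = stream.generate_next_chunk().unwrap();
+//!     if chunk.is_empty() {
+//!         break; // the stream is exhausted once it has been corked
+//!     }
+//!     bytes.append(&mut chunk);
+//! }
+//! let headers: Vec<ExtendedStacksHeader> = serde_json::from_slice(&bytes).unwrap();
+//! ```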
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use crate::util_lib::db::DBConn; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::api::getheaders::StacksHeaderStream; +use crate::net::http::HttpChunkGenerator; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use crate::net::httpcore::RPCRequestHandler; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getheaders::RPCHeadersRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.quantity, Some(2100)); + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.quantity.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing headers + let request = + StacksHttpRequest::new_getheaders(addr.into(), 2100, TipRequest::UseLatestAnchoredTip); + requests.push(request); + + // this fails if we use a microblock tip + let request = + StacksHttpRequest::new_getheaders(addr.into(), 2100, TipRequest::UseLatestUnconfirmedTip); + requests.push(request); + + // query existing headers + let request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(stacks_chain_tip.clone()), + ); + requests.push(request); + + // query non-existant headers + let 
request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(StacksBlockId([0x11; 32])), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the headers + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_headers().unwrap(); + + assert_eq!(resp.len(), 1); + + // fails on microblock tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); + + // got the headers + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_headers().unwrap(); + + assert_eq!(resp.len(), 1); + + // no headers + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +fn stream_headers_to_vec(stream: &mut StacksHeaderStream) -> Vec { + let mut header_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + header_bytes.append(&mut next_bytes); + } + header_bytes +} + +#[test] +fn test_stream_getheaders() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let mut blocks: Vec = vec![]; + let mut blocks_index_hashes: Vec = vec![]; + + // make a linear stream + for i in 0..32 { + let mut block = make_empty_coinbase_block(&privk); + + if i == 0 { + block.header.total_work.work = 1; + block.header.total_work.burn = 1; + } + if i > 0 { + block.header.parent_block = blocks.get(i - 1).unwrap().block_hash(); + block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1; + block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 1; + } + + let consensus_hash = ConsensusHash([((i + 1) as u8); 20]); + let parent_consensus_hash = ConsensusHash([(i as u8); 20]); + + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + i as u64, + i as u64, + ); + + blocks_index_hashes.push(StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash(), + )); + blocks.push(block); + } + + let mut blocks_fork = blocks[0..16].to_vec(); + let mut blocks_fork_index_hashes = blocks_index_hashes[0..16].to_vec(); + + // make a stream that branches off + for i in 16..32 { + let mut block = make_empty_coinbase_block(&privk); + + if i == 16 { + block.header.parent_block = blocks.get(i - 1).unwrap().block_hash(); + block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1; + block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 2; + } else { + block.header.parent_block = blocks_fork.get(i - 1).unwrap().block_hash(); + block.header.total_work.work = + blocks_fork.get(i - 1).unwrap().header.total_work.work + 1; + block.header.total_work.burn = + blocks_fork.get(i - 1).unwrap().header.total_work.burn + 2; + } + + let consensus_hash = ConsensusHash([((i + 1) as u8) | 0x80; 20]); + let 
parent_consensus_hash = if i == 16 { + ConsensusHash([(i as u8); 20]) + } else { + ConsensusHash([(i as u8) | 0x80; 20]) + }; + + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + i as u64, + i as u64, + ); + + blocks_fork_index_hashes.push(StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash(), + )); + blocks_fork.push(block); + } + + // can't stream a non-existant header + assert!(StacksHeaderStream::new(&chainstate, &StacksBlockId([0x11; 32]), 1).is_err()); + + // stream back individual headers + for i in 0..blocks.len() { + let mut stream = StacksHeaderStream::new(&chainstate, &blocks_index_hashes[i], 1).unwrap(); + let next_header_bytes = stream_headers_to_vec(&mut stream); + + test_debug!("Got {} total bytes", next_header_bytes.len()); + test_debug!( + "bytes: '{}'", + std::str::from_utf8(&next_header_bytes).unwrap() + ); + let header: Vec = + serde_json::from_reader(&mut &next_header_bytes[..]).unwrap(); + + assert_eq!(header.len(), 1); + let header = header[0].clone(); + assert_eq!(header.consensus_hash, ConsensusHash([(i + 1) as u8; 20])); + assert_eq!(header.header, blocks[i].header); + + if i > 0 { + assert_eq!(header.parent_block_id, blocks_index_hashes[i - 1]); + } + } + + // stream back a run of headers + let block_expected_headers: Vec = + blocks.iter().rev().map(|blk| blk.header.clone()).collect(); + + let block_expected_index_hashes: Vec = blocks_index_hashes + .iter() + .rev() + .map(|idx| idx.clone()) + .collect(); + + let block_fork_expected_headers: Vec = blocks_fork + .iter() + .rev() + .map(|blk| blk.header.clone()) + .collect(); + + let block_fork_expected_index_hashes: Vec = blocks_fork_index_hashes + .iter() + .rev() + .map(|idx| idx.clone()) + .collect(); + + // get them all -- ask for more than there is + let mut stream = + StacksHeaderStream::new(&chainstate, blocks_index_hashes.last().unwrap(), 4096).unwrap(); + let header_bytes = stream_headers_to_vec(&mut stream); + + eprintln!( + "headers: {}", + String::from_utf8(header_bytes.clone()).unwrap() + ); + let headers: Vec = + serde_json::from_reader(&mut &header_bytes[..]).unwrap(); + + assert_eq!(headers.len(), block_expected_headers.len()); + for ((i, h), eh) in headers + .iter() + .enumerate() + .zip(block_expected_headers.iter()) + { + assert_eq!(h.header, *eh); + assert_eq!(h.consensus_hash, ConsensusHash([(32 - i) as u8; 20])); + if i + 1 < block_expected_index_hashes.len() { + assert_eq!(h.parent_block_id, block_expected_index_hashes[i + 1]); + } + } + + let mut stream = + StacksHeaderStream::new(&chainstate, blocks_fork_index_hashes.last().unwrap(), 4096) + .unwrap(); + let header_bytes = stream_headers_to_vec(&mut stream); + let fork_headers: Vec = + serde_json::from_reader(&mut &header_bytes[..]).unwrap(); + + assert_eq!(fork_headers.len(), block_fork_expected_headers.len()); + for ((i, h), eh) in fork_headers + .iter() + .enumerate() + .zip(block_fork_expected_headers.iter()) + { + let consensus_hash = if i >= 16 { + ConsensusHash([((32 - i) as u8); 20]) + } else { + ConsensusHash([((32 - i) as u8) | 0x80; 20]) + }; + + assert_eq!(h.header, *eh); + assert_eq!(h.consensus_hash, consensus_hash); + if i + 1 < block_fork_expected_index_hashes.len() { + assert_eq!(h.parent_block_id, block_fork_expected_index_hashes[i + 1]); + } + } + + assert_eq!(fork_headers[16..32], headers[16..32]); + + // ask for only a few + let mut stream = + StacksHeaderStream::new(&chainstate, blocks_index_hashes.last().unwrap(), 10).unwrap(); + let 
header_bytes = stream_headers_to_vec(&mut stream);
+    eprintln!(
+        "header bytes: {}",
+        String::from_utf8(header_bytes.clone()).unwrap()
+    );
+
+    let headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(headers.len(), 10);
+    for (i, hdr) in headers.iter().enumerate() {
+        assert_eq!(hdr.header, block_expected_headers[i]);
+        assert_eq!(hdr.parent_block_id, block_expected_index_hashes[i + 1]);
+    }
+
+    // ask for only a few
+    let mut stream =
+        StacksHeaderStream::new(&chainstate, &blocks_fork_index_hashes.last().unwrap(), 10)
+            .unwrap();
+    let header_bytes = stream_headers_to_vec(&mut stream);
+    let headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(headers.len(), 10);
+    for (i, hdr) in headers.iter().enumerate() {
+        assert_eq!(hdr.header, block_fork_expected_headers[i]);
+        assert_eq!(hdr.parent_block_id, block_fork_expected_index_hashes[i + 1]);
+    }
+}
From 568de364c4be5b62073d7584d0d1d081cfbced21 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Mon, 2 Oct 2023 16:11:32 -0400
Subject: [PATCH 063/107] refactor: put get-node-info RPC handler into its own
 file

---
 stackslib/src/net/api/getinfo.rs       | 271 +++++++++++++++++++++++++
 stackslib/src/net/api/tests/getinfo.rs | 116 +++++++++++
 2 files changed, 387 insertions(+)
 create mode 100644 stackslib/src/net/api/getinfo.rs
 create mode 100644 stackslib/src/net/api/tests/getinfo.rs

diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs
new file mode 100644
index 0000000000..86b0bdd13f
--- /dev/null
+++ b/stackslib/src/net/api/getinfo.rs
@@ -0,0 +1,271 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
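+
+//! HTTP handler for `GET /v2/info`, which reports the node's burnchain and
+//! Stacks chain views, software version, and (optionally) its public key,
+//! affirmation maps, last PoX anchor block, and configured StackerDBs.
+//!
+//! A minimal client-side sketch (placeholder peer address; `response` is
+//! assumed to be the node's reply):
+//!
+//! ```ignore
+//! let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 20443);
+//! let request = StacksHttpRequest::new_getinfo(addr.into(), None);
+//! let info: RPCPeerInfoData = response.decode_peer_info().unwrap();
+//! ```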
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; + +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; + +use crate::version_string; + +/// The request to GET /v2/info +#[derive(Clone)] +pub struct RPCPeerInfoRequestHandler {} +impl RPCPeerInfoRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCAffirmationData { + pub heaviest: AffirmationMap, + pub stacks_tip: AffirmationMap, + pub sortition_tip: AffirmationMap, + pub tentative_best: AffirmationMap, +} + +/// Information about the last PoX anchor block +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCLastPoxAnchorData { + pub anchor_block_hash: BlockHeaderHash, + pub anchor_block_txid: Txid, +} + +/// The response to GET /v2/info +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPeerInfoData { + pub peer_version: u32, + pub pox_consensus: ConsensusHash, + pub burn_block_height: u64, + pub stable_pox_consensus: ConsensusHash, + pub stable_burn_block_height: u64, + pub server_version: String, + pub network_id: u32, + pub parent_network_id: u32, + pub stacks_tip_height: u64, + pub stacks_tip: BlockHeaderHash, + pub stacks_tip_consensus_hash: ConsensusHash, + pub genesis_chainstate_hash: Sha256Sum, + pub unanchored_tip: Option, + pub unanchored_seq: Option, + pub exit_at_block_height: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub node_public_key: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub node_public_key_hash: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub affirmations: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_pox_anchor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub stackerdbs: Option>, +} + +impl RPCPeerInfoData { + pub fn from_network( + network: &PeerNetwork, + chainstate: &StacksChainState, + exit_at_block_height: Option, + genesis_chainstate_hash: &Sha256Sum, + ) -> RPCPeerInfoData { + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let (unconfirmed_tip, unconfirmed_seq) = match chainstate.unconfirmed_state { + Some(ref unconfirmed) => { + if unconfirmed.num_mined_txs() > 0 { + ( + Some(unconfirmed.unconfirmed_chain_tip.clone()), + Some(unconfirmed.last_mblock_seq), + ) + } else { + (None, None) + } + } + None => (None, None), + }; + + let 
public_key = StacksPublicKey::from_private(&network.get_local_peer().private_key); + let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); + let public_key_hash = Hash160::from_node_public_key(&public_key); + let stackerdb_contract_ids = network.get_local_peer().stacker_dbs.clone(); + + RPCPeerInfoData { + peer_version: network.burnchain.peer_version, + pox_consensus: network.burnchain_tip.consensus_hash.clone(), + burn_block_height: network.chain_view.burn_block_height, + stable_pox_consensus: network.chain_view_stable_consensus_hash.clone(), + stable_burn_block_height: network.chain_view.burn_stable_block_height, + server_version, + network_id: network.local_peer.network_id, + parent_network_id: network.local_peer.parent_network_id, + stacks_tip_height: network.burnchain_tip.canonical_stacks_tip_height, + stacks_tip: network.burnchain_tip.canonical_stacks_tip_hash.clone(), + stacks_tip_consensus_hash: network + .burnchain_tip + .canonical_stacks_tip_consensus_hash + .clone(), + unanchored_tip: unconfirmed_tip, + unanchored_seq: unconfirmed_seq, + exit_at_block_height: exit_at_block_height, + genesis_chainstate_hash: genesis_chainstate_hash.clone(), + node_public_key: Some(public_key_buf), + node_public_key_hash: Some(public_key_hash), + affirmations: Some(RPCAffirmationData { + heaviest: network.heaviest_affirmation_map.clone(), + stacks_tip: network.stacks_tip_affirmation_map.clone(), + sortition_tip: network.sortition_tip_affirmation_map.clone(), + tentative_best: network.tentative_best_affirmation_map.clone(), + }), + last_pox_anchor: Some(RPCLastPoxAnchorData { + anchor_block_hash: network.last_anchor_block_hash.clone(), + anchor_block_txid: network.last_anchor_block_txid.clone(), + }), + stackerdbs: Some( + stackerdb_contract_ids + .into_iter() + .map(|cid| format!("{}", cid)) + .collect(), + ), + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPeerInfoRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/info$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPeerInfoRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let rpc_peer_info = + node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { + RPCPeerInfoData::from_network( + network, + chainstate, + rpc_args.exit_at_block_height.clone(), + &rpc_args.genesis_chainstate_hash, + ) + }); + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&rpc_peer_info)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPeerInfoRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let peer_info: RPCPeerInfoData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(peer_info)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_getinfo(host: PeerHost, stacks_height: Option) -> StacksHttpRequest { + let mut req = StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/info".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + req.preamble_mut() + .set_canonical_stacks_tip_height(stacks_height); + req + } +} + +impl StacksHttpResponse { + pub fn decode_peer_info(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let peer_info: RPCPeerInfoData = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(peer_info) + } +} diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs new file mode 100644 index 0000000000..8e79c98b69 --- /dev/null +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -0,0 +1,116 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
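+
+//! Tests for the `GET /v2/info` RPC endpoint.
+//!
+//! `RPCPeerInfoData` marks its newer fields (public key, affirmation maps,
+//! PoX anchor data, StackerDBs) as optional, so responses from older nodes
+//! that omit those keys must still deserialize. A condensed sketch of that
+//! serde pattern, using a hypothetical toy struct rather than the real type:
+//!
+//! ```ignore
+//! #[derive(serde::Deserialize)]
+//! struct Compat {
+//!     peer_version: u32,
+//!     #[serde(default)]
+//!     node_public_key: Option<String>,
+//! }
+//! // The missing optional key deserializes to `None` instead of failing.
+//! let v: Compat = serde_json::from_str(r#"{"peer_version":1}"#).unwrap();
+//! assert!(v.node_public_key.is_none());
+//! ```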
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::httpcore::RPCRequestHandler; +use serde_json; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getinfo(addr.into(), Some(123)); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut parsed_request = http + .try_parse_request(&parsed_preamble.expect_request(), &bytes[offset..]) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + parsed_request.add_header( + "X-Canonical-Stacks-Tip-Height".to_string(), + "123".to_string(), + ); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_getinfo_compat() { + let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; + let getinfo_no_pubkey_json = 
r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + + // they all parse + for json_obj in &[ + &old_getinfo_json, + &getinfo_no_pubkey_json, + &getinfo_no_pubkey_hash_json, + &getinfo_full_json, + ] { + let _v: RPCPeerInfoData = serde_json::from_str(json_obj).unwrap(); + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing account + let request = StacksHttpRequest::new_getinfo(addr.into(), Some(123)); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_peer_info().unwrap(); +} From 3056aa3c372ba313ce3abd6ceb8f64945cd2fcd3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:11:57 -0400 Subject: [PATCH 064/107] refactor: put get-is-trait-implemented RPC handler into its own file --- .../src/net/api/getistraitimplemented.rs | 277 ++++++++++++++++++ .../net/api/tests/getistraitimplemented.rs | 245 ++++++++++++++++ 2 files changed, 522 insertions(+) create mode 100644 stackslib/src/net/api/getistraitimplemented.rs create mode 100644 stackslib/src/net/api/tests/getistraitimplemented.rs diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs new file mode 100644 index 0000000000..09c772d1a0 --- /dev/null +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -0,0 +1,277 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// 
+// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::types::TraitIdentifier; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GetIsTraitImplementedResponse { + pub is_implemented: bool, +} + +#[derive(Clone)] +pub struct RPCGetIsTraitImplementedRequestHandler { + pub contract_identifier: Option, + pub trait_contract_identifier: Option, + pub trait_name: Option, +} +impl RPCGetIsTraitImplementedRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + trait_contract_identifier: None, + trait_name: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetIsTraitImplementedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/traits/(?P
{})/(?P{})/(?P{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let trait_contract_identifier = + request::get_contract_address(captures, "traitContractAddr", "traitContractName")?; + let trait_name = request::get_clarity_name(captures, "traitName")?; + + self.contract_identifier = Some(contract_identifier); + self.trait_contract_identifier = Some(trait_contract_identifier); + self.trait_name = Some(trait_name); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.trait_contract_identifier = None; + self.trait_name = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let trait_contract_id = + self.trait_contract_identifier + .take() + .ok_or(NetError::SendError( + "`trait_contract_identifier` not set".into(), + ))?; + let trait_name = self + .trait_name + .take() + .ok_or(NetError::SendError("`trait_name` not set".into()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let trait_id = + TraitIdentifier::new(trait_contract_id.issuer, trait_contract_id.name, trait_name); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let analysis = db.load_contract_analysis(&contract_identifier)?; + if analysis.implemented_traits.contains(&trait_id) { + Some(GetIsTraitImplementedResponse { + is_implemented: true, + }) + } else { + let trait_defining_contract = + db.load_contract_analysis(&trait_id.contract_identifier)?; + let trait_definition = + trait_defining_contract.get_defined_trait(&trait_id.name)?; + let is_implemented = analysis + .check_trait_compliance( + &db.get_clarity_epoch_version(), + &trait_id, + trait_definition, + ) + .is_ok(); + Some(GetIsTraitImplementedResponse { is_implemented }) + } + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new( + "No contract analysis found or trait definition not found".to_string(), + ), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip 
not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetIsTraitImplementedRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let is_implemented: GetIsTraitImplementedResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(is_implemented)?) + } +} + +impl StacksHttpResponse { + pub fn decode_is_trait_implemented_response( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetIsTraitImplementedResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} + +impl StacksHttpRequest { + /// Make a new is-trait-implemented request + pub fn new_get_is_trait_implemented( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + trait_contract_addr: StacksAddress, + trait_contract_name: ContractName, + trait_name: ClarityName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/traits/{}/{}/{}/{}/{}", + &contract_addr, + &contract_name, + &trait_contract_addr, + &trait_contract_name, + &trait_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/tests/getistraitimplemented.rs b/stackslib/src/net/api/tests/getistraitimplemented.rs new file mode 100644 index 0000000000..8760fe8ecf --- /dev/null +++ b/stackslib/src/net/api/tests/getistraitimplemented.rs @@ -0,0 +1,245 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
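+
+// A minimal illustrative sketch of this endpoint's JSON payload: the handler
+// responds with a single boolean field, so a serde round trip looks like the
+// test below (it assumes only `serde_json`, which the RPC code already uses).
+#[test]
+fn example_is_trait_implemented_response_shape() {
+    let resp = getistraitimplemented::GetIsTraitImplementedResponse {
+        is_implemented: true,
+    };
+    assert_eq!(
+        serde_json::to_string(&resp).unwrap(),
+        r#"{"is_implemented":true}"#
+    );
+    let parsed: getistraitimplemented::GetIsTraitImplementedResponse =
+        serde_json::from_str(r#"{"is_implemented":false}"#).unwrap();
+    assert!(!parsed.is_implemented);
+}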
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed-trait-def".try_into().unwrap(), + "trait-name".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getistraitimplemented::RPCGetIsTraitImplementedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!( + handler.trait_contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed-trait-def" + ) + .unwrap() + ) + ); + assert_eq!(handler.trait_name, Some("trait-name".into())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.trait_contract_identifier.is_none()); + assert!(handler.trait_name.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing, but does not conform + let request 
= StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait-2".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant trait + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(resp.is_implemented); + + // latest data but not conforming + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(!resp.is_implemented); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(resp.is_implemented); + + // no such trait + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 4c744356de25572a1bb01affd62aef4144c3b13a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:12:15 -0400 Subject: [PATCH 
065/107] refactor: put get-map-entry RPC handler into its own file --- stackslib/src/net/api/getmapentry.rs | 284 ++++++++++++++ stackslib/src/net/api/tests/getmapentry.rs | 419 +++++++++++++++++++++ 2 files changed, 703 insertions(+) create mode 100644 stackslib/src/net/api/getmapentry.rs create mode 100644 stackslib/src/net/api/tests/getmapentry.rs diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs new file mode 100644 index 0000000000..4e8df13fb2 --- /dev/null +++ b/stackslib/src/net/api/getmapentry.rs @@ -0,0 +1,284 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttp, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPayload, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::TipRequest; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::STXBalance; +use clarity::vm::database::StoreType; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; +use clarity::vm::ClarityName; +use clarity::vm::ClarityVersion; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MapEntryResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + 
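+// For illustration, `MapEntryResponse` above serializes to JSON such as
+//   {"data":"0x0a0100000000000000000000000000000002","proof":"0x..."}
+// where `data` is the hex-encoded, serialized Clarity value and `proof` is a
+// hex-encoded MARF proof that is omitted entirely when no proof was requested
+// (both values here are examples, not fixed constants).
+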
+#[derive(Clone)] +pub struct RPCGetMapEntryRequestHandler { + pub contract_identifier: Option, + pub map_name: Option, + pub key: Option, +} +impl RPCGetMapEntryRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + map_name: None, + key: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetMapEntryRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/map_entry/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// The body must be a hex string, encoded as a JSON string. + /// So, something like `"123abc"`. It encodes the map key as a serialized Clarity value. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < BOUND_VALUE_SERIALIZATION_HEX) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for GetMapEntry ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".into(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let map_name = request::get_clarity_name(captures, "map")?; + + let mut body_ptr = body; + let value_hex: String = serde_json::from_reader(&mut body_ptr) + .map_err(|_e| Error::DecodeError("Failed to parse JSON body".into()))?; + + let value = Value::try_deserialize_hex_untyped(&value_hex) + .map_err(|_e| Error::DecodeError("Failed to deserialize key value".into()))?; + + self.contract_identifier = Some(contract_identifier); + self.map_name = Some(map_name); + self.key = Some(value); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetMapEntryRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.map_name = None; + self.key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let map_name = self + .map_name + .take() + .ok_or(NetError::SendError("`map_name` not set".into()))?; + let key = self + .key + .take() + .ok_or(NetError::SendError("`key` not set".into()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let with_proof = contents.get_with_proof(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_data_map_entry( + &contract_identifier, + &map_name, + &key, + ); + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), Some("".into())) + }) + } else { + clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), None) + }) + }; + + let data = format!("0x{}", value_hex); + MapEntryResponse { data, marf_proof } + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(data)) => data, + Ok(None) | Err(_) => { 
+ return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetMapEntryRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let map_entry: MapEntryResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(map_entry)?) + } +} + +impl StacksHttpRequest { + /// Make a new request for a data map + pub fn new_getmapentry( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + map_name: ClarityName, + key: Value, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!( + "/v2/map_entry/{}/{}/{}", + &contract_addr, &contract_name, &map_name + ), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()) + .payload_json(serde_json::Value::String(key.serialize_to_hex())), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_map_entry_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: MapEntryResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getmapentry.rs b/stackslib/src/net/api/tests/getmapentry.rs new file mode 100644 index 0000000000..00b2152567 --- /dev/null +++ b/stackslib/src/net/api/tests/getmapentry.rs @@ -0,0 +1,419 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
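+
+// A minimal illustrative sketch of the request body format this handler
+// expects: the map key is a Clarity value serialized to hex and wrapped in a
+// JSON string, which the handler reverses with
+// `Value::try_deserialize_hex_untyped`. The example assumes only `serde_json`
+// and the `clarity` types already used in these tests.
+#[test]
+fn example_map_entry_key_body_round_trip() {
+    let key = Value::UInt(13);
+    let body = serde_json::Value::String(key.serialize_to_hex());
+    let hex: String = serde_json::from_value(body).unwrap();
+    assert_eq!(Value::try_deserialize_hex_untyped(&hex).unwrap(), key);
+}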
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-map".into(), + Value::UInt(13), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + false, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), false); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmapentry::RPCGetMapEntryRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.map_name, Some("test-map".into())); + assert_eq!(handler.key, Some(Value::UInt(13))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.map_name.is_none()); + assert!(handler.key.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-map".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-map-unconfirmed".try_into().unwrap(), + Value::Int(3), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant map + let 
request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "test-map".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x0a0100000000000000000000000000000002"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x0a0000000000000000000000000000000004"); + assert!(resp.marf_proof.is_some()); + + // no such map (this just returns `none`) + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x09"); + assert_eq!(resp.marf_proof, Some("".to_string())); + + // no such contract (this just returns `none`) + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x09"); + assert_eq!(resp.marf_proof, Some("".to_string())); +} + +/* +#[test] +#[ignore] +fn test_rpc_get_map_entry() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we don't set any tip parameters, and we expect that querying for map data + // against the canonical Stacks tip will succeed. 
+ test_rpc( + function_name!(), + 40130, + 40131, + 50130, + 50131, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(123))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_get_map_entry_unconfirmed() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for map data + // against the unconfirmed state will succeed. + test_rpc( + function_name!(), + 40140, + 40141, + 50140, + 50141, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let unconfirmed_tip = peer_client + .chainstate() + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + TipRequest::SpecificTip(unconfirmed_tip), + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(1))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_get_map_entry_use_latest_tip() { + test_rpc( + function_name!(), + 40142, + 40143, + 50142, + 50143, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + 
TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(1))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} +*/ From 34c5af59439419af500be18441bdc74bcf5bf9de Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:12:33 -0400 Subject: [PATCH 066/107] refactor: put get-confirmed-microblocks RPC handler into its own file --- .../src/net/api/getmicroblocks_confirmed.rs | 207 ++++++++++++ .../net/api/tests/getmicroblocks_confirmed.rs | 311 ++++++++++++++++++ 2 files changed, 518 insertions(+) create mode 100644 stackslib/src/net/api/getmicroblocks_confirmed.rs create mode 100644 stackslib/src/net/api/tests/getmicroblocks_confirmed.rs diff --git a/stackslib/src/net/api/getmicroblocks_confirmed.rs b/stackslib/src/net/api/getmicroblocks_confirmed.rs new file mode 100644 index 0000000000..f2817e4c9e --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_confirmed.rs @@ -0,0 +1,207 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
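+
+// Rough shape of this endpoint, for orientation: a GET to
+// /v2/microblocks/confirmed/{index-block-hash} (64 hex characters) streams back
+// the parent microblock stream that the given anchored block confirms, using
+// the same length-prefixed wire format as the indexed-microblocks handler that
+// this file builds on.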
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{httpcore::StacksHttp, Error as NetError, TipRequest}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use stacks_common::codec::read_next; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; + +#[derive(Clone)] +pub struct RPCMicroblocksConfirmedRequestHandler { + pub block_id: Option, +} + +impl RPCMicroblocksConfirmedRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +impl StacksIndexedMicroblockStream { + /// Make a new indexed microblock streamer using the descendent Stacks anchored block + pub fn new_confirmed( + chainstate: &StacksChainState, + child_block_id: &StacksBlockId, + ) -> Result { + let tail_microblock_index_hash = + if let Some(bhh) = chainstate.get_confirmed_microblock_index_hash(child_block_id)? { + bhh + } else { + return Err(ChainError::NoSuchBlockError); + }; + + StacksIndexedMicroblockStream::new(chainstate, &tail_microblock_index_hash) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksConfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/confirmed/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id = request::get_block_hash(captures, "block_id")?; + + self.block_id = Some(block_id); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksConfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("`block_id` not set".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + let res = StacksIndexedMicroblockStream::new_confirmed(chainstate, &block_id); + res + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + test_debug!("Failed to load block {}: Not found", &block_id); + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksConfirmedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_confirmed( + host: PeerHost, + child_block_id: StacksBlockId, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/confirmed/{}", &child_block_id), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs new file mode 100644 index 0000000000..d78de0c0ec --- /dev/null +++ b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -0,0 +1,311 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; + +use crate::util_lib::db::DBConn; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::test::make_codec_test_block; + +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; + +use crate::net::http::HttpChunkGenerator; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = + StacksHttpRequest::new_getmicroblocks_confirmed(addr.into(), StacksBlockId([0x22; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_confirmed::RPCMicroblocksConfirmedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.block_id, Some(StacksBlockId([0x22; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + // store an additional block and microblock stream, so we can fetch it. 
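+    // The fixture below is: a parent anchored block, a 15-block microblock
+    // stream descending from it, and a child anchored block that confirms that
+    // stream. The requests then target the child's index block hash, plus a
+    // bogus hash to exercise the 404 path.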
+ let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let parent_block = make_codec_test_block(25); + let parent_consensus_hash = ConsensusHash([0x02; 20]); + + let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); + mblocks.truncate(15); + + let mut child_block = make_codec_test_block(25); + let child_consensus_hash = ConsensusHash([0x03; 20]); + + child_block.header.parent_block = parent_block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + let child_index_block_hash = + StacksBlockHeader::make_index_block_hash(&child_consensus_hash, &child_block.block_hash()); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block, + &ConsensusHash([0x01; 20]), + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + true, + ); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block, + &parent_consensus_hash, + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + &mblock, + ); + } + + set_microblocks_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + let mut requests = vec![]; + + // query existing microblock stream + let request = StacksHttpRequest::new_getmicroblocks_confirmed( + addr.into(), + child_index_block_hash.clone(), + ); + requests.push(request); + + // query non-existant microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_confirmed(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let mut resp = response.decode_microblocks().unwrap(); + + resp.reverse(); + debug!("microblocks: {:?}", &resp); + assert_eq!(resp, mblocks); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_confirmed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_empty_coinbase_block(&privk); + let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); + mblocks.truncate(5); + + let mut child_block = make_empty_coinbase_block(&privk); + child_block.header.parent_block = block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let child_consensus_hash = ConsensusHash([3u8; 20]); + + // store microblocks to staging + for (i, mblock) in mblocks.iter().enumerate() { + store_staging_microblock( + &mut 
chainstate, + &consensus_hash, + &block.block_hash(), + mblock, + ); + } + + // store block to staging + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + 1, + 2, + ); + + // store child block to staging + store_staging_block( + &mut chainstate, + &child_consensus_hash, + &child_block, + &consensus_hash, + 1, + 2, + ); + + // accept it + set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); + set_block_processed( + &mut chainstate, + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + for i in 0..mblocks.len() { + set_microblocks_processed( + &mut chainstate, + &child_consensus_hash, + &child_block.block_hash(), + &mblocks[i].block_hash(), + ); + } + + // verify that we can stream everything + let child_block_header = + StacksBlockHeader::make_index_block_hash(&child_consensus_hash, &child_block.block_hash()); + + let mut stream = + StacksIndexedMicroblockStream::new_confirmed(&chainstate, &child_block_header).unwrap(); + + let mut confirmed_mblock_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + confirmed_mblock_bytes.len() + ); + confirmed_mblock_bytes.append(&mut next_bytes); + } + + // decode stream (should be length-prefixed) + let mut confirmed_mblocks = + Vec::::consensus_deserialize(&mut &confirmed_mblock_bytes[..]).unwrap(); + + confirmed_mblocks.reverse(); + + assert_eq!(confirmed_mblocks.len(), mblocks.len()); + for i in 0..mblocks.len() { + test_debug!("check {}", i); + assert_eq!(confirmed_mblocks[i], mblocks[i]) + } +} From 7ad3b28276e70ab2d4d65f1344a22c5987fe70c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:12:54 -0400 Subject: [PATCH 067/107] refactor: put get-indexed-microblocks handler into its own file --- .../src/net/api/getmicroblocks_indexed.rs | 329 ++++++++++++++++++ .../net/api/tests/getmicroblocks_indexed.rs | 314 +++++++++++++++++ 2 files changed, 643 insertions(+) create mode 100644 stackslib/src/net/api/getmicroblocks_indexed.rs create mode 100644 stackslib/src/net/api/tests/getmicroblocks_indexed.rs diff --git a/stackslib/src/net/api/getmicroblocks_indexed.rs b/stackslib/src/net/api/getmicroblocks_indexed.rs new file mode 100644 index 0000000000..d2a89c2ca3 --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_indexed.rs @@ -0,0 +1,329 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
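+
+// Wire format served by this handler, for orientation: the first chunk is a
+// 4-byte big-endian count of microblocks (tail sequence + 1), and each later
+// chunk is one serialized microblock, walking parent hashes from the requested
+// tail back toward the anchored block (so clients receive newest-first order).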
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{ + httpcore::{request, StacksHttp}, + Error as NetError, TipRequest, +}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use stacks_common::codec::read_next; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCMicroblocksIndexedRequestHandler { + pub tail_microblock_id: Option, +} +impl RPCMicroblocksIndexedRequestHandler { + pub fn new() -> Self { + Self { + tail_microblock_id: None, + } + } +} + +#[derive(Debug)] +pub struct StacksIndexedMicroblockStream { + /// length prefix + pub num_items_buf: [u8; 4], + pub num_items_ptr: usize, + + /// microblock pointer + pub microblock_hash: BlockHeaderHash, + pub parent_index_block_hash: StacksBlockId, + + /// connection to the chain state + chainstate_db: DBConn, +} + +impl StacksIndexedMicroblockStream { + pub fn new( + chainstate: &StacksChainState, + tail_index_microblock_hash: &StacksBlockId, + ) -> Result { + // look up parent + let mblock_info = StacksChainState::load_staging_microblock_info_indexed( + &chainstate.db(), + tail_index_microblock_hash, + )? + .ok_or(ChainError::NoSuchBlockError)?; + + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &mblock_info.consensus_hash, + &mblock_info.anchored_block_hash, + ); + + // need to send out the consensus_serialize()'ed array length before sending microblocks. + // this is exactly what seq tells us, though. + test_debug!( + "Will stream {} microblocks back from {}", + mblock_info.sequence, + &tail_index_microblock_hash + ); + let num_items_buf = ((mblock_info.sequence as u32) + 1).to_be_bytes(); + + Ok(StacksIndexedMicroblockStream { + microblock_hash: mblock_info.microblock_hash, + parent_index_block_hash: parent_index_block_hash, + num_items_buf: num_items_buf, + num_items_ptr: 0, + chainstate_db: chainstate.reopen_db()?, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksIndexedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let tail_microblock_id = request::get_block_hash(captures, "tail_microblock_id")?; + + self.tail_microblock_id = Some(tail_microblock_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksIndexedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.tail_microblock_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tail_microblock_id = self + .tail_microblock_id + .take() + .ok_or(NetError::SendError("`tail_microblock_id` not set".into()))?; + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksIndexedMicroblockStream::new(chainstate, &tail_microblock_id) + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such microblock {:?}\n", &tail_microblock_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load microblock: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksIndexedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksIndexedMicroblockStream { + #[cfg(not(test))] + fn hint_chunk_size(&self) -> usize { + 4096 + } + + #[cfg(test)] + fn hint_chunk_size(&self) -> usize { + // make this hurt + 32 + } + + /// Stream back microblock chunks. 
+ /// The first chunk is a 4-byte length prefix + /// Subsequent chunks are microblocks + fn generate_next_chunk(&mut self) -> Result, String> { + if self.num_items_ptr == 0 { + // send length prefix + self.num_items_ptr += self.num_items_buf.len(); + return Ok(self.num_items_buf.to_vec()); + } + + // load next microblock + let mblock_info_opt = StacksChainState::load_staging_microblock_indexed( + &self.chainstate_db, + &self.parent_index_block_hash, + &self.microblock_hash, + ).map_err(|e| { + warn!("Failed to load microblock"; "microblock" => %self.microblock_hash, "parent anchored block" => %self.parent_index_block_hash, "error" => %e); + let msg = format!("Failed to load microblock {}-{}: {:?}", &self.parent_index_block_hash, &self.microblock_hash, &e); + msg + })?; + + let mblock_info = if let Some(x) = mblock_info_opt { + x + } else { + // out of microblocks + debug!( + "Out of microblocks to stream"; + "last microblock" => %self.microblock_hash, + "parent anchored block" => %self.parent_index_block_hash + ); + return Ok(vec![]); + }; + + let buf = mblock_info.block_data; + + self.microblock_hash = mblock_info.parent_hash; + return Ok(buf); + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_indexed( + host: PeerHost, + index_microblock_hash: StacksBlockId, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/{}", &index_microblock_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + #[cfg(test)] + pub fn new_getmicroblocks_indexed( + mblocks: Vec, + with_content_length: bool, + ) -> StacksHttpResponse { + let value = mblocks.serialize_to_vec(); + let length = value.len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::Bytes, + true, + ); + let body = HttpResponsePayload::Bytes(value); + StacksHttpResponse::new(preamble, body) + } + + /// Decode an HTTP response into a microblock stream + /// If it fails, return Self::Error(..) + pub fn decode_microblocks(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + + // contents will be a SIP-003 bytestream + let mblock_bytes: Vec = contents.try_into()?; + let microblocks: Vec = read_next(&mut &mblock_bytes[..])?; + + Ok(microblocks) + } +} diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs new file mode 100644 index 0000000000..1a192afb53 --- /dev/null +++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs @@ -0,0 +1,314 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
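The request constructor and response decoder at the end of getmicroblocks_indexed.rs above are meant to be used together on the client side: build the GET request for the stream's tail microblock, then decode the length-prefixed body the node streams back. A minimal sketch, using only items introduced in this changeset; the transport is left to the caller as a closure so no particular connection API is assumed:

use stacks_common::types::chainstate::StacksBlockId;
use stacks_common::types::net::PeerHost;

use crate::chainstate::stacks::StacksMicroblock;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

/// Fetch the confirmed microblock stream ending at `tail_index_microblock_hash`.
/// The node streams the microblocks tail-first (descending sequence).
fn fetch_indexed_microblocks<F>(
    host: PeerHost,
    tail_index_microblock_hash: StacksBlockId,
    mut send: F,
) -> Result<Vec<StacksMicroblock>, NetError>
where
    F: FnMut(StacksHttpRequest) -> Result<StacksHttpResponse, NetError>,
{
    // GET /v2/microblocks/{tail_index_microblock_hash}
    let request =
        StacksHttpRequest::new_getmicroblocks_indexed(host, tail_index_microblock_hash);
    let response = send(request)?;
    // Body: a 4-byte big-endian count followed by consensus-serialized
    // microblocks; decode_microblocks() parses the whole vector.
    response.decode_microblocks()
}

Callers that want ascending sequence order reverse the result, as the tests below do.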
+ +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; + +use crate::util_lib::db::DBConn; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::test::make_codec_test_block; + +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; + +use crate::net::http::HttpChunkGenerator; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), StacksBlockId([0x22; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_indexed::RPCMicroblocksIndexedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.tail_microblock_id, Some(StacksBlockId([0x22; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tail_microblock_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + // store an additional block and microblock stream, so we can fetch it. 
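    // (Sketch of the fixture, for orientation; all values come from the setup below.)
    //
    //   parent_block (consensus 0x02) -> mblocks[0..15] -> child_block (consensus 0x03)
    //
    // The child block names mblocks[14] as its parent microblock, which is what lets
    // set_microblocks_processed() mark the whole stream as confirmed. The request under
    // test then asks for the stream by the index hash of its tail,
    // make_index_block_hash(parent_consensus_hash, mblocks[14].block_hash()), and the
    // handler walks parent_hash pointers back toward mblocks[0].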
+ let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + let parent_block = make_codec_test_block(25); + let parent_consensus_hash = ConsensusHash([0x02; 20]); + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_block.block_hash(), + ); + + let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); + mblocks.truncate(15); + + let mut child_block = make_codec_test_block(25); + let child_consensus_hash = ConsensusHash([0x03; 20]); + + child_block.header.parent_block = parent_block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + store_staging_block( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block, + &ConsensusHash([0x01; 20]), + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + true, + ); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block, + &parent_consensus_hash, + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + let index_microblock_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + &mblock, + ); + } + + set_microblocks_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + let mut requests = vec![]; + + // query existing microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), index_microblock_hash.clone()); + requests.push(request); + + // query non-existant microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let mut resp = response.decode_microblocks().unwrap(); + + resp.reverse(); + debug!("microblocks: {:?}", &resp); + assert_eq!(resp, mblocks); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_indexed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_empty_coinbase_block(&privk); + let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); + mblocks.truncate(5); + + let mut child_block = make_empty_coinbase_block(&privk); + child_block.header.parent_block = block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let child_consensus_hash = 
ConsensusHash([3u8; 20]); + + // store microblocks to staging + for (i, mblock) in mblocks.iter().enumerate() { + store_staging_microblock( + &mut chainstate, + &consensus_hash, + &block.block_hash(), + mblock, + ); + } + + // store block to staging + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + 1, + 2, + ); + + // store child block to staging + store_staging_block( + &mut chainstate, + &child_consensus_hash, + &child_block, + &consensus_hash, + 1, + 2, + ); + + // accept it + set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); + set_block_processed( + &mut chainstate, + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + for i in 0..mblocks.len() { + // set different parts of this stream as confirmed + set_microblocks_processed( + &mut chainstate, + &child_consensus_hash, + &child_block.block_hash(), + &mblocks[i].block_hash(), + ); + + // verify that we can stream everything + let microblock_index_header = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &mblocks[i].block_hash()); + + let mut stream = + StacksIndexedMicroblockStream::new(&chainstate, µblock_index_header).unwrap(); + + let mut confirmed_mblock_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + confirmed_mblock_bytes.len() + ); + confirmed_mblock_bytes.append(&mut next_bytes); + } + + // decode stream (should be length-prefixed) + let mut confirmed_mblocks = + Vec::::consensus_deserialize(&mut &confirmed_mblock_bytes[..]) + .unwrap(); + + confirmed_mblocks.reverse(); + + assert_eq!(confirmed_mblocks.len(), mblocks[0..(i + 1)].len()); + for j in 0..(i + 1) { + test_debug!("check {}", j); + assert_eq!(confirmed_mblocks[j], mblocks[j]) + } + } +} From 7ad39eb03ed4c7f103d3e59f51d5a3866b5eeb24 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:13:25 -0400 Subject: [PATCH 068/107] refactor: put get-unconfirmed-microblocks RPC handler into its own file --- .../src/net/api/getmicroblocks_unconfirmed.rs | 334 ++++++++++++++++++ .../api/tests/getmicroblocks_unconfirmed.rs | 264 ++++++++++++++ 2 files changed, 598 insertions(+) create mode 100644 stackslib/src/net/api/getmicroblocks_unconfirmed.rs create mode 100644 stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs diff --git a/stackslib/src/net/api/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs new file mode 100644 index 0000000000..6a81e4ce1f --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs @@ -0,0 +1,334 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
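The unconfirmed-microblocks endpoint defined in this file pairs with the indexed one above: the client asks for the descendants of an anchored block starting at a given sequence number, and the decoder at the end of this file reads bare consensus-serialized microblocks until the body ends (stopping at MAX_MICROBLOCKS_UNCONFIRMED). A minimal client-side sketch under the same assumptions as before, with the transport supplied by the caller:

use stacks_common::types::chainstate::StacksBlockId;
use stacks_common::types::net::PeerHost;

use crate::chainstate::stacks::StacksMicroblock;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

/// Fetch the unconfirmed microblocks that descend from `parent_block_id`,
/// starting at sequence number `seq`; they arrive in ascending sequence order.
fn fetch_unconfirmed_microblocks<F>(
    host: PeerHost,
    parent_block_id: StacksBlockId,
    seq: u16,
    mut send: F,
) -> Result<Vec<StacksMicroblock>, NetError>
where
    F: FnMut(StacksHttpRequest) -> Result<StacksHttpResponse, NetError>,
{
    // GET /v2/microblocks/unconfirmed/{parent_block_id}/{seq}
    let request =
        StacksHttpRequest::new_getmicroblocks_unconfirmed(host, parent_block_id, seq);
    let response = send(request)?;
    response.decode_microblocks_unconfirmed()
}

A caller following a stream can poll with `seq` set to one past the last microblock it has seen; a 404 means the node could not find a staged microblock for that parent and sequence.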
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::MAX_MICROBLOCKS_UNCONFIRMED; +use crate::net::{httpcore::StacksHttp, Error as NetError, TipRequest}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use stacks_common::codec::read_next; +use stacks_common::codec::Error as CodecError; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::retry::BoundReader; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCMicroblocksUnconfirmedRequestHandler { + pub parent_block_id: Option, + pub start_sequence: Option, +} +impl RPCMicroblocksUnconfirmedRequestHandler { + pub fn new() -> Self { + Self { + parent_block_id: None, + start_sequence: None, + } + } +} + +#[derive(Debug)] +pub struct StacksUnconfirmedMicroblockStream { + /// microblock pointer + pub microblock_hash: BlockHeaderHash, + pub parent_index_block_hash: StacksBlockId, + pub seq: u16, + pub finished: bool, + pub next_microblock: StacksMicroblock, + + /// connection to the chain state + chainstate_db: DBConn, +} + +impl StacksUnconfirmedMicroblockStream { + pub fn new( + chainstate: &StacksChainState, + parent_block_id: &StacksBlockId, + seq: u16, + ) -> Result { + let mblock_info = StacksChainState::load_next_descendant_microblock( + &chainstate.db(), + parent_block_id, + seq, + )? + .ok_or(ChainError::NoSuchBlockError)?; + + // need to send out the consensus_serialize()'ed array length before sending microblocks. + // this is exactly what seq tells us, though. + Ok(StacksUnconfirmedMicroblockStream { + microblock_hash: mblock_info.block_hash(), + parent_index_block_hash: parent_block_id.clone(), + seq, + finished: false, + next_microblock: mblock_info, + chainstate_db: chainstate.reopen_db()?, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksUnconfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/unconfirmed/(?P[0-9a-f]{64})/(?P[0-9]{1,6})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + + let parent_block_id = request::get_block_hash(captures, "parent_block_id")?; + let start_sequence_u32 = request::get_u32(captures, "start_sequence")?; + + if start_sequence_u32 > u16::MAX.into() { + return Err(Error::DecodeError("`start_sequence` is too big".into())); + } + + let start_sequence = start_sequence_u32 as u16; + + self.parent_block_id = Some(parent_block_id); + self.start_sequence = Some(start_sequence); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksUnconfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.parent_block_id = None; + self.start_sequence = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .parent_block_id + .take() + .ok_or(NetError::SendError("`parent_block_id` not set".into()))?; + let start_seq = self + .start_sequence + .take() + .ok_or(NetError::SendError("`start_seq` not set".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksUnconfirmedMicroblockStream::new(chainstate, &block_id, start_seq) + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load microblock: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksUnconfirmedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksUnconfirmedMicroblockStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + /// Stream back microblock chunks. 
+ /// The first chunk is a 4-byte length prefix + /// Subsequent chunks are microblocks + fn generate_next_chunk(&mut self) -> Result, String> { + if self.finished { + // no more to load + return Ok(vec![]); + } + + // advance streamer to next microblock in the sequence + let next_seq = match self.seq { + u16::MAX => { + return Err("No more microblocks; exceeded maximum sequence number".to_string()); + } + x => x + 1, + }; + + let next_mblock_opt = StacksChainState::load_next_descendant_microblock( + &self.chainstate_db, + &self.parent_index_block_hash, + next_seq, + ).map_err(|e| { + warn!("Failed to query for next descendant microblock"; "parent anchored block" => %self.parent_index_block_hash, "next_seq" => %next_seq); + let msg = format!("Failed to query for next descendant microblock of {} at {}: {:?}", &self.parent_index_block_hash, next_seq, &e); + msg + })?; + + let buf = self.next_microblock.serialize_to_vec(); + if let Some(mblock) = next_mblock_opt { + test_debug!( + "Switch to {}-{} ({})", + &self.parent_index_block_hash, + &mblock.block_hash(), + next_seq + ); + self.microblock_hash = mblock.block_hash(); + self.seq = next_seq; + self.next_microblock = mblock; + } else { + // we're EOF + self.finished = true; + } + + return Ok(buf); + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_unconfirmed( + host: PeerHost, + parent_block_id: StacksBlockId, + seq: u16, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/unconfirmed/{}/{}", &parent_block_id, seq), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into an unconfirmed microblock stream + pub fn decode_microblocks_unconfirmed(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let mblock_bytes: Vec = contents.try_into()?; + let mut mblock_bytes_ptr = mblock_bytes.as_slice(); + + let mut microblocks = vec![]; + let mut bound_reader = + BoundReader::from_reader(&mut mblock_bytes_ptr, MAX_MESSAGE_LEN.into()); + loop { + let mblock: StacksMicroblock = match read_next(&mut bound_reader) { + Ok(mblock) => Ok(mblock), + Err(e) => match e { + CodecError::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + // end of stream -- this is fine + break; + } + _ => Err(e), + }, + _ => Err(e), + }, + }?; + + microblocks.push(mblock); + if microblocks.len() == MAX_MICROBLOCKS_UNCONFIRMED { + break; + } + } + + Ok(microblocks) + } +} diff --git a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs new file mode 100644 index 0000000000..b559597145 --- /dev/null +++ b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs @@ -0,0 +1,264 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; + +use crate::util_lib::db::DBConn; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; + +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; + +use crate::net::api::getmicroblocks_unconfirmed::StacksUnconfirmedMicroblockStream; + +use crate::net::http::HttpChunkGenerator; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getmicroblocks_unconfirmed( + addr.into(), + StacksBlockId([0x22; 32]), + 123, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_unconfirmed::RPCMicroblocksUnconfirmedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.parent_block_id, Some(StacksBlockId([0x22; 32]))); + assert_eq!(handler.start_sequence, Some(123)); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.parent_block_id.is_none()); + assert!(handler.start_sequence.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let consensus_hash = ConsensusHash([0x02; 20]); + let anchored_block_hash = BlockHeaderHash([0x03; 32]); + let index_block_hash = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); + + let mut mblocks = make_sample_microblock_stream(&privk, 
&anchored_block_hash); + mblocks.truncate(15); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &consensus_hash, + &anchored_block_hash, + &mblock, + ); + } + + let mut requests = vec![]; + + // get the unconfirmed stream starting at the 5th microblock + let request = + StacksHttpRequest::new_getmicroblocks_unconfirmed(addr.into(), index_block_hash.clone(), 5); + requests.push(request); + + // get an unconfirmed stream for a non-existant block + let request = StacksHttpRequest::new_getmicroblocks_unconfirmed( + addr.into(), + StacksBlockId([0x11; 32]), + 5, + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let resp = response.decode_microblocks_unconfirmed().unwrap(); + + debug!("microblocks: {:?}", &resp); + assert_eq!(resp.len(), 10); + assert_eq!(resp, mblocks[5..].to_vec()); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_unconfirmed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_empty_coinbase_block(&privk); + let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); + mblocks.truncate(15); + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let index_block_header = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); + + // can't stream a non-existant microblock + if let Err(chainstate_error::NoSuchBlockError) = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, 0) + { + } else { + panic!("Opened nonexistant microblock"); + } + + // store microblocks to staging and stream them back + for (i, mblock) in mblocks.iter().enumerate() { + store_staging_microblock( + &mut chainstate, + &consensus_hash, + &block.block_hash(), + mblock, + ); + + // read back all the data we have so far, block-by-block + let mut staging_mblocks = vec![]; + for j in 0..(i + 1) { + let mut next_mblock_bytes = vec![]; + let mut stream = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, j as u16) + .unwrap(); + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + next_mblock_bytes.len() + ); + next_mblock_bytes.append(&mut next_bytes); + } + test_debug!("Got {} total bytes", next_mblock_bytes.len()); + + // should deserialize to a microblock + let staging_mblock = + StacksMicroblock::consensus_deserialize(&mut &next_mblock_bytes[..]).unwrap(); + staging_mblocks.push(staging_mblock); + } + + assert_eq!(staging_mblocks.len(), mblocks[0..(i + 1)].len()); + for j in 0..(i + 1) { + test_debug!("check {}", j); + assert_eq!(staging_mblocks[j], mblocks[j]) + } + + // can also read partial stream in one shot, from any seq + for k in 0..(i + 1) { + test_debug!("start at seq {}", k); + let mut staging_mblock_bytes = vec![]; + let mut stream = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, k as u16) + .unwrap(); + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.len() == 0 { + break; 
+ } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + staging_mblock_bytes.len() + ); + staging_mblock_bytes.append(&mut next_bytes); + } + + test_debug!("Got {} total bytes", staging_mblock_bytes.len()); + + // decode stream + let staging_mblocks = decode_microblock_stream(&staging_mblock_bytes); + + assert_eq!(staging_mblocks.len(), mblocks[k..(i + 1)].len()); + for j in 0..staging_mblocks.len() { + test_debug!("check {}", j); + assert_eq!(staging_mblocks[j], mblocks[k + j]) + } + } + } +} From 36b91c90897438cf599299c01a6a3b5eec9a5150 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:13:43 -0400 Subject: [PATCH 069/107] refactor: put get-neighbors RPC handler into its own file --- stackslib/src/net/api/getneighbors.rs | 290 ++++++++++++++++++++ stackslib/src/net/api/tests/getneighbors.rs | 118 ++++++++ 2 files changed, 408 insertions(+) create mode 100644 stackslib/src/net/api/getneighbors.rs create mode 100644 stackslib/src/net/api/tests/getneighbors.rs diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs new file mode 100644 index 0000000000..8e89cafa63 --- /dev/null +++ b/stackslib/src/net/api/getneighbors.rs @@ -0,0 +1,290 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
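For orientation, the RPCNeighborsInfo and RPCNeighbor structs defined below are what GET /v2/neighbors serializes to JSON. Sketching only the shape (exact value encodings are whatever serde produces for the underlying types; note that `addrbytes` is renamed to "ip" and `stackerdbs` is omitted when it is `None`):

// {
//   "bootstrap": [ <neighbor>, ... ],
//   "sample":    [ <neighbor>, ... ],
//   "inbound":   [ <neighbor>, ... ],
//   "outbound":  [ <neighbor>, ... ]
// }
//
// where each <neighbor> is an RPCNeighbor:
//
// {
//   "network_id": <u32>,
//   "peer_version": <u32>,
//   "ip": <the peer's PeerAddress, under the renamed key>,
//   "port": <u16>,
//   "public_key_hash": <Hash160 of the node public key>,
//   "authenticated": <bool>,
//   "stackerdbs": [ <QualifiedContractIdentifier>, ... ]   // omitted when None
// }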
+ +use crate::net::{ + db::PeerDB, + httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, NeighborKey, PeerAddress, StacksNodeState, +}; +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; + +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpVersion, +}; + +use crate::net::MAX_NEIGHBORS_DATA_LEN; + +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Hash160; + +use clarity::vm::types::QualifiedContractIdentifier; + +#[derive(Clone)] +pub struct RPCNeighborsRequestHandler {} +impl RPCNeighborsRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +/// Items in the NeighborsInfo -- combines NeighborKey and NeighborAddress +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCNeighbor { + pub network_id: u32, + pub peer_version: u32, + #[serde(rename = "ip")] + pub addrbytes: PeerAddress, + pub port: u16, + pub public_key_hash: Hash160, + pub authenticated: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub stackerdbs: Option>, +} + +impl RPCNeighbor { + pub fn from_neighbor_key_and_pubkh( + nk: NeighborKey, + pkh: Hash160, + auth: bool, + stackerdbs: Vec, + ) -> RPCNeighbor { + RPCNeighbor { + network_id: nk.network_id, + peer_version: nk.peer_version, + addrbytes: nk.addrbytes, + port: nk.port, + public_key_hash: pkh, + authenticated: auth, + stackerdbs: Some(stackerdbs), + } + } +} + +/// Struct given back from a call to `/v2/neighbors`. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCNeighborsInfo { + pub bootstrap: Vec, + pub sample: Vec, + pub inbound: Vec, + pub outbound: Vec, +} + +impl RPCNeighborsInfo { + /// Load neighbor address information from the peer network + pub fn from_p2p(network: &PeerNetwork) -> Result { + let network_epoch = network.get_current_epoch().network_epoch; + let network_id = network.get_local_peer().network_id; + let max_neighbor_age = network.get_connection_opts().max_neighbor_age; + let burnchain_view = network.get_chain_view(); + let peerdb_conn = network.peerdb_conn(); + + let bootstrap_nodes = + PeerDB::get_bootstrap_peers(peerdb_conn, network_id).map_err(NetError::DBError)?; + let bootstrap = bootstrap_nodes + .into_iter() + .map(|n| { + let stackerdb_contract_ids = + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + RPCNeighbor::from_neighbor_key_and_pubkh( + n.addr.clone(), + Hash160::from_node_public_key(&n.public_key), + true, + stackerdb_contract_ids, + ) + }) + .collect(); + + let neighbor_sample = PeerDB::get_fresh_random_neighbors( + peerdb_conn, + network_id, + network_epoch, + max_neighbor_age, + MAX_NEIGHBORS_DATA_LEN, + burnchain_view.burn_block_height, + false, + ) + .map_err(NetError::DBError)?; + + let sample: Vec = neighbor_sample + .into_iter() + .map(|n| { + let stackerdb_contract_ids = + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + RPCNeighbor::from_neighbor_key_and_pubkh( + n.addr.clone(), + Hash160::from_node_public_key(&n.public_key), + true, + stackerdb_contract_ids, + ) + }) + .collect(); + + let mut inbound = vec![]; + let mut outbound = vec![]; + for event_id in network.iter_peer_event_ids() { + let convo = if let Some(convo) = network.get_p2p_convo(*event_id) { + convo + } else { + continue; + }; + + let nk = 
convo.to_neighbor_key(); + let naddr = convo.to_neighbor_address(); + if convo.is_outbound() { + outbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( + nk, + naddr.public_key_hash, + convo.is_authenticated(), + convo.get_stackerdb_contract_ids().to_vec(), + )); + } else { + inbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( + nk, + naddr.public_key_hash, + convo.is_authenticated(), + convo.get_stackerdb_contract_ids().to_vec(), + )); + } + } + + Ok(RPCNeighborsInfo { + bootstrap, + sample, + inbound, + outbound, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCNeighborsRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/neighbors$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetNeighbors".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNeighborsRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let neighbor_data = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + RPCNeighborsInfo::from_p2p(network) + })?; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&neighbor_data)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNeighborsRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let neighbor_info: RPCNeighborsInfo = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(neighbor_info)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new getneighbors request to this endpoint + pub fn new_getneighbors(host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/neighbors".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Make a new neighbors response + #[cfg(test)] + pub fn new_getneighbors( + neighbors: RPCNeighborsInfo, + with_content_length: bool, + ) -> StacksHttpResponse { + let value = + serde_json::to_value(neighbors).expect("FATAL: failed to encode infallible data"); + let length = serde_json::to_string(&value) + .expect("FATAL: failed to encode infallible data") + .len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::JSON, + true, + ); + let body = HttpResponsePayload::JSON(value); + StacksHttpResponse::new(preamble, body) + } + + pub fn decode_rpc_neighbors(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let rpc_neighbor_info = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(rpc_neighbor_info) + } +} diff --git a/stackslib/src/net/api/tests/getneighbors.rs b/stackslib/src/net/api/tests/getneighbors.rs new file mode 100644 index 0000000000..6c7da7ff1b --- /dev/null +++ b/stackslib/src/net/api/tests/getneighbors.rs @@ -0,0 +1,118 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
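As with the microblock endpoints, the request constructor and response decoder at the end of getneighbors.rs pair up on the client side. A minimal sketch, again with the transport supplied by the caller as a closure:

use stacks_common::types::net::PeerHost;

use crate::net::api::getneighbors::RPCNeighborsInfo;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

/// Ask a node for its neighbor sets (bootstrap, sample, inbound, outbound).
fn fetch_neighbors<F>(host: PeerHost, mut send: F) -> Result<RPCNeighborsInfo, NetError>
where
    F: FnMut(StacksHttpRequest) -> Result<StacksHttpResponse, NetError>,
{
    // GET /v2/neighbors
    let request = StacksHttpRequest::new_getneighbors(host);
    let response = send(request)?;
    response.decode_rpc_neighbors()
}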
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getneighbors(addr.into()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getneighbors::RPCNeighborsRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let request = StacksHttpRequest::new_getneighbors(addr.into()); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_rpc_neighbors().unwrap(); + + // as configured there's one boostrap neighbor + assert_eq!(resp.bootstrap.len(), 1); + + // all neighboring peers (there's one) report stackerdbs + for n in resp.sample.iter() { + assert!(n.stackerdbs.is_some()); + } + + for n in resp.bootstrap.iter() { + assert!(n.stackerdbs.is_some()); + } + + for n in resp.inbound.iter() { + assert!(n.stackerdbs.is_some()); + } + + for n in resp.outbound.iter() { + assert!(n.stackerdbs.is_some()); + } +} From 1c5b0e43a230163c8c7d612cd02fd44128326878 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:13:59 -0400 Subject: [PATCH 070/107] refactor: put get-pox-info RPC handler into its own file --- stackslib/src/net/api/getpoxinfo.rs | 491 ++++++++++++++++++++++ stackslib/src/net/api/tests/getpoxinfo.rs | 242 +++++++++++ 2 files changed, 733 insertions(+) create mode 100644 stackslib/src/net/api/getpoxinfo.rs create mode 100644 stackslib/src/net/api/tests/getpoxinfo.rs diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs new file mode 100644 index 0000000000..ca34ad6360 --- /dev/null +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -0,0 +1,491 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright 
(C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, TipRequest, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Sha256Sum; + +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityVersion; + +use clarity::vm::clarity::ClarityConnection; + +#[derive(Clone)] +pub struct RPCPoxInfoRequestHandler {} +impl RPCPoxInfoRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxCurrentCycleInfo { + pub id: u64, + pub min_threshold_ustx: u64, + pub stacked_ustx: u64, + pub is_pox_active: bool, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxNextCycleInfo { + pub id: u64, + pub min_threshold_ustx: u64, + pub min_increment_ustx: u64, + pub stacked_ustx: u64, + pub prepare_phase_start_block_height: u64, + pub blocks_until_prepare_phase: i64, + pub reward_phase_start_block_height: u64, + pub blocks_until_reward_phase: u64, + pub ustx_until_pox_rejection: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxContractVersion { + pub contract_id: String, + pub activation_burnchain_block_height: u64, + pub first_reward_cycle_id: u64, +} + +/// The data we return on GET /v2/pox +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxInfoData { + pub contract_id: String, + pub pox_activation_threshold_ustx: u64, + pub first_burnchain_block_height: u64, + pub current_burnchain_block_height: u64, + pub prepare_phase_block_length: u64, + pub reward_phase_block_length: u64, + pub reward_slots: u64, + pub rejection_fraction: u64, + pub total_liquid_supply_ustx: u64, + pub current_cycle: RPCPoxCurrentCycleInfo, + pub next_cycle: RPCPoxNextCycleInfo, + + // below are included for backwards-compatibility + pub min_amount_ustx: u64, + pub prepare_cycle_length: 
u64, + pub reward_cycle_id: u64, + pub reward_cycle_length: u64, + pub rejection_votes_left_required: u64, + pub next_reward_cycle_in: u64, + + // Information specific to each PoX contract version + pub contract_versions: Vec, +} + +impl RPCPoxInfoData { + pub fn from_db( + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip: &StacksBlockId, + burnchain: &Burnchain, + ) -> Result { + let mainnet = chainstate.mainnet; + let chain_id = chainstate.chain_id; + let current_burn_height = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?.block_height; + + let pox_contract_name = burnchain + .pox_constants + .active_pox_contract(current_burn_height); + + let contract_identifier = boot_code_id(pox_contract_name, mainnet); + let function = "get-pox-info"; + let cost_track = LimitedCostTracker::new_free(); + let sender = PrincipalData::Standard(StandardPrincipalData::transient()); + + debug!( + "Active PoX contract is '{}' (current_burn_height = {}, v1_unlock_height = {}", + &contract_identifier, current_burn_height, burnchain.pox_constants.v1_unlock_height + ); + + // Note: should always be 0 unless somehow configured to start later + let pox_1_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.first_block_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-1 first reward cycle begins before first burn block height".to_string(), + ))?; + + let pox_2_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-2 first reward cycle begins before first burn block height".to_string(), + ))? + + 1; + + let pox_3_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-3 first reward cycle begins before first burn block height".to_string(), + ))? 
+ + 1; + + let data = chainstate + .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + sender, + None, + cost_track, + |env| env.execute_contract(&contract_identifier, function, &vec![], true), + ) + }) + .map_err(|_| NetError::NotFoundError)?; + + let res = match data { + Some(Ok(res)) => res.expect_result_ok().expect_tuple(), + _ => return Err(NetError::DBError(DBError::NotFoundError)), + }; + + let first_burnchain_block_height = res + .get("first-burnchain-block-height") + .expect(&format!("FATAL: no 'first-burnchain-block-height'")) + .to_owned() + .expect_u128() as u64; + + let min_stacking_increment_ustx = res + .get("min-amount-ustx") + .expect(&format!("FATAL: no 'min-amount-ustx'")) + .to_owned() + .expect_u128() as u64; + + let prepare_cycle_length = res + .get("prepare-cycle-length") + .expect(&format!("FATAL: no 'prepare-cycle-length'")) + .to_owned() + .expect_u128() as u64; + + let rejection_fraction = res + .get("rejection-fraction") + .expect(&format!("FATAL: no 'rejection-fraction'")) + .to_owned() + .expect_u128() as u64; + + let reward_cycle_id = res + .get("reward-cycle-id") + .expect(&format!("FATAL: no 'reward-cycle-id'")) + .to_owned() + .expect_u128() as u64; + + let reward_cycle_length = res + .get("reward-cycle-length") + .expect(&format!("FATAL: no 'reward-cycle-length'")) + .to_owned() + .expect_u128() as u64; + + let current_rejection_votes = res + .get("current-rejection-votes") + .expect(&format!("FATAL: no 'current-rejection-votes'")) + .to_owned() + .expect_u128() as u64; + + let total_liquid_supply_ustx = res + .get("total-liquid-supply-ustx") + .expect(&format!("FATAL: no 'total-liquid-supply-ustx'")) + .to_owned() + .expect_u128() as u64; + + let total_required = (total_liquid_supply_ustx as u128 / 100) + .checked_mul(rejection_fraction as u128) + .ok_or_else(|| NetError::DBError(DBError::Overflow))? + as u64; + + let rejection_votes_left_required = total_required.saturating_sub(current_rejection_votes); + + let burnchain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + let pox_consts = &burnchain.pox_constants; + + if prepare_cycle_length != pox_consts.prepare_length as u64 { + error!( + "PoX Constants in config mismatched with PoX contract constants: {} != {}", + prepare_cycle_length, pox_consts.prepare_length + ); + return Err(NetError::DBError(DBError::Corruption)); + } + + if reward_cycle_length != pox_consts.reward_cycle_length as u64 { + error!( + "PoX Constants in config mismatched with PoX contract constants: {} != {}", + reward_cycle_length, pox_consts.reward_cycle_length + ); + return Err(NetError::DBError(DBError::Corruption)); + } + + let effective_height = burnchain_tip.block_height - first_burnchain_block_height; + let next_reward_cycle_in = reward_cycle_length - (effective_height % reward_cycle_length); + + let next_rewards_start = burnchain_tip.block_height + next_reward_cycle_in; + let next_prepare_phase_start = next_rewards_start - prepare_cycle_length; + + let next_prepare_phase_in = i64::try_from(next_prepare_phase_start) + .map_err(|_| NetError::ChainstateError("Burn block height overflowed i64".into()))? 
+ - i64::try_from(burnchain_tip.block_height).map_err(|_| { + NetError::ChainstateError("Burn block height overflowed i64".into()) + })?; + + let cur_block_pox_contract = pox_consts.active_pox_contract(burnchain_tip.block_height); + let cur_cycle_pox_contract = + pox_consts.active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id)); + let next_cycle_pox_contract = pox_consts + .active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id + 1)); + + let cur_cycle_stacked_ustx = chainstate.get_total_ustx_stacked( + &sortdb, + tip, + reward_cycle_id as u128, + cur_cycle_pox_contract, + )?; + let next_cycle_stacked_ustx = + // next_cycle_pox_contract might not be instantiated yet + match chainstate.get_total_ustx_stacked( + &sortdb, + tip, + reward_cycle_id as u128 + 1, + next_cycle_pox_contract, + ) { + Ok(ustx) => ustx, + Err(ChainError::ClarityError(_)) => { + // contract not instantiated yet + 0 + } + Err(e) => { + return Err(e.into()); + } + }; + + let reward_slots = pox_consts.reward_slots() as u64; + + let cur_cycle_threshold = StacksChainState::get_threshold_from_participation( + total_liquid_supply_ustx as u128, + cur_cycle_stacked_ustx, + reward_slots as u128, + ) as u64; + + let next_threshold = StacksChainState::get_threshold_from_participation( + total_liquid_supply_ustx as u128, + next_cycle_stacked_ustx, + reward_slots as u128, + ) as u64; + + let pox_activation_threshold_ustx = (total_liquid_supply_ustx as u128) + .checked_mul(pox_consts.pox_participation_threshold_pct as u128) + .map(|x| x / 100) + .ok_or_else(|| NetError::DBError(DBError::Overflow))? + as u64; + + let cur_cycle_pox_active = sortdb.is_pox_active(burnchain, &burnchain_tip)?; + + Ok(RPCPoxInfoData { + contract_id: boot_code_id(cur_block_pox_contract, chainstate.mainnet).to_string(), + pox_activation_threshold_ustx, + first_burnchain_block_height, + current_burnchain_block_height: burnchain_tip.block_height, + prepare_phase_block_length: prepare_cycle_length, + reward_phase_block_length: reward_cycle_length - prepare_cycle_length, + reward_slots, + rejection_fraction, + total_liquid_supply_ustx, + current_cycle: RPCPoxCurrentCycleInfo { + id: reward_cycle_id, + min_threshold_ustx: cur_cycle_threshold, + stacked_ustx: cur_cycle_stacked_ustx as u64, + is_pox_active: cur_cycle_pox_active, + }, + next_cycle: RPCPoxNextCycleInfo { + id: reward_cycle_id + 1, + min_threshold_ustx: next_threshold, + min_increment_ustx: min_stacking_increment_ustx, + stacked_ustx: next_cycle_stacked_ustx as u64, + prepare_phase_start_block_height: next_prepare_phase_start, + blocks_until_prepare_phase: next_prepare_phase_in, + reward_phase_start_block_height: next_rewards_start, + blocks_until_reward_phase: next_reward_cycle_in, + ustx_until_pox_rejection: rejection_votes_left_required, + }, + min_amount_ustx: next_threshold, + prepare_cycle_length, + reward_cycle_id, + reward_cycle_length, + rejection_votes_left_required, + next_reward_cycle_in, + contract_versions: vec![ + RPCPoxContractVersion { + contract_id: boot_code_id(POX_1_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain.first_block_height, + first_reward_cycle_id: pox_1_first_cycle, + }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_2_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain.pox_constants.v1_unlock_height + as u64, + first_reward_cycle_id: pox_2_first_cycle, + }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_3_NAME, 
chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_3_activation_height + as u64, + first_reward_cycle_id: pox_3_first_cycle, + }, + ], + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPoxInfoRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/pox$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetPoxInfo".to_string(), + )); + } + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPoxInfoRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let pox_info_res = + node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + RPCPoxInfoData::from_db(sortdb, chainstate, &tip, network.get_burnchain()) + }); + + let pox_info = match pox_info_res { + Ok(pox_info) => pox_info, + Err(NetError::NotFoundError) | Err(NetError::DBError(DBError::NotFoundError)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No such chain tip".into()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load PoX info: {:?}", &e)), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&pox_info)?; + Ok((preamble, body)) + } +} + +impl HttpResponse for RPCPoxInfoRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pox_info: RPCPoxInfoData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pox_info)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_getpoxinfo(host: PeerHost, tip_req: TipRequest) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/pox".into(), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_rpc_get_pox_info(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let pox_info: RPCPoxInfoData = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(pox_info) + } +} diff --git a/stackslib/src/net/api/tests/getpoxinfo.rs b/stackslib/src/net/api/tests/getpoxinfo.rs new file mode 100644 index 0000000000..33b949e016 --- /dev/null +++ b/stackslib/src/net/api/tests/getpoxinfo.rs @@ -0,0 +1,242 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getpoxinfo( + addr.into(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getpoxinfo::RPCPoxInfoRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = 
parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let request = StacksHttpRequest::new_getpoxinfo(addr.into(), TipRequest::UseLatestAnchoredTip); + requests.push(request); + + // bad tip + let request = StacksHttpRequest::new_getpoxinfo( + addr.into(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + // this works + let resp = response.decode_rpc_get_pox_info().unwrap(); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + // this fails with 404 + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +/* +#[test] +#[ignore] +fn test_rpc_getpoxinfo() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, `tip_req` is set to UseLatestAnchoredTip. + // Thus, the query for pox info will be against the canonical Stacks tip, which we expect to succeed. + let pox_server_info = RefCell::new(None); + test_rpc( + function_name!(), + 40002, + 40003, + 50002, + 50003, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let mut sortdb = peer_server.sortdb.as_mut().unwrap(); + let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; + let stacks_block_id = { + let tip = chainstate.get_stacks_chain_tip(sortdb).unwrap().unwrap(); + StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_block_hash, + ) + }; + let pox_info = RPCPoxInfoData::from_db( + &mut sortdb, + chainstate, + &stacks_block_id, + &peer_client.config.burnchain, + ) + .unwrap(); + *pox_server_info.borrow_mut() = Some(pox_info); + convo_client.new_getpoxinfo(TipRequest::UseLatestAnchoredTip) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + convo_client, + convo_server| { + let req_md = http_request.preamble().clone(); + match (*http_response).clone().decode_rpc_get_pox_info() { + Ok(pox_data) => { + assert_eq!(Some(pox_data.clone()), *pox_server_info.borrow()); + true + } + Err(e) => { + error!("Invalid response: {:?}", &e); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_getpoxinfo_use_latest_tip() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for pox + // info against the unconfirmed state will succeed. 
+ let pox_server_info = RefCell::new(None); + test_rpc( + function_name!(), + 40004, + 40005, + 50004, + 50005, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let mut sortdb = peer_server.sortdb.as_mut().unwrap(); + let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; + let stacks_block_id = chainstate + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + let pox_info = RPCPoxInfoData::from_db( + &mut sortdb, + chainstate, + &stacks_block_id, + &peer_client.config.burnchain, + ) + .unwrap(); + *pox_server_info.borrow_mut() = Some(pox_info); + convo_client.new_getpoxinfo(TipRequest::UseLatestUnconfirmedTip) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match (*http_response).clone().decode_rpc_get_pox_info() { + Ok(pox_data) => { + assert_eq!(Some(pox_data.clone()), *pox_server_info.borrow()); + true + } + Err(e) => { + error!("Invalid response: {:?}", &e); + false + } + } + }, + ); +} +*/ From 490866edbeb0dc5af3829b84ccb0fe11aca5265b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:14:19 -0400 Subject: [PATCH 071/107] refactor: put get-stackerdb-chunk RPC handler into its own file --- stackslib/src/net/api/getstackerdbchunk.rs | 270 ++++++++++++++++++ .../src/net/api/tests/getstackerdbchunk.rs | 229 +++++++++++++++ 2 files changed, 499 insertions(+) create mode 100644 stackslib/src/net/api/getstackerdbchunk.rs create mode 100644 stackslib/src/net/api/tests/getstackerdbchunk.rs diff --git a/stackslib/src/net/api/getstackerdbchunk.rs b/stackslib/src/net/api/getstackerdbchunk.rs new file mode 100644 index 0000000000..27ed845a1a --- /dev/null +++ b/stackslib/src/net/api/getstackerdbchunk.rs @@ -0,0 +1,270 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{ + httpcore::{request, HttpPreambleExtensions, StacksHttp}, + Error as NetError, TipRequest, +}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlock; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::CLARITY_NAME_REGEX; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use libstackerdb::SlotMetadata; +use libstackerdb::STACKERDB_MAX_CHUNK_SIZE; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCGetStackerDBChunkRequestHandler { + pub contract_identifier: Option, + pub slot_id: Option, + pub slot_version: Option, +} +impl RPCGetStackerDBChunkRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + slot_id: None, + slot_version: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStackerDBChunkRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)(/(?P[0-9]+)){{0,1}}$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let slot_id = request::get_u32(captures, "slot_id")?; + let slot_version = if captures.name("slot_version").is_some() { + Some(request::get_u32(captures, "slot_version")?) + } else { + None + }; + + self.contract_identifier = Some(contract_identifier); + self.slot_id = Some(slot_id); + self.slot_version = slot_version; + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStackerDBChunkRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.slot_id = None; + self.slot_version = None; + } + + /// Make the response. + /// NOTE: it's not safe to stream chunks; they have to be sent all at once. + /// This is because any streaming algorithm that does not lock the chunk row is at risk of + /// racing a chunk-download or a chunk-push, which would atomically overwrite the data being + /// streamed (and lead to corrupt data being sent). As a result, StackerDB chunks are capped + /// at 1MB, and StackerDB replication is always an opt-in protocol. Node operators subscribe + /// to StackerDB replicas at their own risk. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let slot_id = self + .slot_id + .take() + .ok_or(NetError::SendError("`slot_id` not set".into()))?; + let slot_version = self.slot_version.take(); + + let chunk_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + let chunk_res = if let Some(version) = slot_version.as_ref() { + network + .get_stackerdbs() + .get_chunk(&contract_identifier, slot_id, *version) + .map(|chunk_data| chunk_data.map(|chunk_data| chunk_data.data)) + } else { + network + .get_stackerdbs() + .get_latest_chunk(&contract_identifier, slot_id) + }; + + match chunk_res { + Ok(Some(chunk)) => Ok(chunk), + Ok(None) | Err(NetError::NoSuchStackerDB(..)) => { + // not found + Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB contract or chunk not found".to_string()), + )) + } + Err(e) => { + // some other error + error!("Failed to load StackerDB chunk"; + "smart_contract_id" => contract_identifier.to_string(), + "slot_id" => slot_id, + "slot_version" => slot_version, + "error" => format!("{:?}", &e) + ); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new("Failed to load StackerDB chunk".to_string()), + )) + } + } + }); + + let chunk_resp = match chunk_resp { + Ok(chunk) => chunk, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + 
None, + HttpContentType::Bytes, + ); + + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::from_ram(chunk_resp); + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStackerDBChunkRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let data: Vec = parse_bytes(preamble, body, STACKERDB_MAX_CHUNK_SIZE.into())?; + Ok(HttpResponsePayload::Bytes(data)) + } +} + +impl StacksHttpRequest { + /// Make a request for a stackerDB's chunk + pub fn new_get_stackerdb_chunk( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + slot_id: u32, + slot_version: Option, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + if let Some(version) = slot_version { + format!( + "/v2/stackerdb/{}/{}/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name, slot_id, version + ) + } else { + format!( + "/v2/stackerdb/{}/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name, slot_id + ) + }, + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a chunk + /// If it fails, return Self::Error(..) + pub fn decode_stackerdb_chunk(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let chunk_bytes: Vec = contents.try_into()?; + Ok(chunk_bytes) + } +} diff --git a/stackslib/src/net/api/tests/getstackerdbchunk.rs b/stackslib/src/net/api/tests/getstackerdbchunk.rs new file mode 100644 index 0000000000..4017e895d4 --- /dev/null +++ b/stackslib/src/net/api/tests/getstackerdbchunk.rs @@ -0,0 +1,229 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
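The chunk handler's path regex treats the trailing slot version as optional, which is why the same endpoint serves both the latest chunk and a specific version. Below is a standalone sketch of that matching behavior, using simplified stand-ins for STANDARD_PRINCIPAL_REGEX_STRING and CONTRACT_NAME_REGEX_STRING (the real patterns from clarity::vm::representations are stricter).

use regex::Regex;

fn main() {
    // Simplified stand-ins for the principal and contract-name sub-patterns.
    let principal = r"[0-9A-Z]+";
    let contract = r"[a-zA-Z0-9\-]+";
    let re = Regex::new(&format!(
        r"^/v2/stackerdb/(?P<address>{})/(?P<contract>{})/(?P<slot_id>[0-9]+)(/(?P<slot_version>[0-9]+)){{0,1}}$",
        principal, contract
    ))
    .unwrap();

    // Latest chunk: no slot_version capture.
    let caps = re
        .captures("/v2/stackerdb/ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R/hello-world/0")
        .unwrap();
    assert!(caps.name("slot_version").is_none());

    // Specific version: slot_version capture is present.
    let caps = re
        .captures("/v2/stackerdb/ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R/hello-world/0/2")
        .unwrap();
    assert_eq!(caps.name("slot_version").unwrap().as_str(), "2");
}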
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use libstackerdb::SlotMetadata; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(); + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(32), + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstackerdbchunk::RPCGetStackerDBChunkRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some(contract_identifier.clone()) + ); + assert_eq!(handler.slot_id, Some(0)); + assert_eq!(handler.slot_version, Some(32)); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.slot_id.is_none()); + assert!(handler.slot_version.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let contract_identifier = + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(); + let none_contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-ext", + ) + .unwrap(); + + // latest chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + None, + ); + requests.push(request); + + // specific chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(1), + ); + requests.push(request); + + // wrong version + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(2), + ); + requests.push(request); + + // no data + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 1, + None, + ); + requests.push(request); + + // no chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 
4093, + None, + ); + requests.push(request); + + // no contract + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + none_contract_identifier.clone(), + 0, + None, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(std::str::from_utf8(&resp).unwrap(), "hello world"); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(std::str::from_utf8(&resp).unwrap(), "hello world"); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(resp.len(), 0); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 90242db08f0525247074fe877af79ea8744c608d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:14:42 -0400 Subject: [PATCH 072/107] refactor: put get-stackerdb-metadata RPC handler into its own file --- stackslib/src/net/api/getstackerdbmetadata.rs | 203 ++++++++++++++++++ .../src/net/api/tests/getstackerdbmetadata.rs | 154 +++++++++++++ 2 files changed, 357 insertions(+) create mode 100644 stackslib/src/net/api/getstackerdbmetadata.rs create mode 100644 stackslib/src/net/api/tests/getstackerdbmetadata.rs diff --git a/stackslib/src/net/api/getstackerdbmetadata.rs b/stackslib/src/net/api/getstackerdbmetadata.rs new file mode 100644 index 0000000000..905f6e04ae --- /dev/null +++ b/stackslib/src/net/api/getstackerdbmetadata.rs @@ -0,0 +1,203 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{ + httpcore::{request, HttpPreambleExtensions, StacksHttp}, + Error as NetError, TipRequest, +}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlock; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use libstackerdb::SlotMetadata; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCGetStackerDBMetadataRequestHandler { + pub contract_identifier: Option, +} +impl RPCGetStackerDBMetadataRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStackerDBMetadataRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + self.contract_identifier = Some(contract_identifier); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStackerDBMetadataRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + + let metadata_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + network + .get_stackerdbs() + .get_db_slot_metadata(&contract_identifier) + .map_err(|_e| { + StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB contract not found".to_string()), + ) + }) + }); + + let metadata_resp = match metadata_resp { + Ok(metadata) => metadata, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&metadata_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStackerDBMetadataRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let metadata: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?) + } +} + +impl StacksHttpRequest { + pub fn new_get_stackerdb_metadata( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/stackerdb/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name + ), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a block. + /// If it fails, return Self::Error(..) 
+ pub fn decode_stackerdb_metadata(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: Vec = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/tests/getstackerdbmetadata.rs b/stackslib/src/net/api/tests/getstackerdbmetadata.rs new file mode 100644 index 0000000000..89cc1c9cd1 --- /dev/null +++ b/stackslib/src/net/api/tests/getstackerdbmetadata.rs @@ -0,0 +1,154 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(); + let request = + StacksHttpRequest::new_get_stackerdb_metadata(addr.into(), contract_identifier.clone()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some(contract_identifier.clone()) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); 
+} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let contract_identifier = + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(); + let none_contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-ext", + ) + .unwrap(); + + let request = + StacksHttpRequest::new_get_stackerdb_metadata(addr.into(), contract_identifier.clone()); + requests.push(request); + + // no contract + let request = StacksHttpRequest::new_get_stackerdb_metadata( + addr.into(), + none_contract_identifier.clone(), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_metadata().unwrap(); + + assert_eq!(resp.len(), 6); + for (i, slot) in resp.iter().enumerate() { + assert_eq!(slot.slot_id, i as u32); + + if i > 0 { + assert_eq!(slot.slot_version, 0); + assert_eq!(slot.data_hash, Sha512Trunc256Sum([0u8; 32])); + assert_eq!(slot.signature, MessageSignature::empty()); + } else { + assert_eq!(slot.slot_version, 1); + assert_eq!( + slot.data_hash, + Sha512Trunc256Sum::from_data("hello world".as_bytes()) + ); + } + } + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 863901d859d3a6f12859271a8f61a0e1c5a5bae3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:15:01 -0400 Subject: [PATCH 073/107] refactor: put get-stx-transfer-cost RPC handler into its own file --- stackslib/src/net/api/getstxtransfercost.rs | 138 ++++++++++++++++++ .../src/net/api/tests/getstxtransfercost.rs | 98 +++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 stackslib/src/net/api/getstxtransfercost.rs create mode 100644 stackslib/src/net/api/tests/getstxtransfercost.rs diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs new file mode 100644 index 0000000000..dda7e25f24 --- /dev/null +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -0,0 +1,138 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; + +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; + +use crate::version_string; + +#[derive(Clone)] +pub struct RPCGetStxTransferCostRequestHandler {} +impl RPCGetStxTransferCostRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStxTransferCostRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/fees/transfer$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // todo -- need to actually estimate the cost / length for token transfers + // right now, it just uses the minimum. + let fee = MINIMUM_TX_FEE_RATE_PER_BYTE; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&fee)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStxTransferCostRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let fee: u64 = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(fee)?) 
+ } +} + +impl StacksHttpRequest { + pub fn new_get_stx_transfer_cost(host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/fees/transfer".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stx_transfer_fee(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let fee: u64 = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(fee) + } +} diff --git a/stackslib/src/net/api/tests/getstxtransfercost.rs b/stackslib/src/net/api/tests/getstxtransfercost.rs new file mode 100644 index 0000000000..38bb0877f6 --- /dev/null +++ b/stackslib/src/net/api/tests/getstxtransfercost.rs @@ -0,0 +1,98 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, +}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use super::test_rpc; + +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; + +use crate::net::connection::ConnectionOptions; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_stx_transfer_cost(addr.into()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstxtransfercost::RPCGetStxTransferCostRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let request = StacksHttpRequest::new_get_stx_transfer_cost(addr.into()); + + let mut responses = test_rpc(function_name!(), vec![request]); + assert_eq!(responses.len(), 1); + + let response = responses.pop().unwrap(); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let fee_rate = response.decode_stx_transfer_fee().unwrap(); + assert_eq!(fee_rate, MINIMUM_TX_FEE_RATE_PER_BYTE); +} From 74ae694dfb258af76ed4f4915242066fec2994a5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:15:19 -0400 Subject: [PATCH 074/107] refactor: put get-transaction-unconfirmed RPC handler into its own file --- .../src/net/api/gettransaction_unconfirmed.rs | 222 ++++++++++++++++++ .../api/tests/gettransaction_unconfirmed.rs | 138 +++++++++++ 2 files changed, 360 insertions(+) create mode 100644 stackslib/src/net/api/gettransaction_unconfirmed.rs create mode 100644 stackslib/src/net/api/tests/gettransaction_unconfirmed.rs diff --git a/stackslib/src/net/api/gettransaction_unconfirmed.rs b/stackslib/src/net/api/gettransaction_unconfirmed.rs new file mode 100644 index 0000000000..7345abcffe --- /dev/null +++ b/stackslib/src/net/api/gettransaction_unconfirmed.rs @@ -0,0 +1,222 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
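The handler below looks a transaction up by txid in the unconfirmed microblock stream and then the mempool, answering GET /v2/transactions/unconfirmed/{txid}. Here is a rough client-side sketch of interpreting the result; obtaining the StacksHttpResponse from the node is out of scope, and the error type is assumed to be NetError as with the other decode helpers.

use stacks_common::types::net::PeerHost;

use crate::burnchains::Txid;
use crate::net::api::gettransaction_unconfirmed::UnconfirmedTransactionStatus;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

/// Ask a node where an unconfirmed transaction currently lives.
fn locate_unconfirmed_tx(
    host: PeerHost,
    txid: Txid,
    response: StacksHttpResponse,
) -> Result<(), NetError> {
    // GET /v2/transactions/unconfirmed/{txid}
    let _request = StacksHttpRequest::new_gettransaction_unconfirmed(host, txid.clone());

    // A 404 means the node knows nothing about this txid; otherwise the JSON
    // body says whether it is still in the mempool or already mined into an
    // unconfirmed microblock.
    let txinfo = response.decode_gettransaction_unconfirmed()?;
    match txinfo.status {
        UnconfirmedTransactionStatus::Mempool => {
            debug!("tx {} is still in the mempool", &txid);
        }
        UnconfirmedTransactionStatus::Microblock { block_hash, seq } => {
            debug!("tx {} is in microblock {} (seq {})", &txid, &block_hash, seq);
        }
    }
    Ok(())
}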
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum UnconfirmedTransactionStatus { + Microblock { + block_hash: BlockHeaderHash, + seq: u16, + }, + Mempool, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct UnconfirmedTransactionResponse { + pub tx: String, + pub status: UnconfirmedTransactionStatus, +} + +#[derive(Clone)] +pub struct RPCGetTransactionUnconfirmedRequestHandler { + pub txid: Option, +} +impl RPCGetTransactionUnconfirmedRequestHandler { + pub fn new() -> Self { + Self { txid: None } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetTransactionUnconfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/transactions/unconfirmed/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetTransactionUnconfirmed" + .to_string(), + )); + } + + let txid = request::get_txid(captures, "txid")?; + self.txid = Some(txid); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetTransactionUnconfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.txid = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let txid = self + .txid + .take() + .ok_or(NetError::SendError("`txid` no set".into()))?; + + let txinfo_res = + node.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { + // present in the unconfirmed state? 
+ if let Some(ref unconfirmed) = chainstate.unconfirmed_state.as_ref() { + if let Some((transaction, mblock_hash, seq)) = + unconfirmed.get_unconfirmed_transaction(&txid) + { + return Ok(UnconfirmedTransactionResponse { + status: UnconfirmedTransactionStatus::Microblock { + block_hash: mblock_hash, + seq: seq, + }, + tx: to_hex(&transaction.serialize_to_vec()), + }); + } + } + + // present in the mempool? + if let Some(txinfo) = MemPoolDB::get_tx(mempool.conn(), &txid)? { + return Ok(UnconfirmedTransactionResponse { + status: UnconfirmedTransactionStatus::Mempool, + tx: to_hex(&txinfo.tx.serialize_to_vec()), + }); + } + + return Err(NetError::NotFoundError); + }); + + let txinfo = match txinfo_res { + Ok(txinfo) => txinfo, + Err(NetError::NotFoundError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!( + "Transaction {} not found in mempool or unconfirmed microblock stream", + &txid + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to query transaction {}: {:?}", + &txid, &e + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&txinfo)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetTransactionUnconfirmedRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let txinfo: UnconfirmedTransactionResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(txinfo)?) + } +} + +impl StacksHttpRequest { + /// Make a new get-unconfirmed-tx request + pub fn new_gettransaction_unconfirmed(host: PeerHost, txid: Txid) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/transactions/unconfirmed/{}", &txid), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_gettransaction_unconfirmed( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let txinfo: UnconfirmedTransactionResponse = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(txinfo) + } +} diff --git a/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs b/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs new file mode 100644 index 0000000000..5d8eeaa0d2 --- /dev/null +++ b/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs @@ -0,0 +1,138 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use crate::net::api::gettransaction_unconfirmed::UnconfirmedTransactionStatus; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_gettransaction_unconfirmed(addr.into(), Txid([0x11; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.txid, Some(Txid([0x11; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.txid.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let mut requests = vec![]; + + // get mempool txn + let request = StacksHttpRequest::new_gettransaction_unconfirmed( + addr.into(), + rpc_test.mempool_txids[0].clone(), + ); + requests.push(request); + + // get microblock txn + let request = StacksHttpRequest::new_gettransaction_unconfirmed( + addr.into(), + rpc_test.microblock_txids[0].clone(), + ); + requests.push(request); + + // get neither + let request = StacksHttpRequest::new_gettransaction_unconfirmed(addr.into(), Txid([0x21; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_gettransaction_unconfirmed().unwrap(); + assert_eq!(resp.status, UnconfirmedTransactionStatus::Mempool); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_gettransaction_unconfirmed().unwrap(); + match resp.status { + UnconfirmedTransactionStatus::Microblock { .. 
} => {} + _ => { + panic!("Not in microblock"); + } + }; + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} From 3a7f82a30aee60461723a5354e29da6d38f1768c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:15:41 -0400 Subject: [PATCH 075/107] refactor: put post-blcok RPC handler into its own file --- stackslib/src/net/api/postblock.rs | 306 +++++++++++++++++++++++ stackslib/src/net/api/tests/postblock.rs | 173 +++++++++++++ 2 files changed, 479 insertions(+) create mode 100644 stackslib/src/net/api/postblock.rs create mode 100644 stackslib/src/net/api/tests/postblock.rs diff --git a/stackslib/src/net/api/postblock.rs b/stackslib/src/net/api/postblock.rs new file mode 100644 index 0000000000..04692d6e6a --- /dev/null +++ b/stackslib/src/net/api/postblock.rs @@ -0,0 +1,306 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksTransaction; +use crate::chainstate::stacks::TransactionPayload; +use crate::core::mempool::MemPoolDB; +use crate::net::relay::Relayer; +use crate::net::Attachment; +use crate::net::BlocksData; +use crate::net::BlocksDatum; +use crate::net::StacksMessageType; + +use stacks_common::codec::Error as CodecError; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_PAYLOAD_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; +use 
stacks_common::util::retry::BoundReader; + +use crate::cost_estimates::FeeRateEstimate; + +use clarity::vm::costs::ExecutionCost; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct StacksBlockAcceptedData { + pub stacks_block_id: StacksBlockId, + pub accepted: bool, +} + +#[derive(Clone)] +pub struct RPCPostBlockRequestHandler { + pub block: Option, + pub consensus_hash: Option, +} + +impl RPCPostBlockRequestHandler { + pub fn new() -> Self { + Self { + block: None, + consensus_hash: None, + } + } + + /// Decode a bare block from the body + fn parse_postblock_octets(mut body: &[u8]) -> Result { + let block = StacksBlock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + })?; + Ok(block) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostBlockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/blocks/upload/(?P[0-9a-f]{40})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock takes application/octet-stream".to_string(), + )); + } + + let consensus_hash = request::get_consensus_hash(captures, "consensus_hash")?; + let block = Self::parse_postblock_octets(body)?; + + self.consensus_hash = Some(consensus_hash); + self.block = Some(block); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostBlockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.consensus_hash = None; + self.block = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // get out the request body + let block = self + .block + .take() + .ok_or(NetError::SendError("`block` not set".into()))?; + let consensus_hash = self + .consensus_hash + .take() + .ok_or(NetError::SendError("`consensus_hash` not set".into()))?; + + let block_hash = block.block_hash(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + match SortitionDB::get_sortition_id_by_consensus(&sortdb.conn(), &consensus_hash) { + Ok(Some(_)) => { + // we recognize this consensus hash + let ic = sortdb.index_conn(); + match Relayer::process_new_anchored_block( + &ic, + chainstate, + &consensus_hash, + &block, + 0, + ) { + Ok(accepted) => { + debug!( + "{} Stacks block {}/{}", + if accepted { + "Accepted" + } else { + "Did not accept" + }, + &consensus_hash, + &block_hash, + ); + return Ok(accepted); + } + Err(e) => { + let msg = format!( + "Failed to process anchored block {}/{}: {:?}", + 
consensus_hash, + &block.block_hash(), + &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + } + } + Ok(None) => { + let msg = format!( + "Unrecognized consensus hash {} for block {}", + consensus_hash, + &block.block_hash() + ); + debug!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )); + } + Err(e) => { + let msg = format!( + "Failed to query sortition ID by consensus '{}': {:?}", + consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + } + }); + + let data_resp = match data_resp { + Ok(accepted) => StacksBlockAcceptedData { + accepted, + stacks_block_id: StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block_hash, + ), + }, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + // don't forget to forward this to the p2p network! + if data_resp.accepted { + node.set_relay_message(StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + })); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostBlockRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let accepted: StacksBlockAcceptedData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(accepted)?) + } +} + +impl StacksHttpRequest { + /// Make a new post-block request + pub fn new_post_block( + host: PeerHost, + ch: ConsensusHash, + block: StacksBlock, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!("/v2/blocks/upload/{}", &ch), + HttpRequestContents::new().payload_stacks(&block), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stacks_block_accepted(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let result: StacksBlockAcceptedData = serde_json::from_value(response_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(result) + } +} diff --git a/stackslib/src/net/api/tests/postblock.rs b/stackslib/src/net/api/tests/postblock.rs new file mode 100644 index 0000000000..816c6909de --- /dev/null +++ b/stackslib/src/net/api/tests/postblock.rs @@ -0,0 +1,173 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
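
For orientation, this is roughly how a caller pairs the constructor and decoder added in postblock.rs above. It is a minimal sketch, not part of the patch: the network round trip is elided, and `host`, `ch`, `block`, and `response` are assumed to be supplied by the caller.

use stacks_common::types::chainstate::ConsensusHash;
use stacks_common::types::net::PeerHost;

use crate::chainstate::stacks::StacksBlock;
use crate::net::api::postblock::StacksBlockAcceptedData;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

// Illustrative sketch only: build a POST /v2/blocks/upload/{consensus_hash} request
// and decode the acceptance result; actually sending the bytes is left out.
fn upload_block_sketch(
    host: PeerHost,
    ch: ConsensusHash,
    block: StacksBlock,
    response: StacksHttpResponse,
) -> Result<StacksBlockAcceptedData, NetError> {
    let request = StacksHttpRequest::new_post_block(host, ch, block);
    let _bytes = request
        .try_serialize()
        .map_err(|_| NetError::SendError("failed to serialize request".into()))?;

    // ... write `_bytes` to the peer and read its reply into `response` ...

    // `accepted` is false when the node does not store the block, e.g. because it
    // already has it -- the "idempotent" case exercised in the test below.
    response.decode_stacks_block_accepted()
}
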
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; + +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let block = make_codec_test_block(3); + let request = + StacksHttpRequest::new_post_block(addr.into(), ConsensusHash([0x11; 20]), block.clone()); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock::RPCPostBlockRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.consensus_hash, Some(ConsensusHash([0x11; 20]))); + assert_eq!(handler.block, Some(block.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.consensus_hash.is_none()); + assert!(handler.block.is_none()); + + // try to deal with an invalid block + let mut bad_block = block.clone(); + bad_block.txs.clear(); + + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let request = StacksHttpRequest::new_post_block( + addr.into(), + ConsensusHash([0x11; 20]), + bad_block.clone(), + ); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock::RPCPostBlockRequestHandler::new(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad block"); + } + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_block_id = StacksBlockHeader::make_index_block_hash( + &rpc_test.next_block.0, + &rpc_test.next_block.1.block_hash(), + ); + let mut requests = vec![]; + + // post the block + let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + // idempotent + let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + // fails if the consensus hash is not 
recognized + let request = StacksHttpRequest::new_post_block( + addr.into(), + ConsensusHash([0x11; 20]), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, stacks_block_id); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, stacks_block_id); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From 213f141dea7c7dba909a967bd1f1ba81ba9262ad Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:15:59 -0400 Subject: [PATCH 076/107] refactor: put fee-rate estimator RPC handler into its own file --- stackslib/src/net/api/postfeerate.rs | 309 +++++++++++++++++++++ stackslib/src/net/api/tests/postfeerate.rs | 124 +++++++++ 2 files changed, 433 insertions(+) create mode 100644 stackslib/src/net/api/postfeerate.rs create mode 100644 stackslib/src/net/api/tests/postfeerate.rs diff --git a/stackslib/src/net/api/postfeerate.rs b/stackslib/src/net/api/postfeerate.rs new file mode 100644 index 0000000000..ed713bb4c3 --- /dev/null +++ b/stackslib/src/net/api/postfeerate.rs @@ -0,0 +1,309 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
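
The handler in this file parses a small JSON body. As a sketch of what a well-formed body looks like -- using the field names from `FeeRateEstimateRequestBody` below, with a made-up length hint -- a caller could build it like this:

use stacks_common::codec::StacksMessageCodec;
use stacks_common::util::hash::to_hex;

use crate::chainstate::stacks::TransactionPayload;

// Illustrative only; mirrors the shape the parser below accepts.
fn fee_request_body_sketch(tx_payload: &TransactionPayload) -> serde_json::Value {
    serde_json::json!({
        // Optional hint; the handler uses max(estimated_len, actual payload length).
        "estimated_len": 123,
        // Hex-encoded, consensus-serialized payload; a leading "0x" prefix is also accepted.
        "transaction_payload": to_hex(&tx_payload.serialize_to_vec())
    })
}
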
+ +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::TransactionPayload; +use crate::core::mempool::MemPoolDB; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_PAYLOAD_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; +use stacks_common::util::retry::BoundReader; + +use crate::cost_estimates::FeeRateEstimate; + +use clarity::vm::costs::ExecutionCost; + +#[derive(Serialize, Deserialize)] +pub struct FeeRateEstimateRequestBody { + #[serde(default)] + pub estimated_len: Option, + pub transaction_payload: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCFeeEstimate { + pub fee_rate: f64, + pub fee: u64, +} + +impl RPCFeeEstimate { + pub fn estimate_fees(scalar: u64, fee_rates: FeeRateEstimate) -> Vec { + let estimated_fees_f64 = fee_rates.clone() * (scalar as f64); + vec![ + RPCFeeEstimate { + fee: estimated_fees_f64.low as u64, + fee_rate: fee_rates.low, + }, + RPCFeeEstimate { + fee: estimated_fees_f64.middle as u64, + fee_rate: fee_rates.middle, + }, + RPCFeeEstimate { + fee: estimated_fees_f64.high as u64, + fee_rate: fee_rates.high, + }, + ] + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCFeeEstimateResponse { + pub estimated_cost: ExecutionCost, + pub estimated_cost_scalar: u64, + pub estimations: Vec, + pub cost_scalar_change_by_byte: f64, +} + +#[derive(Clone)] +pub struct RPCPostFeeRateRequestHandler { + pub estimated_len: Option, + pub transaction_payload: Option, +} +impl RPCPostFeeRateRequestHandler { + pub fn new() -> Self { + Self { + estimated_len: None, + transaction_payload: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostFeeRateRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/fees/transaction$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < MAX_PAYLOAD_LEN) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for FeeRateEstimate ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".to_string(), + )); + } + + let body: FeeRateEstimateRequestBody = serde_json::from_slice(body) + .map_err(|e| Error::DecodeError(format!("Failed to parse JSON body: {}", e)))?; + + let payload_hex = if body.transaction_payload.starts_with("0x") { + &body.transaction_payload[2..] + } else { + &body.transaction_payload + }; + + let payload_data = hex_bytes(payload_hex).map_err(|_e| { + Error::DecodeError("Bad hex string supplied for transaction payload".into()) + })?; + + let tx = TransactionPayload::consensus_deserialize(&mut payload_data.as_slice())?; + let estimated_len = + std::cmp::max(body.estimated_len.unwrap_or(0), payload_data.len() as u64); + + self.transaction_payload = Some(tx); + self.estimated_len = Some(estimated_len); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostFeeRateRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.estimated_len = None; + self.transaction_payload = None; + } + + /// Make the response + /// TODO: accurately estimate the cost/length fee for token transfers, based on mempool + /// pressure. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let estimated_len = self + .estimated_len + .take() + .ok_or(NetError::SendError("`estimated_len` not set".into()))?; + let tx = self + .transaction_payload + .take() + .ok_or(NetError::SendError("`transaction_payload` not set".into()))?; + + let data_resp = + node.with_node_state(|_network, sortdb, _chainstate, _mempool, rpc_args| { + let tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, tip.block_height)?; + + if let Some((cost_estimator, fee_estimator, metric)) = rpc_args.get_estimators_ref() + { + let estimated_cost = cost_estimator + .estimate_cost(&tx, &stacks_epoch.epoch_id) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate tx {}: {:?}", + &tx.name(), + &e + )), + ) + })?; + + let scalar_cost = metric.from_cost_and_len( + &estimated_cost, + &stacks_epoch.block_limit, + estimated_len, + ); + let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate fees for tx {}: {:?}", + &tx.name(), + &e + )), + ) + })?; + + let mut estimations = + RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); + + let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; + + for estimate in estimations.iter_mut() { + if estimate.fee < minimum_fee { + estimate.fee = minimum_fee; + } + } + + Ok(RPCFeeEstimateResponse { + estimated_cost, + estimations, + estimated_cost_scalar: scalar_cost, + cost_scalar_change_by_byte: metric.change_per_byte(), + }) + } else { + debug!("Fee 
and cost estimation not configured on this stacks node"); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new( + "Fee estimation not supported on this node".to_string(), + ), + )) + } + }); + + let data_resp = match data_resp { + Ok(data) => data, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostFeeRateRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let fee: RPCFeeEstimateResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(fee)?) + } +} + +impl StacksHttpResponse { + pub fn decode_fee_estimate(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let fee: RPCFeeEstimateResponse = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(fee) + } +} + +impl StacksHttpRequest { + pub fn new_post_fee_rate( + host: PeerHost, + fee_request: FeeRateEstimateRequestBody, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/fees/transaction".into(), + HttpRequestContents::new().payload_json( + serde_json::to_value(fee_request) + .expect("FATAL: failed to encode fee rate request to JSON"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/tests/postfeerate.rs b/stackslib/src/net/api/tests/postfeerate.rs new file mode 100644 index 0000000000..64b5d87f39 --- /dev/null +++ b/stackslib/src/net/api/tests/postfeerate.rs @@ -0,0 +1,124 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
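
As a reading aid for the handler tested in this file: each quoted estimate is a fee rate multiplied by the cost scalar, and the result is clamped so it never falls below `estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE`. A toy recreation of that arithmetic, with invented numbers standing in for the estimator outputs:

// Toy values only; just the low/middle/high formula from try_handle_request above.
fn clamped_fee_quotes_sketch() -> Vec<u64> {
    let scalar_cost: u64 = 500; // stand-in for metric.from_cost_and_len(...)
    let estimated_len: u64 = 180; // max(body hint, payload length)
    let min_fee_rate_per_byte: u64 = 1; // stand-in for MINIMUM_TX_FEE_RATE_PER_BYTE
    let fee_rates = [1.0_f64, 2.5, 5.0]; // low / middle / high from the fee estimator

    let minimum_fee = estimated_len * min_fee_rate_per_byte;
    fee_rates
        .iter()
        .map(|rate| {
            let fee = (rate * scalar_cost as f64) as u64;
            // The handler never quotes below the per-byte minimum.
            fee.max(minimum_fee)
        })
        .collect()
}
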
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::to_hex; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; + +use crate::chainstate::stacks::TransactionPayload; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::test_rpc; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let sender_addr = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(); + let tx_payload = + TransactionPayload::new_contract_call(sender_addr, "hello-world", "add-unit", vec![]) + .unwrap(); + + let request = StacksHttpRequest::new_post_fee_rate( + addr.into(), + postfeerate::FeeRateEstimateRequestBody { + estimated_len: Some(123), + transaction_payload: to_hex(&tx_payload.serialize_to_vec()), + }, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postfeerate::RPCPostFeeRateRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.estimated_len, Some(123)); + assert_eq!(handler.transaction_payload, Some(tx_payload.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.estimated_len.is_none()); + assert!(handler.transaction_payload.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let sender_addr = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(); + let tx_payload = + TransactionPayload::new_contract_call(sender_addr, "hello-world", "add-unit", vec![]) + .unwrap(); + + let mut requests = vec![]; + let request = StacksHttpRequest::new_post_fee_rate( + addr.into(), + postfeerate::FeeRateEstimateRequestBody { + estimated_len: Some(123), + transaction_payload: to_hex(&tx_payload.serialize_to_vec()), + }, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} From 6d3604b2755a7691a67ab7c6314767193b9c3c0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:16:24 -0400 Subject: [PATCH 077/107] refactor: put mempool query RPC handler into its own file --- 
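
Note (illustration only, not part of the change): the query is paginated -- each response can end with a trailing page ID, and the client re-posts with that `page_id` until no further page comes back, mirroring the loop in the tests below. A minimal client-side sketch of that loop, built on the constructor and decoder added in this patch; the request builder and the transport step are hypothetical closures supplied by the caller.

use crate::burnchains::Txid;
use crate::chainstate::stacks::StacksTransaction;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

fn drain_remote_mempool_sketch(
    // Would typically call StacksHttpRequest::new_mempool_query(host, query, page_id).
    mut new_query_request: impl FnMut(Option<Txid>) -> StacksHttpRequest,
    // Hypothetical transport shim: send the request, return the peer's response.
    mut send_and_receive: impl FnMut(StacksHttpRequest) -> Result<StacksHttpResponse, NetError>,
) -> Result<Vec<StacksTransaction>, NetError> {
    let mut all_txs = vec![];
    let mut page_id: Option<Txid> = None;
    loop {
        let request = new_query_request(page_id.take());
        let response = send_and_receive(request)?;
        let (mut txs, next_page) = response.decode_mempool_txs_page()?;
        all_txs.append(&mut txs);
        match next_page {
            Some(next) => page_id = Some(next), // resume from the trailing page ID
            None => break,                      // stream exhausted
        }
    }
    Ok(all_txs)
}
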
stackslib/src/net/api/postmempoolquery.rs | 376 ++++++++++++++ .../src/net/api/tests/postmempoolquery.rs | 469 ++++++++++++++++++ 2 files changed, 845 insertions(+) create mode 100644 stackslib/src/net/api/postmempoolquery.rs create mode 100644 stackslib/src/net/api/tests/postmempoolquery.rs diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs new file mode 100644 index 0000000000..ab69c860fb --- /dev/null +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -0,0 +1,376 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::MAX_HEADERS; +use crate::net::{httpcore::StacksHttp, Error as NetError, TipRequest}; + +use crate::burnchains::Txid; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksTransaction; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use crate::core::mempool::decode_tx_stream; +use crate::core::mempool::MemPoolDB; +use crate::core::mempool::MemPoolSyncData; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +use url::form_urlencoded; + +use rand::thread_rng; +use rand::Rng; + +#[derive(Clone)] +pub struct RPCMempoolQueryRequestHandler { + pub page_id: Option, + pub mempool_query: Option, +} + +impl RPCMempoolQueryRequestHandler { + pub fn new() -> Self { + Self { + page_id: None, + mempool_query: None, + } + } + + /// Obtain the mempool page_id query string, if it is present + fn get_page_id_query(&self, query: Option<&str>) -> Option { + match query { + Some(query_string) => { + for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { + if key != "page_id" { + continue; + } + if let Ok(page_id) = Txid::from_hex(&value) { + return Some(page_id); + } + } + return None; + } + None => { + return None; + } + } + } +} + +#[derive(Debug)] +pub struct StacksMemPoolStream { + /// Mempool sync data requested + pub tx_query: MemPoolSyncData, + /// last txid loaded + pub 
last_randomized_txid: Txid, + /// number of transactions visited in the DB so far + pub num_txs: u64, + /// maximum we can visit in the query + pub max_txs: u64, + /// height of the chain at time of query + pub height: u64, + /// Are we done sending transactions, and are now in the process of sending the trailing page + /// ID? + pub corked: bool, + /// Did we run out of transactions to send? + pub finished: bool, + /// link to the mempool DB + mempool_db: DBConn, +} + +impl StacksMemPoolStream { + pub fn new( + mempool_db: DBConn, + tx_query: MemPoolSyncData, + max_txs: u64, + height: u64, + page_id_opt: Option, + ) -> Self { + let last_randomized_txid = page_id_opt.unwrap_or_else(|| { + let random_bytes = thread_rng().gen::<[u8; 32]>(); + Txid(random_bytes) + }); + + Self { + tx_query, + last_randomized_txid: last_randomized_txid, + num_txs: 0, + max_txs: max_txs, + height: height, + corked: false, + finished: false, + mempool_db, + } + } +} + +impl HttpChunkGenerator for StacksMemPoolStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + if self.corked { + test_debug!( + "Finished streaming txs; last page was {:?}", + &self.last_randomized_txid + ); + return Ok(vec![]); + } + + if self.num_txs >= self.max_txs || self.finished { + test_debug!( + "Finished sending transactions after {:?}. Corking tx stream.", + &self.last_randomized_txid + ); + + // cork the stream -- send the next page_id the requester should use to continue + // streaming. + self.corked = true; + return Ok(self.last_randomized_txid.serialize_to_vec()); + } + + let remaining = self.max_txs.saturating_sub(self.num_txs); + let (next_txs, next_last_randomized_txid_opt, num_rows_visited) = + MemPoolDB::static_find_next_missing_transactions( + &self.mempool_db, + &self.tx_query, + self.height, + &self.last_randomized_txid, + 1, + remaining, + ) + .map_err(|e| format!("Failed to find next missing transactions: {:?}", &e))?; + + debug!( + "Streaming mempool propagation stepped"; + "rows_visited" => num_rows_visited, + "last_rand_txid" => %self.last_randomized_txid, + "num_txs" => self.num_txs, + "max_txs" => self.max_txs + ); + + if next_txs.len() > 0 { + // have another tx to send + let chunk = next_txs[0].serialize_to_vec(); + if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { + // we have more after this + self.last_randomized_txid = next_last_randomized_txid; + } else { + // that was the last transaction. + // next call will cork the stream + self.finished = true; + } + return Ok(chunk); + } else if let Some(next_txid) = next_last_randomized_txid_opt { + // no more txs to send + test_debug!( + "No rows returned for {}; cork tx stream with next page {}", + &self.last_randomized_txid, + &next_txid + ); + + // send the page ID as the final chunk + let chunk = next_txid.serialize_to_vec(); + self.finished = true; + self.corked = true; + return Ok(chunk); + } else { + test_debug!( + "No more txs to send after {:?}; corking stream", + &self.last_randomized_txid + ); + + // no more transactions, and none after this + self.finished = true; + self.corked = true; + return Ok(vec![]); + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMempoolQueryRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/mempool/query$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected nonzero body length".to_string(), + )); + } + + let mut body_ptr = body; + let mempool_body = MemPoolSyncData::consensus_deserialize(&mut body_ptr)?; + + self.mempool_query = Some(mempool_body); + if let Some(page_id) = self.get_page_id_query(query) { + self.page_id = Some(page_id); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMempoolQueryRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.mempool_query = None; + self.page_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let mempool_query = self + .mempool_query + .take() + .ok_or(NetError::SendError("`mempool_query` not set".into()))?; + let page_id = self.page_id.take(); + + let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { + let height = self.get_stacks_chain_tip(&preamble, sortdb, chainstate).map(|blk| blk.height).unwrap_or(0); + let max_txs = network.connection_opts.mempool_max_tx_query; + debug!( + "Begin mempool query"; + "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), + "block_height" => height, + "max_txs" => max_txs + ); + + let mempool_db = match mempool.reopen(false) { + Ok(db) => db, + Err(e) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to open mempool DB: {:?}", &e)))); + } + }; + + Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, height, page_id)) + }); + + let stream = match stream_res { + Ok(stream) => stream, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMempoolQueryRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +impl StacksHttpRequest { + pub fn new_mempool_query( + host: PeerHost, + query: MemPoolSyncData, + page_id_opt: Option, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/mempool/query".into(), + if let Some(page_id) = page_id_opt { + HttpRequestContents::new() + .query_arg("page_id".into(), format!("{}", &page_id)) + .payload_stacks(&query) + } else { + HttpRequestContents::new().payload_stacks(&query) + }, + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response body into the transactions and next-page ID returned from + /// /v2/mempool/query. 
+ pub fn decode_mempool_txs_page( + self, + ) -> Result<(Vec, Option), NetError> { + let contents = self.get_http_payload_ok()?; + let raw_bytes: Vec = contents.try_into()?; + let (txs, page_id_opt) = decode_tx_stream(&mut &raw_bytes[..])?; + Ok((txs, page_id_opt)) + } +} diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs new file mode 100644 index 0000000000..6445f85c28 --- /dev/null +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -0,0 +1,469 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashSet; +use std::io; + +use stacks_common::codec::read_next; +use stacks_common::codec::Error as CodecError; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::Hash160; + +use crate::util_lib::db::DBConn; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::chainstate_path; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as chainstate_error; +use crate::chainstate::stacks::{ + StacksTransaction, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionVersion, +}; + +use crate::net::api::postmempoolquery::StacksMemPoolStream; + +use crate::net::http::HttpChunkGenerator; + +use crate::core::mempool::decode_tx_stream; +use crate::core::mempool::MemPoolSyncData; +use crate::core::mempool::TxTag; +use crate::core::mempool::MAX_BLOOM_COUNTER_TXS; +use crate::core::MemPoolDB; +use crate::net::Error as NetError; + +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let 
mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_mempool_query( + addr.into(), + MemPoolSyncData::TxTags([0x11; 32], vec![TxTag([0x22; 8])]), + Some(Txid([0x33; 32])), + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmempoolquery::RPCMempoolQueryRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.page_id, Some(Txid([0x33; 32]))); + assert_eq!( + handler.mempool_query, + Some(MemPoolSyncData::TxTags([0x11; 32], vec![TxTag([0x22; 8])])) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.page_id.is_none()); + assert!(handler.mempool_query.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_rpc = TestRPC::setup(function_name!()); + let mempool_txids = test_rpc.mempool_txids.clone(); + let mempool_txids: HashSet<_> = mempool_txids.iter().map(|txid| txid.clone()).collect(); + + let sync_data = test_rpc + .peer_1 + .mempool + .as_ref() + .unwrap() + .make_mempool_sync_data() + .unwrap(); + + let mut requests = vec![]; + let request = StacksHttpRequest::new_mempool_query( + addr.into(), + MemPoolSyncData::TxTags([0x00; 32], vec![]), + Some(Txid([0x00; 32])), + ); + requests.push(request); + + let mut responses = test_rpc.run(requests); + + let response = responses.remove(0); + + let (txs, page) = response.decode_mempool_txs_page().unwrap(); + let received_txids: HashSet<_> = txs.iter().map(|tx| tx.txid()).collect(); + + assert_eq!(received_txids, mempool_txids); + assert!(page.is_none()); +} + +#[test] +fn test_stream_mempool_txs() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + let block_height = 10; + let mut total_len = 0; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + total_len += tx_bytes.len(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + 
&ConsensusHash([0x1 + (block_height as u8); 20]), + &BlockHeaderHash([0x2 + (block_height as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + txs.push(tx); + } + mempool_tx.commit().unwrap(); + + let mut buf = vec![]; + let mut tx_stream_data = StacksMemPoolStream::new( + mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], vec![]), + MAX_BLOOM_COUNTER_TXS.into(), + block_height, + Some(Txid([0u8; 32])), + ); + + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.len() == 0 { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + eprintln!("Read {} bytes of tx data", buf.len()); + + // buf decodes to the list of txs we have + let mut decoded_txs = vec![]; + let mut ptr = &buf[..]; + loop { + let tx: StacksTransaction = match read_next::(&mut ptr) { + Ok(tx) => tx, + Err(e) => match e { + CodecError::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + eprintln!("out of transactions"); + break; + } + _ => { + panic!("IO error: {:?}", &e); + } + }, + _ => { + panic!("other error: {:?}", &e); + } + }, + }; + decoded_txs.push(tx); + } + + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with an empty tx tags + let mut page_id = Txid([0u8; 32]); + let mut decoded_txs = vec![]; + loop { + let mut tx_stream_data = StacksMemPoolStream::new( + mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], vec![]), + 1, + block_height, + Some(page_id), + ); + + let mut buf = vec![]; + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.len() == 0 { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + // buf decodes to the list of txs we have, plus page ids + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (mut next_txs, next_page) = decode_tx_stream(&mut ptr).unwrap(); + + decoded_txs.append(&mut next_txs); + + // for fun, use a page ID that is actually a well-formed prefix of a transaction + if let Some(ref tx) = decoded_txs.last() { + let mut evil_buf = tx.serialize_to_vec(); + let mut evil_page_id = [0u8; 32]; + evil_page_id.copy_from_slice(&evil_buf[0..32]); + evil_buf.extend_from_slice(&evil_page_id); + + test_debug!("Decode evil buf {}", &to_hex(&evil_buf)); + + let (evil_next_txs, evil_next_page) = decode_tx_stream(&mut &evil_buf[..]).unwrap(); + + // should still work + assert_eq!(evil_next_txs.len(), 1); + assert_eq!(evil_next_txs[0].txid(), tx.txid()); + assert_eq!(evil_next_page.unwrap().0[0..32], evil_buf[0..32]); + } + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } + + // make sure we got them all + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with a full bloom filter + let mut page_id = Txid([0u8; 32]); + let all_txs_tags: Vec<_> = txs + .iter() + .map(|tx| TxTag::from(&[0u8; 32], &tx.txid())) + .collect(); + loop { + let mut tx_stream_data = StacksMemPoolStream::new( + 
mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], all_txs_tags.clone()), + 1, + block_height, + Some(page_id), + ); + + let mut buf = vec![]; + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.len() == 0 { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + // buf decodes to an empty list of txs, plus page ID + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (next_txs, next_page) = decode_tx_stream(&mut ptr).unwrap(); + + assert_eq!(next_txs.len(), 0); + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } +} + +#[test] +fn test_decode_tx_stream() { + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + for _i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + txs.push(tx); + } + + // valid empty tx stream + let empty_stream = [0x11u8; 32]; + let (next_txs, next_page) = decode_tx_stream(&mut empty_stream.as_ref()).unwrap(); + assert_eq!(next_txs.len(), 0); + assert_eq!(next_page, Some(Txid([0x11; 32]))); + + // valid tx stream with a page id at the end + let mut tx_stream: Vec = vec![]; + for tx in txs.iter() { + tx.consensus_serialize(&mut tx_stream).unwrap(); + } + tx_stream.extend_from_slice(&[0x22; 32]); + + let (next_txs, next_page) = decode_tx_stream(&mut &tx_stream[..]).unwrap(); + assert_eq!(next_txs, txs); + assert_eq!(next_page, Some(Txid([0x22; 32]))); + + // valid tx stream with _no_ page id at the end + let mut partial_stream: Vec = vec![]; + txs[0].consensus_serialize(&mut partial_stream).unwrap(); + let (next_txs, next_page) = decode_tx_stream(&mut &partial_stream[..]).unwrap(); + assert_eq!(next_txs.len(), 1); + assert_eq!(next_txs[0], txs[0]); + assert!(next_page.is_none()); + + // garbage tx stream + let garbage_stream = [0xff; 256]; + let err = decode_tx_stream(&mut garbage_stream.as_ref()); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream that is too short + let short_stream = [0x33u8; 33]; + let err = decode_tx_stream(&mut short_stream.as_ref()); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream has a tx, a page ID, and then another tx + let mut interrupted_stream = vec![]; + txs[0].consensus_serialize(&mut interrupted_stream).unwrap(); + interrupted_stream.extend_from_slice(&[0x00u8; 32]); + txs[1].consensus_serialize(&mut interrupted_stream).unwrap(); + + let err = decode_tx_stream(&mut &interrupted_stream[..]); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } +} From 2a8ee3d1a99d4f0af627d717e922aa58c7a5e6b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:16:41 -0400 Subject: [PATCH 078/107] refactor: put post-microblock RPC handler into its own file --- stackslib/src/net/api/postmicroblock.rs | 287 ++++++++++++++++++ stackslib/src/net/api/tests/postmicroblock.rs | 160 ++++++++++ 2 files 
changed, 447 insertions(+) create mode 100644 stackslib/src/net/api/postmicroblock.rs create mode 100644 stackslib/src/net/api/tests/postmicroblock.rs diff --git a/stackslib/src/net/api/postmicroblock.rs b/stackslib/src/net/api/postmicroblock.rs new file mode 100644 index 0000000000..7b677ec213 --- /dev/null +++ b/stackslib/src/net/api/postmicroblock.rs @@ -0,0 +1,287 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use std::io::{Read, Write}; + +use crate::net::{ + httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, + }, + p2p::PeerNetwork, + Error as NetError, StacksNodeState, +}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; +use crate::chainstate::stacks::StacksTransaction; +use crate::chainstate::stacks::TransactionPayload; +use crate::core::mempool::MemPoolDB; +use crate::net::relay::Relayer; +use crate::net::Attachment; +use crate::net::MicroblocksData; +use crate::net::StacksMessageType; +use crate::net::TipRequest; + +use stacks_common::codec::Error as CodecError; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_PAYLOAD_LEN; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha256Sum; +use stacks_common::util::retry::BoundReader; + +use crate::cost_estimates::FeeRateEstimate; + +use clarity::vm::costs::ExecutionCost; + +#[derive(Clone)] +pub struct RPCPostMicroblockRequestHandler { + pub microblock: Option, +} + +impl RPCPostMicroblockRequestHandler { + pub fn new() -> Self { + Self { microblock: None } + } + + /// Decode a bare block from the body + fn parse_postmicroblock_octets(mut body: &[u8]) -> Result { + let mblock = StacksMicroblock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + 
Error::DecodeError(format!("Failed to deserialize posted microblock: {}", msg)) + } else { + e.into() + } + })?; + Ok(mblock) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostMicroblockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostMicroblock" + .to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostMicroblock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostMicroblock takes application/octet-stream".to_string(), + )); + } + + let microblock = Self::parse_postmicroblock_octets(&body)?; + self.microblock = Some(microblock); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostMicroblockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.microblock = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let microblock = self + .microblock + .take() + .ok_or(NetError::SendError("`microblock` not set".into()))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let stacks_tip = match StacksChainState::load_staging_block_info(chainstate.db(), &tip) { + Ok(Some(tip_info)) => tip_info, + Ok(None) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpNotFound::new("No such stacks tip".into()))); + }, + Err(e) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)))); + } + }; + + let consensus_hash = &stacks_tip.consensus_hash; + let block_hash = &stacks_tip.anchored_block_hash; + + // make sure we can accept this + let ch_sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) { + Ok(Some(sn)) => sn, + Ok(None) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpNotFound::new("No such snapshot for Stacks tip consensus hash".to_string()))); + } + Err(e) => { + debug!("No block snapshot for consensus hash {}", &consensus_hash); + return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(ChainError::DBError(e).into_json()))); + } + }; + + let sort_handle = sortdb.index_handle(&ch_sn.sortition_id); + let parent_block_snapshot = Relayer::get_parent_stacks_block_snapshot(&sort_handle, consensus_hash, block_hash) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load parent block for Stacks tip: {:?}", &e))))?; + + let ast_rules = SortitionDB::get_ast_rules(&sort_handle, 
parent_block_snapshot.block_height)
+                .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load AST rules for Bitcoin block height {}: {:?}", parent_block_snapshot.block_height, &e))))?;
+
+            let epoch_id = self.get_stacks_epoch(&preamble, sortdb, parent_block_snapshot.block_height)?.epoch_id;
+
+            if !Relayer::static_check_problematic_relayed_microblock(
+                chainstate.mainnet,
+                epoch_id,
+                &microblock,
+                ast_rules,
+            ) {
+                info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", &microblock.block_hash(), consensus_hash, &block_hash);
+
+                // NOTE: txid is ignored in chainstate error .into_json()
+                return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(ChainError::ProblematicTransaction(Txid([0x00; 32])).into_json())));
+            }
+
+            match chainstate.preprocess_streamed_microblock(consensus_hash, block_hash, &microblock) {
+                Ok(accepted) => {
+                    debug!("{} uploaded microblock {}/{}-{}",
+                        if accepted { "Accepted" } else { "Did not accept" },
+                        consensus_hash,
+                        block_hash,
+                        &microblock.block_hash()
+                    );
+                    return Ok((accepted, StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash)));
+                },
+                Err(e) => {
+                    debug!("Failed to process microblock {}/{}-{}: {:?}", &consensus_hash, &block_hash, &microblock.block_hash(), &e);
+                    return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(e.into_json())));
+                }
+            }
+        });
+
+        let (accepted, parent_block_id, data_resp) = match data_resp {
+            Ok((accepted, parent_block_id)) => (accepted, parent_block_id, microblock.block_hash()),
+            Err(response) => {
+                return response.try_into_contents().map_err(NetError::from);
+            }
+        };
+
+        // don't forget to forward this to the p2p network!
+        if accepted {
+            node.set_relay_message(StacksMessageType::Microblocks(MicroblocksData {
+                index_anchor_block: parent_block_id,
+                microblocks: vec![microblock],
+            }));
+        }
+
+        let mut preamble = HttpResponsePreamble::ok_json(&preamble);
+        preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height()));
+        let body = HttpResponseContents::try_from_json(&data_resp)?;
+        Ok((preamble, body))
+    }
+}
+
+/// Decode the HTTP response
+impl HttpResponse for RPCPostMicroblockRequestHandler {
+    fn try_parse_response(
+        &self,
+        preamble: &HttpResponsePreamble,
+        body: &[u8],
+    ) -> Result<HttpResponsePayload, Error> {
+        let mblock_hash: BlockHeaderHash = parse_json(preamble, body)?;
+        Ok(HttpResponsePayload::try_from_json(mblock_hash)?)
+ } +} + +impl StacksHttpRequest { + /// Make a new post-microblock request + pub fn new_post_microblock( + host: PeerHost, + mblock: StacksMicroblock, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/microblocks".into(), + HttpRequestContents::new() + .payload_stacks(&mblock) + .for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stacks_microblock_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let result: BlockHeaderHash = serde_json::from_value(response_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(result) + } +} diff --git a/stackslib/src/net/api/tests/postmicroblock.rs b/stackslib/src/net/api/tests/postmicroblock.rs new file mode 100644 index 0000000000..8caf6ea9de --- /dev/null +++ b/stackslib/src/net/api/tests/postmicroblock.rs @@ -0,0 +1,160 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
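+
+// These tests exercise the POST /v2/microblocks handler defined above:
+//   * `test_try_parse_request` round-trips a well-formed request through the
+//     parser and rejects a microblock with no transactions.
+//   * `test_try_make_response` posts a microblock against a missing tip
+//     (expecting a 404) and against the latest anchored tip (expecting the
+//     microblock hash in the response).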
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; + +use crate::chainstate::stacks::test::make_codec_test_microblock; +use crate::chainstate::stacks::StacksMicroblock; +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let mblock = make_codec_test_microblock(3); + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmicroblock::RPCPostMicroblockRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.microblock, Some(mblock.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.microblock.is_none()); + + // try to decode a bad microblock + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut bad_mblock = mblock.clone(); + bad_mblock.txs.clear(); + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + bad_mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmicroblock::RPCPostMicroblockRequestHandler::new(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad microblock"); + } + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_rpc = TestRPC::setup_ex(function_name!(), false); + let mblock = test_rpc.next_microblock.clone(); + + let mut requests = vec![]; + + // fails due to bad tip + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + requests.push(request); + + // succeeds + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc.run(requests); + 
+ // fails due to bad tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // succeeds + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let mblock_hash = response.decode_stacks_microblock_response().unwrap(); + assert_eq!(mblock_hash, mblock.block_hash()); +} From c8682f711a30d28ac4920e3e6774696af96209cd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:17:04 -0400 Subject: [PATCH 079/107] refactor: put post-stackerdb-chunk RPC handler into its own file --- stackslib/src/net/api/poststackerdbchunk.rs | 349 ++++++++++++++++++ .../src/net/api/tests/poststackerdbchunk.rs | 290 +++++++++++++++ 2 files changed, 639 insertions(+) create mode 100644 stackslib/src/net/api/poststackerdbchunk.rs create mode 100644 stackslib/src/net/api/tests/poststackerdbchunk.rs diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs new file mode 100644 index 0000000000..e7bf6ce80b --- /dev/null +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -0,0 +1,349 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
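+
+// This module implements the POST /v2/stackerdb/{address}/{contract}/chunks
+// endpoint. The request body is a JSON-encoded StackerDBChunkData, roughly of
+// the form (field values illustrative only; see the handler below):
+//
+//   { "slot_id": 0, "slot_version": 1, "sig": "<hex signature>", "data": "<hex bytes>" }
+//
+// and the response is a StackerDBChunkAckData indicating whether the chunk was
+// accepted, an optional reason string on rejection, and the slot metadata.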
+ +use regex::{Captures, Regex}; +use std::fs; +use std::fs::OpenOptions; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; + +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::StacksNodeState; +use crate::net::{ + httpcore::{request, HttpPreambleExtensions, StacksHttp}, + Error as NetError, TipRequest, +}; + +use crate::chainstate::stacks::Error as ChainError; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::StacksBlock; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::MAX_MESSAGE_LEN; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::util_lib::db::DBConn; +use crate::util_lib::db::Error as DBError; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::CLARITY_NAME_REGEX; +use clarity::vm::representations::CONTRACT_NAME_REGEX_STRING; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::representations::STANDARD_PRINCIPAL_REGEX_STRING; +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StandardPrincipalData; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; + +use libstackerdb::SlotMetadata; +use libstackerdb::StackerDBChunkAckData; +use libstackerdb::StackerDBChunkData; +use libstackerdb::STACKERDB_MAX_CHUNK_SIZE; + +use serde; +use serde::de::Error as de_Error; +use serde_json; + +#[derive(Clone)] +pub struct RPCPostStackerDBChunkRequestHandler { + pub contract_identifier: Option, + pub chunk: Option, +} +impl RPCPostStackerDBChunkRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + chunk: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostStackerDBChunkRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})/chunks$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-empty body".to_string(), + )); + } + + if preamble.get_content_length() > MAX_MESSAGE_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostStackerDBChunk body is too big".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let chunk: StackerDBChunkData = serde_json::from_slice(body).map_err(Error::JsonError)?; + + self.contract_identifier = Some(contract_identifier); + self.chunk = Some(chunk); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum StackerDBErrorCodes { + DataAlreadyExists, + NoSuchSlot, +} + +impl StackerDBErrorCodes { + pub fn code(&self) -> u32 { + match self { + Self::DataAlreadyExists => 0, + Self::NoSuchSlot => 1, + } + } + + pub fn reason(&self) -> &'static str { + match self { + Self::DataAlreadyExists => "Data for this slot and version already exist", + Self::NoSuchSlot => "No such StackerDB slot", + } + } + + pub fn into_json(self) -> serde_json::Value { + json!({ + "code": self.code(), + "message": format!("{:?}", &self), + "reason": self.reason() + }) + } +} + +impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.chunk = None; + } + + /// Make the response. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let stackerdb_chunk = self + .chunk + .take() + .ok_or(NetError::SendError("`chunk` not set".into()))?; + + let ack_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + let tx = if let Ok(tx) = network.stackerdbs_tx_begin(&contract_identifier) { + tx + } else { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB not found".to_string()), + )); + }; + if let Err(_e) = tx.get_stackerdb_id(&contract_identifier) { + // shouldn't be necessary (this is checked against the peer network's configured DBs), + // but you never know. 
+ return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB not found".to_string()), + )); + } + if let Err(_e) = tx.try_replace_chunk( + &contract_identifier, + &stackerdb_chunk.get_slot_metadata(), + &stackerdb_chunk.data, + ) { + let slot_metadata_opt = + match tx.get_slot_metadata(&contract_identifier, stackerdb_chunk.slot_id) { + Ok(slot_opt) => slot_opt, + Err(e) => { + // some other error + error!("Failed to load replaced StackerDB chunk metadata"; + "smart_contract_id" => contract_identifier.to_string(), + "error" => format!("{:?}", &e) + ); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to load StackerDB chunk for {}: {:?}", + &contract_identifier, &e + )), + )); + } + }; + + let (reason, slot_metadata_opt) = if let Some(slot_metadata) = slot_metadata_opt + { + ( + serde_json::to_string( + &StackerDBErrorCodes::DataAlreadyExists.into_json(), + ) + .unwrap_or("(unable to encode JSON)".to_string()), + Some(slot_metadata), + ) + } else { + ( + serde_json::to_string(&StackerDBErrorCodes::NoSuchSlot.into_json()) + .unwrap_or("(unable to encode JSON)".to_string()), + None, + ) + }; + + let ack = StackerDBChunkAckData { + accepted: false, + reason: Some(reason), + metadata: slot_metadata_opt, + }; + return Ok(ack); + } + + let slot_metadata = if let Ok(Some(md)) = + tx.get_slot_metadata(&contract_identifier, stackerdb_chunk.slot_id) + { + md + } else { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new( + "Failed to load slot metadata after storing chunk".to_string(), + ), + )); + }; + + if let Err(e) = tx.commit() { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to commit StackerDB tx: {:?}", &e)), + )); + } + + // success! + let ack = StackerDBChunkAckData { + accepted: true, + reason: None, + metadata: Some(slot_metadata), + }; + + return Ok(ack); + }); + + let ack_resp = match ack_resp { + Ok(ack) => ack, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&ack_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostStackerDBChunkRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let ack: StackerDBChunkAckData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(ack)?) 
+ } +} + +impl StacksHttpRequest { + pub fn new_post_stackerdb_chunk( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + slot_id: u32, + slot_version: u32, + sig: MessageSignature, + data: Vec, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!( + "/v2/stackerdb/{}/{}/chunks", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name + ), + HttpRequestContents::new().payload_json( + serde_json::to_value(StackerDBChunkData { + slot_id, + slot_version, + sig, + data, + }) + .expect("FATAL: failed to construct JSON from infallible structure"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a chunk + /// If it fails, return Self::Error(..) + pub fn decode_stackerdb_chunk_ack(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let data: StackerDBChunkAckData = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(data) + } +} diff --git a/stackslib/src/net/api/tests/poststackerdbchunk.rs b/stackslib/src/net/api/tests/poststackerdbchunk.rs new file mode 100644 index 0000000000..b32d0e8440 --- /dev/null +++ b/stackslib/src/net/api/tests/poststackerdbchunk.rs @@ -0,0 +1,290 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
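+
+// These tests exercise the POST /v2/stackerdb/.../chunks handler defined above:
+//   * `test_try_parse_request` round-trips a well-formed request through the parser.
+//   * `test_try_make_response` writes a new chunk, overwrites it with a newer
+//     version, and then checks the failure cases (stale version, wrong signing
+//     key, out-of-range slot, and a StackerDB contract that does not exist).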
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::Address; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; + +use libstackerdb::SlotMetadata; +use libstackerdb::StackerDBChunkData; + +use crate::net::httpcore::RPCRequestHandler; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(), + 0, + 1, + MessageSignature::empty(), + vec![0, 1, 2, 3, 4], + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!( + handler.chunk, + Some(StackerDBChunkData { + slot_id: 0, + slot_version: 1, + data: vec![0, 1, 2, 3, 4], + sig: MessageSignature::empty() + }) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.chunk.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let mut requests = vec![]; + + // try to write a new chunk + let data = "try make response".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 1, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to overwrite a new chunk + let data = "try make response 2".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 2, data_hash); + 
slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to overwrite a new chunk, with the same version (should fail) + let data = "try make response 3".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 2, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write with the wrong key (should fail) + let data = "try make response 4".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 3, data_hash); + slot_metadata.sign(&rpc_test.privk2).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write to a bad slot (should fail) + let data = "try make response 5".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(4093, 3, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write to a bad contract (should fail) + let data = "try make response 6".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 3, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist", + ) + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 1); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 
2); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 2); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 2); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert!(resp.metadata.is_none()); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} From e0a0857a6e2a741097a959791437d6c136c43f6f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:28:27 -0400 Subject: [PATCH 080/107] refactor: put post-transaction RPC handler into its own file --- stackslib/src/net/api/posttransaction.rs | 379 ++++++++++++++++++ .../src/net/api/tests/posttransaction.rs | 260 ++++++++++++ 2 files changed, 639 insertions(+) create mode 100644 stackslib/src/net/api/posttransaction.rs create mode 100644 stackslib/src/net/api/tests/posttransaction.rs diff --git a/stackslib/src/net/api/posttransaction.rs b/stackslib/src/net/api/posttransaction.rs new file mode 100644 index 0000000000..189cd143da --- /dev/null +++ b/stackslib/src/net/api/posttransaction.rs @@ -0,0 +1,379 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
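+
+// This module implements the POST /v2/transactions endpoint. The handler below
+// accepts either an application/octet-stream body (a consensus-serialized
+// StacksTransaction) or an application/json body of the form
+//
+//   { "tx": "<hex-encoded transaction>", "attachment": "<optional hex-encoded bytes>" }
+//
+// and, on success, responds with the transaction ID as JSON.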
+
+use regex::{Captures, Regex};
+use std::io::{Read, Write};
+
+use crate::net::{
+    httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse},
+    p2p::PeerNetwork,
+    Error as NetError, StacksNodeState,
+};
+
+use crate::net::http::{
+    parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest,
+    HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents,
+    HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion,
+};
+
+use crate::burnchains::affirmation::AffirmationMap;
+use crate::burnchains::Txid;
+use crate::chainstate::burn::db::sortdb::SortitionDB;
+use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::StacksTransaction;
+use crate::chainstate::stacks::TransactionPayload;
+use crate::core::mempool::MemPoolDB;
+use crate::net::relay::Relayer;
+use crate::net::Attachment;
+use crate::net::StacksMessageType;
+
+use stacks_common::codec::Error as CodecError;
+use stacks_common::codec::StacksMessageCodec;
+use stacks_common::codec::MAX_PAYLOAD_LEN;
+use stacks_common::types::chainstate::BlockHeaderHash;
+use stacks_common::types::chainstate::ConsensusHash;
+use stacks_common::types::chainstate::StacksBlockId;
+use stacks_common::types::chainstate::StacksPublicKey;
+use stacks_common::types::net::PeerHost;
+use stacks_common::types::StacksPublicKeyBuffer;
+use stacks_common::util::hash::hex_bytes;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::hash::Hash160;
+use stacks_common::util::hash::Sha256Sum;
+use stacks_common::util::retry::BoundReader;
+
+use crate::cost_estimates::FeeRateEstimate;
+
+use clarity::vm::costs::ExecutionCost;
+
+#[derive(Serialize, Deserialize)]
+pub struct PostTransactionRequestBody {
+    pub tx: String,
+    pub attachment: Option<String>,
+}
+
+#[derive(Clone)]
+pub struct RPCPostTransactionRequestHandler {
+    pub tx: Option<StacksTransaction>,
+    pub attachment: Option<Attachment>,
+}
+impl RPCPostTransactionRequestHandler {
+    pub fn new() -> Self {
+        Self {
+            tx: None,
+            attachment: None,
+        }
+    }
+
+    /// Decode a bare transaction from the body
+    fn parse_posttransaction_octets(mut body: &[u8]) -> Result<StacksTransaction, Error> {
+        let tx = StacksTransaction::consensus_deserialize(&mut body).map_err(|e| {
+            if let CodecError::DeserializeError(msg) = e {
+                Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg))
+            } else {
+                e.into()
+            }
+        })?;
+        Ok(tx)
+    }
+
+    /// Decode a JSON-encoded transaction and Atlas attachment pair
+    fn parse_posttransaction_json(
+        body: &[u8],
+    ) -> Result<(StacksTransaction, Option<Attachment>), Error> {
+        let body: PostTransactionRequestBody = serde_json::from_slice(body)
+            .map_err(|_e| Error::DecodeError("Failed to parse body".into()))?;
+
+        let tx = {
+            let tx_bytes = hex_bytes(&body.tx)
+                .map_err(|_e| Error::DecodeError("Failed to parse tx".into()))?;
+            StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|e| {
+                if let CodecError::DeserializeError(msg) = e {
+                    Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg))
+                } else {
+                    e.into()
+                }
+            })
+        }?;
+
+        let attachment = match body.attachment {
+            None => None,
+            Some(ref attachment_content) => {
+                let content = hex_bytes(attachment_content)
+                    .map_err(|_e| Error::DecodeError("Failed to parse attachment".into()))?;
+                Some(Attachment::new(content))
+            }
+        };
+
+        Ok((tx, attachment))
+    }
+}
+
+/// Decode the HTTP request
+impl HttpRequest for RPCPostTransactionRequestHandler {
+    fn 
verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/transactions$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostTransaction" + .to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostTransaction body is too big".to_string(), + )); + } + + match preamble.content_type { + None => { + return Err(Error::DecodeError( + "Missing Content-Type for transaction".to_string(), + )); + } + Some(HttpContentType::Bytes) => { + // expect a bare transaction + let tx = Self::parse_posttransaction_octets(body)?; + self.tx = Some(tx); + self.attachment = None; + } + Some(HttpContentType::JSON) => { + // expect a transaction and an attachment + let (tx, attachment_opt) = Self::parse_posttransaction_json(body)?; + self.tx = Some(tx); + self.attachment = attachment_opt; + } + _ => { + return Err(Error::DecodeError( + "Wrong Content-Type for transaction; expected application/json".to_string(), + )); + } + } + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostTransactionRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.tx = None; + self.attachment = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tx = self + .tx + .take() + .ok_or(NetError::SendError("`tx` not set".into()))?; + let attachment_opt = self.attachment.take(); + + let txid = tx.txid(); + + let data_resp = node.with_node_state(|network, sortdb, chainstate, mempool, rpc_args| { + if mempool.has_tx(&txid) { + // will not accept + debug!("Mempool already has POSTed transaction {}", &txid); + return Ok(false); + } + + let event_observer = rpc_args.event_observer.as_deref(); + let burn_tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, burn_tip.block_height)?; + + // check for defects which can be determined statically + if Relayer::do_static_problematic_checks() + && !Relayer::static_check_problematic_relayed_tx( + chainstate.mainnet, + stacks_epoch.epoch_id, + &tx, + network.ast_rules, + ) + .is_ok() + { + // we statically check the tx for known problems, and it had some. Reject. 
+ debug!( + "Transaction {} is problematic in rules {:?}; will not store or relay", + &tx.txid(), + network.ast_rules + ); + return Ok(false); + } + + let stacks_tip = self.get_stacks_chain_tip(&preamble, sortdb, chainstate)?; + + // accept to mempool + if let Err(e) = mempool.submit( + chainstate, + sortdb, + &stacks_tip.consensus_hash, + &stacks_tip.anchored_block_hash, + &tx, + event_observer, + &stacks_epoch.block_limit, + &stacks_epoch.epoch_id, + ) { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new_json(e.into_json(&txid)), + )); + }; + + // store attachment as well, if it's part of a contract-call + if let Some(ref attachment) = attachment_opt { + if let TransactionPayload::ContractCall(ref contract_call) = tx.payload { + if network + .get_atlasdb() + .should_keep_attachment(&contract_call.to_clarity_contract_id(), attachment) + { + network + .get_atlasdb_mut() + .insert_uninstantiated_attachment(attachment) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to store contract-call attachment: {:?}", + &e + )), + ) + })?; + } + } + } + + Ok(true) + }); + + let (accepted, txid) = match data_resp { + Ok(accepted) => (accepted, txid), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + // don't forget to forward this to the p2p network! + if accepted { + node.set_relay_message(StacksMessageType::Transaction(tx)); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&txid)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostTransactionRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let txid: Txid = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(txid)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new post-transaction request + pub fn new_post_transaction(host: PeerHost, tx: StacksTransaction) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/transactions".to_string(), + HttpRequestContents::new().payload_stacks(&tx), + ) + .expect("FATAL: failed to construct request from infallible data") + } + + /// Make a new post-transaction request with an attachment + pub fn new_post_transaction_with_attachment( + host: PeerHost, + tx: StacksTransaction, + attachment: Option>, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/transactions".to_string(), + HttpRequestContents::new().payload_json( + serde_json::to_value(PostTransactionRequestBody { + tx: to_hex(&tx.serialize_to_vec()), + attachment: attachment.map(|bytes| to_hex(&bytes)), + }) + .expect("FATAL: failed to construct request from infallible data"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + #[cfg(test)] + pub fn new_posttransaction(txid: Txid, with_content_length: bool) -> StacksHttpResponse { + let value = serde_json::to_value(txid).expect("FATAL: failed to serialize infallible data"); + let length = serde_json::to_string(&value) + .expect("FATAL: failed to serialize infallible data") + .len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::JSON, + true, + ); + let body = HttpResponsePayload::JSON(value); + StacksHttpResponse::new(preamble, body) + } + + pub fn decode_txid(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let txid: Txid = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(txid) + } +} diff --git a/stackslib/src/net/api/tests/posttransaction.rs b/stackslib/src/net/api/tests/posttransaction.rs new file mode 100644 index 0000000000..6cf6d4d1b5 --- /dev/null +++ b/stackslib/src/net/api/tests/posttransaction.rs @@ -0,0 +1,260 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
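+
+// These tests exercise the POST /v2/transactions handler defined above:
+//   * `test_try_parse_request` parses requests with no attachment, a null
+//     attachment, and a non-empty attachment.
+//   * `test_try_make_response` submits valid transactions (with and without an
+//     attachment), re-submits a duplicate, and expects a 400 for a transaction
+//     with the wrong (mainnet) version.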
+ +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::SocketAddr; + +use crate::net::httpcore::{HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest}; + +use stacks_common::types::net::PeerHost; + +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::Address; + +use clarity::vm::types::PrincipalData; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::ClarityName; +use clarity::vm::ContractName; +use clarity::vm::Value; + +use crate::net::api::*; +use crate::net::ProtocolFamily; +use crate::net::TipRequest; + +use crate::core::BLOCK_LIMIT_MAINNET_21; + +use crate::chainstate::stacks::{ + StacksTransaction, StacksTransactionSigner, TransactionAuth, TransactionPayload, + TransactionVersion, +}; + +use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; + +use crate::net::httpcore::RPCRequestHandler; +use crate::net::Attachment; + +use crate::net::connection::ConnectionOptions; + +use super::TestRPC; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R + let privk1 = StacksPrivateKey::from_hex( + "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", + ) + .unwrap(); + + let addr1 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk1)], + ) + .unwrap(); + + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_contract_call(addr1.clone(), "hello-world", "add-unit", vec![]) + .unwrap(), + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(2); + tx_cc.set_tx_fee(123); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_cc_signed = tx_signer.get_tx().unwrap(); + + // Test without an attachment + let request = StacksHttpRequest::new_post_transaction(addr.into(), tx_cc_signed.clone()); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + assert!(handler.attachment.is_none()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); + + // Test with a null attachment + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + tx_cc_signed.clone(), + None, + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + 
.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + assert_eq!(handler.attachment, None); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // Test with an attachment + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + tx_cc_signed.clone(), + Some(vec![0, 1, 2, 3, 4]), + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + assert_eq!( + handler.attachment, + Some(Attachment::new(vec![0, 1, 2, 3, 4])) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let sendable_txs = rpc_test.sendable_txs.clone(); + + let mut requests = vec![]; + + // send a tx (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[0].clone(), + None, + ); + requests.push(request); + + // send a tx with an attachment (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[1].clone(), + Some(vec![0, 1, 2, 3, 4]), + ); + requests.push(request); + + // send the same tx (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[0].clone(), + None, + ); + requests.push(request); + + // send a bad tx (should fail) + let mut bad_tx = sendable_txs[2].clone(); + bad_tx.version = TransactionVersion::Mainnet; + let request = + StacksHttpRequest::new_post_transaction_with_attachment(addr.into(), bad_tx.clone(), None); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[0].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[1].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[0].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + 
assert_eq!(preamble.status_code, 400); +} From 63ab815f775322964982a35d3358a216b5f93091 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 2 Oct 2023 16:28:47 -0400 Subject: [PATCH 081/107] refactor: put _all_ RPC handlers into their own files, and refactor test_rpc() so it can take a series of requests and spit out a series of responses --- stackslib/src/net/api/mod.rs | 138 ++++ stackslib/src/net/api/tests/mod.rs | 989 +++++++++++++++++++++++++++++ 2 files changed, 1127 insertions(+) create mode 100644 stackslib/src/net/api/mod.rs create mode 100644 stackslib/src/net/api/tests/mod.rs diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs new file mode 100644 index 0000000000..cf6a841c74 --- /dev/null +++ b/stackslib/src/net/api/mod.rs @@ -0,0 +1,138 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::convert::From; + +use crate::net::Error as NetError; + +use crate::core::mempool; + +use stacks_common::types::chainstate::StacksBlockId; + +use crate::net::http::{ + Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; + +use crate::net::httpcore::{StacksHttp, StacksHttpRequest, StacksHttpResponse}; + +use crate::chainstate::stacks::StacksMicroblock; +use crate::chainstate::stacks::StacksTransaction; + +use crate::burnchains::Txid; + +use stacks_common::codec::read_next; + +use crate::net::atlas::GetAttachmentResponse; + +use crate::cost_estimates::FeeRateEstimate; + +use clarity::vm::costs::ExecutionCost; + +use crate::stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::BlockHeaderHash; + +pub mod callreadonly; +pub mod getaccount; +pub mod getattachment; +pub mod getattachmentsinv; +pub mod getblock; +pub mod getconstantval; +pub mod getcontractabi; +pub mod getcontractsrc; +pub mod getdatavar; +pub mod getheaders; +pub mod getinfo; +pub mod getistraitimplemented; +pub mod getmapentry; +pub mod getmicroblocks_confirmed; +pub mod getmicroblocks_indexed; +pub mod getmicroblocks_unconfirmed; +pub mod getneighbors; +pub mod getpoxinfo; +pub mod getstackerdbchunk; +pub mod getstackerdbmetadata; +pub mod getstxtransfercost; +pub mod gettransaction_unconfirmed; +pub mod postblock; +pub mod postfeerate; +pub mod postmempoolquery; +pub mod postmicroblock; +pub mod poststackerdbchunk; +pub mod posttransaction; + +#[cfg(test)] +mod tests; + +impl StacksHttp { + /// Register all RPC methods. + /// Put your new RPC method handlers here. 
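+    ///
+    /// For example, wiring in a new endpoint is one more `register_rpc_endpoint`
+    /// call alongside the ones below (the handler name here is hypothetical and
+    /// only illustrates the pattern):
+    ///
+    /// ```ignore
+    /// self.register_rpc_endpoint(getexample::RPCGetExampleRequestHandler::new());
+    /// ```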
+ pub fn register_rpc_methods(&mut self) { + self.register_rpc_endpoint(callreadonly::RPCCallReadOnlyRequestHandler::new( + self.maximum_call_argument_size, + self.read_only_call_limit.clone(), + )); + self.register_rpc_endpoint(getaccount::RPCGetAccountRequestHandler::new()); + self.register_rpc_endpoint(getattachment::RPCGetAttachmentRequestHandler::new()); + self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); + self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); + self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); + self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); + self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); + self.register_rpc_endpoint(getdatavar::RPCGetDataVarRequestHandler::new()); + self.register_rpc_endpoint(getheaders::RPCHeadersRequestHandler::new()); + self.register_rpc_endpoint(getinfo::RPCPeerInfoRequestHandler::new()); + self.register_rpc_endpoint( + getistraitimplemented::RPCGetIsTraitImplementedRequestHandler::new(), + ); + self.register_rpc_endpoint(getmapentry::RPCGetMapEntryRequestHandler::new()); + self.register_rpc_endpoint( + getmicroblocks_confirmed::RPCMicroblocksConfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint( + getmicroblocks_indexed::RPCMicroblocksIndexedRequestHandler::new(), + ); + self.register_rpc_endpoint( + getmicroblocks_unconfirmed::RPCMicroblocksUnconfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint(getneighbors::RPCNeighborsRequestHandler::new()); + self.register_rpc_endpoint(getstxtransfercost::RPCGetStxTransferCostRequestHandler::new()); + self.register_rpc_endpoint(getstackerdbchunk::RPCGetStackerDBChunkRequestHandler::new()); + self.register_rpc_endpoint(getpoxinfo::RPCPoxInfoRequestHandler::new()); + self.register_rpc_endpoint( + getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), + ); + self.register_rpc_endpoint( + gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint(postblock::RPCPostBlockRequestHandler::new()); + self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); + self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); + self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); + self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); + self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); + } +} + +/// Helper conversion for NetError to Error +impl From for Error { + fn from(e: NetError) -> Error { + match e { + NetError::Http(e) => e, + x => Error::AppError(format!("{:?}", &x)), + } + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs new file mode 100644 index 0000000000..80d081138d --- /dev/null +++ b/stackslib/src/net/api/tests/mod.rs @@ -0,0 +1,989 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::SocketAddr; + +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::Txid; + +use crate::chainstate::burn::db::sortdb::SortitionDB; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::BlockBuilderSettings; +use crate::chainstate::stacks::miner::StacksMicroblockBuilder; +use crate::chainstate::stacks::CoinbasePayload; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::StacksBlockBuilder; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::StacksMicroblock; +use crate::chainstate::stacks::StacksTransaction; +use crate::chainstate::stacks::StacksTransactionSigner; +use crate::chainstate::stacks::TokenTransferMemo; +use crate::chainstate::stacks::TransactionAnchorMode; +use crate::chainstate::stacks::TransactionAuth; +use crate::chainstate::stacks::TransactionPayload; +use crate::chainstate::stacks::TransactionPostConditionMode; +use crate::chainstate::stacks::TransactionVersion; + +use crate::core::MemPoolDB; + +use crate::net::relay::Relayer; +use crate::net::rpc::ConversationHttp; +use crate::net::Attachment; +use crate::net::AttachmentInstance; +use crate::net::RPCHandlerArgs; +use crate::net::StackerDBConfig; +use crate::net::StacksHttpRequest; +use crate::net::StacksHttpResponse; +use crate::net::StacksNodeState; +use crate::net::UrlString; + +use crate::net::test::TestPeer; +use crate::net::test::TestPeerConfig; + +use stacks_common::address::AddressHashMode; +use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; + +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; + +use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::pipe::Pipe; + +use stacks_common::codec::StacksMessageCodec; + +use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::StacksAddressExtensions; + +use libstackerdb::SlotMetadata; + +mod callreadonly; +mod getaccount; +mod getattachment; +mod getattachmentsinv; +mod getblock; +mod getconstantval; +mod getcontractabi; +mod getcontractsrc; +mod getdatavar; +mod getheaders; +mod getinfo; +mod getistraitimplemented; +mod getmapentry; +mod getmicroblocks_confirmed; +mod getmicroblocks_indexed; +mod getmicroblocks_unconfirmed; +mod getneighbors; +mod getpoxinfo; +mod getstackerdbchunk; +mod getstackerdbmetadata; +mod getstxtransfercost; +mod gettransaction_unconfirmed; +mod postblock; +mod postfeerate; +mod postmempoolquery; +mod postmicroblock; +mod poststackerdbchunk; +mod posttransaction; + +const TEST_CONTRACT: &'static str = " + (define-trait test-trait + ( + (do-test () (response uint uint)) + ) + ) + (define-trait test-trait-2 + ( + (do-test-2 () (response uint uint)) + ) + ) + + (define-constant cst 123) + (define-data-var bar int 0) + (define-map 
unit-map { account: principal } { units: int }) + (define-map test-map uint uint) + (map-set test-map u1 u2) + (define-public (get-bar) (ok (var-get bar))) + (define-public (set-bar (x int) (y int)) + (begin (var-set bar (/ x y)) (ok (var-get bar)))) + (define-public (add-unit) + (begin + (map-set unit-map { account: tx-sender } { units: 1 } ) + (var-set bar 1) + (ok 1))) + (begin + (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) + + (define-read-only (ro-confirmed) u1) + + (define-public (do-test) (ok u0)) + + ;; stacker DB + (define-read-only (stackerdb-get-signer-slots) + (ok (list + { + signer: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R, + num-slots: u3 + } + { + signer: 'STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW, + num-slots: u3 + }))) + + (define-read-only (stackerdb-get-config) + (ok { + chunk-size: u4096, + write-freq: u0, + max-writes: u4096, + max-neighbors: u32, + hint-replicas: (list ) + })) +"; + +const TEST_CONTRACT_UNCONFIRMED: &'static str = " +(define-read-only (ro-test) (ok 1)) +(define-constant cst-unconfirmed 456) +(define-data-var bar-unconfirmed uint u1) +(define-map test-map-unconfirmed int int) +(map-set test-map-unconfirmed 3 4) +(define-public (do-test) (ok u1)) +"; + +/// This helper function drives I/O between a sender and receiver Http conversation. +fn convo_send_recv( + sender: &mut ConversationHttp, + sender_mempool: &MemPoolDB, + sender_chainstate: &mut StacksChainState, + receiver: &mut ConversationHttp, + receiver_mempool: &MemPoolDB, + receiver_chainstate: &mut StacksChainState, +) -> () { + let (mut pipe_read, mut pipe_write) = Pipe::new(); + pipe_read.set_nonblocking(true); + + loop { + sender.try_flush(sender_mempool, sender_chainstate).unwrap(); + receiver + .try_flush(sender_mempool, receiver_chainstate) + .unwrap(); + + pipe_write.try_flush().unwrap(); + + let all_relays_flushed = + receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0; + + let nw = sender + .send(&mut pipe_write, sender_mempool, sender_chainstate) + .unwrap(); + let nr = receiver.recv(&mut pipe_read).unwrap(); + + debug!( + "test_rpc: all_relays_flushed = {} ({},{}), nr = {}, nw = {}", + all_relays_flushed, + receiver.num_pending_outbound(), + sender.num_pending_outbound(), + nr, + nw + ); + if all_relays_flushed && nr == 0 && nw == 0 { + debug!("test_rpc: Breaking send_recv"); + break; + } + } +} + +/// TestRPC state +pub struct TestRPC<'a> { + pub privk1: StacksPrivateKey, + pub privk2: StacksPrivateKey, + pub peer_1: TestPeer<'a>, + pub peer_2: TestPeer<'a>, + pub peer_1_indexer: BitcoinIndexer, + pub peer_2_indexer: BitcoinIndexer, + pub convo_1: ConversationHttp, + pub convo_2: ConversationHttp, + /// hash of the chain tip + pub canonical_tip: StacksBlockId, + /// consensus hash of the chain tip + pub consensus_hash: ConsensusHash, + /// hash of last microblock + pub microblock_tip_hash: BlockHeaderHash, + /// list of mempool transactions + pub mempool_txids: Vec, + /// list of microblock transactions + pub microblock_txids: Vec, + /// next block to post, and its consensus hash + pub next_block: (ConsensusHash, StacksBlock), + /// next microblock to post (may already be posted) + pub next_microblock: StacksMicroblock, + /// transactions that can be posted to the mempool + pub sendable_txs: Vec, +} + +impl<'a> TestRPC<'a> { + pub fn setup(test_name: &str) -> TestRPC<'a> { + Self::setup_ex(test_name, true) + } + + pub fn setup_ex(test_name: &str, process_microblock: bool) -> TestRPC<'a> { + // 
ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R + let privk1 = StacksPrivateKey::from_hex( + "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", + ) + .unwrap(); + + // STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW + let privk2 = StacksPrivateKey::from_hex( + "94c319327cc5cd04da7147d32d836eb2e4c44f4db39aa5ede7314a761183d0c701", + ) + .unwrap(); + let microblock_privkey = StacksPrivateKey::new(); + let microblock_pubkeyhash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); + + let addr1 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk1)], + ) + .unwrap(); + let addr2 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk2)], + ) + .unwrap(); + + let mut peer_1_config = TestPeerConfig::new(&format!("{}-peer1", test_name), 0, 0); + let mut peer_2_config = TestPeerConfig::new(&format!("{}-peer2", test_name), 0, 0); + + peer_1_config.connection_opts.read_only_call_limit = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 1200, + read_count: 3, + runtime: 1200000, + }; + peer_1_config.connection_opts.maximum_call_argument_size = 4096; + + peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 1200, + read_count: 3, + runtime: 1200000, + }; + peer_2_config.connection_opts.maximum_call_argument_size = 4096; + + // stacker DBs get initialized thru reconfiguration when the above block gets processed + peer_1_config.add_stacker_db( + QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), + StackerDBConfig::noop(), + ); + peer_2_config.add_stacker_db( + QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), + StackerDBConfig::noop(), + ); + + let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); + let peer_2_indexer = BitcoinIndexer::new_unit_test(&peer_2_config.burnchain.working_dir); + + peer_1_config.initial_balances = vec![ + (addr1.to_account_principal(), 1000000000), + (addr2.to_account_principal(), 1000000000), + ]; + + peer_2_config.initial_balances = vec![ + (addr1.to_account_principal(), 1000000000), + (addr2.to_account_principal(), 1000000000), + ]; + + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + // mine one block with a contract in it + // first the coinbase + // make a coinbase for this miner + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + + // next the contract + let contract = TEST_CONTRACT.clone(); + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_smart_contract( + &format!("hello-world"), + &contract.to_string(), + None, + ) + .unwrap(), 
+ ); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(1); + tx_contract.set_tx_fee(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_contract_signed = tx_signer.get_tx().unwrap(); + + // update account and state in a microblock that will be unconfirmed + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_contract_call(addr1.clone(), "hello-world", "add-unit", vec![]) + .unwrap(), + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(2); + tx_cc.set_tx_fee(123); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_cc_signed = tx_signer.get_tx().unwrap(); + let tx_cc_len = { + let mut bytes = vec![]; + tx_cc_signed.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + // make an unconfirmed contract + let unconfirmed_contract = TEST_CONTRACT_UNCONFIRMED.clone(); + let mut tx_unconfirmed_contract = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_smart_contract( + &format!("hello-world-unconfirmed"), + &unconfirmed_contract.to_string(), + None, + ) + .unwrap(), + ); + + tx_unconfirmed_contract.chain_id = 0x80000000; + tx_unconfirmed_contract.auth.set_origin_nonce(3); + tx_unconfirmed_contract.set_tx_fee(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_unconfirmed_contract); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_unconfirmed_contract_signed = tx_signer.get_tx().unwrap(); + let tx_unconfirmed_contract_len = { + let mut bytes = vec![]; + tx_unconfirmed_contract_signed + .consensus_serialize(&mut bytes) + .unwrap(); + bytes.len() as u64 + }; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_size = 0; + + // make a block + // Put the coinbase and smart-contract in the anchored block. 
+ // Put the contract-call in the microblock + let (burn_ops, stacks_block, microblocks) = peer_1.make_tenure( + |ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, _| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &parent_tip, + vrf_proof, + tip.total_burn, + microblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, anchored_block_size, anchored_block_cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![tx_coinbase_signed.clone(), tx_contract_signed.clone()], + ) + .unwrap(); + + anchor_size = anchored_block_size; + anchor_cost = anchored_block_cost; + + (anchored_block, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, &vec![]); + peer_2.process_stacks_epoch_at_tip(&stacks_block, &vec![]); + + // build 1-block microblock stream with the contract-call and the unconfirmed contract + let microblock = { + let sortdb = peer_1.sortdb.take().unwrap(); + Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); + let mblock = { + let sort_iconn = sortdb.index_conn(); + let mut microblock_builder = StacksMicroblockBuilder::new( + stacks_block.block_hash(), + consensus_hash.clone(), + peer_1.chainstate(), + &sort_iconn, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + let microblock = microblock_builder + .mine_next_microblock_from_txs( + vec![ + (tx_cc_signed, tx_cc_len), + (tx_unconfirmed_contract_signed, tx_unconfirmed_contract_len), + ], + µblock_privkey, + ) + .unwrap(); + microblock + }; + peer_1.sortdb = Some(sortdb); + mblock + }; + + let microblock_txids = microblock.txs.iter().map(|tx| tx.txid()).collect(); + let canonical_tip = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + + if process_microblock { + // store microblock stream + peer_1 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + µblock, + ) + .unwrap(); + peer_2 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + µblock, + ) + .unwrap(); + + // process microblock stream to generate unconfirmed state + let sortdb1 = peer_1.sortdb.take().unwrap(); + let sortdb2 = peer_2.sortdb.take().unwrap(); + peer_1 + .chainstate() + .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_2 + .chainstate() + .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_1.sortdb = Some(sortdb1); + peer_2.sortdb = Some(sortdb2); + } + + let mut mempool_txids = vec![]; + + // stuff some transactions into peer_2's mempool + // (relates to mempool query tests) + // Also, create some transactions that could be sent + let mut mempool = peer_2.mempool.take().unwrap(); + let mut mempool_tx = mempool.tx_begin().unwrap(); + let 
mut sendable_txs = vec![]; + for i in 0..20 { + let pk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&privk2).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(i); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk2).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + if i < 10 { + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &consensus_hash, + &stacks_block.block_hash(), + txid.clone(), + tx_bytes, + tx_fee, + stacks_block.header.total_work.work, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + mempool_txids.push(txid); + } else { + sendable_txs.push(tx); + } + } + mempool_tx.commit().unwrap(); + peer_2.mempool.replace(mempool); + + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let _ = peer_1 + .network + .refresh_burnchain_view( + &peer_1_indexer, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + false, + ) + .unwrap(); + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + + let peer_2_sortdb = peer_2.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let _ = peer_2 + .network + .refresh_burnchain_view( + &peer_2_indexer, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + false, + ) + .unwrap(); + peer_2.sortdb = Some(peer_2_sortdb); + peer_2.stacks_node = Some(peer_2_stacks_node); + + // insert some fake Atlas attachment data + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + + let attachment_instance = AttachmentInstance { + content_hash: attachment.hash(), + attachment_index: 123, + stacks_block_height: 1, + index_block_hash: canonical_tip.clone(), + metadata: "000102030405".to_string(), + contract_id: QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") + .unwrap(), + tx_id: Txid([0x22; 32]), + canonical_stacks_tip_height: Some(1), + }; + + peer_1 + .network + .get_atlasdb_mut() + .insert_initial_attachment_instance(&attachment_instance) + .unwrap(); + peer_2 + .network + .get_atlasdb_mut() + .insert_initial_attachment_instance(&attachment_instance) + .unwrap(); + + peer_1 + .network + .get_atlasdb_mut() + .insert_instantiated_attachment(&attachment) + .unwrap(); + peer_2 + .network + .get_atlasdb_mut() + .insert_instantiated_attachment(&attachment) + .unwrap(); + + // next tip, coinbase + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + 
TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(4); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + + // make another block for the test framework to POST + let (next_burn_ops, next_stacks_block, _) = peer_1.make_tenure( + |ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, _| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &parent_tip, + vrf_proof, + tip.total_burn, + microblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, anchored_block_size, anchored_block_cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![tx_coinbase_signed.clone()], + ) + .unwrap(); + + anchor_size = anchored_block_size; + anchor_cost = anchored_block_cost; + + (anchored_block, vec![]) + }, + ); + + let (_, _, next_consensus_hash) = peer_1.next_burnchain_block(next_burn_ops.clone()); + peer_2.next_burnchain_block(next_burn_ops.clone()); + + let view_1 = peer_1.get_burnchain_view().unwrap(); + let view_2 = peer_2.get_burnchain_view().unwrap(); + + // extract ports allocated to us + let peer_1_http = peer_1.config.http_port; + let peer_2_http = peer_2.config.http_port; + + debug!("test_rpc: Peer 1 HTTP port: {}", &peer_1_http); + debug!("test_rpc: Peer 2 HTTP port: {}", &peer_2_http); + + // store a chunk in the peers' stackerdb + let data = "hello world".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); + slot_metadata.sign(&privk1).unwrap(); + + for peer_server in [&mut peer_1, &mut peer_2] { + let contract_id = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", + ) + .unwrap(); + let tx = peer_server + .network + .stackerdbs + .tx_begin(StackerDBConfig::noop()) + .unwrap(); + tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) + .unwrap(); + tx.commit().unwrap(); + } + + let convo_1 = ConversationHttp::new( + format!("127.0.0.1:{}", peer_1_http) + .parse::() + .unwrap(), + Some(UrlString::try_from(format!("http://peer1.com")).unwrap()), + peer_1.to_peer_host(), + &peer_1.config.connection_opts, + 0, + ); + + let convo_2 = ConversationHttp::new( + format!("127.0.0.1:{}", peer_2_http) + .parse::() + .unwrap(), + Some(UrlString::try_from(format!("http://peer2.com")).unwrap()), + peer_2.to_peer_host(), + &peer_2.config.connection_opts, + 1, + ); + + TestRPC { + privk1, + privk2, + peer_1, + peer_2, + peer_1_indexer, + peer_2_indexer, + convo_1, + convo_2, + canonical_tip, + consensus_hash, + microblock_tip_hash: microblock.block_hash(), + mempool_txids, + microblock_txids, + next_block: 
(next_consensus_hash, next_stacks_block), + next_microblock: microblock, + sendable_txs, + } + } + + /// Run zero or more HTTP requests on this setup RPC test harness. + /// Return the list of responses. + pub fn run(self, requests: Vec) -> Vec { + let mut peer_1 = self.peer_1; + let mut peer_2 = self.peer_2; + let peer_1_indexer = self.peer_1_indexer; + let peer_2_indexer = self.peer_2_indexer; + let mut convo_1 = self.convo_1; + let mut convo_2 = self.convo_2; + + let mut responses = vec![]; + for request in requests.into_iter() { + convo_1.send_request(request.clone()).unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let peer_2_mempool = peer_2.mempool.take().unwrap(); + + debug!("test_rpc: Peer 1 sends to Peer 2"); + convo_send_recv( + &mut convo_1, + &peer_1_mempool, + peer_1.chainstate(), + &mut convo_2, + &peer_2_mempool, + peer_2.chainstate(), + ); + + // hack around the borrow-checker + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + ); + convo_1.chat(&mut node_state).unwrap(); + } + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + peer_2.mempool = Some(peer_2_mempool); + + debug!("test_rpc: Peer 2 sends to Peer 1"); + + // hack around the borrow-checker + let peer_2_sortdb = peer_2.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + + let _ = peer_2 + .network + .refresh_burnchain_view( + &peer_2_indexer, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + false, + ) + .unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_2_stacks_node.chainstate, &peer_2_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_2.network, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + &mut peer_2_mempool, + &rpc_args, + ); + convo_2.chat(&mut node_state).unwrap(); + } + + peer_2.sortdb = Some(peer_2_sortdb); + peer_2.stacks_node = Some(peer_2_stacks_node); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + + convo_send_recv( + &mut convo_2, + &peer_2_mempool, + peer_2.chainstate(), + &mut convo_1, + &peer_1_mempool, + peer_1.chainstate(), + ); + + debug!("test_rpc: Peer 1 flush"); + + // hack around the borrow-checker + convo_send_recv( + &mut convo_1, + &peer_1_mempool, + peer_1.chainstate(), + &mut convo_2, + &peer_2_mempool, + peer_2.chainstate(), + ); + + peer_2.mempool = Some(peer_2_mempool); + + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + + let _ = peer_1 + .network + .refresh_burnchain_view( + &peer_1_indexer, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + false, + ) + .unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + ); + convo_1.chat(&mut 
node_state).unwrap(); + } + + convo_1 + .try_flush(&peer_1_mempool, &mut peer_1_stacks_node.chainstate) + .unwrap(); + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + + // should have gotten a reply + let resp_opt = convo_1.try_get_response(); + assert!(resp_opt.is_some()); + + let resp = resp_opt.unwrap(); + responses.push(resp); + } + + return responses; + } +} + +/// General testing function to test RPC calls. +/// This function sets up two TestPeers and their respective chainstates, and loads them up with +/// some sample blocks and microblocks. The blocks will contain a smart contract transaction +/// called `hello-world` with the code `TEST_CONTRACT` above. In addition, a microblock will be +/// created off of the block with a contract-call to `add-unit`. The second TestPeer will also +/// have a populated mempool, while the first will not. +/// +/// This function causes the first peer to send `request` to the second peer from the first peer, +/// and will return the `StacksHttpResponse` generated by the second peer. +pub fn test_rpc(test_name: &str, requests: Vec) -> Vec { + let test = TestRPC::setup(test_name); + test.run(requests) +} From 7fe72fcb05f9088e15783f4495ff17151fe17e8e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 3 Oct 2023 21:27:40 -0400 Subject: [PATCH 082/107] fix: set higher default execution cost so callreadonly test passes --- stackslib/src/net/api/tests/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 80d081138d..9be02cf90d 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -281,18 +281,18 @@ impl<'a> TestRPC<'a> { peer_1_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, write_count: 0, - read_length: 1200, + read_length: 1500, read_count: 3, - runtime: 1200000, + runtime: 1500000, }; peer_1_config.connection_opts.maximum_call_argument_size = 4096; peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, write_count: 0, - read_length: 1200, + read_length: 1500, read_count: 3, - runtime: 1200000, + runtime: 1500000, }; peer_2_config.connection_opts.maximum_call_argument_size = 4096; From 5775c3807484900620359022656e177a6e263737 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 3 Oct 2023 21:32:17 -0400 Subject: [PATCH 083/107] chore: remove sha256d --- stacks-common/src/util/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 1b3c467806..db8901ccd7 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -78,8 +78,8 @@ impl error::Error for HexError { } fn description(&self) -> &str { match *self { - HexError::BadLength(_) => "sha256d hex string non-64 length", - HexError::BadCharacter(_) => "sha256d bad hex character", + HexError::BadLength(_) => "hex string non-64 length", + HexError::BadCharacter(_) => "bad hex character", } } } From c627eedb0e8b1b2bed6158e3462cdabd7b48f6db Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 3 Oct 2023 23:36:25 -0400 Subject: [PATCH 084/107] fix: "pages_indexes" is correct --- stackslib/src/net/atlas/download.rs | 5 +++-- stackslib/src/net/atlas/tests.rs | 21 ++++++++++++--------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/atlas/download.rs 
b/stackslib/src/net/atlas/download.rs index f5caf06a43..747d4fe256 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -1089,10 +1089,11 @@ impl Requestable for AttachmentsInventoryRequest { for page in self.pages.iter() { page_indexes.insert(*page); } - let page_list: Vec = page_indexes + let mut page_list: Vec = page_indexes .into_iter() .map(|i| format!("{}", &i)) .collect(); + page_list.sort(); StacksHttpRequest::new_for_peer( peer_host, "GET".into(), @@ -1102,7 +1103,7 @@ impl Requestable for AttachmentsInventoryRequest { "index_block_hash".into(), format!("{}", &self.index_block_hash), ) - .query_arg("page_indexes".into(), page_list[..].join(",")), + .query_arg("pages_indexes".into(), page_list[..].join(",")), ) .expect("FATAL: failed to create an HTTP request for infallible data") } diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index ca06c343c1..97307cf0a2 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -624,25 +624,28 @@ fn test_downloader_context_attachment_inventories_requests() { let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:30443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == "/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:20443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == "/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:40443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == "/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); } From cdeb6315ffc3f6c7c42c086e93de6464111290a4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Oct 2023 13:56:41 -0400 Subject: [PATCH 085/107] chore: fix regression in NetworkReplyHandle::try_flush() --- stackslib/src/net/chat.rs | 1 
- stackslib/src/net/connection.rs | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 622a34869f..a754300ca6 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3349,7 +3349,6 @@ mod test { } #[test] - #[ignore] fn convo_handshake_accept() { with_timeout(100, || { let conn_opts = ConnectionOptions::default(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 210274f192..2e540f362e 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -240,7 +240,7 @@ impl NetworkReplyHandle
<P>
{
     /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe if
     /// `drop_on_success` is true. Returns `true` if we drained the write end, `false` if not.
     pub fn try_flush_ex(&mut self, drop_on_success: bool) -> Result<bool, net_error> {
-        let mut ret = false;
+        let ret;
         let fd_opt = match self.request_pipe_write.take() {
             Some(mut fd) => {
                 ret = fd.try_flush().map_err(net_error::WriteError)?;
@@ -252,7 +252,10 @@ impl NetworkReplyHandle
<P>
{ Some(fd) } } - None => None, + None => { + ret = true; + None + } }; self.request_pipe_write = fd_opt; Ok(ret) From 8e880e7a4d88949735bdb0c392cd7e95db04a539 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:47:33 -0400 Subject: [PATCH 086/107] fix: count the number of txs sent --- stackslib/src/net/api/postmempoolquery.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index ab69c860fb..4a18f73157 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -200,6 +200,7 @@ impl HttpChunkGenerator for StacksMemPoolStream { // next call will cork the stream self.finished = true; } + self.num_txs += next_txs.len() as u64; return Ok(chunk); } else if let Some(next_txid) = next_last_randomized_txid_opt { // no more txs to send From 5b3edef05dddc57c9ae13b0d2e54e1bad21a95a4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:47:50 -0400 Subject: [PATCH 087/107] refactor: remove unneeded function args --- stackslib/src/net/api/tests/mod.rs | 52 ++++++------------------------ 1 file changed, 10 insertions(+), 42 deletions(-) diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 9be02cf90d..b0ebde67f0 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -169,31 +169,20 @@ const TEST_CONTRACT_UNCONFIRMED: &'static str = " "; /// This helper function drives I/O between a sender and receiver Http conversation. -fn convo_send_recv( - sender: &mut ConversationHttp, - sender_mempool: &MemPoolDB, - sender_chainstate: &mut StacksChainState, - receiver: &mut ConversationHttp, - receiver_mempool: &MemPoolDB, - receiver_chainstate: &mut StacksChainState, -) -> () { +fn convo_send_recv(sender: &mut ConversationHttp, receiver: &mut ConversationHttp) -> () { let (mut pipe_read, mut pipe_write) = Pipe::new(); pipe_read.set_nonblocking(true); loop { - sender.try_flush(sender_mempool, sender_chainstate).unwrap(); - receiver - .try_flush(sender_mempool, receiver_chainstate) - .unwrap(); + sender.try_flush().unwrap(); + receiver.try_flush().unwrap(); pipe_write.try_flush().unwrap(); let all_relays_flushed = receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0; - let nw = sender - .send(&mut pipe_write, sender_mempool, sender_chainstate) - .unwrap(); + let nw = sender.send(&mut pipe_write).unwrap(); let nr = receiver.recv(&mut pipe_read).unwrap(); debug!( @@ -785,6 +774,7 @@ impl<'a> TestRPC<'a> { peer_1.to_peer_host(), &peer_1.config.connection_opts, 0, + 32, ); let convo_2 = ConversationHttp::new( @@ -795,6 +785,7 @@ impl<'a> TestRPC<'a> { peer_2.to_peer_host(), &peer_2.config.connection_opts, 1, + 32, ); TestRPC { @@ -834,14 +825,7 @@ impl<'a> TestRPC<'a> { let peer_2_mempool = peer_2.mempool.take().unwrap(); debug!("test_rpc: Peer 1 sends to Peer 2"); - convo_send_recv( - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - ); + convo_send_recv(&mut convo_1, &mut convo_2); // hack around the borrow-checker let peer_1_sortdb = peer_1.sortdb.take().unwrap(); @@ -903,26 +887,12 @@ impl<'a> TestRPC<'a> { peer_2.stacks_node = Some(peer_2_stacks_node); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - convo_send_recv( - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - ); + convo_send_recv(&mut convo_2, &mut convo_1); 
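// A minimal illustrative sketch (not from the patch itself): how a per-endpoint test
// module might drive this harness. `test_rpc`, `StacksHttpRequest::new_for_peer`,
// `PeerHost::from_socketaddr`, and `HttpRequestContents::new` all appear elsewhere in
// this patch series; the address, path, test name, and imports are assumptions made
// purely for illustration.
#[test]
fn example_harness_usage() {
    // build a request against an illustrative peer address
    let addr: SocketAddr = "127.0.0.1:20443".parse().unwrap();
    let request = StacksHttpRequest::new_for_peer(
        PeerHost::from_socketaddr(&addr),
        "GET".into(),
        "/v2/info".into(),
        HttpRequestContents::new(),
    )
    .expect("failed to build request");

    // peer 1 sends the request; peer 2 (the one with the populated mempool) answers it
    let mut responses = test_rpc("example_harness_usage", vec![request]);
    assert_eq!(responses.len(), 1);
    let _response: StacksHttpResponse = responses.pop().unwrap();
}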
debug!("test_rpc: Peer 1 flush"); // hack around the borrow-checker - convo_send_recv( - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - ); + convo_send_recv(&mut convo_1, &mut convo_2); peer_2.mempool = Some(peer_2_mempool); @@ -954,9 +924,7 @@ impl<'a> TestRPC<'a> { convo_1.chat(&mut node_state).unwrap(); } - convo_1 - .try_flush(&peer_1_mempool, &mut peer_1_stacks_node.chainstate) - .unwrap(); + convo_1.try_flush().unwrap(); peer_1.sortdb = Some(peer_1_sortdb); peer_1.stacks_node = Some(peer_1_stacks_node); From 1177e64381a16f960a31e78700b6affe01c3d1cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:48:05 -0400 Subject: [PATCH 088/107] refactor: remove unneeded function args --- stackslib/src/net/atlas/download.rs | 31 ++++------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index 747d4fe256..15fd1d46d3 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -22,7 +22,6 @@ use std::hash::{Hash, Hasher}; use std::net::{IpAddr, SocketAddr}; use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::stacks::db::StacksChainState; use crate::net::atlas::MAX_RETRY_DELAY; use crate::net::atlas::{GetAttachmentResponse, GetAttachmentsInvResponse}; use crate::net::connection::ConnectionOptions; @@ -48,8 +47,6 @@ use rand::thread_rng; use rand::Rng; use std::cmp; -use crate::core::mempool::MemPoolDB; - use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::http::HttpRequestContents; @@ -112,8 +109,6 @@ impl AttachmentsDownloader { pub fn run( &mut self, dns_client: &mut DNSClient, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, network: &mut PeerNetwork, ) -> Result<(Vec<(AttachmentInstance, Attachment)>, Vec), net_error> { let mut resolved_attachments = vec![]; @@ -167,13 +162,8 @@ impl AttachmentsDownloader { } }; - let mut progress = AttachmentsBatchStateMachine::try_proceed( - ongoing_fsm, - dns_client, - network, - mempool, - chainstate, - ); + let mut progress = + AttachmentsBatchStateMachine::try_proceed(ongoing_fsm, dns_client, network); match progress { AttachmentsBatchStateMachine::Done(ref mut context) => { @@ -630,8 +620,6 @@ impl AttachmentsBatchStateMachine { fsm: AttachmentsBatchStateMachine, dns_client: &mut DNSClient, network: &mut PeerNetwork, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> AttachmentsBatchStateMachine { match fsm { AttachmentsBatchStateMachine::Initialized(context) => { @@ -666,8 +654,6 @@ impl AttachmentsBatchStateMachine { attachments_invs_requests, &context.dns_lookups, network, - mempool, - chainstate, &context.connection_options, ) { BatchedRequestsState::Done(ref mut results) => { @@ -691,8 +677,6 @@ impl AttachmentsBatchStateMachine { attachments_requests, &context.dns_lookups, network, - mempool, - chainstate, &context.connection_options, ) { BatchedRequestsState::Done(ref mut results) => { @@ -868,8 +852,6 @@ impl BatchedRequestsState fsm: BatchedRequestsState, dns_lookups: &HashMap>>, network: &mut PeerNetwork, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, connection_options: &ConnectionOptions, ) -> BatchedRequestsState { let mut fsm = fsm; @@ -891,13 +873,8 @@ impl BatchedRequestsState if let Some(requestable) = queue.pop() { let mut requestables = VecDeque::new(); requestables.push_back(requestable); - let res = PeerNetwork::begin_request( 
- network, - dns_lookups, - &mut requestables, - mempool, - chainstate, - ); + let res = + PeerNetwork::begin_request(network, dns_lookups, &mut requestables); if let Some((request, event_id)) = res { results.remaining.insert(event_id, request); } From 8eb1e98626d408191957ec879c81d0092d884f26 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:48:16 -0400 Subject: [PATCH 089/107] feat: allow connection opts to set the send/recv socket buffer size (this needs to be known to the http server) --- stackslib/src/net/connection.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 2e540f362e..91c70c20f7 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -249,6 +249,11 @@ impl NetworkReplyHandle
<P>
{ None } else { // still have data to send, or we will send more. + test_debug!( + "Still have data to send, drop_on_success = {}, ret = {}", + drop_on_success, + ret + ); Some(fd) } } @@ -392,6 +397,10 @@ pub struct ConnectionOptions { pub mempool_max_tx_query: u64, /// how long a mempool sync is allowed to take, in total, before timing out pub mempool_sync_timeout: u64, + /// socket read buffer size + pub socket_recv_buffer_size: u32, + /// socket write buffer size + pub socket_send_buffer_size: u32, // fault injection pub disable_neighbor_walk: bool, @@ -481,6 +490,8 @@ impl std::default::Default for ConnectionOptions { mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) + socket_recv_buffer_size: 131072, // Linux default + socket_send_buffer_size: 16384, // Linux default // no faults on by default disable_neighbor_walk: false, From 50f9715e586257000169b9646e998deb643d0cfc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:48:35 -0400 Subject: [PATCH 090/107] refactor: remove unneeded function args --- stackslib/src/net/download.rs | 37 ++++++----------------------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 5c21a1c15a..7f6f7fba1b 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -1938,8 +1938,6 @@ impl PeerNetwork { network: &mut PeerNetwork, dns_lookups: &HashMap>>, requestables: &mut VecDeque, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> Option<(T, usize)> { loop { match requestables.pop_front() { @@ -1961,8 +1959,6 @@ impl PeerNetwork { requestable.get_url().clone(), addr.clone(), request, - mempool, - chainstate, ) { Ok(handle) => { debug!( @@ -2001,11 +1997,7 @@ impl PeerNetwork { } /// Start fetching blocks - pub fn block_getblocks_begin( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { + pub fn block_getblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { let mut priority = PeerNetwork::prioritize_requests(&downloader.blocks_to_try); @@ -2013,13 +2005,7 @@ impl PeerNetwork { for sortition_height in priority.drain(..) { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request( - network, - &downloader.dns_lookups, - keys, - mempool, - chainstate, - ) { + match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { Some((key, handle)) => { requests.insert(key.clone(), handle); } @@ -2049,11 +2035,7 @@ impl PeerNetwork { } /// Proceed to get microblocks - pub fn block_getmicroblocks_begin( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { + pub fn block_getmicroblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getmicroblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { let mut priority = PeerNetwork::prioritize_requests(&downloader.microblocks_to_try); @@ -2061,13 +2043,7 @@ impl PeerNetwork { for sortition_height in priority.drain(..) 
{ match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request( - network, - &downloader.dns_lookups, - keys, - mempool, - chainstate, - ) { + match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { Some((key, handle)) => { requests.insert(key.clone(), handle); } @@ -2386,7 +2362,6 @@ impl PeerNetwork { pub fn download_blocks( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2488,13 +2463,13 @@ impl PeerNetwork { self.block_dns_lookups_try_finish(dns_client)?; } BlockDownloaderState::GetBlocksBegin => { - self.block_getblocks_begin(mempool, chainstate)?; + self.block_getblocks_begin()?; } BlockDownloaderState::GetBlocksFinish => { self.block_getblocks_try_finish()?; } BlockDownloaderState::GetMicroblocksBegin => { - self.block_getmicroblocks_begin(mempool, chainstate)?; + self.block_getmicroblocks_begin()?; } BlockDownloaderState::GetMicroblocksFinish => { self.block_getmicroblocks_try_finish()?; From d3fd9b14f8c99c9edfabf8500eeae3cc08c1e18a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:48:48 -0400 Subject: [PATCH 091/107] refactor: remove unneeded function args --- stackslib/src/net/httpcore.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 74117961d1..22e2c65d1e 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1569,8 +1569,6 @@ impl PeerNetwork { data_url: UrlString, addr: SocketAddr, request: StacksHttpRequest, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> Result { PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { PeerNetwork::with_http(network, |ref mut network, ref mut http| { @@ -1586,7 +1584,7 @@ impl PeerNetwork { match http.get_conversation_and_socket(event_id) { (Some(ref mut convo), Some(ref mut socket)) => { convo.send_request(request)?; - HttpPeer::saturate_http_socket(socket, convo, mempool, chainstate)?; + HttpPeer::saturate_http_socket(socket, convo)?; Ok(event_id) } (_, _) => { From 4c3e6e9a6c0cef0d75f77bba664bfc211c8168cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:49:05 -0400 Subject: [PATCH 092/107] chore: remove unneeded function args --- stackslib/src/net/p2p.rs | 43 +++++++++++----------------------------- 1 file changed, 12 insertions(+), 31 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c082092eb0..e0d9528695 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1092,7 +1092,11 @@ impl PeerNetwork { return Err(net_error::NotConnected); } Some(ref mut network) => { - let sock = NetworkState::connect(&neighbor.addrbytes.to_socketaddr(neighbor.port))?; + let sock = NetworkState::connect( + &neighbor.addrbytes.to_socketaddr(neighbor.port), + self.connection_opts.socket_send_buffer_size, + self.connection_opts.socket_recv_buffer_size, + )?; let hint_event_id = network.next_event_id()?; let registered_event_id = network.register(self.p2p_network_handle, hint_event_id, &sock)?; @@ -2428,14 +2432,13 @@ impl PeerNetwork { &mut self, dns_client_opt: &mut Option<&mut DNSClient>, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ibd: bool, ) -> Option> { if ibd { return None; } - return match self.do_mempool_sync(dns_client_opt, mempool, chainstate) { + return match self.do_mempool_sync(dns_client_opt, mempool) { (true, txs_opt) => { 
// did we run to completion? if let Some(txs) = txs_opt { @@ -2773,7 +2776,6 @@ impl PeerNetwork { fn do_network_block_download( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2796,7 +2798,7 @@ impl PeerNetwork { mut microblocks, mut broken_http_peers, mut broken_p2p_peers, - ) = match self.download_blocks(sortdb, mempool, chainstate, dns_client, ibd) { + ) = match self.download_blocks(sortdb, chainstate, dns_client, ibd) { Ok(x) => x, Err(net_error::NotConnected) => { // there was simply nothing to do @@ -3600,7 +3602,6 @@ impl PeerNetwork { url: &UrlString, addr: &SocketAddr, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, page_id: Txid, ) -> Result<(bool, Option), net_error> { let sync_data = mempool.make_mempool_sync_data()?; @@ -3613,13 +3614,7 @@ impl PeerNetwork { .payload_stacks(&sync_data), )?; - let event_id = self.connect_or_send_http_request( - url.clone(), - addr.clone(), - request, - mempool, - chainstate, - )?; + let event_id = self.connect_or_send_http_request(url.clone(), addr.clone(), request)?; return Ok((false, Some(event_id))); } @@ -3682,7 +3677,6 @@ impl PeerNetwork { &mut self, dns_client_opt: &mut Option<&mut DNSClient>, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> (bool, Option>) { if get_epoch_time_secs() <= self.mempool_sync_deadline { debug!( @@ -3770,13 +3764,7 @@ impl PeerNetwork { "{:?}: Mempool sync will query {} for mempool transactions at {}", &self.local_peer, url, page_id ); - match self.mempool_sync_send_query( - url, - addr, - mempool, - chainstate, - page_id.clone(), - ) { + match self.mempool_sync_send_query(url, addr, mempool, page_id.clone()) { Ok((false, Some(event_id))) => { // success! advance debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &self.local_peer, url, page_id, event_id); @@ -3864,7 +3852,6 @@ impl PeerNetwork { fn do_network_work( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client_opt: &mut Option<&mut DNSClient>, download_backpressure: bool, @@ -4071,7 +4058,6 @@ impl PeerNetwork { Some(ref mut dns_client) => { let done = self.do_network_block_download( sortdb, - mempool, chainstate, *dns_client, ibd, @@ -4132,8 +4118,6 @@ impl PeerNetwork { fn do_attachment_downloads( &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, mut dns_client_opt: Option<&mut DNSClient>, network_result: &mut NetworkResult, ) { @@ -4158,7 +4142,7 @@ impl PeerNetwork { self, |network, attachments_downloader| { let mut dead_events = vec![]; - match attachments_downloader.run(dns_client, mempool, chainstate, network) { + match attachments_downloader.run(dns_client, network) { Ok((ref mut attachments, ref mut events_to_deregister)) => { network_result.attachments.append(attachments); dead_events.append(events_to_deregister); @@ -5466,7 +5450,6 @@ impl PeerNetwork { // an already-used network ID. let do_prune = self.do_network_work( sortdb, - mempool, chainstate, &mut dns_client_opt, download_backpressure, @@ -5494,14 +5477,12 @@ impl PeerNetwork { // In parallel, do a mempool sync. // Remember any txs we get, so we can feed them to the relayer thread. 
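// A minimal illustrative sketch (not from the patch itself): how the socket buffer
// knobs added to ConnectionOptions in this series are expected to reach a TCP
// connection, mirroring the connect_peer() change above. The address and function
// name here are assumptions for illustration; the field names, their Linux-default
// values, and the NetworkState::connect() signature follow the diffs in this series.
fn example_connect_with_buffer_sizes() {
    let mut opts = ConnectionOptions::default();
    opts.socket_send_buffer_size = 16384; // Linux default, per the ConnectionOptions diff
    opts.socket_recv_buffer_size = 131072; // Linux default, per the ConnectionOptions diff

    let addr: SocketAddr = "127.0.0.1:20444".parse().unwrap();
    let _sock = NetworkState::connect(
        &addr,
        opts.socket_send_buffer_size,
        opts.socket_recv_buffer_size,
    )
    .unwrap();
}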
- if let Some(mut txs) = - self.do_network_mempool_sync(&mut dns_client_opt, mempool, chainstate, ibd) - { + if let Some(mut txs) = self.do_network_mempool_sync(&mut dns_client_opt, mempool, ibd) { network_result.synced_transactions.append(&mut txs); } // download attachments - self.do_attachment_downloads(mempool, chainstate, dns_client_opt, network_result); + self.do_attachment_downloads(dns_client_opt, network_result); // synchronize stacker DBs match self.run_stacker_db_sync() { From 4a550bddad4bc8240fd43455e56557709ffcec46 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:49:19 -0400 Subject: [PATCH 093/107] feat: when connecting to an address, allow the caller to set the socket send/recv buffer sizes --- stackslib/src/net/poll.rs | 44 ++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index eda59577d4..00ee4095de 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -318,7 +318,11 @@ impl NetworkState { /// Connect to a remote peer, but don't register it with the poll handle. /// The underlying connect(2) is _asynchronous_, so the caller will need to register it with a /// poll handle and wait for it to be connected. - pub fn connect(addr: &SocketAddr) -> Result { + pub fn connect( + addr: &SocketAddr, + socket_send_buffer: u32, + socket_recv_buffer: u32, + ) -> Result { let stream = mio_net::TcpStream::connect(addr).map_err(|_e| { test_debug!("Failed to convert to mio stream: {:?}", &_e); net_error::ConnectionError @@ -328,14 +332,14 @@ impl NetworkState { // Don't go crazy on TIME_WAIT states; have them all die after 5 seconds stream .set_linger(Some(time::Duration::from_millis(5000))) - .map_err(|_e| { - test_debug!("Failed to set SO_LINGER: {:?}", &_e); + .map_err(|e| { + warn!("Failed to set SO_LINGER: {:?}", &e); net_error::ConnectionError })?; // Disable Nagle algorithm stream.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + warn!("Failed to set TCP_NODELAY: {:?}", &_e); net_error::ConnectionError })?; @@ -343,8 +347,8 @@ impl NetworkState { // for a while. Linux default is 7200 seconds, so make sure we keep it here. 
stream .set_keepalive(Some(time::Duration::from_millis(7200 * 1000))) - .map_err(|_e| { - test_debug!("Failed to set TCP_KEEPALIVE and/or SO_KEEPALIVE: {:?}", &_e); + .map_err(|e| { + warn!("Failed to set TCP_KEEPALIVE and/or SO_KEEPALIVE: {:?}", &e); net_error::ConnectionError })?; @@ -354,6 +358,26 @@ impl NetworkState { stream.set_send_buffer_size(32).unwrap(); stream.set_recv_buffer_size(32).unwrap(); } + } else { + stream + .set_send_buffer_size(socket_send_buffer as usize) + .map_err(|e| { + warn!( + "Failed to set socket write buffer size to {}: {:?}", + socket_send_buffer, &e + ); + net_error::ConnectionError + })?; + + stream + .set_recv_buffer_size(socket_recv_buffer as usize) + .map_err(|e| { + warn!( + "Failed to set socket read buffer size to {}: {:?}", + socket_send_buffer, &e + ); + net_error::ConnectionError + })?; } test_debug!("New socket connected to {:?}: {:?}", addr, &stream); @@ -514,7 +538,7 @@ mod test { let addr = format!("127.0.0.1:{}", &port) .parse::() .unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); let event_id = ns.register(server_events[i], 1, &sock).unwrap(); assert!(event_id != 0); @@ -545,7 +569,7 @@ mod test { let addr = format!("127.0.0.1:{}", &port) .parse::() .unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); // can't use non-server events assert_eq!( @@ -568,7 +592,7 @@ mod test { .unwrap(); event_ids.insert(server_event_id); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); // register 10 client events let event_id = ns.register(server_event_id, 11, &sock).unwrap(); @@ -577,7 +601,7 @@ mod test { // the 11th socket should fail let addr = format!("127.0.0.1:{}", port).parse::().unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); let res = ns.register(server_event_id, 11, &sock); assert_eq!(Err(net_error::TooManyPeers), res); } From c87c59d20e14786c86b550db8c5991bee64ad849 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:49:45 -0400 Subject: [PATCH 094/107] fix: ensure that the reply handle's inner PipeWrite has at least $SOCKET_SEND_BUFFER bytes of pending data when filling it with the stream generator. --- stackslib/src/net/rpc.rs | 89 +++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 37 deletions(-) diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 421503b9d3..e64a60cc84 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -140,28 +140,42 @@ use crate::net::api::{getpoxinfo::RPCPoxCurrentCycleInfo, getpoxinfo::RPCPoxNext pub const STREAM_CHUNK_SIZE: u64 = 4096; pub struct ConversationHttp { + /// send/receive buffering state-machine for interfacing with a non-blocking socket connection: ConnectionHttp, + /// poll ID for this struct's associated socket conn_id: usize, + /// time (in seconds) for how long an attempt to connect to a peer is allowed to take timeout: u64, /// remote host's identifier (DNS or IP). 
Goes into the `Host:` header peer_host: PeerHost, + /// URL of the remote peer's data, if given outbound_url: Option, /// remote host's IP address peer_addr: SocketAddr, + /// remote host's keep-alive setting keep_alive: bool, - total_request_count: u64, // number of messages taken from the inbox - total_reply_count: u64, // number of messages responsed to - last_request_timestamp: u64, // absolute timestamp of the last time we received at least 1 byte in a request - last_response_timestamp: u64, // absolute timestamp of the last time we sent at least 1 byte in a response - connection_time: u64, // when this converation was instantiated - - canonical_stacks_tip_height: Option, // chain tip height of the peer's Stacks blockchain + /// number of messages consumed + total_request_count: u64, + /// number of messages sent + total_reply_count: u64, + /// absolute timestamp of the last time we recieved at least 1 byte + last_request_timestamp: u64, + /// absolute timestamp of the last time we sent at least 1 byte + last_response_timestamp: u64, + /// absolute time when this conversation was instantiated + connection_time: u64, + /// stacks canonical chain tip that this peer reported + canonical_stacks_tip_height: Option, + /// Ongoing replies reply_streams: VecDeque<(ReplyHandleHttp, HttpResponseContents, bool)>, - - // our outstanding request/response to the remote peer, if any + /// outstanding request pending_request: Option, + /// outstanding response pending_response: Option, + /// whether or not there's an error response pending pending_error_response: bool, + /// how much data to buffer (i.e. the socket's send buffer size) + socket_send_buffer_size: u32, } impl fmt::Display for ConversationHttp { @@ -195,6 +209,7 @@ impl ConversationHttp { peer_host: PeerHost, conn_opts: &ConnectionOptions, conn_id: usize, + socket_send_buffer_size: u32, ) -> ConversationHttp { let stacks_http = StacksHttp::new(peer_addr.clone(), conn_opts); ConversationHttp { @@ -214,6 +229,7 @@ impl ConversationHttp { total_reply_count: 0, last_request_timestamp: 0, last_response_timestamp: 0, + socket_send_buffer_size, connection_time: get_epoch_time_secs(), } } @@ -341,11 +357,7 @@ impl ConversationHttp { } /// Make progress on outbound requests. - fn send_outbound_responses( - &mut self, - _mempool: &MemPoolDB, - _chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { + fn send_outbound_responses(&mut self) -> Result<(), net_error> { // send out streamed responses in the order they were requested let mut drained_handle = false; let mut drained_stream = false; @@ -364,17 +376,25 @@ impl ConversationHttp { { do_keep_alive = *keep_alive; - // write out the last-generated data into the write-end of the reply handle's pipe - if let Some(pipe_fd) = reply.inner_pipe_out() { - let num_written = http_response.pipe_out(pipe_fd)?; - if num_written == 0 { - // no more chunks + while !drained_stream { + // write out the last-generated data into the write-end of the reply handle's pipe + if let Some(pipe_fd) = reply.inner_pipe_out() { + let num_written = http_response.pipe_out(pipe_fd)?; + if num_written == 0 { + // no more chunks + drained_stream = true; + } + test_debug!("{}: Wrote {} bytes", &_self_str, num_written); + if (pipe_fd.pending() as u32) >= self.socket_send_buffer_size { + // we've written more data than can be dumped into the socket buffer, so + // we're good to go for now -- we'll get an edge trigger next time the data + // drains from this socket. 
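// A minimal illustrative sketch (not from the patch itself): the buffering rule the
// new loop above implements, pulled out on its own. It keeps generating response
// chunks into the reply handle's pipe until either the stream is exhausted or at
// least one socket send buffer's worth of bytes is pending. `pipe_out()` and
// `pending()` are the calls used in the diff; this standalone helper and its exact
// signature are assumptions for illustration.
fn fill_until_saturated(
    stream: &mut HttpResponseContents,
    pipe: &mut PipeWrite,
    send_buffer_size: u32,
) -> Result<bool, net_error> {
    loop {
        // generate the next chunk and buffer it into the pipe's write end
        let num_written = stream.pipe_out(pipe)?;
        if num_written == 0 {
            return Ok(true); // stream drained; nothing more to buffer
        }
        if (pipe.pending() as u32) >= send_buffer_size {
            // enough data buffered to fill the socket; stop here and resume on the
            // next writable (edge-triggered) event once the socket drains
            return Ok(false);
        }
    }
}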
+ break; + } + } else { + test_debug!("{}: No inner pipe", &_self_str); drained_stream = true; } - test_debug!("{}: Wrote {} bytes", &_self_str, num_written); - } else { - test_debug!("{}: No inner pipe", &_self_str); - drained_stream = true; } if !drained_stream { @@ -511,12 +531,8 @@ impl ConversationHttp { } /// Make progress on in-flight messages. - pub fn try_flush( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { - self.send_outbound_responses(mempool, chainstate)?; + pub fn try_flush(&mut self) -> Result<(), net_error> { + self.send_outbound_responses()?; self.recv_inbound_response()?; Ok(()) } @@ -658,17 +674,15 @@ impl ConversationHttp { } /// Write data out of our HTTP connection. Write as much as we can - pub fn send( - &mut self, - w: &mut W, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result { + pub fn send(&mut self, w: &mut W) -> Result { let mut total_sz = 0; loop { - // prime the Write - self.try_flush(mempool, chainstate)?; + test_debug!("{:?}: Try to send bytes (total {})", self, total_sz); + + // fill the reply handles in self.connection with data + self.try_flush()?; + // dump reply handle state into `w` let sz = match self.connection.send_data(w) { Ok(sz) => sz, Err(e) => { @@ -676,6 +690,7 @@ impl ConversationHttp { return Err(e); } }; + test_debug!("{:?}: Sent {} bytes (total {})", self, sz, total_sz); total_sz += sz; if sz > 0 { From b3a8b45557f0da76faf2f0f381445f3cf82fa54c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:50:14 -0400 Subject: [PATCH 095/107] refactor: remove unneeded function args --- stackslib/src/net/server.rs | 82 +++++++++++++------------------------ 1 file changed, 28 insertions(+), 54 deletions(-) diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 920530ca36..58e55760aa 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -151,7 +151,11 @@ impl HttpPeer { return Err(net_error::AlreadyConnected(event_id, http_nk)); } - let sock = NetworkState::connect(&addr)?; + let sock = NetworkState::connect( + &addr, + network.connection_opts.socket_send_buffer_size, + network.connection_opts.socket_recv_buffer_size, + )?; let hint_event_id = network_state.next_event_id()?; let next_event_id = network_state.register(self.http_server_handle, hint_event_id, &sock)?; @@ -225,6 +229,9 @@ impl HttpPeer { outbound_url: Option, initial_request: Option, ) -> Result<(), net_error> { + let send_buffer_size = node_state + .with_node_state(|network, _, _, _, _| network.connection_opts.socket_send_buffer_size); + let client_addr = match socket.peer_addr() { Ok(addr) => addr, Err(e) => { @@ -255,6 +262,7 @@ impl HttpPeer { peer_host, &self.connection_opts, event_id, + send_buffer_size, ); debug!( @@ -273,17 +281,9 @@ impl HttpPeer { } // prime the socket - let saturation_res = - node_state.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { - HttpPeer::saturate_http_socket(&mut socket, &mut new_convo, mempool, chainstate) - }); - - match saturation_res { - Ok(_) => {} - Err(e) => { - let _ = network_state.deregister(event_id, &socket); - return Err(e); - } + if let Err(e) = HttpPeer::saturate_http_socket(&mut socket, &mut new_convo) { + let _ = network_state.deregister(event_id, &socket); + return Err(e); } } @@ -353,12 +353,10 @@ impl HttpPeer { pub fn saturate_http_socket( client_sock: &mut mio::net::TcpStream, convo: &mut ConversationHttp, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> 
Result<(), net_error> { // saturate the socket loop { - let send_res = convo.send(client_sock, mempool, chainstate); + let send_res = convo.send(client_sock); match send_res { Err(e) => { debug!("Failed to send data to socket {:?}: {:?}", &client_sock, &e); @@ -459,25 +457,12 @@ impl HttpPeer { )) { Ok(_) => { // prime the socket - let saturation_res = node_state.with_node_state( - |_network, _sortdb, chainstate, mempool, _rpc_args| { - HttpPeer::saturate_http_socket( - client_sock, - convo, - mempool, - chainstate, - ) - }, - ); - match saturation_res { - Ok(_) => {} - Err(e) => { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - convo_dead = true; - } + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { + debug!( + "Failed to flush HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; } } Err(e) => { @@ -519,19 +504,12 @@ impl HttpPeer { if !convo_dead { // (continue) sending out data in this conversation, if the conversation is still // ongoing - let saturation_res = - node_state.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { - HttpPeer::saturate_http_socket(client_sock, convo, mempool, chainstate) - }); - match saturation_res { - Ok(_) => {} - Err(e) => { - debug!( - "Failed to send HTTP data to event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { + debug!( + "Failed to send HTTP data to event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } @@ -635,16 +613,12 @@ impl HttpPeer { /// Flush outgoing replies, but don't block. /// Drop broken handles. /// Return the list of conversation event IDs to close (i.e. 
they're broken, or the request is done) - fn flush_conversations(&mut self, node_state: &mut StacksNodeState) -> Vec { + fn flush_conversations(&mut self) -> Vec { let mut close = vec![]; // flush each outgoing conversation for (event_id, ref mut convo) in self.peers.iter_mut() { - let flush_res = - node_state.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { - convo.try_flush(mempool, chainstate) - }); - if let Err(e) = flush_res { + if let Err(e) = convo.try_flush() { info!("Broken HTTP connection {:?}: {:?}", convo, &e); close.push(*event_id); } @@ -684,7 +658,7 @@ impl HttpPeer { } // move conversations along - let close_events = self.flush_conversations(node_state); + let close_events = self.flush_conversations(); for close_event in close_events { debug!("Close HTTP connection on event {}", close_event); self.deregister_http(network_state, close_event); From 1c3c28d22dbfd7488896fc461255b4a2991046dd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:50:34 -0400 Subject: [PATCH 096/107] chore: API sync --- stackslib/src/net/tests/httpcore.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 6a629dd632..1b152f42f4 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -181,6 +181,7 @@ fn test_http_request_type_codec() { PeerHost::DNS("localhost".to_string(), 12345), &ConnectionOptions::default(), 100, + 32, ); let tx = make_test_transaction(); let tx_body = tx.serialize_to_vec(); From 062bb2d903b67dc376f37d4df30f7ff5dcee0c4b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 13:50:47 -0400 Subject: [PATCH 097/107] feat: PipeWrite can return the number of bytes pending --- stacks-common/src/util/pipe.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index e04a8f8fbf..fed0d3c8db 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -227,6 +227,11 @@ impl PipeWrite { Ok(buf.len()) } + /// How many bytes are pending? + pub fn pending(&self) -> usize { + self.buf.as_ref().map(|b| b.len()).unwrap_or(0) + } + /// Try and flush all data to the reader. /// Return True if we succeeded; False if not. 
pub fn try_flush(&mut self) -> io::Result { From 9919e1064926b4dfa3ae25c71b01734c08a9e44b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Oct 2023 17:06:20 -0400 Subject: [PATCH 098/107] fix: failure to find a request route is a 404, not a 400 --- stackslib/src/net/http/mod.rs | 2 +- stackslib/src/net/httpcore.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index 1e87f979fc..7d032ddd90 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -140,7 +140,7 @@ impl Error { "Failed to parse data (underflow): {:?}", &x ))), - Error::Http(code, msg) => Box::new(HttpError::new(code, msg)), + Error::Http(code, msg) => http_error_from_code_and_text(code, msg), Error::AppError(x) => Box::new(HttpServerError::new(format!( "Unhandled application error: {:?}", &x diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 22e2c65d1e..dbdeefce1b 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1005,7 +1005,10 @@ impl StacksHttp { } test_debug!("Failed to parse '{}'", &preamble.path_and_query_str); - Err(NetError::NotFoundError) + Err(NetError::Http(HttpError::Http( + 404, + "No such file or directory".into(), + ))) } /// Parse out an HTTP response error message From 915ac5615e5ede34b44fb77c406fdaf51ad0c43d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 21 Oct 2023 06:57:24 -0400 Subject: [PATCH 099/107] fix: use .request_path() --- stackslib/src/monitoring/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index b1fa025fde..1d3f212d24 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -56,7 +56,7 @@ where increment_rpc_calls_counter(); #[cfg(feature = "monitoring_prom")] - let timer = prometheus::new_rpc_call_timer(req.get_path()); + let timer = prometheus::new_rpc_call_timer(req.request_path()); let res = handler(req); From 33114c1e520febaf1d307b6b6934d7e965ba3eb8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 21 Oct 2023 07:07:12 -0400 Subject: [PATCH 100/107] fix: revert localized cargo.toml changes --- Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 956eabb5a2..2a04b86a6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,3 @@ opt-level = 3 [profile.release] debug = true -[patch.crates-io] -p256k1 = { path = "/home/jude/pkg/p256k1/p256k1", version = "5.5.0" } -wsts = { path = "/home/jude/pkg/wsts" } From f34606797dc26aba14828bb02ee528710c5c388e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 21 Oct 2023 07:11:03 -0400 Subject: [PATCH 101/107] chore: replace `.len() == 0` with `.is_empty()` --- stackslib/src/net/api/getattachmentsinv.rs | 2 +- stackslib/src/net/api/tests/getblock.rs | 4 ++-- stackslib/src/net/api/tests/getheaders.rs | 2 +- stackslib/src/net/api/tests/getmicroblocks_confirmed.rs | 2 +- stackslib/src/net/api/tests/getmicroblocks_indexed.rs | 2 +- stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs | 4 ++-- stackslib/src/net/api/tests/postmempoolquery.rs | 6 +++--- stackslib/src/net/http/stream.rs | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs index 9ab19c6176..d41898a731 100644 --- a/stackslib/src/net/api/getattachmentsinv.rs +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -167,7 +167,7 @@ impl 
RPCRequestHandler for RPCGetAttachmentsInvRequestHandler { .try_into_contents() .map_err(NetError::from); } - if page_indexes.len() == 0 { + if page_indexes.is_empty() { let msg = format!("Page indexes missing"); warn!("{}", msg); return StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new(msg)) diff --git a/stackslib/src/net/api/tests/getblock.rs b/stackslib/src/net/api/tests/getblock.rs index f987f5e79b..c873c52620 100644 --- a/stackslib/src/net/api/tests/getblock.rs +++ b/stackslib/src/net/api/tests/getblock.rs @@ -147,7 +147,7 @@ fn test_stream_blocks() { let mut all_block_bytes = vec![]; loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( @@ -172,7 +172,7 @@ fn test_stream_blocks() { let mut all_block_bytes = vec![]; loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( diff --git a/stackslib/src/net/api/tests/getheaders.rs b/stackslib/src/net/api/tests/getheaders.rs index 800d43220a..4ea4480082 100644 --- a/stackslib/src/net/api/tests/getheaders.rs +++ b/stackslib/src/net/api/tests/getheaders.rs @@ -171,7 +171,7 @@ fn stream_headers_to_vec(stream: &mut StacksHeaderStream) -> Vec { let mut header_bytes = vec![]; loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } header_bytes.append(&mut next_bytes); diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs index 298cb496ce..a4eb372abf 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -265,7 +265,7 @@ fn test_stream_confirmed_microblocks() { let mut confirmed_mblock_bytes = vec![]; loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs index 52d49c8f2e..0676ecc497 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs @@ -267,7 +267,7 @@ fn test_stream_indexed_microblocks() { let mut confirmed_mblock_bytes = vec![]; loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( diff --git a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs index cc244956f6..f4facf717c 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs @@ -185,7 +185,7 @@ fn test_stream_unconfirmed_microblocks() { .unwrap(); loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( @@ -218,7 +218,7 @@ fn test_stream_unconfirmed_microblocks() { .unwrap(); loop { let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.len() == 0 { + if next_bytes.is_empty() { break; } test_debug!( diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 034eed9f49..1f528c57c5 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ 
b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -202,7 +202,7 @@ fn test_stream_mempool_txs() { loop { let chunk = tx_stream_data.generate_next_chunk().unwrap(); - if chunk.len() == 0 { + if chunk.is_empty() { break; } buf.extend_from_slice(&chunk[..]); @@ -260,7 +260,7 @@ fn test_stream_mempool_txs() { let mut buf = vec![]; loop { let chunk = tx_stream_data.generate_next_chunk().unwrap(); - if chunk.len() == 0 { + if chunk.is_empty() { break; } buf.extend_from_slice(&chunk[..]); @@ -327,7 +327,7 @@ fn test_stream_mempool_txs() { let mut buf = vec![]; loop { let chunk = tx_stream_data.generate_next_chunk().unwrap(); - if chunk.len() == 0 { + if chunk.is_empty() { break; } buf.extend_from_slice(&chunk[..]); diff --git a/stackslib/src/net/http/stream.rs b/stackslib/src/net/http/stream.rs index 08fe5a26c7..4f5d9f55cd 100644 --- a/stackslib/src/net/http/stream.rs +++ b/stackslib/src/net/http/stream.rs @@ -52,7 +52,7 @@ pub trait HttpChunkGenerator: Send { let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, encoder_state); - if chunk.len() == 0 { + if chunk.is_empty() { // no more chunks, but be sure to cork the stream if !encoder.corked() { encoder.flush()?; From eaa8de9e7eb826e71727f5b9ec62276dc087b309 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 Oct 2023 00:44:24 -0400 Subject: [PATCH 102/107] chore: run DKG test as part of integration test --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 19669aebd8..ff686ed831 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -139,6 +139,7 @@ jobs: - tests::neon_integrations::bad_microblock_pubkey - tests::epoch_24::fix_to_pox_contract - tests::epoch_24::verify_auto_unlock_behavior + - tests::signer::test_stackerdb_dkg - tests::stackerdb::test_stackerdb_load_store - tests::stackerdb::test_stackerdb_event_observer steps: From a3f6cca6f0b867f8434a415bd5528fa4404c9795 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 Oct 2023 00:45:09 -0400 Subject: [PATCH 103/107] fix: forward stackerdb chunks to relayer --- stackslib/src/net/api/poststackerdbchunk.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index 8af2fb0e33..190ba1f710 100644 --- a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -48,7 +48,9 @@ use crate::net::httpcore::{ request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::net::{ + Error as NetError, StackerDBPushChunkData, StacksMessageType, StacksNodeState, TipRequest, +}; use crate::util_lib::db::{DBConn, Error as DBError}; #[derive(Clone)] @@ -267,6 +269,17 @@ impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { } }; + if ack_resp.accepted { + let push_chunk_data = StackerDBPushChunkData { + contract_id: contract_identifier, + rc_consensus_hash: node.with_node_state(|network, _, _, _, _| { + network.get_chain_view().rc_consensus_hash.clone() + }), + chunk_data: stackerdb_chunk, + }; + node.set_relay_message(StacksMessageType::StackerDBPushChunk(push_chunk_data)); + } + let mut preamble = HttpResponsePreamble::ok_json(&preamble); 
preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&ack_resp)?; From dcfba381b24a2d4c1fab5aba1611098bcc437f44 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 Oct 2023 00:45:26 -0400 Subject: [PATCH 104/107] chore: address PR feedback --- stackslib/src/net/httpcore.rs | 163 +++++++++++++++++----------------- 1 file changed, 80 insertions(+), 83 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index ae4db0c07f..5ed344fefe 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -776,16 +776,16 @@ impl StacksHttpRecvStream { (0, num_consumed) } Ok((num_read, num_consumed)) => (num_read, num_consumed), - Err(e) => match e.kind() { - io::ErrorKind::WouldBlock | io::ErrorKind::TimedOut => { + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut + { trace!("consume_data blocked on read error"); blocked = true; (0, 0) - } - _ => { + } else { return Err(NetError::ReadError(e)); } - }, + } }; consumed += consumed_pass; @@ -1105,11 +1105,7 @@ impl StacksHttp { #[cfg(test)] pub fn num_pending(&self) -> usize { - if self.reply.is_some() { - 1 - } else { - 0 - } + self.reply.as_ref().map(|_| 1).unwrap_or(0) } /// Set up the pending response @@ -1169,31 +1165,37 @@ impl StacksHttp { preamble: &HttpResponsePreamble, fd: &mut R, ) -> Result<(Option<(Vec, usize)>, usize), NetError> { - assert!(preamble.is_chunked()); - assert!(self.reply.is_some()); - + if !preamble.is_chunked() { + return Err(NetError::InvalidState); + } if let Some(reply) = self.reply.as_mut() { - match reply.stream.consume_data(fd) { - Ok(res) => match res { - (None, sz) => Ok((None, sz)), - (Some((byte_vec, bytes_total)), sz) => { - // done receiving - self.reply = None; - Ok((Some((byte_vec, bytes_total)), sz)) - } - }, - Err(e) => { - // broken stream - self.reset(); - Err(e) + match reply.stream.consume_data(fd).map_err(|e| { + self.reset(); + e + })? { + (Some((byte_vec, bytes_total)), sz) => { + // done receiving + self.reply = None; + Ok((Some((byte_vec, bytes_total)), sz)) } + res => Ok(res), } } else { - unreachable!(); + return Err(NetError::InvalidState); } } - /// Calculate the search window for \r\n\r\n + /// Calculate the search window for \r\n\r\n in the preamble stream. + /// + /// As we are streaming the preamble, we're looking for the pattern `\r\n\r\n`. The last four + /// bytes of the encoded preamble are always stored in `self.last_four_preamble_bytes`; this + /// gets updated as the preamble data is streamed in. So, given these last four bytes, and the + /// next chunk of data streamed in from the request (in `buf`), determine the 4-byte sequence + /// to check for `\r\n\r\n`. + /// + /// `i` is the offset into the chunk `buf` being searched. If `i < 4`, then we must check the + /// last `4 - i` bytes of `self.last_four_preamble_bytes` as well as the first `i` bytes of + /// `buf`. Otherwise, we just check `buf[i-4..i]`. fn body_start_search_window(&self, i: usize, buf: &[u8]) -> [u8; 4] { let window = match i { 0 => [ @@ -1253,12 +1255,11 @@ impl StacksHttp { let mut message_bytes = &response_buf[message_offset..]; if is_chunked { - match http.stream_payload(&preamble, &mut message_bytes) { - Ok((Some((message, _)), _)) => Ok(message), - Ok((None, _)) => Err(NetError::UnderflowError( + match http.stream_payload(&preamble, &mut message_bytes)? 
{ + (Some((message, _)), _) => Ok(message), + (None, _) => Err(NetError::UnderflowError( "Not enough bytes to form a streamed HTTP response".to_string(), )), - Err(e) => Err(e), } } else { let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; @@ -1282,12 +1283,9 @@ impl ProtocolFamily for StacksHttp { StacksHttpPreamble::Request(ref http_request_preamble) => { Some(http_request_preamble.get_content_length() as usize) } - StacksHttpPreamble::Response(ref http_response_preamble) => { - match http_response_preamble.content_length { - Some(len) => Some(len as usize), - None => None, - } - } + StacksHttpPreamble::Response(ref http_response_preamble) => http_response_preamble + .content_length + .map(|len| len as usize), } } @@ -1299,7 +1297,7 @@ impl ProtocolFamily for StacksHttp { if self.body_start.is_none() { for i in 0..=buf.len() { let window = self.body_start_search_window(i, buf); - if window == [13, 10, 13, 10] { + if window == [b'\r', b'\n', b'\r', b'\n'] { self.body_start = Some(self.num_preamble_bytes + i); } } @@ -1346,14 +1344,18 @@ impl ProtocolFamily for StacksHttp { preamble: &StacksHttpPreamble, fd: &mut R, ) -> Result<(Option<(StacksHttpMessage, usize)>, usize), NetError> { - assert!(self.payload_len(preamble).is_none()); + if self.payload_len(preamble).is_some() { + return Err(NetError::InvalidState); + } match preamble { StacksHttpPreamble::Request(_) => { // HTTP requests can't be chunk-encoded, so this should never be reached - unreachable!() + return Err(NetError::InvalidState); } StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(http_response_preamble.is_chunked()); + if !http_response_preamble.is_chunked() { + return Err(NetError::InvalidState); + } // sanity check -- if we're receiving a response, then we must have earlier issued // a request. Thus, we must already know which response handler to use. 
@@ -1439,40 +1441,40 @@ impl ProtocolFamily for StacksHttp { StacksHttpPreamble::Request(ref http_request_preamble) => { // all requests have a known length let len = http_request_preamble.get_content_length() as usize; - assert!(len <= buf.len(), "{} > {}", len, buf.len()); + if len > buf.len() { + return Err(NetError::InvalidState); + } trace!("read http request payload of {} bytes", len); match self.try_parse_request(http_request_preamble, &buf[0..len]) { Ok(data_request) => Ok((StacksHttpMessage::Request(data_request), len)), + Err(NetError::Http(http_error)) => { + // convert into a response + let resp = StacksHttpResponse::new_error( + http_request_preamble, + &*http_error.into_http_error(), + ); + self.reset(); + return Ok(( + StacksHttpMessage::Error( + http_request_preamble.path_and_query_str.clone(), + resp, + ), + len, + )); + } Err(e) => { - match e { - NetError::Http(http_error) => { - // convert into a response - let resp = StacksHttpResponse::new_error( - http_request_preamble, - &*http_error.into_http_error(), - ); - self.reset(); - return Ok(( - StacksHttpMessage::Error( - http_request_preamble.path_and_query_str.clone(), - resp, - ), - len, - )); - } - _ => { - info!("Failed to parse HTTP request: {:?}", &e); - self.reset(); - Err(e) - } - } + info!("Failed to parse HTTP request: {:?}", &e); + self.reset(); + Err(e) } } } StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(!http_response_preamble.is_chunked()); + if http_response_preamble.is_chunked() { + return Err(NetError::InvalidState); + } // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); @@ -1480,14 +1482,10 @@ impl ProtocolFamily for StacksHttp { // sanity check -- if we're receiving a response, then we must have earlier issued // a request. Thus, we must already know which response handler to use. // Otherwise, someone sent us malformed data. 
- let handler_index = if let Some(i) = self.request_handler_index.as_ref() { - *i - } else { + let handler_index = self.request_handler_index.ok_or_else(|| { self.reset(); - return Err(NetError::DeserializeError( - "Unsolicited HTTP response".to_string(), - )); - }; + NetError::DeserializeError("Unsolicited HTTP response".to_string()) + })?; let res = self.try_parse_response(handler_index, http_response_preamble, buf); self.reset(); @@ -1568,16 +1566,15 @@ impl PeerNetwork { ) { Ok(event_id) => Ok(event_id), Err(NetError::AlreadyConnected(event_id, _)) => { - match http.get_conversation_and_socket(event_id) { - (Some(ref mut convo), Some(ref mut socket)) => { - convo.send_request(request)?; - HttpPeer::saturate_http_socket(socket, convo)?; - Ok(event_id) - } - (_, _) => { - debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); - Err(NetError::PeerNotConnected) - } + if let (Some(ref mut convo), Some(ref mut socket)) = + http.get_conversation_and_socket(event_id) + { + convo.send_request(request)?; + HttpPeer::saturate_http_socket(socket, convo)?; + Ok(event_id) + } else { + debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); + Err(NetError::PeerNotConnected) } } Err(e) => { From 6d22cc6ad6cc267871ee11f74c5aee23422d571b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 Oct 2023 00:45:38 -0400 Subject: [PATCH 105/107] chore: add InvalidState error variant instead of unreachable!() --- stackslib/src/net/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 188a4bca31..90698e126e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -275,6 +275,8 @@ pub enum Error { StackerDBChunkTooBig(usize), /// HTTP error Http(HttpErr), + /// Invalid state machine state reached + InvalidState, } impl From for Error { @@ -421,6 +423,7 @@ impl fmt::Display for Error { write!(f, "StackerDB chunk size is too big ({})", sz) } Error::Http(e) => fmt::Display::fmt(&e, f), + Error::InvalidState => write!(f, "Invalid state-machine state reached"), } } } @@ -492,6 +495,7 @@ impl error::Error for Error { Error::StepTimeout => None, Error::StackerDBChunkTooBig(..) => None, Error::Http(ref e) => Some(e), + Error::InvalidState => None, } } } From 5ff0b218d5a9b2a43a5d2c35e0052367524690ca Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 Oct 2023 12:15:36 -0400 Subject: [PATCH 106/107] fix: use 100 signers and 4000 keys. This test runs only on release CI anyway, so it's fine if it takes a while. 
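For context, the new parameters below imply an even split of key IDs across signers: 4000 keys over 100 signers is 40 key IDs per signer. A minimal sketch of that split, in the spirit of the test setup (the helper name and 1-based key IDs are assumptions for illustration, not code from this patch):

    /// Illustrative only: evenly partition `num_keys` key IDs across `num_signers`.
    /// With num_signers = 100 and num_keys = 4000, each signer covers 40 key IDs.
    fn key_ids_for_signer(signer_id: u32, num_signers: u32, num_keys: u32) -> Vec<u32> {
        let keys_per_signer = num_keys / num_signers; // 4000 / 100 = 40
        let start = signer_id * keys_per_signer + 1; // assume key IDs start at 1
        (start..start + keys_per_signer).collect()
    }
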
--- testnet/stacks-node/src/tests/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 788474085b..9b86f9e4fa 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -157,8 +157,8 @@ fn test_stackerdb_dkg() { .init(); // Generate Signer Data - let num_signers: u32 = 16; - let num_keys: u32 = 40; + let num_signers: u32 = 100; + let num_keys: u32 = 4000; let signer_stacks_private_keys = (0..num_signers) .map(|_| StacksPrivateKey::new()) .collect::>(); From 3b373321eb20bbbee34fe74d0b59bc3f144c2909 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 1 Nov 2023 12:58:27 -0400 Subject: [PATCH 107/107] chore: make fmt --- stacks-signer/src/stacks_client.rs | 42 ++++++++++++------------------ 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/stacks_client.rs b/stacks-signer/src/stacks_client.rs index 700858cfb4..0621df4b09 100644 --- a/stacks-signer/src/stacks_client.rs +++ b/stacks-signer/src/stacks_client.rs @@ -1,33 +1,25 @@ use std::time::Duration; use bincode::Error as BincodeError; -use blockstack_lib::{ - burnchains::Txid, - chainstate::stacks::{ - StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, - TransactionContractCall, TransactionPayload, TransactionPostConditionMode, - TransactionSpendingCondition, TransactionVersion, - }, -}; -use clarity::vm::{ - types::{serialization::SerializationError, QualifiedContractIdentifier, SequenceData}, - Value as ClarityValue, {ClarityName, ContractName}, +use blockstack_lib::burnchains::Txid; +use blockstack_lib::chainstate::stacks::{ + StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSpendingCondition, TransactionVersion, }; +use clarity::vm::types::serialization::SerializationError; +use clarity::vm::types::{QualifiedContractIdentifier, SequenceData}; +use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use hashbrown::HashMap; use libsigner::{RPCError, SignerSession, StackerDBSession}; use libstackerdb::{Error as StackerDBError, StackerDBChunkAckData, StackerDBChunkData}; use serde_json::json; use slog::{slog_debug, slog_warn}; -use stacks_common::{ - codec::StacksMessageCodec, - debug, - types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}, - warn, -}; -use wsts::{ - net::{Message, Packet}, - Point, Scalar, -}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::{debug, warn}; +use wsts::net::{Message, Packet}; +use wsts::{Point, Scalar}; use crate::config::Config; @@ -466,11 +458,9 @@ fn slot_id(id: u32, message: &Message) -> u32 { #[cfg(test)] mod tests { - use std::{ - io::{BufWriter, Read, Write}, - net::{SocketAddr, TcpListener}, - thread::spawn, - }; + use std::io::{BufWriter, Read, Write}; + use std::net::{SocketAddr, TcpListener}; + use std::thread::spawn; use super::*;