diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 19669aebd8..ff686ed831 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -139,6 +139,7 @@ jobs: - tests::neon_integrations::bad_microblock_pubkey - tests::epoch_24::fix_to_pox_contract - tests::epoch_24::verify_auto_unlock_behavior + - tests::signer::test_stackerdb_dkg - tests::stackerdb::test_stackerdb_load_store - tests::stackerdb::test_stackerdb_event_observer steps: diff --git a/Cargo.lock b/Cargo.lock index 4096da6da0..6f0d0c6217 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -450,28 +450,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.64.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 1.0.109", - "which", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -606,15 +584,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "0.1.10" @@ -667,17 +636,6 @@ dependencies = [ "inout", ] -[[package]] -name = "clang-sys" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "2.34.0" @@ -1459,12 +1417,6 @@ version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - [[package]] name = "gloo-timers" version = "0.2.6" @@ -1925,12 +1877,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.140" @@ -1957,16 +1903,6 @@ dependencies = [ "rle-decode-fast", ] -[[package]] -name = "libloading" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", -] - [[package]] name = "libsigner" version = "0.0.1" @@ -2086,12 +2022,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -2204,16 +2134,6 @@ dependencies = [ "memoffset 0.6.5", ] -[[package]] 
-name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2348,10 +2268,7 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256k1" version = "5.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e81c2cb5a1936d3f26278f9d698932239d03ddf0d5818392d91cd5f98ffc79" dependencies = [ - "bindgen", "bitvec", "bs58 0.4.0", "cc", @@ -2400,12 +2317,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "percent-encoding" version = "2.2.0" @@ -2968,12 +2879,6 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc-hex" version = "2.1.0" @@ -3378,12 +3283,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - [[package]] name = "simple-mutex" version = "1.1.5" @@ -4453,17 +4352,6 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - [[package]] name = "winapi" version = "0.2.8" @@ -4686,8 +4574,6 @@ dependencies = [ [[package]] name = "wsts" version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0c0ec44cbd35be82490c8c566ad4971f7b41ffe8508f1c9938140df7fe18b2" dependencies = [ "aes-gcm 0.10.2", "bs58 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index 7a87639a02..2a04b86a6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,3 +24,4 @@ opt-level = 3 [profile.release] debug = true + diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 0d4e0c9fa9..8d84666f2d 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -14,6 +14,7 @@ use crate::util::hash::Hash160; use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; pub mod chainstate; +pub mod net; /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); diff --git a/stacks-common/src/types/net.rs b/stacks-common/src/types/net.rs new file mode 100644 index 0000000000..45b6fb43ef --- /dev/null +++ b/stacks-common/src/types/net.rs @@ -0,0 +1,382 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This 
program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::str::FromStr; + +use serde::de::{Deserialize, Error as de_Error}; +use serde::ser::Serialize; + +use crate::util::hash::to_bin; + +#[derive(Debug)] +pub enum Error { + DecodeError(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::DecodeError(msg) => write!(f, "{}", &msg), + } + } +} + +impl std::error::Error for Error { + fn cause(&self) -> Option<&dyn std::error::Error> { + match self { + Error::DecodeError(_) => None, + } + } +} + +/// A container for an IPv4 or IPv6 address. +/// Rules: +/// -- If this is an IPv6 address, the octets are in network byte order +/// -- If this is an IPv4 address, the octets must encode an IPv6-to-IPv4-mapped address +pub struct PeerAddress(pub [u8; 16]); +impl_array_newtype!(PeerAddress, u8, 16); +impl_array_hexstring_fmt!(PeerAddress); +impl_byte_array_newtype!(PeerAddress, u8, 16); +impl_byte_array_message_codec!(PeerAddress, 16); + +impl Serialize for PeerAddress { + fn serialize(&self, s: S) -> Result { + let inst = format!("{}", self.to_socketaddr(0).ip()); + s.serialize_str(inst.as_str()) + } +} + +impl<'de> Deserialize<'de> for PeerAddress { + fn deserialize>(d: D) -> Result { + let inst = String::deserialize(d)?; + let ip = inst.parse::().map_err(de_Error::custom)?; + + Ok(PeerAddress::from_ip(&ip)) + } +} + +impl PeerAddress { + pub fn from_slice(bytes: &[u8]) -> Option { + if bytes.len() != 16 { + return None; + } + + let mut bytes16 = [0u8; 16]; + bytes16.copy_from_slice(&bytes[0..16]); + Some(PeerAddress(bytes16)) + } + + /// Is this an IPv4 address? + pub fn is_ipv4(&self) -> bool { + self.ipv4_octets().is_some() + } + + /// Get the octet representation of this peer address as an IPv4 address. + /// The last 4 bytes of the list contain the IPv4 address. + /// This method returns None if the bytes don't encode a valid IPv4-mapped address (i.e. ::ffff:0:0/96) + pub fn ipv4_octets(&self) -> Option<[u8; 4]> { + if self.0[0..12] + != [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, + ] + { + return None; + } + let mut ret = [0u8; 4]; + ret.copy_from_slice(&self.0[12..16]); + Some(ret) + } + + /// Return the bit representation of this peer address as an IPv4 address, in network byte + /// order. Return None if this is not an IPv4 address. 
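+    /// For example, the IPv4-mapped address ::ffff:127.0.0.1 yields Some(0x7f000001).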
+ pub fn ipv4_bits(&self) -> Option { + let octets_opt = self.ipv4_octets(); + if octets_opt.is_none() { + return None; + } + + let octets = octets_opt.unwrap(); + Some( + ((octets[0] as u32) << 24) + | ((octets[1] as u32) << 16) + | ((octets[2] as u32) << 8) + | (octets[3] as u32), + ) + } + + /// Convert to SocketAddr + pub fn to_socketaddr(&self, port: u16) -> SocketAddr { + if self.is_ipv4() { + SocketAddr::new( + IpAddr::V4(Ipv4Addr::new( + self.0[12], self.0[13], self.0[14], self.0[15], + )), + port, + ) + } else { + let addr_words: [u16; 8] = [ + ((self.0[0] as u16) << 8) | (self.0[1] as u16), + ((self.0[2] as u16) << 8) | (self.0[3] as u16), + ((self.0[4] as u16) << 8) | (self.0[5] as u16), + ((self.0[6] as u16) << 8) | (self.0[7] as u16), + ((self.0[8] as u16) << 8) | (self.0[9] as u16), + ((self.0[10] as u16) << 8) | (self.0[11] as u16), + ((self.0[12] as u16) << 8) | (self.0[13] as u16), + ((self.0[14] as u16) << 8) | (self.0[15] as u16), + ]; + + SocketAddr::new( + IpAddr::V6(Ipv6Addr::new( + addr_words[0], + addr_words[1], + addr_words[2], + addr_words[3], + addr_words[4], + addr_words[5], + addr_words[6], + addr_words[7], + )), + port, + ) + } + } + + /// Convert from socket address + pub fn from_socketaddr(addr: &SocketAddr) -> PeerAddress { + PeerAddress::from_ip(&addr.ip()) + } + + /// Convert from IP address + pub fn from_ip(addr: &IpAddr) -> PeerAddress { + match addr { + IpAddr::V4(ref addr) => { + let octets = addr.octets(); + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, + octets[0], octets[1], octets[2], octets[3], + ]) + } + IpAddr::V6(ref addr) => { + let words = addr.segments(); + PeerAddress([ + (words[0] >> 8) as u8, + (words[0] & 0xff) as u8, + (words[1] >> 8) as u8, + (words[1] & 0xff) as u8, + (words[2] >> 8) as u8, + (words[2] & 0xff) as u8, + (words[3] >> 8) as u8, + (words[3] & 0xff) as u8, + (words[4] >> 8) as u8, + (words[4] & 0xff) as u8, + (words[5] >> 8) as u8, + (words[5] & 0xff) as u8, + (words[6] >> 8) as u8, + (words[6] & 0xff) as u8, + (words[7] >> 8) as u8, + (words[7] & 0xff) as u8, + ]) + } + } + } + + /// Convert from ipv4 octets + pub fn from_ipv4(o1: u8, o2: u8, o3: u8, o4: u8) -> PeerAddress { + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, o1, o2, o3, o4, + ]) + } + + /// Is this the any-network address? i.e. 0.0.0.0 (v4) or :: (v6)? + pub fn is_anynet(&self) -> bool { + self.0 == [0x00; 16] || self == &PeerAddress::from_ipv4(0, 0, 0, 0) + } + + /// Is this a private IP address? 
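+    /// For IPv4-mapped addresses this means the RFC 1918 ranges (10.0.0.0/8, 172.16.0.0/12,
+    /// 192.168.0.0/16); for IPv6 it means any address whose first byte is 0xfc or greater.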
+ pub fn is_in_private_range(&self) -> bool { + if self.is_ipv4() { + // 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16 + self.0[12] == 10 + || (self.0[12] == 172 && self.0[13] >= 16 && self.0[13] <= 31) + || (self.0[12] == 192 && self.0[13] == 168) + } else { + self.0[0] >= 0xfc + } + } + + pub fn to_bin(&self) -> String { + to_bin(&self.0) + } +} + +/// Peer address variants for the Host: header +#[derive(Clone, PartialEq)] +pub enum PeerHost { + DNS(String, u16), + IP(PeerAddress, u16), +} + +impl fmt::Display for PeerHost { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + PeerHost::DNS(ref s, ref p) => write!(f, "{}:{}", s, p), + PeerHost::IP(ref a, ref p) => write!(f, "{}", a.to_socketaddr(*p)), + } + } +} + +impl fmt::Debug for PeerHost { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + PeerHost::DNS(ref s, ref p) => write!(f, "PeerHost::DNS({},{})", s, p), + PeerHost::IP(ref a, ref p) => write!(f, "PeerHost::IP({:?},{})", a, p), + } + } +} + +impl Hash for PeerHost { + fn hash(&self, state: &mut H) { + match *self { + PeerHost::DNS(ref name, ref port) => { + "DNS".hash(state); + name.hash(state); + port.hash(state); + } + PeerHost::IP(ref addrbytes, ref port) => { + "IP".hash(state); + addrbytes.hash(state); + port.hash(state); + } + } + } +} + +impl FromStr for PeerHost { + type Err = Error; + + fn from_str(header: &str) -> Result { + // we're looser than the RFC allows for DNS names -- anything that doesn't parse to an IP + // address will be parsed to a DNS name. + // try as IP:port + match header.parse::() { + Ok(socketaddr) => Ok(PeerHost::IP( + PeerAddress::from_socketaddr(&socketaddr), + socketaddr.port(), + )), + Err(_) => { + // maybe missing :port + let hostport = format!("{}:80", header); + match hostport.parse::() { + Ok(socketaddr) => Ok(PeerHost::IP( + PeerAddress::from_socketaddr(&socketaddr), + socketaddr.port(), + )), + Err(_) => { + // try as DNS-name:port + let host; + let port; + let parts: Vec<&str> = header.split(":").collect(); + if parts.len() == 0 { + return Err(Error::DecodeError( + "Failed to parse PeerHost: no parts".to_string(), + )); + } else if parts.len() == 1 { + // no port + host = Some(parts[0].to_string()); + port = Some(80); + } else { + let np = parts.len(); + if parts[np - 1].chars().all(char::is_numeric) { + // ends in :port + let host_str = parts[0..np - 1].join(":"); + if host_str.len() == 0 { + return Err(Error::DecodeError("Empty host".to_string())); + } + host = Some(host_str); + + let port_res = parts[np - 1].parse::(); + port = match port_res { + Ok(p) => Some(p), + Err(_) => { + return Err(Error::DecodeError( + "Failed to parse PeerHost: invalid port".to_string(), + )); + } + }; + } else { + // only host + host = Some(header.to_string()); + port = Some(80); + } + } + + match (host, port) { + (Some(h), Some(p)) => Ok(PeerHost::DNS(h, p)), + (_, _) => Err(Error::DecodeError( + "Failed to parse PeerHost: failed to extract host and/or port" + .to_string(), + )), // I don't think this is reachable + } + } + } + } + } + } +} + +impl PeerHost { + pub fn hostname(&self) -> String { + match *self { + PeerHost::DNS(ref s, _) => s.clone(), + PeerHost::IP(ref a, ref p) => format!("{}", a.to_socketaddr(*p).ip()), + } + } + + pub fn port(&self) -> u16 { + match *self { + PeerHost::DNS(_, ref p) => *p, + PeerHost::IP(_, ref p) => *p, + } + } + + pub fn from_host_port(host: String, port: u16) -> PeerHost { + // try as IP, and fall back to DNS + match host.parse::() { + Ok(addr) => 
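+                // the string parsed as a literal IP address, so use it directly with the caller-supplied port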
PeerHost::IP(PeerAddress::from_ip(&addr), port), + Err(_) => PeerHost::DNS(host, port), + } + } + + pub fn from_socketaddr(socketaddr: &SocketAddr) -> PeerHost { + PeerHost::IP(PeerAddress::from_socketaddr(socketaddr), socketaddr.port()) + } + + pub fn to_host_port(&self) -> (String, u16) { + match *self { + PeerHost::DNS(ref s, ref p) => (s.clone(), *p), + PeerHost::IP(ref i, ref p) => (format!("{}", i.to_socketaddr(0).ip()), *p), + } + } +} + +impl From for PeerHost { + fn from(addr: SocketAddr) -> PeerHost { + PeerHost::from_socketaddr(&addr) + } +} diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index 17afb25f05..bb1b869eee 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -20,6 +20,8 @@ use std::{error, fmt, io}; use crate::codec::MAX_MESSAGE_LEN; use crate::deps_common::httparse; +/// NOTE: it is imperative that the given Read and Write impls here _never_ fail with EWOULDBLOCK. + #[derive(Debug)] pub enum ChunkedError { DeserializeError(String), @@ -336,6 +338,10 @@ impl HttpChunkedTransferWriterState { corked: false, } } + + pub fn get_chunk_size(&self) -> usize { + self.chunk_size + } } pub struct HttpChunkedTransferWriter<'a, 'state, W: Write> { diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 5a8d68368a..97cbc4104f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -63,8 +63,8 @@ pub enum HexError { impl fmt::Display for HexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - HexError::BadLength(n) => write!(f, "bad length {} for sha256d hex string", n), - HexError::BadCharacter(c) => write!(f, "bad character {} in sha256d hex string", c), + HexError::BadLength(n) => write!(f, "bad length {} for hex string", n), + HexError::BadCharacter(c) => write!(f, "bad character {} for hex string", c), } } } @@ -75,8 +75,8 @@ impl error::Error for HexError { } fn description(&self) -> &str { match *self { - HexError::BadLength(_) => "sha256d hex string non-64 length", - HexError::BadCharacter(_) => "sha256d bad hex character", + HexError::BadLength(_) => "hex string non-64 length", + HexError::BadCharacter(_) => "bad hex character", } } } diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index c07ad4dbe1..d850826fd4 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -220,6 +220,11 @@ impl PipeWrite { Ok(buf.len()) } + /// How many bytes are pending? + pub fn pending(&self) -> usize { + self.buf.as_ref().map(|b| b.len()).unwrap_or(0) + } + /// Try and flush all data to the reader. /// Return True if we succeeded; False if not. 
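Looking back at the PeerHost type added in stacks-common/src/types/net.rs above, a minimal sketch of how its FromStr impl resolves typical Host: header values (illustrative only, not from the patch; assumes the derived PartialEq shown in the new file):

    use std::str::FromStr;

    // A bare DNS name falls back to port 80; an explicit :port is honored.
    assert_eq!(
        PeerHost::from_str("example.com").unwrap(),
        PeerHost::DNS("example.com".to_string(), 80)
    );
    assert_eq!(
        PeerHost::from_str("example.com:8080").unwrap(),
        PeerHost::DNS("example.com".to_string(), 8080)
    );
    // Anything that parses as an IP becomes PeerHost::IP; a bare IP also defaults to port 80.
    assert_eq!(
        PeerHost::from_str("127.0.0.1").unwrap(),
        PeerHost::IP(PeerAddress::from_ipv4(127, 0, 0, 1), 80)
    );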
pub fn try_flush(&mut self) -> io::Result { diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index c21c3d78cc..ab0e6649a3 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -2,10 +2,12 @@ use std::io::{self, Read}; use std::net::SocketAddr; use std::path::PathBuf; -use crate::config::Network; use clap::Parser; use clarity::vm::types::QualifiedContractIdentifier; -use stacks_common::{address::b58, types::chainstate::StacksPrivateKey}; +use stacks_common::address::b58; +use stacks_common::types::chainstate::StacksPrivateKey; + +use crate::config::Network; extern crate alloc; diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index f198cf4044..d634dd0cdd 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -14,27 +14,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::convert::TryFrom; +use std::fs; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::path::PathBuf; +use std::time::Duration; + use blockstack_lib::chainstate::stacks::TransactionVersion; use clarity::vm::types::QualifiedContractIdentifier; use hashbrown::HashMap; use p256k1::ecdsa; use p256k1::scalar::Scalar; use serde::Deserialize; -use stacks_common::{ - address::{ - AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - }, - consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}, - types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}, -}; -use std::{ - convert::TryFrom, - fs, - net::{SocketAddr, ToSocketAddrs}, - path::PathBuf, - time::Duration, +use stacks_common::address::{ + AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use wsts::state_machine::PublicKeys; /// List of key_ids for each signer_id diff --git a/stacks-signer/src/stacks_client.rs b/stacks-signer/src/stacks_client.rs index 700858cfb4..0621df4b09 100644 --- a/stacks-signer/src/stacks_client.rs +++ b/stacks-signer/src/stacks_client.rs @@ -1,33 +1,25 @@ use std::time::Duration; use bincode::Error as BincodeError; -use blockstack_lib::{ - burnchains::Txid, - chainstate::stacks::{ - StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, - TransactionContractCall, TransactionPayload, TransactionPostConditionMode, - TransactionSpendingCondition, TransactionVersion, - }, -}; -use clarity::vm::{ - types::{serialization::SerializationError, QualifiedContractIdentifier, SequenceData}, - Value as ClarityValue, {ClarityName, ContractName}, +use blockstack_lib::burnchains::Txid; +use blockstack_lib::chainstate::stacks::{ + StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSpendingCondition, TransactionVersion, }; +use clarity::vm::types::serialization::SerializationError; +use clarity::vm::types::{QualifiedContractIdentifier, SequenceData}; +use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use hashbrown::HashMap; use libsigner::{RPCError, SignerSession, StackerDBSession}; use libstackerdb::{Error as StackerDBError, StackerDBChunkAckData, StackerDBChunkData}; use serde_json::json; use slog::{slog_debug, slog_warn}; -use stacks_common::{ - codec::StacksMessageCodec, - debug, - 
types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}, - warn, -}; -use wsts::{ - net::{Message, Packet}, - Point, Scalar, -}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::{debug, warn}; +use wsts::net::{Message, Packet}; +use wsts::{Point, Scalar}; use crate::config::Config; @@ -466,11 +458,9 @@ fn slot_id(id: u32, message: &Message) -> u32 { #[cfg(test)] mod tests { - use std::{ - io::{BufWriter, Read, Write}, - net::{SocketAddr, TcpListener}, - thread::spawn, - }; + use std::io::{BufWriter, Read, Write}; + use std::net::{SocketAddr, TcpListener}; + use std::thread::spawn; use super::*; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 999d145828..4c63a28968 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5569,6 +5569,7 @@ impl<'a> SortitionHandleTx<'a> { let winner = hash_tied .first() .expect("FATAL: zero-length list of tied block IDs"); + let winner_index = *mapping .get(&winner) .expect("FATAL: winning block ID not mapped"); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index e362cbbd85..25dcdc9f33 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -23,7 +23,6 @@ use std::{cmp, fmt, fs, io}; pub use clarity::vm::analysis::errors::{CheckError, CheckErrors}; use clarity::vm::analysis::run_analysis; -use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::AssetMap; use clarity::vm::contracts::Contract; @@ -68,8 +67,7 @@ use crate::core::*; use crate::cost_estimates::EstimatorError; use crate::monitoring::{set_last_block_transaction_count, set_last_execution_cost_observed}; use crate::net::relay::Relayer; -use crate::net::stream::{BlockStreamData, HeaderStreamData, MicroblockStreamData, Streamer}; -use crate::net::{BlocksInvData, Error as net_error, ExtendedStacksHeader}; +use crate::net::{BlocksInvData, Error as net_error}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{ query_count, query_int, query_row, query_row_columns, query_row_panic, query_rows, @@ -1158,6 +1156,15 @@ impl StacksChainState { ) -> Result, Error> { let parent_index_hash = StacksBlockHeader::make_index_block_hash(parent_consensus_hash, parent_block_hash); + Self::load_staging_microblock_indexed(blocks_conn, &parent_index_hash, microblock_hash) + } + + /// Load up a preprocessed microblock given the index block hash of the anchored parent + pub fn load_staging_microblock_indexed( + blocks_conn: &DBConn, + parent_index_hash: &StacksBlockId, + microblock_hash: &BlockHeaderHash, + ) -> Result, Error> { match StacksChainState::load_staging_microblock_info( blocks_conn, &parent_index_hash, @@ -2905,6 +2912,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. 
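+    /// (Now only exercised by tests: the streaming code that called this in production is removed in this change.)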
+ #[cfg(test)] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2936,458 +2944,29 @@ impl StacksChainState { Ok(microblock_info) } - /// Write header data to the fd - fn write_stream_data( - fd: &mut W, - stream: &mut S, - input: &mut R, - count: u64, - ) -> Result { - let mut buf = vec![0u8; count as usize]; - let nr = input.read(&mut buf).map_err(Error::ReadError)?; - fd.write_all(&buf[0..nr]).map_err(Error::WriteError)?; - - stream.add_bytes(nr as u64); - - Ok(nr as u64) - } - - /// Stream header data from one Read to one Write - fn stream_data( - fd: &mut W, - stream: &mut S, - input: &mut R, - count: u64, - ) -> Result { - input - .seek(SeekFrom::Start(stream.offset())) - .map_err(Error::ReadError)?; - - StacksChainState::write_stream_data(fd, stream, input, count) - } - - /// Stream a single header's data from disk - /// If this method returns 0, it's because we're EOF on the header and should begin the next. - /// - /// The data streamed to `fd` is meant to be part of a JSON array. The header data will be - /// encoded as JSON, and a `,` will be written after it if there are more headers to follow. - /// The caller is responsible for writing `[` before writing headers, and writing `]` after all - /// headers have been written. - /// - /// Returns the number of bytes written - pub fn stream_one_header( - blocks_conn: &DBConn, - block_path: &str, - fd: &mut W, - stream: &mut HeaderStreamData, - count: u64, - ) -> Result { - if stream.header_bytes.is_none() && stream.num_headers > 0 { - let header = - StacksChainState::load_block_header_indexed(block_path, &stream.index_block_hash)? - .ok_or(Error::NoSuchBlockError)?; - - let header_info = - StacksChainState::load_staging_block_info(blocks_conn, &stream.index_block_hash)? - .ok_or(Error::NoSuchBlockError)?; - - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &header_info.parent_consensus_hash, - &header_info.parent_anchored_block_hash, - ); - - let mut header_bytes = vec![]; - let extended_header = ExtendedStacksHeader { - consensus_hash: header_info.consensus_hash, - header: header, - parent_block_id: parent_index_block_hash, - }; - - serde_json::to_writer(&mut header_bytes, &extended_header).map_err(|e| { - Error::NetError(net_error::SerializeError(format!( - "Failed to send as JSON: {:?}", - &e - ))) - })?; - - if stream.num_headers > 1 { - header_bytes.push(',' as u8); - } - - test_debug!( - "header_bytes: {}", - String::from_utf8(header_bytes.clone()).unwrap() - ); - - stream.header_bytes = Some(header_bytes); - stream.offset = 0; - } - - if stream.header_bytes.is_some() { - let header_bytes = stream - .header_bytes - .take() - .expect("Do not have header bytes and did not set them"); - let res = (|| { - if stream.offset >= (header_bytes.len() as u64) { - // EOF - return Ok(0); - } - - let num_bytes = StacksChainState::write_stream_data( - fd, - stream, - &mut &header_bytes[(stream.offset as usize)..], - count, - )?; - test_debug!( - "Stream header hash={} offset={} total_bytes={}, num_bytes={} num_headers={}", - &stream.index_block_hash, - stream.offset, - stream.total_bytes, - num_bytes, - stream.num_headers - ); - Ok(num_bytes) - })(); - stream.header_bytes = Some(header_bytes); - res - } else { - Ok(0) - } - } - - /// Stream multiple headers from disk, moving in reverse order from the chain tip back. - /// The format will be a JSON array. - /// Returns total number of bytes written (will be equal to the number of bytes read). 
- /// Returns 0 if we run out of headers - pub fn stream_headers( - &self, - fd: &mut W, - stream: &mut HeaderStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = match StacksChainState::stream_one_header( - &self.db(), - &self.blocks_path, - fd, - stream, - to_write, - ) { - Ok(nw) => nw, - Err(Error::DBError(db_error::NotFoundError)) => { - // out of headers - debug!( - "No more header to stream after {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - Err(e) => { - return Err(e); - } - }; - - if nw == 0 { - if stream.num_headers == 0 { - // out of headers - debug!( - "No more header to stream after {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - - // EOF on header; move to the next one (its parent) - let header_info = match StacksChainState::load_staging_block_info( - &self.db(), - &stream.index_block_hash, - )? { - Some(x) => x, - None => { - // out of headers - debug!( - "Out of headers to stream after block {}", - &stream.index_block_hash - ); - stream.header_bytes = None; - stream.end_of_stream = true; - break; - } - }; - - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &header_info.parent_consensus_hash, - &header_info.parent_anchored_block_hash, - ); - - stream.index_block_hash = parent_index_block_hash; - stream.num_headers = stream - .num_headers - .checked_sub(1) - .expect("BUG: streamed more headers than called for"); + /// Read one header for the purposes of streaming. + pub fn read_extended_header( + db: &DBConn, + blocks_path: &str, + index_block_hash: &StacksBlockId, + ) -> Result { + let header = StacksChainState::load_block_header_indexed(blocks_path, index_block_hash)? + .ok_or(Error::NoSuchBlockError)?; - stream.header_bytes = None; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } + let header_info = StacksChainState::load_staging_block_info(db, index_block_hash)? + .ok_or(Error::NoSuchBlockError)?; - debug!( - "Streaming header={}: to_write={}, nw={}", - &stream.index_block_hash, to_write, nw - ); - } - debug!( - "Streamed headers ({} remaining): {} - {} = {}", - stream.num_headers, - count, - to_write, - count - to_write + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &header_info.parent_consensus_hash, + &header_info.parent_anchored_block_hash, ); - Ok(count - to_write) - } - /// Stream a single microblock's data from the staging database. - /// If this method returns 0, it's because we're EOF on the blob. - pub fn stream_one_microblock( - blocks_conn: &DBConn, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let rowid = match stream.rowid { - None => { - // need to get rowid in order to get the blob - match StacksChainState::stream_microblock_get_rowid( - blocks_conn, - &stream.parent_index_block_hash, - &stream.microblock_hash, - )? 
{ - Some(rid) => rid, - None => { - test_debug!("Microblock hash={:?} not in DB", &stream.microblock_hash,); - return Err(Error::NoSuchBlockError); - } - } - } - Some(rid) => rid, + let extended_header = ExtendedStacksHeader { + consensus_hash: header_info.consensus_hash, + header: header, + parent_block_id: parent_index_block_hash, }; - - stream.rowid = Some(rowid); - let mut blob = blocks_conn - .blob_open( - DatabaseName::Main, - "staging_microblocks_data", - "block_data", - rowid, - true, - ) - .map_err(|e| { - match e { - sqlite_error::SqliteFailure(_, _) => { - // blob got moved out of staging - Error::NoSuchBlockError - } - _ => Error::DBError(db_error::SqliteError(e)), - } - })?; - - let num_bytes = StacksChainState::stream_data(fd, stream, &mut blob, count)?; - test_debug!( - "Stream microblock rowid={} hash={} offset={} total_bytes={}, num_bytes={}", - rowid, - &stream.microblock_hash, - stream.offset, - stream.total_bytes, - num_bytes - ); - Ok(num_bytes) - } - - /// Stream multiple microblocks from staging, moving in reverse order from the stream tail to the stream head. - /// Returns total number of bytes written (will be equal to the number of bytes read). - /// Returns 0 if we run out of microblocks in the staging db - pub fn stream_microblocks_confirmed( - chainstate: &StacksChainState, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = - StacksChainState::stream_one_microblock(&chainstate.db(), fd, stream, to_write)?; - if nw == 0 { - // EOF on microblock blob; move to the next one (its parent) - let mblock_info = match StacksChainState::load_staging_microblock_info( - &chainstate.db(), - &stream.parent_index_block_hash, - &stream.microblock_hash, - )? { - Some(x) => x, - None => { - // out of mblocks - debug!( - "Out of microblocks to stream after confirmed microblock {}", - &stream.microblock_hash - ); - break; - } - }; - - let rowid = match StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), - &stream.parent_index_block_hash, - &mblock_info.parent_hash, - )? { - Some(rid) => rid, - None => { - // out of mblocks - debug!( - "No rowid found for confirmed stream microblock {}", - &mblock_info.parent_hash - ); - break; - } - }; - - stream.offset = 0; - stream.rowid = Some(rowid); - stream.microblock_hash = mblock_info.parent_hash; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } - debug!( - "Streaming microblock={}: to_write={}, nw={}", - &stream.microblock_hash, to_write, nw - ); - } - debug!( - "Streamed confirmed microblocks: {} - {} = {}", - count, - to_write, - count - to_write - ); - Ok(count - to_write) - } - - /// Stream block data from the chunk store. - pub fn stream_data_from_chunk_store( - blocks_path: &str, - fd: &mut W, - stream: &mut BlockStreamData, - count: u64, - ) -> Result { - let block_path = - StacksChainState::get_index_block_path(blocks_path, &stream.index_block_hash)?; - - // The reason we open a file on each call to stream data is because we don't want to - // exhaust the supply of file descriptors. Maybe a future version of this code will do - // something like cache the set of open files so we don't have to keep re-opening them. 
- let mut file_fd = fs::OpenOptions::new() - .read(true) - .write(false) - .create(false) - .truncate(false) - .open(&block_path) - .map_err(|e| { - if e.kind() == io::ErrorKind::NotFound { - error!("File not found: {:?}", &block_path); - Error::NoSuchBlockError - } else { - Error::ReadError(e) - } - })?; - - StacksChainState::stream_data(fd, stream, &mut file_fd, count) - } - - /// Stream block data from the chain state. - /// Returns the number of bytes written, and updates `stream` to point to the next point to - /// read. Writes the bytes streamed to `fd`. - pub fn stream_block( - &mut self, - fd: &mut W, - stream: &mut BlockStreamData, - count: u64, - ) -> Result { - StacksChainState::stream_data_from_chunk_store(&self.blocks_path, fd, stream, count) - } - - /// Stream unconfirmed microblocks from the staging DB. Pull only from the staging DB. - /// Returns the number of bytes written, and updates `stream` to point to the next point to - /// read. Wrties the bytes streamed to `fd`. - pub fn stream_microblocks_unconfirmed( - chainstate: &StacksChainState, - fd: &mut W, - stream: &mut MicroblockStreamData, - count: u64, - ) -> Result { - let mut to_write = count; - while to_write > 0 { - let nw = - StacksChainState::stream_one_microblock(&chainstate.db(), fd, stream, to_write)?; - if nw == 0 { - // EOF on microblock blob; move to the next one - let next_seq = match stream.seq { - u16::MAX => { - return Err(Error::NoSuchBlockError); - } - x => x + 1, - }; - let next_mblock_hash = match StacksChainState::load_next_descendant_microblock( - &chainstate.db(), - &stream.index_block_hash, - next_seq, - )? { - Some(mblock) => { - test_debug!( - "Switch to {}-{} ({})", - &stream.index_block_hash, - &mblock.block_hash(), - next_seq - ); - mblock.block_hash() - } - None => { - // EOF on stream - break; - } - }; - - let rowid = match StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), - &stream.parent_index_block_hash, - &next_mblock_hash, - )? 
{ - Some(rid) => rid, - None => { - // out of mblocks - break; - } - }; - - stream.offset = 0; - stream.rowid = Some(rowid); - stream.microblock_hash = next_mblock_hash; - stream.seq = next_seq; - } else { - to_write = to_write - .checked_sub(nw) - .expect("BUG: wrote more data than called for"); - } - } - Ok(count - to_write) + Ok(extended_header) } /// Check whether or not there exists a Stacks block at or higher @@ -7146,7 +6725,6 @@ pub mod test { use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::UnitEstimator; use crate::net::test::*; - use crate::net::ExtendedStacksHeader; use crate::util_lib::db::{Error as db_error, *}; pub fn make_empty_coinbase_block(mblock_key: &StacksPrivateKey) -> StacksBlock { diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e96ef18ddf..5e1996f3bb 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -36,9 +36,12 @@ use clarity::vm::types::TupleData; use clarity::vm::Value; use rusqlite::types::ToSql; use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use serde::de::Error as de_Error; +use serde::Deserialize; +use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash}; use stacks_common::util; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{hex_bytes, to_hex}; use crate::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddress}; use crate::burnchains::{Address, Burnchain, BurnchainParameters, PoxConstants}; @@ -71,7 +74,7 @@ use crate::clarity_vm::database::HeadersDBConn; use crate::core::*; use crate::monitoring; use crate::net::atlas::BNS_CHARS_REGEX; -use crate::net::{Error as net_error, MemPoolSyncData}; +use crate::net::Error as net_error; use crate::util_lib::boot::{boot_code_acc, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_count, query_row, tx_begin_immediate, tx_busy_handler, DBConn, DBTx, Error as db_error, @@ -183,6 +186,57 @@ pub struct StacksEpochReceipt { pub epoch_transition: bool, } +/// Headers we serve over the network +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ExtendedStacksHeader { + pub consensus_hash: ConsensusHash, + #[serde( + serialize_with = "ExtendedStacksHeader_StacksBlockHeader_serialize", + deserialize_with = "ExtendedStacksHeader_StacksBlockHeader_deserialize" + )] + pub header: StacksBlockHeader, + pub parent_block_id: StacksBlockId, +} + +/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string +fn ExtendedStacksHeader_StacksBlockHeader_serialize( + header: &StacksBlockHeader, + s: S, +) -> Result { + let bytes = header.serialize_to_vec(); + let header_hex = to_hex(&bytes); + s.serialize_str(&header_hex.as_str()) +} + +/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string +fn ExtendedStacksHeader_StacksBlockHeader_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let header_hex = String::deserialize(d)?; + let header_bytes = hex_bytes(&header_hex).map_err(de_Error::custom)?; + StacksBlockHeader::consensus_deserialize(&mut &header_bytes[..]).map_err(de_Error::custom) +} + +impl StacksMessageCodec for ExtendedStacksHeader { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.header)?; + write_next(fd, &self.parent_block_id)?; + 
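+        // The wire encoding is just these three fields in order; consensus_deserialize below reads them back the same way.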
Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let ch = read_next(fd)?; + let bh = read_next(fd)?; + let pbid = read_next(fd)?; + Ok(ExtendedStacksHeader { + consensus_hash: ch, + header: bh, + parent_block_id: pbid, + }) + } +} + #[derive(Debug, Clone, PartialEq)] pub struct DBConfig { pub version: String, @@ -1536,6 +1590,20 @@ impl StacksChainState { ) } + /// Re-open the chainstate DB + pub fn reopen_db(&self) -> Result { + let path = PathBuf::from(self.root_path.clone()); + let header_index_root_path = StacksChainState::header_index_root_path(path); + let header_index_root = header_index_root_path + .to_str() + .ok_or_else(|| Error::DBError(db_error::ParseError))? + .to_string(); + + let state_index = + StacksChainState::open_db(self.mainnet, self.chain_id, &header_index_root)?; + Ok(state_index.into_sqlite_conn()) + } + pub fn blocks_path(mut path: PathBuf) -> PathBuf { path.push("blocks"); path diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 1f01d19f3b..6cec632373 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1537,4 +1537,9 @@ impl MARF { pub fn get_root_hash_at(&mut self, block_hash: &T) -> Result { self.storage.connection().get_root_hash_at(block_hash) } + + /// Convert to the inner sqlite connection + pub fn into_sqlite_conn(self) -> Connection { + self.storage.into_sqlite_conn() + } } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index c686e24043..c39976419e 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -863,7 +863,7 @@ impl TrieRAM { for j in 0..node_data.len() { let next_node = &mut self.data[node_data[j] as usize].0; if !next_node.is_leaf() { - let mut ptrs = next_node.ptrs_mut(); + let ptrs = next_node.ptrs_mut(); let num_children = ptrs.len(); for k in 0..num_children { if ptrs[k].id != TrieNodeID::Empty as u8 && !is_backptr(ptrs[k].id) { @@ -1369,6 +1369,10 @@ impl TrieFileStorage { tx_begin_immediate(&mut self.db) } + pub fn into_sqlite_conn(self) -> Connection { + self.db + } + fn open_opts( db_path: &str, readonly: bool, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index c8e5aba782..f7f1243d9c 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1334,4 +1334,46 @@ pub mod test { txs: txs_anchored, } } + + pub fn make_codec_test_microblock(num_txs: usize) -> StacksMicroblock { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + ); + let all_txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::OffChainOnly, + &TransactionPostConditionMode::Allow, + ); + + let txs_mblock: Vec<_> = all_txs.into_iter().take(num_txs).collect(); + let txid_vecs = txs_mblock + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + + let mut header = StacksMicroblockHeader { + version: 6, + sequence: 1, + prev_block: BlockHeaderHash([0x11; 32]), + tx_merkle_root, + signature: MessageSignature::empty(), + }; + + 
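+        // Sign the test header with the fixed private key so the microblock is well-formed for codec tests.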
header.sign(&privk).unwrap(); + StacksMicroblock { + header: header, + txs: txs_mblock, + } + } } diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 78f7b3cea3..0be3d75f6b 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -246,7 +246,7 @@ impl TestMinerTrace { for miner_id in p.miner_node_map.keys() { if let Some(test_name) = p.miner_node_map.get(miner_id) { if !all_test_names.contains(test_name) { - all_test_names.insert(test_name.clone()); + all_test_names.insert(test_name.to_owned()); } } } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 780e8b70fd..b853401fd9 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -16,12 +16,12 @@ use std::cmp::{self, Ordering}; use std::collections::{HashMap, HashSet, VecDeque}; -use std::fs; use std::hash::Hasher; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::time::Instant; +use std::{fs, io}; use clarity::vm::types::PrincipalData; use rand::distributions::Uniform; @@ -32,9 +32,12 @@ use rusqlite::{ NO_PARAMS, }; use siphasher::sip::SipHasher; // this is SipHash-2-4 -use stacks_common::codec::{Error as codec_error, StacksMessageCodec}; +use stacks_common::codec::{ + read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, +}; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::retry::{BoundReader, RetryReader}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::Txid; @@ -55,8 +58,7 @@ use crate::core::{ use crate::cost_estimates::metrics::{CostMetric, UnitMetric}; use crate::cost_estimates::{CostEstimator, EstimatorError, UnitEstimator}; use crate::monitoring::increment_stx_mempool_gc; -use crate::net::stream::TxStreamData; -use crate::net::MemPoolSyncData; +use crate::net::Error as net_error; use crate::util_lib::bloom::{BloomCounter, BloomFilter, BloomNodeHasher}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_rows, sql_pragma, sqlite_open, table_exists, @@ -125,6 +127,167 @@ impl StacksMessageCodec for TxTag { } } +define_u8_enum!(MemPoolSyncDataID { + BloomFilter = 0x01, + TxTags = 0x02 +}); + +#[derive(Debug, Clone, PartialEq)] +pub enum MemPoolSyncData { + BloomFilter(BloomFilter), + TxTags([u8; 32], Vec), +} + +impl StacksMessageCodec for MemPoolSyncData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + match *self { + MemPoolSyncData::BloomFilter(ref bloom_filter) => { + write_next(fd, &MemPoolSyncDataID::BloomFilter.to_u8())?; + write_next(fd, bloom_filter)?; + } + MemPoolSyncData::TxTags(ref seed, ref tags) => { + write_next(fd, &MemPoolSyncDataID::TxTags.to_u8())?; + write_next(fd, seed)?; + write_next(fd, tags)?; + } + } + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let data_id: u8 = read_next(fd)?; + match MemPoolSyncDataID::from_u8(data_id).ok_or(codec_error::DeserializeError(format!( + "Unrecognized MemPoolSyncDataID {}", + &data_id + )))? 
{ + MemPoolSyncDataID::BloomFilter => { + let bloom_filter: BloomFilter = read_next(fd)?; + Ok(MemPoolSyncData::BloomFilter(bloom_filter)) + } + MemPoolSyncDataID::TxTags => { + let seed: [u8; 32] = read_next(fd)?; + let txtags: Vec = read_next(fd)?; + Ok(MemPoolSyncData::TxTags(seed, txtags)) + } + } + } +} + +/// Read the trailing page ID from a transaction stream +fn parse_mempool_query_page_id( + pos: usize, + retry_reader: &mut RetryReader<'_, R>, +) -> Result, net_error> { + // possibly end-of-transactions, in which case, the last 32 bytes should be + // a page ID. Expect end-of-stream after this. + retry_reader.set_position(pos); + let next_page: Txid = match read_next(retry_reader) { + Ok(txid) => txid, + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if pos == retry_reader.position() { + // this is fine -- the node didn't get another page + return Ok(None); + } else { + // partial data -- corrupt stream + test_debug!("Unexpected EOF: {} != {}", pos, retry_reader.position()); + return Err(e.into()); + } + } + _ => { + return Err(e.into()); + } + }, + e => { + return Err(e.into()); + } + }, + }; + + test_debug!("Read page_id {:?}", &next_page); + Ok(Some(next_page)) +} + +/// Decode a transaction stream, returned from /v2/mempool/query. +/// The wire format is a list of transactions (no SIP-003 length prefix), followed by an +/// optional 32-byte page ID. Obtain both the transactions and page ID, if it exists. +pub fn decode_tx_stream( + fd: &mut R, +) -> Result<(Vec, Option), net_error> { + // The wire format is `tx, tx, tx, tx, .., tx, txid`. + // The last 32 bytes are the page ID for the next mempool query. + // NOTE: there will be no length prefix on this. + let mut txs: Vec = vec![]; + let mut bound_reader = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + let mut retry_reader = RetryReader::new(&mut bound_reader); + let mut page_id = None; + let mut expect_eof = false; + + loop { + let pos = retry_reader.position(); + let next_msg: Result = read_next(&mut retry_reader); + match next_msg { + Ok(tx) => { + if expect_eof { + // this should have failed + test_debug!("Expected EOF; got transaction {}", tx.txid()); + return Err(net_error::ExpectedEndOfStream); + } + + test_debug!("Read transaction {}", tx.txid()); + txs.push(tx); + Ok(()) + } + Err(e) => match e { + codec_error::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + if expect_eof { + if pos != retry_reader.position() { + // read partial data. The stream is corrupt. + test_debug!( + "Expected EOF; stream advanced from {} to {}", + pos, + retry_reader.position() + ); + return Err(net_error::ExpectedEndOfStream); + } + } else { + // couldn't read a full transaction. This is possibly a page ID, whose + // 32 bytes decode to the prefix of a well-formed transaction. + test_debug!("Try to read page ID trailer after ReadError"); + page_id = parse_mempool_query_page_id(pos, &mut retry_reader)?; + } + break; + } + _ => Err(e), + }, + codec_error::DeserializeError(_msg) => { + if expect_eof { + // this should have failed due to EOF + test_debug!("Expected EOF; got DeserializeError '{}'", &_msg); + return Err(net_error::ExpectedEndOfStream); + } + + // failed to parse a transaction. This is possibly a page ID. + test_debug!("Try to read page ID trailer after ReadError"); + page_id = parse_mempool_query_page_id(pos, &mut retry_reader)?; + + // do one more pass to make sure we're actually end-of-stream. 
+ // otherwise, the stream itself was corrupt, since any 32 bytes is a valid + // txid and the presence of more bytes means that we simply got a bad tx + // that we couldn't decode. + expect_eof = true; + Ok(()) + } + _ => Err(e), + }, + }?; + } + + Ok((txs, page_id)) +} + pub struct MemPoolAdmitter { cur_block: BlockHeaderHash, cur_consensus_hash: ConsensusHash, @@ -1187,6 +1350,21 @@ impl MemPoolDB { }) } + pub fn reopen(&self, readwrite: bool) -> Result { + if let Err(e) = fs::metadata(&self.path) { + return Err(db_error::IOError(e)); + } + + let open_flags = if readwrite { + OpenFlags::SQLITE_OPEN_READ_WRITE + } else { + OpenFlags::SQLITE_OPEN_READ_ONLY + }; + + let conn = sqlite_open(&self.path, open_flags, true)?; + Ok(conn) + } + /// Open the mempool db within the chainstate directory. /// The chainstate must be instantiated already. pub fn open( @@ -2420,6 +2598,24 @@ impl MemPoolDB { query_row(&self.conn(), sql, args) } + pub fn find_next_missing_transactions( + &self, + data: &MemPoolSyncData, + height: u64, + last_randomized_txid: &Txid, + max_txs: u64, + max_run: u64, + ) -> Result<(Vec, Option, u64), db_error> { + Self::static_find_next_missing_transactions( + self.conn(), + data, + height, + last_randomized_txid, + max_txs, + max_run, + ) + } + /// Get the next batch of transactions from our mempool that are *not* represented in the given /// MemPoolSyncData. Transactions are ordered lexicographically by randomized_txids.hashed_txid, since this allows us /// to use the txid as a cursor while ensuring that each node returns txids in a deterministic random order @@ -2427,8 +2623,8 @@ impl MemPoolDB { /// a requesting node will still have a good chance of getting something useful). /// Also, return the next value to pass for `last_randomized_txid` to load the next page. /// Also, return the number of rows considered. - pub fn find_next_missing_transactions( - &self, + pub fn static_find_next_missing_transactions( + conn: &DBConn, data: &MemPoolSyncData, height: u64, last_randomized_txid: &Txid, @@ -2458,7 +2654,7 @@ impl MemPoolDB { } } - let mut stmt = self.conn().prepare(sql)?; + let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; let mut num_rows_visited = 0; let mut next_page = None; @@ -2503,130 +2699,4 @@ impl MemPoolDB { Ok((ret, next_page, num_rows_visited)) } - - /// Stream transaction data. - /// Send back one transaction at a time. - pub fn stream_txs( - &self, - fd: &mut W, - query: &mut TxStreamData, - count: u64, - ) -> Result { - let mut num_written = 0; - while num_written < count { - // write out bufferred tx - let start = query.tx_buf_ptr; - let end = cmp::min(query.tx_buf.len(), ((start as u64) + count) as usize); - fd.write_all(&query.tx_buf[start..end]) - .map_err(ChainstateError::WriteError)?; - - let nw = end.saturating_sub(start) as u64; - - query.tx_buf_ptr = end; - num_written += nw; - - if query.tx_buf_ptr >= query.tx_buf.len() { - if query.corked { - // we're done - test_debug!( - "Finished streaming txs; last page was {:?}", - &query.last_randomized_txid - ); - break; - } - - if query.num_txs >= query.max_txs { - // no more space in this stream - debug!( - "No more space in this query after {:?}. 
Corking tx stream.", - &query.last_randomized_txid - ); - - // send the next page ID - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - - query - .last_randomized_txid - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - continue; - } - - // load next - let remaining = query.max_txs.saturating_sub(query.num_txs); - let (next_txs, next_last_randomized_txid_opt, num_rows_visited) = self - .find_next_missing_transactions( - &query.tx_query, - query.height, - &query.last_randomized_txid, - 1, - remaining, - )?; - - debug!( - "Streaming mempool propagation stepped"; - "rows_visited" => num_rows_visited, - "last_rand_txid" => %query.last_randomized_txid, - "num_txs" => query.num_txs, - "max_txs" => query.max_txs - ); - - query.num_txs += num_rows_visited; - if next_txs.len() > 0 { - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - - for next_tx in next_txs.iter() { - next_tx - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - } - if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { - query.last_randomized_txid = next_last_randomized_txid; - } else { - test_debug!( - "No more txs after {}", - &next_txs - .last() - .map(|tx| tx.txid()) - .unwrap_or(Txid([0u8; 32])) - ); - break; - } - } else if let Some(next_txid) = next_last_randomized_txid_opt { - test_debug!( - "No rows returned for {}; cork tx stream with next page {}", - &query.last_randomized_txid, - &next_txid - ); - - // no rows found - query.last_randomized_txid = next_txid; - - // send the next page ID - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - - query - .last_randomized_txid - .consensus_serialize(&mut query.tx_buf) - .map_err(ChainstateError::CodecError)?; - } else if next_last_randomized_txid_opt.is_none() { - // no more transactions - test_debug!( - "No more txs to send after {:?}; corking stream", - &query.last_randomized_txid - ); - - query.tx_buf_ptr = 0; - query.tx_buf.clear(); - query.corked = true; - } - } - } - Ok(num_written) - } } diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 88da0d86f9..8902ff4cb8 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -58,12 +58,11 @@ use crate::chainstate::stacks::{ C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::core::mempool::{ - db_get_all_nonces, MemPoolWalkSettings, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, - MAX_BLOOM_COUNTER_TXS, + db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, TxTag, BLOOM_COUNTER_DEPTH, + BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use crate::net::stream::StreamCursor; -use crate::net::{Error as NetError, HttpResponseType, MemPoolSyncData}; +use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; use crate::util_lib::bloom::*; use crate::util_lib::db::{tx_begin_immediate, DBConn, FromRow}; @@ -2429,352 +2428,6 @@ fn test_find_next_missing_transactions() { assert!(next_page_opt.is_none()); } -#[test] -fn test_stream_txs() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let chainstate_path = chainstate_path(function_name!()); - let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - - let addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - let mut txs = vec![]; - let block_height = 10; - let mut total_len = 0; - - let mut 
mempool_tx = mempool.tx_begin().unwrap(); - for i in 0..10 { - let pk = StacksPrivateKey::new(); - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - total_len += tx_bytes.len(); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &ConsensusHash([0x1 + (block_height as u8); 20]), - &BlockHeaderHash([0x2 + (block_height as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - block_height as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - txs.push(tx); - } - mempool_tx.commit().unwrap(); - - let mut buf = vec![]; - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], vec![]), - MAX_BLOOM_COUNTER_TXS.into(), - block_height, - Some(Txid([0u8; 32])), - ); - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - eprintln!("Read {} bytes of tx data", buf.len()); - - // buf decodes to the list of txs we have - let mut decoded_txs = vec![]; - let mut ptr = &buf[..]; - loop { - let tx: StacksTransaction = match read_next::(&mut ptr) { - Ok(tx) => tx, - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - eprintln!("out of transactions"); - break; - } - _ => { - panic!("IO error: {:?}", &e); - } - }, - _ => { - panic!("other error: {:?}", &e); - } - }, - }; - decoded_txs.push(tx); - } - - let mut tx_set = HashSet::new(); - for tx in txs.iter() { - tx_set.insert(tx.txid()); - } - - // the order won't be preserved - assert_eq!(tx_set.len(), decoded_txs.len()); - for tx in decoded_txs { - assert!(tx_set.contains(&tx.txid())); - } - - // verify that we can stream through pagination, with an empty tx tags - let mut page_id = Txid([0u8; 32]); - let mut decoded_txs = vec![]; - loop { - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], vec![]), - 1, - block_height, - Some(page_id), - ); - - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - let mut buf = vec![]; - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - // buf decodes to the list of txs we have, plus page ids - let mut ptr = &buf[..]; - test_debug!("Decode {}", to_hex(ptr)); - let (mut next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); - - 
decoded_txs.append(&mut next_txs); - - // for fun, use a page ID that is actually a well-formed prefix of a transaction - if let Some(ref tx) = decoded_txs.last() { - let mut evil_buf = tx.serialize_to_vec(); - let mut evil_page_id = [0u8; 32]; - evil_page_id.copy_from_slice(&evil_buf[0..32]); - evil_buf.extend_from_slice(&evil_page_id); - - test_debug!("Decode evil buf {}", &to_hex(&evil_buf)); - - let (evil_next_txs, evil_next_page) = - HttpResponseType::decode_tx_stream(&mut &evil_buf[..], None).unwrap(); - - // should still work - assert_eq!(evil_next_txs.len(), 1); - assert_eq!(evil_next_txs[0].txid(), tx.txid()); - assert_eq!(evil_next_page.unwrap().0[0..32], evil_buf[0..32]); - } - - if let Some(next_page) = next_page { - page_id = next_page; - } else { - break; - } - } - - // make sure we got them all - let mut tx_set = HashSet::new(); - for tx in txs.iter() { - tx_set.insert(tx.txid()); - } - - // the order won't be preserved - assert_eq!(tx_set.len(), decoded_txs.len()); - for tx in decoded_txs { - assert!(tx_set.contains(&tx.txid())); - } - - // verify that we can stream through pagination, with a full bloom filter - let mut page_id = Txid([0u8; 32]); - let all_txs_tags: Vec<_> = txs - .iter() - .map(|tx| TxTag::from(&[0u8; 32], &tx.txid())) - .collect(); - loop { - let stream = StreamCursor::new_tx_stream( - MemPoolSyncData::TxTags([0u8; 32], all_txs_tags.clone()), - 1, - block_height, - Some(page_id), - ); - - let mut tx_stream_data = if let StreamCursor::MempoolTxs(stream_data) = stream { - stream_data - } else { - unreachable!(); - }; - - let mut buf = vec![]; - loop { - let nw = match mempool.stream_txs(&mut buf, &mut tx_stream_data, 10) { - Ok(nw) => nw, - Err(e) => { - error!("Failed to stream_to: {:?}", &e); - panic!(); - } - }; - if nw == 0 { - break; - } - } - - // buf decodes to an empty list of txs, plus page ID - let mut ptr = &buf[..]; - test_debug!("Decode {}", to_hex(ptr)); - let (next_txs, next_page) = HttpResponseType::decode_tx_stream(&mut ptr, None).unwrap(); - - assert_eq!(next_txs.len(), 0); - - if let Some(next_page) = next_page { - page_id = next_page; - } else { - break; - } - } -} - -#[test] -fn test_decode_tx_stream() { - let addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - let mut txs = vec![]; - for _i in 0..10 { - let pk = StacksPrivateKey::new(); - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - txs.push(tx); - } - - // valid empty tx stream - let empty_stream = [0x11u8; 32]; - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut empty_stream.as_ref(), None).unwrap(); - assert_eq!(next_txs.len(), 0); - assert_eq!(next_page, Some(Txid([0x11; 32]))); - - // valid tx stream with a page id at the end - let mut tx_stream: Vec = vec![]; - for tx in txs.iter() { - tx.consensus_serialize(&mut tx_stream).unwrap(); - } - tx_stream.extend_from_slice(&[0x22; 32]); - - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut &tx_stream[..], None).unwrap(); - assert_eq!(next_txs, txs); - assert_eq!(next_page, Some(Txid([0x22; 32]))); - - // valid tx stream with _no_ page id at the end - let mut 
partial_stream: Vec = vec![]; - txs[0].consensus_serialize(&mut partial_stream).unwrap(); - let (next_txs, next_page) = - HttpResponseType::decode_tx_stream(&mut &partial_stream[..], None).unwrap(); - assert_eq!(next_txs.len(), 1); - assert_eq!(next_txs[0], txs[0]); - assert!(next_page.is_none()); - - // garbage tx stream - let garbage_stream = [0xff; 256]; - let err = HttpResponseType::decode_tx_stream(&mut garbage_stream.as_ref(), None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } - - // tx stream that is too short - let short_stream = [0x33u8; 33]; - let err = HttpResponseType::decode_tx_stream(&mut short_stream.as_ref(), None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } - - // tx stream has a tx, a page ID, and then another tx - let mut interrupted_stream = vec![]; - txs[0].consensus_serialize(&mut interrupted_stream).unwrap(); - interrupted_stream.extend_from_slice(&[0x00u8; 32]); - txs[1].consensus_serialize(&mut interrupted_stream).unwrap(); - - let err = HttpResponseType::decode_tx_stream(&mut &interrupted_stream[..], None); - match err { - Err(NetError::ExpectedEndOfStream) => {} - x => { - error!("did not fail: {:?}", &x); - panic!(); - } - } -} - #[test] fn test_drop_and_blacklist_txs_by_time() { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index e532c9b062..c70e5c2e7a 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -68,7 +68,6 @@ use blockstack_lib::cost_estimates::UnitEstimator; use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::PeerAddress; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use libstackerdb::StackerDBChunkData; @@ -79,6 +78,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, }; +use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index 0e69197816..1d3f212d24 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -28,7 +28,8 @@ use stacks_common::util::uint::{Uint256, Uint512}; use crate::burnchains::{BurnchainSigner, Txid}; use crate::core::MemPoolDB; -use crate::net::{Error as net_error, HttpRequestType}; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::Error as net_error; use crate::util_lib::db::{sqlite_open, tx_busy_handler, DBConn, Error as DatabaseError}; #[cfg(feature = "monitoring_prom")] @@ -45,17 +46,17 @@ pub fn increment_rpc_calls_counter() { } pub fn instrument_http_request_handler( - req: HttpRequestType, + req: StacksHttpRequest, handler: F, ) -> Result where - F: FnOnce(HttpRequestType) -> Result, + F: FnOnce(StacksHttpRequest) -> Result, { #[cfg(feature = "monitoring_prom")] increment_rpc_calls_counter(); #[cfg(feature = "monitoring_prom")] - let timer = prometheus::new_rpc_call_timer(req.get_path()); + let timer = prometheus::new_rpc_call_timer(req.request_path()); let res = handler(req); diff --git 
a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs new file mode 100644 index 0000000000..7ab2a728c3 --- /dev/null +++ b/stackslib/src/net/api/callreadonly.rs @@ -0,0 +1,373 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Write}; + +use clarity::vm::analysis::CheckErrors; +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::errors::Error::Unchecked; +use clarity::vm::errors::{Error as ClarityRuntimeError, InterpreterError}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{ + PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, + BOUND_VALUE_SERIALIZATION_HEX, +}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, SymbolicExpression, Value}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPayload, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Clone, Serialize, Deserialize)] +pub struct CallReadOnlyRequestBody { + pub sender: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub sponsor: Option, + pub arguments: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CallReadOnlyResponse { + pub okay: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub cause: Option, +} + +#[derive(Clone)] +pub struct RPCCallReadOnlyRequestHandler { + maximum_call_argument_size: u32, + read_only_call_limit: ExecutionCost, + + /// Runtime fields + pub contract_identifier: 
Option<QualifiedContractIdentifier>, + pub function: Option<ClarityName>, + pub sender: Option<PrincipalData>, + pub sponsor: Option<PrincipalData>, + pub arguments: Option<Vec<Value>>, +} + +impl RPCCallReadOnlyRequestHandler { + pub fn new(maximum_call_argument_size: u32, read_only_call_limit: ExecutionCost) -> Self { + Self { + maximum_call_argument_size, + read_only_call_limit, + contract_identifier: None, + function: None, + sender: None, + sponsor: None, + arguments: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCCallReadOnlyRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/call-read/(?P<address>
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < self.maximum_call_argument_size) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for CallReadOnly ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let function = request::get_clarity_name(captures, "function")?; + let body: CallReadOnlyRequestBody = serde_json::from_slice(body) + .map_err(|_e| Error::DecodeError("Failed to parse JSON body".into()))?; + + let sender = PrincipalData::parse(&body.sender) + .map_err(|_e| Error::DecodeError("Failed to parse sender principal".into()))?; + + let sponsor = if let Some(sponsor) = body.sponsor { + Some( + PrincipalData::parse(&sponsor) + .map_err(|_e| Error::DecodeError("Failed to parse sponsor principal".into()))?, + ) + } else { + None + }; + + // arguments must be valid Clarity values + let arguments = body + .arguments + .into_iter() + .map(|hex| Value::try_deserialize_hex_untyped(&hex).ok()) + .collect::>>() + .ok_or_else(|| Error::DecodeError("Failed to deserialize argument value".into()))?; + + self.contract_identifier = Some(contract_identifier); + self.function = Some(function); + self.sender = Some(sender); + self.sponsor = sponsor; + self.arguments = Some(arguments); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.function = None; + self.sender = None; + self.sponsor = None; + self.arguments = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("Missing `contract_identifier`".into()))?; + let function = self + .function + .take() + .ok_or(NetError::SendError("Missing `function`".into()))?; + let sender = self + .sender + .take() + .ok_or(NetError::SendError("Missing `sender`".into()))?; + let sponsor = self.sponsor.clone(); + let arguments = self + .arguments + .take() + .ok_or(NetError::SendError("Missing `arguments`".into()))?; + + // run the read-only call + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let args: Vec<_> = arguments + .iter() + .map(|x| SymbolicExpression::atom_value(x.clone())) + .collect(); + + let mainnet = chainstate.mainnet; + let chain_id = chainstate.chain_id; + let mut cost_limit = self.read_only_call_limit.clone(); + cost_limit.write_length = 0; + cost_limit.write_count = 0; + + 
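// With the write budget zeroed, any state-mutating operation immediately exceeds + // the cost limit; the `CostBalanceExceeded` arm below keys off `write_count > 0` + // to map such calls to the "NotReadOnly" cause. +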
chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + let cost_track = clarity_tx + .with_clarity_db_readonly(|clarity_db| { + LimitedCostTracker::new_mid_block( + mainnet, chain_id, cost_limit, clarity_db, epoch, + ) + }) + .map_err(|_| { + ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) + })?; + + let clarity_version = clarity_tx + .with_analysis_db_readonly(|analysis_db| { + analysis_db.get_clarity_version(&contract_identifier) + }) + .map_err(|_| { + ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( + "{}", + &contract_identifier + ))) + })?; + + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + clarity_version, + sender, + sponsor, + cost_track, + |env| { + // we want to execute any function as long as no actual writes are made as + // opposed to be limited to purely calling `define-read-only` functions, + // so use `read_only = false`. This broadens the number of functions that + // can be called, and also circumvents limitations on `define-read-only` + // functions that can not use `contrac-call?`, even when calling other + // read-only functions + env.execute_contract( + &contract_identifier, + function.as_str(), + &args, + false, + ) + }, + ) + }) + }); + + // decode the response + let data_resp = match data_resp { + Ok(Some(Ok(data))) => CallReadOnlyResponse { + okay: true, + result: Some(format!("0x{}", data.serialize_to_hex())), + cause: None, + }, + Ok(Some(Err(e))) => match e { + Unchecked(CheckErrors::CostBalanceExceeded(actual_cost, _)) + if actual_cost.write_count > 0 => + { + CallReadOnlyResponse { + okay: false, + result: None, + cause: Some("NotReadOnly".to_string()), + } + } + _ => CallReadOnlyResponse { + okay: false, + result: None, + cause: Some(e.to_string()), + }, + }, + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCCallReadOnlyRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let map_entry: CallReadOnlyResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(map_entry)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request to run a read-only function + pub fn new_callreadonlyfunction( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + sender: PrincipalData, + sponsor: Option, + function_name: ClarityName, + function_args: Vec, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!( + "/v2/contracts/call-read/{}/{}/{}", + &contract_addr, &contract_name, &function_name + ), + HttpRequestContents::new().for_tip(tip_req).payload_json( + serde_json::to_value(CallReadOnlyRequestBody { + sender: sender.to_string(), + sponsor: sponsor.map(|s| s.to_string()), + arguments: function_args.into_iter().map(|v| v.to_string()).collect(), + }) + .expect("FATAL: failed to encode infallible data"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_call_readonly_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: CallReadOnlyResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs new file mode 100644 index 0000000000..684096cd1f --- /dev/null +++ b/stackslib/src/net/api/getaccount.rs @@ -0,0 +1,261 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::io::{Read, Write}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance}; +use clarity::vm::representations::PRINCIPAL_DATA_REGEX_STRING; +use clarity::vm::types::{PrincipalData, StandardPrincipalData}; +use clarity::vm::ClarityVersion; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AccountEntryResponse { + pub balance: String, + pub locked: String, + pub unlock_height: u64, + pub nonce: u64, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub balance_proof: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub nonce_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetAccountRequestHandler { + pub account: Option, +} +impl RPCGetAccountRequestHandler { + pub fn new() -> Self { + Self { account: None } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAccountRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/accounts/(?P{})$", + *PRINCIPAL_DATA_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let account = if let Some(value) = captures.name("principal") { + PrincipalData::parse(value.into()) + .map_err(|_e| Error::DecodeError("Failed to parse `principal` field".to_string()))? 
+ } else { + return Err(Error::DecodeError( + "Missing in request path: `principal`".into(), + )); + }; + + self.account = Some(account); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetAccountRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.account = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let account = self + .account + .take() + .ok_or(NetError::SendError("Missing `account`".into()))?; + let with_proof = contents.get_with_proof(); + + let account_opt_res = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_account_balance(&account); + let burn_block_height = + clarity_db.get_current_burnchain_block_height() as u64; + let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height(); + let (balance, balance_proof) = if with_proof { + clarity_db + .get_with_proof::(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) + } else { + clarity_db + .get::(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) + }; + + let key = ClarityDatabase::make_key_for_account_nonce(&account); + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get(&key) + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; + + let unlocked = balance.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + + let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); + let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); + + AccountEntryResponse { + balance, + locked, + unlock_height, + nonce, + balance_proof, + nonce_proof, + } + }) + }) + }); + + let account = if let Ok(Some(account)) = account_opt_res { + account + } else { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Chain tip '{}' not found", &tip)), + ) + .try_into_contents() + .map_err(NetError::from); + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&account)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAccountRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let account: AccountEntryResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(account)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for an account + pub fn new_getaccount( + host: PeerHost, + principal: PrincipalData, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/accounts/{}", &principal), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_account_entry_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: AccountEntryResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getattachment.rs b/stackslib/src/net/api/getattachment.rs new file mode 100644 index 0000000000..30c5f3db5c --- /dev/null +++ b/stackslib/src/net/api/getattachment.rs @@ -0,0 +1,173 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashSet; +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Hash160; +use url::form_urlencoded; + +use crate::net::atlas::{ + AttachmentPage, GetAttachmentResponse, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, +}; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCGetAttachmentRequestHandler { + pub attachment_hash: Option, +} + +impl RPCGetAttachmentRequestHandler { + pub fn new() -> Self { + Self { + attachment_hash: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAttachmentRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/attachments/(?P[0-9a-f]{40})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let attachment_hash_str = captures + .name("attachment_hash") + .ok_or(Error::DecodeError( + "Failed to match path to attachment_hash group".to_string(), + ))? + .as_str(); + + self.attachment_hash = Some( + Hash160::from_hex(attachment_hash_str) + .map_err(|_| Error::DecodeError("Failed to decode `attachment_hash`".into()))?, + ); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetAttachmentRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.attachment_hash = None; + } + + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let attachment_hash = self + .attachment_hash + .take() + .ok_or(NetError::SendError("Missing `attachment_hash`".into()))?; + + let attachment_res = node.with_node_state( + |network, _sortdb, _chainstate, _mempool, _rpc_args| match network + .get_atlasdb() + .find_attachment(&attachment_hash) + { + Ok(Some(attachment)) => Ok(GetAttachmentResponse { attachment }), + _ => { + let msg = format!("Unable to find attachment"); + warn!("{}", msg); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )) + } + }, + ); + let attachment = match attachment_res { + Ok(attachment) => attachment, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&attachment)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAttachmentRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pages: GetAttachmentResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pages)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for an attachment + pub fn new_getattachment(host: PeerHost, attachment_id: Hash160) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/attachments/{}", &attachment_id), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_atlas_get_attachment(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetAttachmentResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs new file mode 100644 index 0000000000..d41898a731 --- /dev/null +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -0,0 +1,265 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashSet; +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use url::form_urlencoded; + +use crate::net::atlas::{ + AttachmentPage, GetAttachmentsInvResponse, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, +}; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCGetAttachmentsInvRequestHandler { + pub index_block_hash: Option, + pub page_indexes: Option>, +} + +impl RPCGetAttachmentsInvRequestHandler { + pub fn new() -> Self { + Self { + index_block_hash: None, + page_indexes: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetAttachmentsInvRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new("^/v2/attachments/inv$").unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let query_str = if let Some(qs) = query { + qs + } else { + return Err(Error::DecodeError( + "Invalid Http request: expecting index_block_hash and pages_indexes".to_string(), + )); + }; + + let mut index_block_hash = None; + let mut page_indexes = HashSet::new(); + + // expect index_block_hash= and page_indexes= + for (key, value) in form_urlencoded::parse(query_str.as_bytes()) { + if key == "index_block_hash" { + index_block_hash = StacksBlockId::from_hex(&value).ok(); + } else if key == "pages_indexes" { + if let Ok(pages_indexes_value) = value.parse::() { + for entry in pages_indexes_value.split(",") { + if let Ok(page_index) = entry.parse::() { + page_indexes.insert(page_index); + } + } + } + } + } + + let index_block_hash = if let Some(ibh) = index_block_hash { + ibh + } else { + return Err(Error::DecodeError( + "Invalid Http request: expecting index_block_hash".to_string(), + )); + }; + + if page_indexes.is_empty() { + return Err(Error::DecodeError( + "Invalid Http request: expecting pages_indexes".to_string(), + )); + } + + let mut page_index_list: Vec = page_indexes.into_iter().collect(); + page_index_list.sort(); + + self.index_block_hash = Some(index_block_hash); + self.page_indexes = Some(page_index_list); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetAttachmentsInvRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.index_block_hash = None; + self.page_indexes = None; + } + + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let index_block_hash = self + .index_block_hash + .take() + .ok_or(NetError::SendError("Missing `index_block_hash`".into()))?; + let page_indexes = self + .page_indexes + .take() + .ok_or(NetError::SendError("Missing `page_indexes`".into()))?; + + // We are receiving a list of page indexes with a chain tip hash. + // The amount of pages_indexes is capped by MAX_ATTACHMENT_INV_PAGES_PER_REQUEST (8) + // Pages sizes are controlled by the constant ATTACHMENTS_INV_PAGE_SIZE (8), which + // means that a `GET v2/attachments/inv` request can be requesting for a 64 bit vector + // at once. + // Since clients can be asking for non-consecutive pages indexes (1, 5_000, 10_000, ...), + // we will be handling each page index separately. + // We could also add the notion of "budget" so that a client could only get a limited number + // of pages when they are spanning over many blocks. 
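+ // For example (values illustrative): + // GET /v2/attachments/inv?index_block_hash=<index-block-hash-hex>&pages_indexes=0,1,2 + // asks for three 8-slot inventory pages against the given index block hash.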
+ if page_indexes.len() > MAX_ATTACHMENT_INV_PAGES_PER_REQUEST { + let msg = format!( + "Number of attachment inv pages is limited by {} per request", + MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + ); + warn!("{}", msg); + return StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + if page_indexes.is_empty() { + let msg = format!("Page indexes missing"); + warn!("{}", msg); + return StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + + let mut pages = vec![]; + + for page_index in page_indexes.iter() { + let page_res = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + match network + .get_atlasdb() + .get_attachments_available_at_page_index(*page_index, &index_block_hash) + { + Ok(inventory) => Ok(AttachmentPage { + inventory, + index: *page_index, + }), + Err(e) => { + let msg = format!("Unable to read Atlas DB - {}", e); + warn!("{}", msg); + Err(msg) + } + } + }); + + match page_res { + Ok(page) => { + pages.push(page); + } + Err(msg) => { + return StacksHttpResponse::new_error(&preamble, &HttpNotFound::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + } + } + + let content = GetAttachmentsInvResponse { + block_id: index_block_hash.clone(), + pages, + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&content)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetAttachmentsInvRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pages: GetAttachmentsInvResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pages)?) + } +} + +impl StacksHttpRequest { + /// Make a new request for attachment inventory page + pub fn new_getattachmentsinv( + host: PeerHost, + index_block_hash: StacksBlockId, + page_indexes: HashSet, + ) -> StacksHttpRequest { + let page_list: Vec = page_indexes.into_iter().map(|i| format!("{}", i)).collect(); + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/attachments/inv".into(), + HttpRequestContents::new() + .query_arg("index_block_hash".into(), format!("{}", &index_block_hash)) + .query_arg("pages_indexes".into(), page_list[..].join(",")), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_atlas_attachments_inv_response( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetAttachmentsInvResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getblock.rs b/stackslib/src/net/api/getblock.rs new file mode 100644 index 0000000000..924c165de7 --- /dev/null +++ b/stackslib/src/net/api/getblock.rs @@ -0,0 +1,308 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlock}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCBlocksRequestHandler { + pub block_id: Option, +} + +impl RPCBlocksRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct StacksBlockStream { + /// index block hash of the block to download + pub index_block_hash: StacksBlockId, + /// offset into whatever is being read (the blob, or the file in the chunk store) + pub offset: u64, + /// total number of bytes read. + pub total_bytes: u64, + + /// connection to the underlying chainstate + blocks_path: String, +} + +impl StacksBlockStream { + pub fn new(chainstate: &StacksChainState, block: &StacksBlockId) -> Result { + let _ = StacksChainState::load_staging_block_info(chainstate.db(), block)? + .ok_or(ChainError::NoSuchBlockError)?; + + let blocks_path = chainstate.blocks_path.clone(); + + Ok(StacksBlockStream { + index_block_hash: block.clone(), + offset: 0, + total_bytes: 0, + blocks_path, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCBlocksRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/blocks/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id_str = captures + .name("block_id") + .ok_or(Error::DecodeError( + "Failed to match path to block ID group".to_string(), + ))? 
+ .as_str(); + + let block_id = StacksBlockId::from_hex(block_id_str) + .map_err(|_| Error::DecodeError("Invalid path: unparseable block ID".to_string()))?; + self.block_id = Some(block_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCBlocksRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("Missing `block_id`".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksBlockStream::new(chainstate, &block_id) + }); + + // start loading up the block + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCBlocksRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksBlockStream { + #[cfg(test)] + fn hint_chunk_size(&self) -> usize { + // make this hurt + 32 + } + + #[cfg(not(test))] + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + let block_path = + StacksChainState::get_index_block_path(&self.blocks_path, &self.index_block_hash) + .map_err(|e| { + let msg = format!( + "Failed to load block path for {}: {:?}", + &self.index_block_hash, &e + ); + warn!("{}", &msg); + msg + })?; + + // The reason we open a file on each call to stream data is because we don't want to + // exhaust the supply of file descriptors. Maybe a future version of this code will do + // something like cache the set of open files so we don't have to keep re-opening them. 
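+ // Each call therefore re-opens the file, seeks to `self.offset`, reads at most one + // chunk, and drops the handle; `offset` and `total_bytes` carry the stream position + // between calls.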
+ let mut file_fd = fs::OpenOptions::new() + .read(true) + .write(false) + .create(false) + .truncate(false) + .open(&block_path) + .map_err(|e| { + if e.kind() == io::ErrorKind::NotFound { + let msg = format!("Blook file not found for {}", &self.index_block_hash); + warn!("{}", &msg); + msg + } else { + let msg = format!("Failed to open block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + } + })?; + + file_fd.seek(SeekFrom::Start(self.offset)).map_err(|e| { + let msg = format!("Failed to read block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + })?; + + let mut buf = vec![0u8; self.hint_chunk_size()]; + let num_read = file_fd.read(&mut buf).map_err(|e| { + let msg = format!("Failed to read block {}: {:?}", &self.index_block_hash, &e); + warn!("{}", &msg); + msg + })?; + + buf.truncate(num_read); + + self.offset += num_read as u64; + self.total_bytes += num_read as u64; + + Ok(buf) + } +} + +impl StacksHttpRequest { + pub fn new_getblock(host: PeerHost, index_block_hash: StacksBlockId) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/blocks/{}", &index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + #[cfg(test)] + pub fn new_getblock(block: StacksBlock, with_content_length: bool) -> StacksHttpResponse { + let value = block.serialize_to_vec(); + let length = value.len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::Bytes, + true, + ); + let body = HttpResponsePayload::Bytes(value); + StacksHttpResponse::new(preamble, body) + } + + /// Decode an HTTP response into a block. + /// If it fails, return Self::Error(..) + pub fn decode_block(self) -> Result { + let contents = self.get_http_payload_ok()?; + + // contents will be raw bytes + let block_bytes: Vec = contents.try_into()?; + let block = StacksBlock::consensus_deserialize(&mut &block_bytes[..])?; + + Ok(block) + } +} diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs new file mode 100644 index 0000000000..a7845c7fec --- /dev/null +++ b/stackslib/src/net/api/getconstantval.rs @@ -0,0 +1,227 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
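+//! `GET /v2/constant_val/:contract_address/:contract_name/:constant_name` — look up a +//! constant defined in a published contract and return its value as a 0x-prefixed hex +//! string (see `ConstantValResponse`). +//! +//! Rough client-side sketch (`peer_host`, `contract_addr`, `contract_name`, `constant_name`, +//! `tip_req`, and `http_response` are assumed to be in scope; illustrative only): +//! ```ignore +//! let req = StacksHttpRequest::new_getconstantval(peer_host, contract_addr, contract_name, constant_name, tip_req); +//! let hex_value = http_response.decode_constant_val_response()?.data; +//! ```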
+ +use std::io::{Read, Write}; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ConstantValResponse { + pub data: String, +} + +#[derive(Clone)] +pub struct RPCGetConstantValRequestHandler { + pub constname: Option, + pub contract_identifier: Option, +} + +impl RPCGetConstantValRequestHandler { + pub fn new() -> Self { + Self { + constname: None, + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetConstantValRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/constant_val/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let constname = request::get_clarity_name(captures, "constname")?; + + self.contract_identifier = Some(contract_identifier); + self.constname = Some(constname); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetConstantValRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.constname = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let constant_name = self + .constname + .take() + .ok_or(NetError::SendError("`constname` not set".to_string()))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let contract = clarity_db.get_contract(&contract_identifier).ok()?; + + let cst = contract + .contract_context + .lookup_variable(constant_name.as_str())? + .serialize_to_hex(); + + let data = format!("0x{cst}"); + Some(ConstantValResponse { data }) + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Constant not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetConstantValRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let constant_val: ConstantValResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(constant_val)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a constant val + pub fn new_getconstantval( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + constant_name: ClarityName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/constant_val/{}/{}/{}", + &contract_addr, &contract_name, &constant_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_constant_val_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ConstantValResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs new file mode 100644 index 0000000000..38d614b1f4 --- /dev/null +++ b/stackslib/src/net/api/getcontractabi.rs @@ -0,0 +1,208 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
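An illustrative client-side sketch for the `/v2/constant_val/{address}/{contract}/{constname}` handler defined above (editor's example, not part of the patch). It only uses the request/decode helpers from `getconstantval.rs` and assumes that file's imports are in scope; the `TipRequest` value and the actual network exchange are left to the caller.

    fn fetch_constant_sketch(
        host: PeerHost,
        contract_addr: StacksAddress,
        contract_name: ContractName,
        constant_name: ClarityName,
        tip_req: TipRequest,
        response: StacksHttpResponse,
    ) -> Result<String, NetError> {
        // Build the GET /v2/constant_val/{address}/{contract}/{constname} request.
        let _request = StacksHttpRequest::new_getconstantval(
            host,
            contract_addr,
            contract_name,
            constant_name,
            tip_req,
        );
        // ...send `_request` to the node and obtain `response`, then decode it:
        let constant = response.decode_constant_val_response()?;
        // `data` is the "0x"-prefixed hex of the constant's serialized Clarity value.
        Ok(constant.data)
    }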
+ +use std::io::{Read, Write}; + +use clarity::vm::analysis::contract_interface_builder::ContractInterface; +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::clarity_store::{make_contract_hash_key, ContractCommitment}; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Clone)] +pub struct RPCGetContractAbiRequestHandler { + pub contract_identifier: Option, +} + +impl RPCGetContractAbiRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetContractAbiRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/interface/(?P
{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + + self.contract_identifier = Some(contract_identifier); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetContractAbiRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + clarity_tx.with_analysis_db_readonly(|db| { + let contract = db.load_contract(&contract_identifier, &epoch)?; + contract.contract_interface + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No contract interface data found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetContractAbiRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let contract_src: ContractInterface = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(contract_src)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a contract ABI + pub fn new_getcontractabi( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/contracts/interface/{}/{}", + &contract_addr, &contract_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_contract_abi_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ContractInterface = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs new file mode 100644 index 0000000000..f670d1020c --- /dev/null +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -0,0 +1,231 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
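The same client pattern applies to the `/v2/contracts/interface/{address}/{contract}` handler just above; a short sketch (editor's example, not part of the patch), reusing that file's imports, including `ContractInterface` from the Clarity analysis crate.

    fn fetch_contract_abi_sketch(
        host: PeerHost,
        contract_addr: StacksAddress,
        contract_name: ContractName,
        tip_req: TipRequest,
        response: StacksHttpResponse,
    ) -> Result<ContractInterface, NetError> {
        let _request =
            StacksHttpRequest::new_getcontractabi(host, contract_addr, contract_name, tip_req);
        // ...exchange `_request` for `response`, then parse the JSON-encoded ABI:
        response.decode_contract_abi_response()
    }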
+ +use std::io::{Read, Write}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::clarity_store::{make_contract_hash_key, ContractCommitment}; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ContractSrcResponse { + pub source: String, + pub publish_height: u32, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetContractSrcRequestHandler { + pub contract_identifier: Option, +} + +impl RPCGetContractSrcRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetContractSrcRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/contracts/source/(?P
{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + self.contract_identifier = Some(contract_identifier); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetContractSrcRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let with_proof = contents.get_with_proof(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let source = db.get_contract_src(&contract_identifier)?; + let contract_commit_key = make_contract_hash_key(&contract_identifier); + let (contract_commit, proof) = if with_proof { + db.get_with_proof::(&contract_commit_key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .expect("BUG: obtained source, but couldn't get contract commit") + } else { + db.get::(&contract_commit_key) + .map(|a| (a, None)) + .expect("BUG: obtained source, but couldn't get contract commit") + }; + + let publish_height = contract_commit.block_height; + Some(ContractSrcResponse { + source, + publish_height, + marf_proof: proof, + }) + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No contract source data found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetContractSrcRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let contract_src: ContractSrcResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(contract_src)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a contract's source code + pub fn new_getcontractsrc( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/contracts/source/{}/{}", &contract_addr, &contract_name), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_contract_src_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ContractSrcResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs new file mode 100644 index 0000000000..0594dc8639 --- /dev/null +++ b/stackslib/src/net/api/getdatavar.rs @@ -0,0 +1,242 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
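For the `/v2/contracts/source/{address}/{contract}` handler above, the `proof` query argument (the `with_proof` flag) additionally returns a hex-encoded MARF proof alongside the source. A client-side sketch (editor's example, not part of the patch), assuming that file's imports are in scope:

    fn fetch_contract_src_sketch(
        host: PeerHost,
        contract_addr: StacksAddress,
        contract_name: ContractName,
        tip_req: TipRequest,
        response: StacksHttpResponse,
    ) -> Result<(String, u32), NetError> {
        // Request the source together with a MARF proof (`proof=1`).
        let _request = StacksHttpRequest::new_getcontractsrc(
            host,
            contract_addr,
            contract_name,
            tip_req,
            true, // with_proof
        );
        // ...exchange `_request` for `response`, then:
        let src = response.decode_contract_src_response()?;
        debug_assert!(src.marf_proof.is_some()); // only present when proof=1 was requested
        Ok((src.source, src.publish_height))
    }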
+ +use std::io::{Read, Write}; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DataVarResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetDataVarRequestHandler { + pub contract_identifier: Option, + pub varname: Option, +} +impl RPCGetDataVarRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + varname: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetDataVarRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/data_var/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let varname = request::get_clarity_name(captures, "varname")?; + + self.contract_identifier = Some(contract_identifier); + self.varname = Some(varname); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetDataVarRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.varname = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let var_name = self + .varname + .take() + .ok_or(NetError::SendError("`varname` not set".to_string()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let with_proof = contents.get_with_proof(); + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_trip( + &contract_identifier, + StoreType::Variable, + &var_name, + ); + + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db.get(&key).map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(DataVarResponse { data, marf_proof }) + }) + }) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Data var not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetDataVarRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let datavar: DataVarResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(datavar)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new request for a data var + pub fn new_getdatavar( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + var_name: ClarityName, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/data_var/{}/{}/{}", + &contract_addr, &contract_name, &var_name + ), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_data_var_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: DataVarResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getheaders.rs b/stackslib/src/net/api/getheaders.rs new file mode 100644 index 0000000000..b2a3e4dc96 --- /dev/null +++ b/stackslib/src/net/api/getheaders.rs @@ -0,0 +1,325 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCHeadersRequestHandler { + pub quantity: Option, +} + +impl RPCHeadersRequestHandler { + pub fn new() -> Self { + Self { quantity: None } + } +} + +#[derive(Debug)] +pub struct StacksHeaderStream { + /// index block hash of the block to download + pub index_block_hash: StacksBlockId, + /// offset into whatever is being read (the blob, or the file in the chunk store) + pub offset: u64, + /// total number of bytes read. 
+ pub total_bytes: u64, + /// number of headers remaining to stream + pub num_headers: u32, + + /// header buffer data + pub end_of_stream: bool, + pub corked: bool, + + /// connection to the underlying chainstate + chainstate_db: DBConn, + blocks_path: String, +} + +impl StacksHeaderStream { + pub fn new( + chainstate: &StacksChainState, + tip: &StacksBlockId, + num_headers_requested: u32, + ) -> Result { + let header_info = StacksChainState::load_staging_block_info(chainstate.db(), tip)? + .ok_or(ChainError::NoSuchBlockError)?; + + let num_headers = if header_info.height < (num_headers_requested as u64) { + header_info.height as u32 + } else { + num_headers_requested + }; + + let db = chainstate.reopen_db()?; + let blocks_path = chainstate.blocks_path.clone(); + + Ok(StacksHeaderStream { + index_block_hash: tip.clone(), + offset: 0, + total_bytes: 0, + num_headers: num_headers, + end_of_stream: false, + corked: false, + chainstate_db: db, + blocks_path, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCHeadersRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/headers/(?P[0-9]+)$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + + let quantity = request::get_u32(captures, "quantity")?; + self.quantity = Some(quantity); + + let contents = HttpRequestContents::new().query_string(query); + + Ok(contents) + } +} + +impl RPCRequestHandler for RPCHeadersRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.quantity = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let quantity = self + .quantity + .take() + .ok_or(NetError::SendError("`quantity` not set".to_string()))?; + if (quantity as usize) > MAX_HEADERS { + return StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Invalid request: requested more than {} headers\n", + MAX_HEADERS + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + + // find requested chain tip + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksHeaderStream::new(chainstate, &tip, quantity) + }); + + // start loading headers + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &tip)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block header: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = 
HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::JSON, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCHeadersRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let headers: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(headers)?) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksHeaderStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + if self.total_bytes == 0 { + // headers are a JSON array. Start by writing '[', then write each header, and + // then write ']' + test_debug!("Opening header stream"); + self.total_bytes += 1; + return Ok(vec!['[' as u8]); + } + if self.num_headers == 0 { + test_debug!("End of header stream"); + self.end_of_stream = true; + } + if self.total_bytes > 0 && !self.end_of_stream && !self.corked { + // have more data to send. + // read next header as JSON + match StacksChainState::read_extended_header( + &self.chainstate_db, + &self.blocks_path, + &self.index_block_hash, + ) { + Ok(extended_header) => { + // serialize + let mut header_bytes = vec![]; + serde_json::to_writer(&mut header_bytes, &extended_header).map_err(|e| { + let msg = format!("Failed to encoded Stacks header: {:?}", &e); + warn!("{}", &msg); + msg + })?; + + // advance + self.index_block_hash = extended_header.parent_block_id; + self.num_headers -= 1; + + if self.num_headers > 0 { + header_bytes.push(',' as u8); + } else { + self.end_of_stream = true; + } + + self.total_bytes += header_bytes.len() as u64; + return Ok(header_bytes); + } + Err(ChainError::DBError(DBError::NotFoundError)) => { + // end of headers + test_debug!("Header not found; ending stream"); + self.end_of_stream = true; + } + Err(e) => { + warn!("Header DB error: {:?}", &e); + self.end_of_stream = true; + return Err(format!( + "Failed to read extended header {}: {:?}", + &self.index_block_hash, &e + )); + } + }; + } + if self.end_of_stream && !self.corked { + // sent all the headers we're gonna send. + test_debug!("Corking header stream"); + self.corked = true; + self.total_bytes += 1; + return Ok(vec![']' as u8]); + } + + test_debug!("Header stream terminated"); + // end of stream and corked. we're done! 
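        // (Editor's illustration, not part of the patch.) Taken together, the
        // chunks produced by this generator form one JSON array:
        //
        //     [ <ExtendedStacksHeader>, <ExtendedStacksHeader>, ... ]
        //
        // starting from the requested tip and walking backwards via each
        // header's parent_block_id: '[' on the first call, one serialized
        // header (with a trailing ',' while more remain) per subsequent call,
        // and a closing ']' once the stream is corked.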
+ return Ok(vec![]); + } +} + +impl StacksHttpRequest { + pub fn new_getheaders(host: PeerHost, quantity: u64, tip_req: TipRequest) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/headers/{}", quantity), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stacks_headers(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let headers: Vec = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(headers) + } +} diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs new file mode 100644 index 0000000000..f83173a44d --- /dev/null +++ b/stackslib/src/net/api/getinfo.rs @@ -0,0 +1,266 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{Hash160, Sha256Sum}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; +use crate::version_string; + +/// The request to GET /v2/info +#[derive(Clone)] +pub struct RPCPeerInfoRequestHandler {} +impl RPCPeerInfoRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCAffirmationData { + pub heaviest: AffirmationMap, + pub stacks_tip: AffirmationMap, + pub sortition_tip: AffirmationMap, + pub tentative_best: AffirmationMap, +} + +/// Information about the last PoX anchor block +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCLastPoxAnchorData { + pub anchor_block_hash: BlockHeaderHash, + pub anchor_block_txid: Txid, +} + +/// The response to GET /v2/info +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPeerInfoData { + pub peer_version: u32, + pub pox_consensus: ConsensusHash, + pub burn_block_height: u64, + pub stable_pox_consensus: 
ConsensusHash, + pub stable_burn_block_height: u64, + pub server_version: String, + pub network_id: u32, + pub parent_network_id: u32, + pub stacks_tip_height: u64, + pub stacks_tip: BlockHeaderHash, + pub stacks_tip_consensus_hash: ConsensusHash, + pub genesis_chainstate_hash: Sha256Sum, + pub unanchored_tip: Option, + pub unanchored_seq: Option, + pub exit_at_block_height: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub node_public_key: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub node_public_key_hash: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub affirmations: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub last_pox_anchor: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub stackerdbs: Option>, +} + +impl RPCPeerInfoData { + pub fn from_network( + network: &PeerNetwork, + chainstate: &StacksChainState, + exit_at_block_height: Option, + genesis_chainstate_hash: &Sha256Sum, + ) -> RPCPeerInfoData { + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let (unconfirmed_tip, unconfirmed_seq) = match chainstate.unconfirmed_state { + Some(ref unconfirmed) => { + if unconfirmed.num_mined_txs() > 0 { + ( + Some(unconfirmed.unconfirmed_chain_tip.clone()), + Some(unconfirmed.last_mblock_seq), + ) + } else { + (None, None) + } + } + None => (None, None), + }; + + let public_key = StacksPublicKey::from_private(&network.get_local_peer().private_key); + let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); + let public_key_hash = Hash160::from_node_public_key(&public_key); + let stackerdb_contract_ids = network.get_local_peer().stacker_dbs.clone(); + + RPCPeerInfoData { + peer_version: network.burnchain.peer_version, + pox_consensus: network.burnchain_tip.consensus_hash.clone(), + burn_block_height: network.chain_view.burn_block_height, + stable_pox_consensus: network.chain_view_stable_consensus_hash.clone(), + stable_burn_block_height: network.chain_view.burn_stable_block_height, + server_version, + network_id: network.local_peer.network_id, + parent_network_id: network.local_peer.parent_network_id, + stacks_tip_height: network.burnchain_tip.canonical_stacks_tip_height, + stacks_tip: network.burnchain_tip.canonical_stacks_tip_hash.clone(), + stacks_tip_consensus_hash: network + .burnchain_tip + .canonical_stacks_tip_consensus_hash + .clone(), + unanchored_tip: unconfirmed_tip, + unanchored_seq: unconfirmed_seq, + exit_at_block_height: exit_at_block_height, + genesis_chainstate_hash: genesis_chainstate_hash.clone(), + node_public_key: Some(public_key_buf), + node_public_key_hash: Some(public_key_hash), + affirmations: Some(RPCAffirmationData { + heaviest: network.heaviest_affirmation_map.clone(), + stacks_tip: network.stacks_tip_affirmation_map.clone(), + sortition_tip: network.sortition_tip_affirmation_map.clone(), + tentative_best: network.tentative_best_affirmation_map.clone(), + }), + last_pox_anchor: Some(RPCLastPoxAnchorData { + anchor_block_hash: network.last_anchor_block_hash.clone(), + anchor_block_txid: network.last_anchor_block_txid.clone(), + }), + stackerdbs: Some( + stackerdb_contract_ids + .into_iter() + .map(|cid| format!("{}", cid)) + .collect(), + ), + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPeerInfoRequestHandler { + fn verb(&self) -> 
&'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/info$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPeerInfoRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let rpc_peer_info = + node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { + RPCPeerInfoData::from_network( + network, + chainstate, + rpc_args.exit_at_block_height.clone(), + &rpc_args.genesis_chainstate_hash, + ) + }); + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&rpc_peer_info)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPeerInfoRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let peer_info: RPCPeerInfoData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(peer_info)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_getinfo(host: PeerHost, stacks_height: Option) -> StacksHttpRequest { + let mut req = StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/info".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + req.preamble_mut() + .set_canonical_stacks_tip_height(stacks_height); + req + } +} + +impl StacksHttpResponse { + pub fn decode_peer_info(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let peer_info: RPCPeerInfoData = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(peer_info) + } +} diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs new file mode 100644 index 0000000000..8aa0a8fbef --- /dev/null +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -0,0 +1,263 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
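A minimal client sketch for the `/v2/info` handler above (editor's example, not part of the patch). The optional `stacks_height` argument only sets the canonical-tip hint in the request preamble, so `None` is a reasonable default; the sketch assumes that file's imports are in scope.

    fn fetch_peer_info_sketch(
        host: PeerHost,
        response: StacksHttpResponse,
    ) -> Result<(u64, String), NetError> {
        // No tip-height hint is required; pass one if the caller tracks it.
        let _request = StacksHttpRequest::new_getinfo(host, None);
        // ...exchange `_request` for `response`, then:
        let info = response.decode_peer_info()?;
        Ok((info.stacks_tip_height, info.server_version))
    }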
+ +use std::io::{Read, Write}; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{ + PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TraitIdentifier, +}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GetIsTraitImplementedResponse { + pub is_implemented: bool, +} + +#[derive(Clone)] +pub struct RPCGetIsTraitImplementedRequestHandler { + pub contract_identifier: Option, + pub trait_contract_identifier: Option, + pub trait_name: Option, +} +impl RPCGetIsTraitImplementedRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + trait_contract_identifier: None, + trait_name: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetIsTraitImplementedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/traits/(?P
{})/(?P{})/(?P{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let trait_contract_identifier = + request::get_contract_address(captures, "traitContractAddr", "traitContractName")?; + let trait_name = request::get_clarity_name(captures, "traitName")?; + + self.contract_identifier = Some(contract_identifier); + self.trait_contract_identifier = Some(trait_contract_identifier); + self.trait_name = Some(trait_name); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.trait_contract_identifier = None; + self.trait_name = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let trait_contract_id = + self.trait_contract_identifier + .take() + .ok_or(NetError::SendError( + "`trait_contract_identifier` not set".into(), + ))?; + let trait_name = self + .trait_name + .take() + .ok_or(NetError::SendError("`trait_name` not set".into()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let trait_id = + TraitIdentifier::new(trait_contract_id.issuer, trait_contract_id.name, trait_name); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let analysis = db.load_contract_analysis(&contract_identifier)?; + if analysis.implemented_traits.contains(&trait_id) { + Some(GetIsTraitImplementedResponse { + is_implemented: true, + }) + } else { + let trait_defining_contract = + db.load_contract_analysis(&trait_id.contract_identifier)?; + let trait_definition = + trait_defining_contract.get_defined_trait(&trait_id.name)?; + let is_implemented = analysis + .check_trait_compliance( + &db.get_clarity_epoch_version(), + &trait_id, + trait_definition, + ) + .is_ok(); + Some(GetIsTraitImplementedResponse { is_implemented }) + } + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new( + "No contract analysis found or trait definition not found".to_string(), + ), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip 
not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetIsTraitImplementedRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let is_implemented: GetIsTraitImplementedResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(is_implemented)?) + } +} + +impl StacksHttpResponse { + pub fn decode_is_trait_implemented_response( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: GetIsTraitImplementedResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} + +impl StacksHttpRequest { + /// Make a new is-trait-implemented request + pub fn new_get_is_trait_implemented( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + trait_contract_addr: StacksAddress, + trait_contract_name: ContractName, + trait_name: ClarityName, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/traits/{}/{}/{}/{}/{}", + &contract_addr, + &contract_name, + &trait_contract_addr, + &trait_contract_name, + &trait_name + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs new file mode 100644 index 0000000000..9a5cc24e82 --- /dev/null +++ b/stackslib/src/net/api/getmapentry.rs @@ -0,0 +1,270 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::io::{Read, Write}; + +use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::{ClarityDatabase, STXBalance, StoreType}; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{ + PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, + BOUND_VALUE_SERIALIZATION_HEX, +}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Sha256Sum}; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPayload, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MapEntryResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetMapEntryRequestHandler { + pub contract_identifier: Option, + pub map_name: Option, + pub key: Option, +} +impl RPCGetMapEntryRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + map_name: None, + key: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetMapEntryRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + "^/v2/map_entry/(?P
{})/(?P{})/(?P{})$", + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX + )) + .unwrap() + } + + /// Try to decode this request. + /// The body must be a hex string, encoded as a JSON string. + /// So, something like `"123abc"`. It encodes the map key as a serialized Clarity value. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < BOUND_VALUE_SERIALIZATION_HEX) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for GetMapEntry ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".into(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let map_name = request::get_clarity_name(captures, "map")?; + + let mut body_ptr = body; + let value_hex: String = serde_json::from_reader(&mut body_ptr) + .map_err(|_e| Error::DecodeError("Failed to parse JSON body".into()))?; + + let value = Value::try_deserialize_hex_untyped(&value_hex) + .map_err(|_e| Error::DecodeError("Failed to deserialize key value".into()))?; + + self.contract_identifier = Some(contract_identifier); + self.map_name = Some(map_name); + self.key = Some(value); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetMapEntryRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.map_name = None; + self.key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let map_name = self + .map_name + .take() + .ok_or(NetError::SendError("`map_name` not set".into()))?; + let key = self + .key + .take() + .ok_or(NetError::SendError("`key` not set".into()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let with_proof = contents.get_with_proof(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_data_map_entry( + &contract_identifier, + &map_name, + &key, + ); + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_with_proof(&key) + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), Some("".into())) + }) + } else { + clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), None) + }) + }; + + let data = format!("0x{}", value_hex); + MapEntryResponse { data, marf_proof } + }) + }) + }); + + let data_resp = match data_resp { + Ok(Some(data)) => data, + Ok(None) | Err(_) => { 
+ return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetMapEntryRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let map_entry: MapEntryResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(map_entry)?) + } +} + +impl StacksHttpRequest { + /// Make a new request for a data map + pub fn new_getmapentry( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + map_name: ClarityName, + key: Value, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!( + "/v2/map_entry/{}/{}/{}", + &contract_addr, &contract_name, &map_name + ), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()) + .payload_json(serde_json::Value::String(key.serialize_to_hex())), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_map_entry_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: MapEntryResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getmicroblocks_confirmed.rs b/stackslib/src/net/api/getmicroblocks_confirmed.rs new file mode 100644 index 0000000000..3c07a07a48 --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_confirmed.rs @@ -0,0 +1,192 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
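+// GET /v2/microblocks/confirmed/{block_id}: stream every microblock that was
+// confirmed by the anchored block whose index hash is {block_id}.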
+ +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{read_next, StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlockHeader, StacksMicroblock}; +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCMicroblocksConfirmedRequestHandler { + pub block_id: Option, +} + +impl RPCMicroblocksConfirmedRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +impl StacksIndexedMicroblockStream { + /// Make a new indexed microblock streamer using the descendent Stacks anchored block + pub fn new_confirmed( + chainstate: &StacksChainState, + child_block_id: &StacksBlockId, + ) -> Result { + let tail_microblock_index_hash = + if let Some(bhh) = chainstate.get_confirmed_microblock_index_hash(child_block_id)? { + bhh + } else { + return Err(ChainError::NoSuchBlockError); + }; + + StacksIndexedMicroblockStream::new(chainstate, &tail_microblock_index_hash) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksConfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/confirmed/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
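+ /// The path must carry a 64-character hex `block_id` (the child anchored block's
+ /// index block hash), and the body must be empty.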
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id = request::get_block_hash(captures, "block_id")?; + + self.block_id = Some(block_id); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksConfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("`block_id` not set".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + let res = StacksIndexedMicroblockStream::new_confirmed(chainstate, &block_id); + res + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + test_debug!("Failed to load block {}: Not found", &block_id); + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksConfirmedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_confirmed( + host: PeerHost, + child_block_id: StacksBlockId, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/confirmed/{}", &child_block_id), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/getmicroblocks_indexed.rs b/stackslib/src/net/api/getmicroblocks_indexed.rs new file mode 100644 index 0000000000..8f5eb7bc59 --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_indexed.rs @@ -0,0 +1,313 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{read_next, StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlockHeader, StacksMicroblock}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCMicroblocksIndexedRequestHandler { + pub tail_microblock_id: Option, +} +impl RPCMicroblocksIndexedRequestHandler { + pub fn new() -> Self { + Self { + tail_microblock_id: None, + } + } +} + +#[derive(Debug)] +pub struct StacksIndexedMicroblockStream { + /// length prefix + pub num_items_buf: [u8; 4], + pub num_items_ptr: usize, + + /// microblock pointer + pub microblock_hash: BlockHeaderHash, + pub parent_index_block_hash: StacksBlockId, + + /// connection to the chain state + chainstate_db: DBConn, +} + +impl StacksIndexedMicroblockStream { + pub fn new( + chainstate: &StacksChainState, + tail_index_microblock_hash: &StacksBlockId, + ) -> Result { + // look up parent + let mblock_info = StacksChainState::load_staging_microblock_info_indexed( + &chainstate.db(), + tail_index_microblock_hash, + )? + .ok_or(ChainError::NoSuchBlockError)?; + + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &mblock_info.consensus_hash, + &mblock_info.anchored_block_hash, + ); + + // need to send out the consensus_serialize()'ed array length before sending microblocks. + // this is exactly what seq tells us, though. + test_debug!( + "Will stream {} microblocks back from {}", + mblock_info.sequence, + &tail_index_microblock_hash + ); + let num_items_buf = ((mblock_info.sequence as u32) + 1).to_be_bytes(); + + Ok(StacksIndexedMicroblockStream { + microblock_hash: mblock_info.microblock_hash, + parent_index_block_hash: parent_index_block_hash, + num_items_buf: num_items_buf, + num_items_ptr: 0, + chainstate_db: chainstate.reopen_db()?, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksIndexedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/(?P[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
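+ /// The path must carry a 64-character hex `tail_microblock_id` (the index hash of
+ /// the last microblock to stream), and the body must be empty.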
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let tail_microblock_id = request::get_block_hash(captures, "tail_microblock_id")?; + + self.tail_microblock_id = Some(tail_microblock_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksIndexedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.tail_microblock_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tail_microblock_id = self + .tail_microblock_id + .take() + .ok_or(NetError::SendError("`tail_microblock_id` not set".into()))?; + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksIndexedMicroblockStream::new(chainstate, &tail_microblock_id) + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such microblock {:?}\n", &tail_microblock_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load microblock: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksIndexedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksIndexedMicroblockStream { + #[cfg(not(test))] + fn hint_chunk_size(&self) -> usize { + 4096 + } + + #[cfg(test)] + fn hint_chunk_size(&self) -> usize { + // make this hurt + 32 + } + + /// Stream back microblock chunks. 
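+ /// Chunks are produced from the tail microblock backwards, following each record's
+ /// parent hash toward the anchored block.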
+ /// The first chunk is a 4-byte length prefix + /// Subsequent chunks are microblocks + fn generate_next_chunk(&mut self) -> Result, String> { + if self.num_items_ptr == 0 { + // send length prefix + self.num_items_ptr += self.num_items_buf.len(); + return Ok(self.num_items_buf.to_vec()); + } + + // load next microblock + let mblock_info_opt = StacksChainState::load_staging_microblock_indexed( + &self.chainstate_db, + &self.parent_index_block_hash, + &self.microblock_hash, + ).map_err(|e| { + warn!("Failed to load microblock"; "microblock" => %self.microblock_hash, "parent anchored block" => %self.parent_index_block_hash, "error" => %e); + let msg = format!("Failed to load microblock {}-{}: {:?}", &self.parent_index_block_hash, &self.microblock_hash, &e); + msg + })?; + + let mblock_info = if let Some(x) = mblock_info_opt { + x + } else { + // out of microblocks + debug!( + "Out of microblocks to stream"; + "last microblock" => %self.microblock_hash, + "parent anchored block" => %self.parent_index_block_hash + ); + return Ok(vec![]); + }; + + let buf = mblock_info.block_data; + + self.microblock_hash = mblock_info.parent_hash; + return Ok(buf); + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_indexed( + host: PeerHost, + index_microblock_hash: StacksBlockId, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/{}", &index_microblock_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + #[cfg(test)] + pub fn new_getmicroblocks_indexed( + mblocks: Vec, + with_content_length: bool, + ) -> StacksHttpResponse { + let value = mblocks.serialize_to_vec(); + let length = value.len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::Bytes, + true, + ); + let body = HttpResponsePayload::Bytes(value); + StacksHttpResponse::new(preamble, body) + } + + /// Decode an HTTP response into a microblock stream + /// If it fails, return Self::Error(..) + pub fn decode_microblocks(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + + // contents will be a SIP-003 bytestream + let mblock_bytes: Vec = contents.try_into()?; + let microblocks: Vec = read_next(&mut &mblock_bytes[..])?; + + Ok(microblocks) + } +} diff --git a/stackslib/src/net/api/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs new file mode 100644 index 0000000000..f18d1855d4 --- /dev/null +++ b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs @@ -0,0 +1,320 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
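+// GET /v2/microblocks/unconfirmed/{block_id}/{seq}: stream the still-unconfirmed
+// microblocks that descend from the given anchored block, starting at sequence {seq}.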
+ +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{read_next, Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::retry::BoundReader; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlockHeader, StacksMicroblock}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{ + Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS, MAX_MICROBLOCKS_UNCONFIRMED, +}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCMicroblocksUnconfirmedRequestHandler { + pub parent_block_id: Option, + pub start_sequence: Option, +} +impl RPCMicroblocksUnconfirmedRequestHandler { + pub fn new() -> Self { + Self { + parent_block_id: None, + start_sequence: None, + } + } +} + +#[derive(Debug)] +pub struct StacksUnconfirmedMicroblockStream { + /// microblock pointer + pub microblock_hash: BlockHeaderHash, + pub parent_index_block_hash: StacksBlockId, + pub seq: u16, + pub finished: bool, + pub next_microblock: StacksMicroblock, + + /// connection to the chain state + chainstate_db: DBConn, +} + +impl StacksUnconfirmedMicroblockStream { + pub fn new( + chainstate: &StacksChainState, + parent_block_id: &StacksBlockId, + seq: u16, + ) -> Result { + let mblock_info = StacksChainState::load_next_descendant_microblock( + &chainstate.db(), + parent_block_id, + seq, + )? + .ok_or(ChainError::NoSuchBlockError)?; + + // need to send out the consensus_serialize()'ed array length before sending microblocks. + // this is exactly what seq tells us, though. + Ok(StacksUnconfirmedMicroblockStream { + microblock_hash: mblock_info.block_hash(), + parent_index_block_hash: parent_block_id.clone(), + seq, + finished: false, + next_microblock: mblock_info, + chainstate_db: chainstate.reopen_db()?, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMicroblocksUnconfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks/unconfirmed/(?P[0-9a-f]{64})/(?P[0-9]{1,6})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
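+ /// The path carries the parent anchored block's index hash and a decimal start
+ /// sequence; the sequence must fit in a u16, and the body must be empty.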
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + + let parent_block_id = request::get_block_hash(captures, "parent_block_id")?; + let start_sequence_u32 = request::get_u32(captures, "start_sequence")?; + + if start_sequence_u32 > u16::MAX.into() { + return Err(Error::DecodeError("`start_sequence` is too big".into())); + } + + let start_sequence = start_sequence_u32 as u16; + + self.parent_block_id = Some(parent_block_id); + self.start_sequence = Some(start_sequence); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMicroblocksUnconfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.parent_block_id = None; + self.start_sequence = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .parent_block_id + .take() + .ok_or(NetError::SendError("`parent_block_id` not set".into()))?; + let start_seq = self + .start_sequence + .take() + .ok_or(NetError::SendError("`start_seq` not set".into()))?; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + StacksUnconfirmedMicroblockStream::new(chainstate, &block_id, start_seq) + }); + + // start loading up the microblocks + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load microblock: {:?}\n", &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMicroblocksUnconfirmedRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +/// Stream implementation for HeaderStreamData +impl HttpChunkGenerator for StacksUnconfirmedMicroblockStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + /// Stream back microblock chunks. 
+ /// The first chunk is a 4-byte length prefix + /// Subsequent chunks are microblocks + fn generate_next_chunk(&mut self) -> Result, String> { + if self.finished { + // no more to load + return Ok(vec![]); + } + + // advance streamer to next microblock in the sequence + let next_seq = match self.seq { + u16::MAX => { + return Err("No more microblocks; exceeded maximum sequence number".to_string()); + } + x => x + 1, + }; + + let next_mblock_opt = StacksChainState::load_next_descendant_microblock( + &self.chainstate_db, + &self.parent_index_block_hash, + next_seq, + ).map_err(|e| { + warn!("Failed to query for next descendant microblock"; "parent anchored block" => %self.parent_index_block_hash, "next_seq" => %next_seq); + let msg = format!("Failed to query for next descendant microblock of {} at {}: {:?}", &self.parent_index_block_hash, next_seq, &e); + msg + })?; + + let buf = self.next_microblock.serialize_to_vec(); + if let Some(mblock) = next_mblock_opt { + test_debug!( + "Switch to {}-{} ({})", + &self.parent_index_block_hash, + &mblock.block_hash(), + next_seq + ); + self.microblock_hash = mblock.block_hash(); + self.seq = next_seq; + self.next_microblock = mblock; + } else { + // we're EOF + self.finished = true; + } + + return Ok(buf); + } +} + +impl StacksHttpRequest { + pub fn new_getmicroblocks_unconfirmed( + host: PeerHost, + parent_block_id: StacksBlockId, + seq: u16, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/microblocks/unconfirmed/{}/{}", &parent_block_id, seq), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into an unconfirmed microblock stream + pub fn decode_microblocks_unconfirmed(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let mblock_bytes: Vec = contents.try_into()?; + let mut mblock_bytes_ptr = mblock_bytes.as_slice(); + + let mut microblocks = vec![]; + let mut bound_reader = + BoundReader::from_reader(&mut mblock_bytes_ptr, MAX_MESSAGE_LEN.into()); + loop { + let mblock: StacksMicroblock = match read_next(&mut bound_reader) { + Ok(mblock) => Ok(mblock), + Err(e) => match e { + CodecError::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + // end of stream -- this is fine + break; + } + _ => Err(e), + }, + _ => Err(e), + }, + }?; + + microblocks.push(mblock); + if microblocks.len() == MAX_MICROBLOCKS_UNCONFIRMED { + break; + } + } + + Ok(microblocks) + } +} diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs new file mode 100644 index 0000000000..3b14d8397f --- /dev/null +++ b/stackslib/src/net/api/getneighbors.rs @@ -0,0 +1,283 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
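+// GET /v2/neighbors: report this node's bootstrap peers, a random sample of fresh
+// neighbors, and the currently connected inbound and outbound peers.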
+ +use std::io::{Read, Write}; + +use clarity::vm::types::QualifiedContractIdentifier; +use regex::{Captures, Regex}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::hash::Hash160; + +use crate::net::db::PeerDB; +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpVersion, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, NeighborKey, StacksNodeState, MAX_NEIGHBORS_DATA_LEN}; + +#[derive(Clone)] +pub struct RPCNeighborsRequestHandler {} +impl RPCNeighborsRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +/// Items in the NeighborsInfo -- combines NeighborKey and NeighborAddress +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCNeighbor { + pub network_id: u32, + pub peer_version: u32, + #[serde(rename = "ip")] + pub addrbytes: PeerAddress, + pub port: u16, + pub public_key_hash: Hash160, + pub authenticated: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub stackerdbs: Option>, +} + +impl RPCNeighbor { + pub fn from_neighbor_key_and_pubkh( + nk: NeighborKey, + pkh: Hash160, + auth: bool, + stackerdbs: Vec, + ) -> RPCNeighbor { + RPCNeighbor { + network_id: nk.network_id, + peer_version: nk.peer_version, + addrbytes: nk.addrbytes, + port: nk.port, + public_key_hash: pkh, + authenticated: auth, + stackerdbs: Some(stackerdbs), + } + } +} + +/// Struct given back from a call to `/v2/neighbors`. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCNeighborsInfo { + pub bootstrap: Vec, + pub sample: Vec, + pub inbound: Vec, + pub outbound: Vec, +} + +impl RPCNeighborsInfo { + /// Load neighbor address information from the peer network + pub fn from_p2p(network: &PeerNetwork) -> Result { + let network_epoch = network.get_current_epoch().network_epoch; + let network_id = network.get_local_peer().network_id; + let max_neighbor_age = network.get_connection_opts().max_neighbor_age; + let burnchain_view = network.get_chain_view(); + let peerdb_conn = network.peerdb_conn(); + + let bootstrap_nodes = + PeerDB::get_bootstrap_peers(peerdb_conn, network_id).map_err(NetError::DBError)?; + let bootstrap = bootstrap_nodes + .into_iter() + .map(|n| { + let stackerdb_contract_ids = + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + RPCNeighbor::from_neighbor_key_and_pubkh( + n.addr.clone(), + Hash160::from_node_public_key(&n.public_key), + true, + stackerdb_contract_ids, + ) + }) + .collect(); + + let neighbor_sample = PeerDB::get_fresh_random_neighbors( + peerdb_conn, + network_id, + network_epoch, + max_neighbor_age, + MAX_NEIGHBORS_DATA_LEN, + burnchain_view.burn_block_height, + false, + ) + .map_err(NetError::DBError)?; + + let sample: Vec = neighbor_sample + .into_iter() + .map(|n| { + let stackerdb_contract_ids = + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + RPCNeighbor::from_neighbor_key_and_pubkh( + n.addr.clone(), + Hash160::from_node_public_key(&n.public_key), + true, + stackerdb_contract_ids, + ) + }) + .collect(); + + let mut inbound = vec![]; + let mut outbound = vec![]; + for event_id in network.iter_peer_event_ids() { + let convo = if let Some(convo) = network.get_p2p_convo(*event_id) { + convo + } else { + continue; + }; + + let nk = 
convo.to_neighbor_key(); + let naddr = convo.to_neighbor_address(); + if convo.is_outbound() { + outbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( + nk, + naddr.public_key_hash, + convo.is_authenticated(), + convo.get_stackerdb_contract_ids().to_vec(), + )); + } else { + inbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( + nk, + naddr.public_key_hash, + convo.is_authenticated(), + convo.get_stackerdb_contract_ids().to_vec(), + )); + } + } + + Ok(RPCNeighborsInfo { + bootstrap, + sample, + inbound, + outbound, + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCNeighborsRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/neighbors$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetNeighbors".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNeighborsRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let neighbor_data = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + RPCNeighborsInfo::from_p2p(network) + })?; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&neighbor_data)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNeighborsRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let neighbor_info: RPCNeighborsInfo = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(neighbor_info)?) 
+ } +} + +impl StacksHttpRequest { + /// Make a new getneighbors request to this endpoint + pub fn new_getneighbors(host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/neighbors".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Make a new neighbors response + #[cfg(test)] + pub fn new_getneighbors( + neighbors: RPCNeighborsInfo, + with_content_length: bool, + ) -> StacksHttpResponse { + let value = + serde_json::to_value(neighbors).expect("FATAL: failed to encode infallible data"); + let length = serde_json::to_string(&value) + .expect("FATAL: failed to encode infallible data") + .len(); + let preamble = HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + if with_content_length { + Some(length as u32) + } else { + None + }, + HttpContentType::JSON, + true, + ); + let body = HttpResponsePayload::JSON(value); + StacksHttpResponse::new(preamble, body) + } + + pub fn decode_rpc_neighbors(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let rpc_neighbor_info = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(rpc_neighbor_info) + } +} diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs new file mode 100644 index 0000000000..70d02fe198 --- /dev/null +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -0,0 +1,484 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
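+// GET /v2/pox: report PoX state for the current and next reward cycles, as read
+// from the active PoX contract at the requested chain tip.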
+ +use std::io::{Read, Write}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::types::{PrincipalData, StandardPrincipalData}; +use clarity::vm::ClarityVersion; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Sha256Sum; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Clone)] +pub struct RPCPoxInfoRequestHandler {} +impl RPCPoxInfoRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxCurrentCycleInfo { + pub id: u64, + pub min_threshold_ustx: u64, + pub stacked_ustx: u64, + pub is_pox_active: bool, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxNextCycleInfo { + pub id: u64, + pub min_threshold_ustx: u64, + pub min_increment_ustx: u64, + pub stacked_ustx: u64, + pub prepare_phase_start_block_height: u64, + pub blocks_until_prepare_phase: i64, + pub reward_phase_start_block_height: u64, + pub blocks_until_reward_phase: u64, + pub ustx_until_pox_rejection: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxContractVersion { + pub contract_id: String, + pub activation_burnchain_block_height: u64, + pub first_reward_cycle_id: u64, +} + +/// The data we return on GET /v2/pox +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCPoxInfoData { + pub contract_id: String, + pub pox_activation_threshold_ustx: u64, + pub first_burnchain_block_height: u64, + pub current_burnchain_block_height: u64, + pub prepare_phase_block_length: u64, + pub reward_phase_block_length: u64, + pub reward_slots: u64, + pub rejection_fraction: u64, + pub total_liquid_supply_ustx: u64, + pub current_cycle: RPCPoxCurrentCycleInfo, + pub next_cycle: RPCPoxNextCycleInfo, + + // below are included for backwards-compatibility + pub min_amount_ustx: u64, + pub prepare_cycle_length: u64, + pub reward_cycle_id: u64, + pub reward_cycle_length: u64, + pub rejection_votes_left_required: u64, + pub next_reward_cycle_in: u64, + + // Information specific to each PoX contract version + pub contract_versions: Vec, +} + +impl RPCPoxInfoData { + pub fn from_db( + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip: &StacksBlockId, + burnchain: &Burnchain, + ) -> Result { + let mainnet = chainstate.mainnet; + let chain_id = chainstate.chain_id; + let current_burn_height = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?.block_height; + + let pox_contract_name = burnchain + .pox_constants + .active_pox_contract(current_burn_height); + + let 
contract_identifier = boot_code_id(pox_contract_name, mainnet); + let function = "get-pox-info"; + let cost_track = LimitedCostTracker::new_free(); + let sender = PrincipalData::Standard(StandardPrincipalData::transient()); + + debug!( + "Active PoX contract is '{}' (current_burn_height = {}, v1_unlock_height = {}", + &contract_identifier, current_burn_height, burnchain.pox_constants.v1_unlock_height + ); + + // Note: should always be 0 unless somehow configured to start later + let pox_1_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.first_block_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-1 first reward cycle begins before first burn block height".to_string(), + ))?; + + let pox_2_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-2 first reward cycle begins before first burn block height".to_string(), + ))? + + 1; + + let pox_3_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .ok_or(NetError::ChainstateError( + "PoX-3 first reward cycle begins before first burn block height".to_string(), + ))? + + 1; + + let data = chainstate + .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + sender, + None, + cost_track, + |env| env.execute_contract(&contract_identifier, function, &vec![], true), + ) + }) + .map_err(|_| NetError::NotFoundError)?; + + let res = match data { + Some(Ok(res)) => res.expect_result_ok().expect_tuple(), + _ => return Err(NetError::DBError(DBError::NotFoundError)), + }; + + let first_burnchain_block_height = res + .get("first-burnchain-block-height") + .expect(&format!("FATAL: no 'first-burnchain-block-height'")) + .to_owned() + .expect_u128() as u64; + + let min_stacking_increment_ustx = res + .get("min-amount-ustx") + .expect(&format!("FATAL: no 'min-amount-ustx'")) + .to_owned() + .expect_u128() as u64; + + let prepare_cycle_length = res + .get("prepare-cycle-length") + .expect(&format!("FATAL: no 'prepare-cycle-length'")) + .to_owned() + .expect_u128() as u64; + + let rejection_fraction = res + .get("rejection-fraction") + .expect(&format!("FATAL: no 'rejection-fraction'")) + .to_owned() + .expect_u128() as u64; + + let reward_cycle_id = res + .get("reward-cycle-id") + .expect(&format!("FATAL: no 'reward-cycle-id'")) + .to_owned() + .expect_u128() as u64; + + let reward_cycle_length = res + .get("reward-cycle-length") + .expect(&format!("FATAL: no 'reward-cycle-length'")) + .to_owned() + .expect_u128() as u64; + + let current_rejection_votes = res + .get("current-rejection-votes") + .expect(&format!("FATAL: no 'current-rejection-votes'")) + .to_owned() + .expect_u128() as u64; + + let total_liquid_supply_ustx = res + .get("total-liquid-supply-ustx") + .expect(&format!("FATAL: no 'total-liquid-supply-ustx'")) + .to_owned() + .expect_u128() as u64; + + let total_required = (total_liquid_supply_ustx as u128 / 100) + .checked_mul(rejection_fraction as u128) + .ok_or_else(|| NetError::DBError(DBError::Overflow))? 
+ as u64; + + let rejection_votes_left_required = total_required.saturating_sub(current_rejection_votes); + + let burnchain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + let pox_consts = &burnchain.pox_constants; + + if prepare_cycle_length != pox_consts.prepare_length as u64 { + error!( + "PoX Constants in config mismatched with PoX contract constants: {} != {}", + prepare_cycle_length, pox_consts.prepare_length + ); + return Err(NetError::DBError(DBError::Corruption)); + } + + if reward_cycle_length != pox_consts.reward_cycle_length as u64 { + error!( + "PoX Constants in config mismatched with PoX contract constants: {} != {}", + reward_cycle_length, pox_consts.reward_cycle_length + ); + return Err(NetError::DBError(DBError::Corruption)); + } + + let effective_height = burnchain_tip.block_height - first_burnchain_block_height; + let next_reward_cycle_in = reward_cycle_length - (effective_height % reward_cycle_length); + + let next_rewards_start = burnchain_tip.block_height + next_reward_cycle_in; + let next_prepare_phase_start = next_rewards_start - prepare_cycle_length; + + let next_prepare_phase_in = i64::try_from(next_prepare_phase_start) + .map_err(|_| NetError::ChainstateError("Burn block height overflowed i64".into()))? + - i64::try_from(burnchain_tip.block_height).map_err(|_| { + NetError::ChainstateError("Burn block height overflowed i64".into()) + })?; + + let cur_block_pox_contract = pox_consts.active_pox_contract(burnchain_tip.block_height); + let cur_cycle_pox_contract = + pox_consts.active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id)); + let next_cycle_pox_contract = pox_consts + .active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id + 1)); + + let cur_cycle_stacked_ustx = chainstate.get_total_ustx_stacked( + &sortdb, + tip, + reward_cycle_id as u128, + cur_cycle_pox_contract, + )?; + let next_cycle_stacked_ustx = + // next_cycle_pox_contract might not be instantiated yet + match chainstate.get_total_ustx_stacked( + &sortdb, + tip, + reward_cycle_id as u128 + 1, + next_cycle_pox_contract, + ) { + Ok(ustx) => ustx, + Err(ChainError::ClarityError(_)) => { + // contract not instantiated yet + 0 + } + Err(e) => { + return Err(e.into()); + } + }; + + let reward_slots = pox_consts.reward_slots() as u64; + + let cur_cycle_threshold = StacksChainState::get_threshold_from_participation( + total_liquid_supply_ustx as u128, + cur_cycle_stacked_ustx, + reward_slots as u128, + ) as u64; + + let next_threshold = StacksChainState::get_threshold_from_participation( + total_liquid_supply_ustx as u128, + next_cycle_stacked_ustx, + reward_slots as u128, + ) as u64; + + let pox_activation_threshold_ustx = (total_liquid_supply_ustx as u128) + .checked_mul(pox_consts.pox_participation_threshold_pct as u128) + .map(|x| x / 100) + .ok_or_else(|| NetError::DBError(DBError::Overflow))? 
+ as u64; + + let cur_cycle_pox_active = sortdb.is_pox_active(burnchain, &burnchain_tip)?; + + Ok(RPCPoxInfoData { + contract_id: boot_code_id(cur_block_pox_contract, chainstate.mainnet).to_string(), + pox_activation_threshold_ustx, + first_burnchain_block_height, + current_burnchain_block_height: burnchain_tip.block_height, + prepare_phase_block_length: prepare_cycle_length, + reward_phase_block_length: reward_cycle_length - prepare_cycle_length, + reward_slots, + rejection_fraction, + total_liquid_supply_ustx, + current_cycle: RPCPoxCurrentCycleInfo { + id: reward_cycle_id, + min_threshold_ustx: cur_cycle_threshold, + stacked_ustx: cur_cycle_stacked_ustx as u64, + is_pox_active: cur_cycle_pox_active, + }, + next_cycle: RPCPoxNextCycleInfo { + id: reward_cycle_id + 1, + min_threshold_ustx: next_threshold, + min_increment_ustx: min_stacking_increment_ustx, + stacked_ustx: next_cycle_stacked_ustx as u64, + prepare_phase_start_block_height: next_prepare_phase_start, + blocks_until_prepare_phase: next_prepare_phase_in, + reward_phase_start_block_height: next_rewards_start, + blocks_until_reward_phase: next_reward_cycle_in, + ustx_until_pox_rejection: rejection_votes_left_required, + }, + min_amount_ustx: next_threshold, + prepare_cycle_length, + reward_cycle_id, + reward_cycle_length, + rejection_votes_left_required, + next_reward_cycle_in, + contract_versions: vec![ + RPCPoxContractVersion { + contract_id: boot_code_id(POX_1_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain.first_block_height, + first_reward_cycle_id: pox_1_first_cycle, + }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_2_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain.pox_constants.v1_unlock_height + as u64, + first_reward_cycle_id: pox_2_first_cycle, + }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_3_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_3_activation_height + as u64, + first_reward_cycle_id: pox_3_first_cycle, + }, + ], + }) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPoxInfoRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/pox$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
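+ /// The `tip` query parameter selects the chain tip against which the PoX contract
+ /// is evaluated; the body must be empty.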
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetPoxInfo".to_string(), + )); + } + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPoxInfoRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let pox_info_res = + node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + RPCPoxInfoData::from_db(sortdb, chainstate, &tip, network.get_burnchain()) + }); + + let pox_info = match pox_info_res { + Ok(pox_info) => pox_info, + Err(NetError::NotFoundError) | Err(NetError::DBError(DBError::NotFoundError)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("No such chain tip".into()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load PoX info: {:?}", &e)), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&pox_info)?; + Ok((preamble, body)) + } +} + +impl HttpResponse for RPCPoxInfoRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let pox_info: RPCPoxInfoData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(pox_info)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_getpoxinfo(host: PeerHost, tip_req: TipRequest) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/pox".into(), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_rpc_get_pox_info(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let pox_info: RPCPoxInfoData = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(pox_info) + } +} diff --git a/stackslib/src/net/api/getstackerdbchunk.rs b/stackslib/src/net/api/getstackerdbchunk.rs new file mode 100644 index 0000000000..72bd80685a --- /dev/null +++ b/stackslib/src/net/api/getstackerdbchunk.rs @@ -0,0 +1,251 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::{ + CLARITY_NAME_REGEX, CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, + STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ContractName}; +use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlock}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCGetStackerDBChunkRequestHandler { + pub contract_identifier: Option, + pub slot_id: Option, + pub slot_version: Option, +} +impl RPCGetStackerDBChunkRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + slot_id: None, + slot_version: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStackerDBChunkRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)(/(?P[0-9]+)){{0,1}}$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let slot_id = request::get_u32(captures, "slot_id")?; + let slot_version = if captures.name("slot_version").is_some() { + Some(request::get_u32(captures, "slot_version")?) + } else { + None + }; + + self.contract_identifier = Some(contract_identifier); + self.slot_id = Some(slot_id); + self.slot_version = slot_version; + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStackerDBChunkRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.slot_id = None; + self.slot_version = None; + } + + /// Make the response. + /// NOTE: it's not safe to stream chunks; they have to be sent all at once. + /// This is because any streaming algorithm that does not lock the chunk row is at risk of + /// racing a chunk-download or a chunk-push, which would atomically overwrite the data being + /// streamed (and lead to corrupt data being sent). As a result, StackerDB chunks are capped + /// at 1MB, and StackerDB replication is always an opt-in protocol. Node operators subscribe + /// to StackerDB replicas at their own risk. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let slot_id = self + .slot_id + .take() + .ok_or(NetError::SendError("`slot_id` not set".into()))?; + let slot_version = self.slot_version.take(); + + let chunk_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + let chunk_res = if let Some(version) = slot_version.as_ref() { + network + .get_stackerdbs() + .get_chunk(&contract_identifier, slot_id, *version) + .map(|chunk_data| chunk_data.map(|chunk_data| chunk_data.data)) + } else { + network + .get_stackerdbs() + .get_latest_chunk(&contract_identifier, slot_id) + }; + + match chunk_res { + Ok(Some(chunk)) => Ok(chunk), + Ok(None) | Err(NetError::NoSuchStackerDB(..)) => { + // not found + Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB contract or chunk not found".to_string()), + )) + } + Err(e) => { + // some other error + error!("Failed to load StackerDB chunk"; + "smart_contract_id" => contract_identifier.to_string(), + "slot_id" => slot_id, + "slot_version" => slot_version, + "error" => format!("{:?}", &e) + ); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new("Failed to load StackerDB chunk".to_string()), + )) + } + } + }); + + let chunk_resp = match chunk_resp { + Ok(chunk) => chunk, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + 
None, + HttpContentType::Bytes, + ); + + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::from_ram(chunk_resp); + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStackerDBChunkRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let data: Vec = parse_bytes(preamble, body, STACKERDB_MAX_CHUNK_SIZE.into())?; + Ok(HttpResponsePayload::Bytes(data)) + } +} + +impl StacksHttpRequest { + /// Make a request for a stackerDB's chunk + pub fn new_get_stackerdb_chunk( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + slot_id: u32, + slot_version: Option, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + if let Some(version) = slot_version { + format!( + "/v2/stackerdb/{}/{}/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name, slot_id, version + ) + } else { + format!( + "/v2/stackerdb/{}/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name, slot_id + ) + }, + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a chunk + /// If it fails, return Self::Error(..) + pub fn decode_stackerdb_chunk(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let chunk_bytes: Vec = contents.try_into()?; + Ok(chunk_bytes) + } +} diff --git a/stackslib/src/net/api/getstackerdbmetadata.rs b/stackslib/src/net/api/getstackerdbmetadata.rs new file mode 100644 index 0000000000..9d0fd7c049 --- /dev/null +++ b/stackslib/src/net/api/getstackerdbmetadata.rs @@ -0,0 +1,185 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
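As a rough client-side sketch of the chunk endpoint above (not part of the diff; it assumes code living inside stackslib with access to these types, and leaves the actual HTTP transport out):

use clarity::vm::types::QualifiedContractIdentifier;
use stacks_common::types::net::PeerHost;

use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::Error as NetError;

/// Build a GET /v2/stackerdb/{principal}/{contract}/{slot_id} request.
fn latest_chunk_request(
    host: PeerHost,
    db: QualifiedContractIdentifier,
    slot_id: u32,
) -> StacksHttpRequest {
    // `None` asks for the latest slot version; `Some(v)` pins an exact version.
    StacksHttpRequest::new_get_stackerdb_chunk(host, db, slot_id, None)
}

/// Pull the raw chunk bytes out of the node's response (a missing chunk surfaces as an error).
fn chunk_bytes(resp: StacksHttpResponse) -> Result<Vec<u8>, NetError> {
    resp.decode_stackerdb_chunk()
}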
+ +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ContractName}; +use libstackerdb::SlotMetadata; +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlock}; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCGetStackerDBMetadataRequestHandler { + pub contract_identifier: Option, +} +impl RPCGetStackerDBMetadataRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStackerDBMetadataRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + self.contract_identifier = Some(contract_identifier); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStackerDBMetadataRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + + let metadata_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + network + .get_stackerdbs() + .get_db_slot_metadata(&contract_identifier) + .map_err(|_e| { + StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB contract not found".to_string()), + ) + }) + }); + + let metadata_resp = match metadata_resp { + Ok(metadata) => metadata, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&metadata_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStackerDBMetadataRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let metadata: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?) + } +} + +impl StacksHttpRequest { + pub fn new_get_stackerdb_metadata( + host: PeerHost, + stackerdb_contract_id: QualifiedContractIdentifier, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/stackerdb/{}/{}", + &stackerdb_contract_id.issuer, &stackerdb_contract_id.name + ), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response into a block. + /// If it fails, return Self::Error(..) 
+ pub fn decode_stackerdb_metadata(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: Vec = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs new file mode 100644 index 0000000000..961cfe4f1b --- /dev/null +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -0,0 +1,133 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{Hash160, Sha256Sum}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; +use crate::version_string; + +#[derive(Clone)] +pub struct RPCGetStxTransferCostRequestHandler {} +impl RPCGetStxTransferCostRequestHandler { + pub fn new() -> Self { + Self {} + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetStxTransferCostRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/fees/transfer$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetInfo".to_string(), + )); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { + /// Reset internal state + fn restart(&mut self) {} + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // todo -- need to actually estimate the cost / length for token transfers + // right now, it just uses the minimum. + let fee = MINIMUM_TX_FEE_RATE_PER_BYTE; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&fee)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetStxTransferCostRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let fee: u64 = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(fee)?) + } +} + +impl StacksHttpRequest { + pub fn new_get_stx_transfer_cost(host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + "/v2/fees/transfer".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stx_transfer_fee(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let fee: u64 = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(fee) + } +} diff --git a/stackslib/src/net/api/gettransaction_unconfirmed.rs b/stackslib/src/net/api/gettransaction_unconfirmed.rs new file mode 100644 index 0000000000..2ed8ba346e --- /dev/null +++ b/stackslib/src/net/api/gettransaction_unconfirmed.rs @@ -0,0 +1,215 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
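The /v2/fees/transfer handler above currently just reports MINIMUM_TX_FEE_RATE_PER_BYTE, so the response body is a single JSON integer. A hedged client-side sketch (stackslib-internal code assumed, transport omitted):

use stacks_common::types::net::PeerHost;

use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};

fn transfer_fee_request(host: PeerHost) -> StacksHttpRequest {
    StacksHttpRequest::new_get_stx_transfer_cost(host)
}

fn print_transfer_fee(resp: StacksHttpResponse) {
    // The decoded value is the node's flat per-byte fee rate estimate.
    match resp.decode_stx_transfer_fee() {
        Ok(fee_rate) => println!("estimated STX transfer fee rate: {}", fee_rate),
        Err(e) => eprintln!("could not decode fee response: {:?}", e),
    }
}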
+ +use std::io::{Read, Write}; + +use regex::{Captures, Regex}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum}; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum UnconfirmedTransactionStatus { + Microblock { + block_hash: BlockHeaderHash, + seq: u16, + }, + Mempool, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct UnconfirmedTransactionResponse { + pub tx: String, + pub status: UnconfirmedTransactionStatus, +} + +#[derive(Clone)] +pub struct RPCGetTransactionUnconfirmedRequestHandler { + pub txid: Option<Txid>, +} +impl RPCGetTransactionUnconfirmedRequestHandler { + pub fn new() -> Self { + Self { txid: None } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetTransactionUnconfirmedRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/transactions/unconfirmed/(?P<txid>[0-9a-f]{64})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body for GetTransactionUnconfirmed" + .to_string(), + )); + } + + let txid = request::get_txid(captures, "txid")?; + self.txid = Some(txid); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCGetTransactionUnconfirmedRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.txid = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let txid = self + .txid + .take() + .ok_or(NetError::SendError("`txid` not set".into()))?; + + let txinfo_res = + node.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { + // present in the unconfirmed state? + if let Some(ref unconfirmed) = chainstate.unconfirmed_state.as_ref() { + if let Some((transaction, mblock_hash, seq)) = + unconfirmed.get_unconfirmed_transaction(&txid) + { + return Ok(UnconfirmedTransactionResponse { + status: UnconfirmedTransactionStatus::Microblock { + block_hash: mblock_hash, + seq: seq, + }, + tx: to_hex(&transaction.serialize_to_vec()), + }); + } + } + + // present in the mempool? + if let Some(txinfo) = MemPoolDB::get_tx(mempool.conn(), &txid)?
{ + return Ok(UnconfirmedTransactionResponse { + status: UnconfirmedTransactionStatus::Mempool, + tx: to_hex(&txinfo.tx.serialize_to_vec()), + }); + } + + return Err(NetError::NotFoundError); + }); + + let txinfo = match txinfo_res { + Ok(txinfo) => txinfo, + Err(NetError::NotFoundError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!( + "Transaction {} not found in mempool or unconfirmed microblock stream", + &txid + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(e) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to query transaction {}: {:?}", + &txid, &e + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&txinfo)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetTransactionUnconfirmedRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let txinfo: UnconfirmedTransactionResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(txinfo)?) + } +} + +impl StacksHttpRequest { + /// Make a new get-unconfirmed-tx request + pub fn new_gettransaction_unconfirmed(host: PeerHost, txid: Txid) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/transactions/unconfirmed/{}", &txid), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_gettransaction_unconfirmed( + self, + ) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let txinfo: UnconfirmedTransactionResponse = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(txinfo) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs new file mode 100644 index 0000000000..3eaa6148d2 --- /dev/null +++ b/stackslib/src/net/api/mod.rs @@ -0,0 +1,126 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
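A hedged sketch of how a client would use the unconfirmed-transaction lookup above (stackslib-internal code assumed, transport omitted); the response distinguishes a transaction already mined into an unconfirmed microblock from one still waiting in the mempool:

use stacks_common::types::net::PeerHost;

use crate::burnchains::Txid;
use crate::net::api::gettransaction_unconfirmed::UnconfirmedTransactionStatus;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};

fn unconfirmed_tx_request(host: PeerHost, txid: Txid) -> StacksHttpRequest {
    // GET /v2/transactions/unconfirmed/{txid}
    StacksHttpRequest::new_gettransaction_unconfirmed(host, txid)
}

fn describe_unconfirmed_tx(resp: StacksHttpResponse) {
    match resp.decode_gettransaction_unconfirmed() {
        Ok(txinfo) => match txinfo.status {
            UnconfirmedTransactionStatus::Microblock { block_hash, seq } => {
                println!("in unconfirmed microblock {} at sequence {}", block_hash, seq)
            }
            UnconfirmedTransactionStatus::Mempool => println!("still in the mempool"),
        },
        // A transaction in neither place comes back as an HTTP 404, which decodes as an error.
        Err(e) => eprintln!("lookup failed: {:?}", e),
    }
}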
+ +use std::convert::From; + +use clarity::vm::costs::ExecutionCost; +use stacks_common::codec::read_next; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::{StacksMicroblock, StacksTransaction}; +use crate::core::mempool; +use crate::cost_estimates::FeeRateEstimate; +use crate::net::atlas::GetAttachmentResponse; +use crate::net::http::{ + Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{StacksHttp, StacksHttpRequest, StacksHttpResponse}; +use crate::net::Error as NetError; +use crate::stacks_common::codec::StacksMessageCodec; + +pub mod callreadonly; +pub mod getaccount; +pub mod getattachment; +pub mod getattachmentsinv; +pub mod getblock; +pub mod getconstantval; +pub mod getcontractabi; +pub mod getcontractsrc; +pub mod getdatavar; +pub mod getheaders; +pub mod getinfo; +pub mod getistraitimplemented; +pub mod getmapentry; +pub mod getmicroblocks_confirmed; +pub mod getmicroblocks_indexed; +pub mod getmicroblocks_unconfirmed; +pub mod getneighbors; +pub mod getpoxinfo; +pub mod getstackerdbchunk; +pub mod getstackerdbmetadata; +pub mod getstxtransfercost; +pub mod gettransaction_unconfirmed; +pub mod postblock; +pub mod postfeerate; +pub mod postmempoolquery; +pub mod postmicroblock; +pub mod poststackerdbchunk; +pub mod posttransaction; + +#[cfg(test)] +mod tests; + +impl StacksHttp { + /// Register all RPC methods. + /// Put your new RPC method handlers here. + pub fn register_rpc_methods(&mut self) { + self.register_rpc_endpoint(callreadonly::RPCCallReadOnlyRequestHandler::new( + self.maximum_call_argument_size, + self.read_only_call_limit.clone(), + )); + self.register_rpc_endpoint(getaccount::RPCGetAccountRequestHandler::new()); + self.register_rpc_endpoint(getattachment::RPCGetAttachmentRequestHandler::new()); + self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); + self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); + self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); + self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); + self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); + self.register_rpc_endpoint(getdatavar::RPCGetDataVarRequestHandler::new()); + self.register_rpc_endpoint(getheaders::RPCHeadersRequestHandler::new()); + self.register_rpc_endpoint(getinfo::RPCPeerInfoRequestHandler::new()); + self.register_rpc_endpoint( + getistraitimplemented::RPCGetIsTraitImplementedRequestHandler::new(), + ); + self.register_rpc_endpoint(getmapentry::RPCGetMapEntryRequestHandler::new()); + self.register_rpc_endpoint( + getmicroblocks_confirmed::RPCMicroblocksConfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint( + getmicroblocks_indexed::RPCMicroblocksIndexedRequestHandler::new(), + ); + self.register_rpc_endpoint( + getmicroblocks_unconfirmed::RPCMicroblocksUnconfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint(getneighbors::RPCNeighborsRequestHandler::new()); + self.register_rpc_endpoint(getstxtransfercost::RPCGetStxTransferCostRequestHandler::new()); + self.register_rpc_endpoint(getstackerdbchunk::RPCGetStackerDBChunkRequestHandler::new()); + self.register_rpc_endpoint(getpoxinfo::RPCPoxInfoRequestHandler::new()); + self.register_rpc_endpoint( + 
getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), + ); + self.register_rpc_endpoint( + gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), + ); + self.register_rpc_endpoint(postblock::RPCPostBlockRequestHandler::new()); + self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); + self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); + self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); + self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); + self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); + } +} + +/// Helper conversion for NetError to Error +impl From<NetError> for Error { + fn from(e: NetError) -> Error { + match e { + NetError::Http(e) => e, + x => Error::AppError(format!("{:?}", &x)), + } + } +} diff --git a/stackslib/src/net/api/postblock.rs b/stackslib/src/net/api/postblock.rs new file mode 100644 index 0000000000..3380102101 --- /dev/null +++ b/stackslib/src/net/api/postblock.rs @@ -0,0 +1,292 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see .
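Since register_rpc_methods() above is the one place new handlers get wired in, here is a hypothetical minimal endpoint sketched only from the trait items visible in this patch (the /v2/ping path and RPCPingRequestHandler are invented, and the real traits may carry additional provided methods):

use regex::{Captures, Regex};

use crate::net::http::{
    parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse,
    HttpResponseContents, HttpResponsePayload, HttpResponsePreamble,
};
use crate::net::httpcore::{HttpPreambleExtensions, RPCRequestHandler};
use crate::net::{Error as NetError, StacksNodeState};

#[derive(Clone)]
pub struct RPCPingRequestHandler {}

impl HttpRequest for RPCPingRequestHandler {
    fn verb(&self) -> &'static str {
        "GET"
    }

    fn path_regex(&self) -> Regex {
        Regex::new(r#"^/v2/ping$"#).unwrap()
    }

    fn try_parse_request(
        &mut self,
        preamble: &HttpRequestPreamble,
        _captures: &Captures,
        query: Option<&str>,
        _body: &[u8],
    ) -> Result<HttpRequestContents, Error> {
        if preamble.get_content_length() != 0 {
            return Err(Error::DecodeError("expected 0-length body".to_string()));
        }
        Ok(HttpRequestContents::new().query_string(query))
    }
}

impl RPCRequestHandler for RPCPingRequestHandler {
    fn restart(&mut self) {}

    fn try_handle_request(
        &mut self,
        preamble: HttpRequestPreamble,
        _contents: HttpRequestContents,
        node: &mut StacksNodeState,
    ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
        let mut preamble = HttpResponsePreamble::ok_json(&preamble);
        preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height()));
        let body = HttpResponseContents::try_from_json(&"pong".to_string())?;
        Ok((preamble, body))
    }
}

impl HttpResponse for RPCPingRequestHandler {
    fn try_parse_response(
        &self,
        preamble: &HttpResponsePreamble,
        body: &[u8],
    ) -> Result<HttpResponsePayload, Error> {
        let msg: String = parse_json(preamble, body)?;
        Ok(HttpResponsePayload::try_from_json(msg)?)
    }
}

// ...and register_rpc_methods() would gain one more line:
//     self.register_rpc_endpoint(RPCPingRequestHandler {});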
+ +use std::io::{Read, Write}; + +use clarity::vm::costs::ExecutionCost; +use regex::{Captures, Regex}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{hex_bytes, Hash160, Sha256Sum}; +use stacks_common::util::retry::BoundReader; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + StacksBlock, StacksBlockHeader, StacksTransaction, TransactionPayload, +}; +use crate::core::mempool::MemPoolDB; +use crate::cost_estimates::FeeRateEstimate; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::relay::Relayer; +use crate::net::{ + Attachment, BlocksData, BlocksDatum, Error as NetError, StacksMessageType, StacksNodeState, +}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct StacksBlockAcceptedData { + pub stacks_block_id: StacksBlockId, + pub accepted: bool, +} + +#[derive(Clone)] +pub struct RPCPostBlockRequestHandler { + pub block: Option, + pub consensus_hash: Option, +} + +impl RPCPostBlockRequestHandler { + pub fn new() -> Self { + Self { + block: None, + consensus_hash: None, + } + } + + /// Decode a bare block from the body + fn parse_postblock_octets(mut body: &[u8]) -> Result { + let block = StacksBlock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + })?; + Ok(block) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostBlockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/blocks/upload/(?P[0-9a-f]{40})$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock takes application/octet-stream".to_string(), + )); + } + + let consensus_hash = request::get_consensus_hash(captures, "consensus_hash")?; + let block = Self::parse_postblock_octets(body)?; + + self.consensus_hash = Some(consensus_hash); + self.block = Some(block); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostBlockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.consensus_hash = None; + self.block = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // get out the request body + let block = self + .block + .take() + .ok_or(NetError::SendError("`block` not set".into()))?; + let consensus_hash = self + .consensus_hash + .take() + .ok_or(NetError::SendError("`consensus_hash` not set".into()))?; + + let block_hash = block.block_hash(); + + let data_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + match SortitionDB::get_sortition_id_by_consensus(&sortdb.conn(), &consensus_hash) { + Ok(Some(_)) => { + // we recognize this consensus hash + let ic = sortdb.index_conn(); + match Relayer::process_new_anchored_block( + &ic, + chainstate, + &consensus_hash, + &block, + 0, + ) { + Ok(accepted) => { + debug!( + "{} Stacks block {}/{}", + if accepted { + "Accepted" + } else { + "Did not accept" + }, + &consensus_hash, + &block_hash, + ); + return Ok(accepted); + } + Err(e) => { + let msg = format!( + "Failed to process anchored block {}/{}: {:?}", + consensus_hash, + &block.block_hash(), + &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + } + } + Ok(None) => { + let msg = format!( + "Unrecognized consensus hash {} for block {}", + consensus_hash, + &block.block_hash() + ); + debug!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )); + } + Err(e) => { + let msg = format!( + "Failed to query sortition ID by consensus '{}': {:?}", + consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + } + }); + + let data_resp = match data_resp { + Ok(accepted) => StacksBlockAcceptedData { + accepted, + stacks_block_id: StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block_hash, + ), + }, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + // don't forget to forward this to the p2p network! 
+ if data_resp.accepted { + node.set_relay_message(StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + })); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostBlockRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let accepted: StacksBlockAcceptedData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(accepted)?) + } +} + +impl StacksHttpRequest { + /// Make a new post-block request + pub fn new_post_block( + host: PeerHost, + ch: ConsensusHash, + block: StacksBlock, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + format!("/v2/blocks/upload/{}", &ch), + HttpRequestContents::new().payload_stacks(&block), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_stacks_block_accepted(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let result: StacksBlockAcceptedData = serde_json::from_value(response_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(result) + } +} diff --git a/stackslib/src/net/api/postfeerate.rs b/stackslib/src/net/api/postfeerate.rs new file mode 100644 index 0000000000..42fbfbc347 --- /dev/null +++ b/stackslib/src/net/api/postfeerate.rs @@ -0,0 +1,301 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
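A hedged client-side sketch of the block-upload endpoint above (stackslib-internal code assumed, transport omitted); the node reports whether it accepted the block along with the index block hash it derived:

use stacks_common::types::chainstate::ConsensusHash;
use stacks_common::types::net::PeerHost;

use crate::chainstate::stacks::StacksBlock;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};

fn upload_block_request(
    host: PeerHost,
    consensus_hash: ConsensusHash,
    block: StacksBlock,
) -> StacksHttpRequest {
    // POSTs the block as application/octet-stream to /v2/blocks/upload/{consensus_hash}.
    StacksHttpRequest::new_post_block(host, consensus_hash, block)
}

fn block_was_accepted(resp: StacksHttpResponse) -> bool {
    match resp.decode_stacks_block_accepted() {
        Ok(result) => {
            println!("block {}: accepted = {}", result.stacks_block_id, result.accepted);
            result.accepted
        }
        Err(_) => false,
    }
}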
+ +use std::io::{Read, Write}; + +use clarity::vm::costs::ExecutionCost; +use regex::{Captures, Regex}; +use stacks_common::codec::{StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{hex_bytes, Hash160, Sha256Sum}; +use stacks_common::util::retry::BoundReader; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::TransactionPayload; +use crate::core::mempool::MemPoolDB; +use crate::cost_estimates::FeeRateEstimate; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Serialize, Deserialize)] +pub struct FeeRateEstimateRequestBody { + #[serde(default)] + pub estimated_len: Option, + pub transaction_payload: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCFeeEstimate { + pub fee_rate: f64, + pub fee: u64, +} + +impl RPCFeeEstimate { + pub fn estimate_fees(scalar: u64, fee_rates: FeeRateEstimate) -> Vec { + let estimated_fees_f64 = fee_rates.clone() * (scalar as f64); + vec![ + RPCFeeEstimate { + fee: estimated_fees_f64.low as u64, + fee_rate: fee_rates.low, + }, + RPCFeeEstimate { + fee: estimated_fees_f64.middle as u64, + fee_rate: fee_rates.middle, + }, + RPCFeeEstimate { + fee: estimated_fees_f64.high as u64, + fee_rate: fee_rates.high, + }, + ] + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RPCFeeEstimateResponse { + pub estimated_cost: ExecutionCost, + pub estimated_cost_scalar: u64, + pub estimations: Vec, + pub cost_scalar_change_by_byte: f64, +} + +#[derive(Clone)] +pub struct RPCPostFeeRateRequestHandler { + pub estimated_len: Option, + pub transaction_payload: Option, +} +impl RPCPostFeeRateRequestHandler { + pub fn new() -> Self { + Self { + estimated_len: None, + transaction_payload: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostFeeRateRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/fees/transaction$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + let content_len = preamble.get_content_length(); + if !(content_len > 0 && content_len < MAX_PAYLOAD_LEN) { + return Err(Error::DecodeError(format!( + "Invalid Http request: invalid body length for FeeRateEstimate ({})", + content_len + ))); + } + + if preamble.content_type != Some(HttpContentType::JSON) { + return Err(Error::DecodeError( + "Invalid content-type: expected application/json".to_string(), + )); + } + + let body: FeeRateEstimateRequestBody = serde_json::from_slice(body) + .map_err(|e| Error::DecodeError(format!("Failed to parse JSON body: {}", e)))?; + + let payload_hex = if body.transaction_payload.starts_with("0x") { + &body.transaction_payload[2..] + } else { + &body.transaction_payload + }; + + let payload_data = hex_bytes(payload_hex).map_err(|_e| { + Error::DecodeError("Bad hex string supplied for transaction payload".into()) + })?; + + let tx = TransactionPayload::consensus_deserialize(&mut payload_data.as_slice())?; + let estimated_len = + std::cmp::max(body.estimated_len.unwrap_or(0), payload_data.len() as u64); + + self.transaction_payload = Some(tx); + self.estimated_len = Some(estimated_len); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostFeeRateRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.estimated_len = None; + self.transaction_payload = None; + } + + /// Make the response + /// TODO: accurately estimate the cost/length fee for token transfers, based on mempool + /// pressure. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let estimated_len = self + .estimated_len + .take() + .ok_or(NetError::SendError("`estimated_len` not set".into()))?; + let tx = self + .transaction_payload + .take() + .ok_or(NetError::SendError("`transaction_payload` not set".into()))?; + + let data_resp = + node.with_node_state(|_network, sortdb, _chainstate, _mempool, rpc_args| { + let tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, tip.block_height)?; + + if let Some((cost_estimator, fee_estimator, metric)) = rpc_args.get_estimators_ref() + { + let estimated_cost = cost_estimator + .estimate_cost(&tx, &stacks_epoch.epoch_id) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate tx {}: {:?}", + &tx.name(), + &e + )), + ) + })?; + + let scalar_cost = metric.from_cost_and_len( + &estimated_cost, + &stacks_epoch.block_limit, + estimated_len, + ); + let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate fees for tx {}: {:?}", + &tx.name(), + &e + )), + ) + })?; + + let mut estimations = + RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); + + let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; + + for estimate in estimations.iter_mut() { + if estimate.fee < minimum_fee { + estimate.fee = minimum_fee; + } + } + + Ok(RPCFeeEstimateResponse { + estimated_cost, + estimations, + estimated_cost_scalar: scalar_cost, + cost_scalar_change_by_byte: metric.change_per_byte(), + }) + } else { + debug!("Fee 
and cost estimation not configured on this stacks node"); + Err(StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new( + "Fee estimation not supported on this node".to_string(), + ), + )) + } + }); + + let data_resp = match data_resp { + Ok(data) => data, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostFeeRateRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let fee: RPCFeeEstimateResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(fee)?) + } +} + +impl StacksHttpResponse { + pub fn decode_fee_estimate(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let fee: RPCFeeEstimateResponse = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(fee) + } +} + +impl StacksHttpRequest { + pub fn new_post_fee_rate( + host: PeerHost, + fee_request: FeeRateEstimateRequestBody, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/fees/transaction".into(), + HttpRequestContents::new().payload_json( + serde_json::to_value(fee_request) + .expect("FATAL: failed to encode fee rate request to JSON"), + ), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs new file mode 100644 index 0000000000..0ad3168661 --- /dev/null +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -0,0 +1,360 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
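A hedged sketch of calling the fee estimator above from a client (stackslib-internal code assumed, transport omitted). The transaction payload is shipped as hex, and each returned estimate pairs a fee rate with a total fee that the handler has already clamped to the per-byte minimum:

use stacks_common::codec::StacksMessageCodec;
use stacks_common::types::net::PeerHost;
use stacks_common::util::hash::to_hex;

use crate::chainstate::stacks::TransactionPayload;
use crate::net::api::postfeerate::FeeRateEstimateRequestBody;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};

fn fee_estimate_request(
    host: PeerHost,
    payload: &TransactionPayload,
    estimated_tx_len: u64,
) -> StacksHttpRequest {
    let body = FeeRateEstimateRequestBody {
        estimated_len: Some(estimated_tx_len),
        // The handler accepts the hex payload with or without a leading "0x".
        transaction_payload: to_hex(&payload.serialize_to_vec()),
    };
    StacksHttpRequest::new_post_fee_rate(host, body)
}

fn print_fee_estimates(resp: StacksHttpResponse) {
    match resp.decode_fee_estimate() {
        // Estimates come back in low / middle / high order.
        Ok(est) => {
            for e in est.estimations.iter() {
                println!("fee {} at rate {}", e.fee, e.fee_rate);
            }
        }
        // Nodes without a configured estimator answer with a 400, which decodes as an error.
        Err(e) => eprintln!("fee estimation unavailable: {:?}", e),
    }
}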
+ +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use rand::{thread_rng, Rng}; +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use url::form_urlencoded; +use {serde, serde_json}; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksTransaction}; +use crate::core::mempool::{decode_tx_stream, MemPoolDB, MemPoolSyncData}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCMempoolQueryRequestHandler { + pub page_id: Option, + pub mempool_query: Option, +} + +impl RPCMempoolQueryRequestHandler { + pub fn new() -> Self { + Self { + page_id: None, + mempool_query: None, + } + } + + /// Obtain the mempool page_id query string, if it is present + fn get_page_id_query(&self, query: Option<&str>) -> Option { + match query { + Some(query_string) => { + for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { + if key != "page_id" { + continue; + } + if let Ok(page_id) = Txid::from_hex(&value) { + return Some(page_id); + } + } + return None; + } + None => { + return None; + } + } + } +} + +#[derive(Debug)] +pub struct StacksMemPoolStream { + /// Mempool sync data requested + pub tx_query: MemPoolSyncData, + /// last txid loaded + pub last_randomized_txid: Txid, + /// number of transactions visited in the DB so far + pub num_txs: u64, + /// maximum we can visit in the query + pub max_txs: u64, + /// height of the chain at time of query + pub height: u64, + /// Are we done sending transactions, and are now in the process of sending the trailing page + /// ID? + pub corked: bool, + /// Did we run out of transactions to send? + pub finished: bool, + /// link to the mempool DB + mempool_db: DBConn, +} + +impl StacksMemPoolStream { + pub fn new( + mempool_db: DBConn, + tx_query: MemPoolSyncData, + max_txs: u64, + height: u64, + page_id_opt: Option, + ) -> Self { + let last_randomized_txid = page_id_opt.unwrap_or_else(|| { + let random_bytes = thread_rng().gen::<[u8; 32]>(); + Txid(random_bytes) + }); + + Self { + tx_query, + last_randomized_txid: last_randomized_txid, + num_txs: 0, + max_txs: max_txs, + height: height, + corked: false, + finished: false, + mempool_db, + } + } +} + +impl HttpChunkGenerator for StacksMemPoolStream { + fn hint_chunk_size(&self) -> usize { + 4096 + } + + fn generate_next_chunk(&mut self) -> Result, String> { + if self.corked { + test_debug!( + "Finished streaming txs; last page was {:?}", + &self.last_randomized_txid + ); + return Ok(vec![]); + } + + if self.num_txs >= self.max_txs || self.finished { + test_debug!( + "Finished sending transactions after {:?}. 
Corking tx stream.", + &self.last_randomized_txid + ); + + // cork the stream -- send the next page_id the requester should use to continue + // streaming. + self.corked = true; + return Ok(self.last_randomized_txid.serialize_to_vec()); + } + + let remaining = self.max_txs.saturating_sub(self.num_txs); + let (next_txs, next_last_randomized_txid_opt, num_rows_visited) = + MemPoolDB::static_find_next_missing_transactions( + &self.mempool_db, + &self.tx_query, + self.height, + &self.last_randomized_txid, + 1, + remaining, + ) + .map_err(|e| format!("Failed to find next missing transactions: {:?}", &e))?; + + debug!( + "Streaming mempool propagation stepped"; + "rows_visited" => num_rows_visited, + "last_rand_txid" => %self.last_randomized_txid, + "num_txs" => self.num_txs, + "max_txs" => self.max_txs + ); + + if next_txs.len() > 0 { + // have another tx to send + let chunk = next_txs[0].serialize_to_vec(); + if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { + // we have more after this + self.last_randomized_txid = next_last_randomized_txid; + } else { + // that was the last transaction. + // next call will cork the stream + self.finished = true; + } + self.num_txs += next_txs.len() as u64; + return Ok(chunk); + } else if let Some(next_txid) = next_last_randomized_txid_opt { + // no more txs to send + test_debug!( + "No rows returned for {}; cork tx stream with next page {}", + &self.last_randomized_txid, + &next_txid + ); + + // send the page ID as the final chunk + let chunk = next_txid.serialize_to_vec(); + self.finished = true; + self.corked = true; + return Ok(chunk); + } else { + test_debug!( + "No more txs to send after {:?}; corking stream", + &self.last_randomized_txid + ); + + // no more transactions, and none after this + self.finished = true; + self.corked = true; + return Ok(vec![]); + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCMempoolQueryRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/mempool/query$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected nonzero body length".to_string(), + )); + } + + let mut body_ptr = body; + let mempool_body = MemPoolSyncData::consensus_deserialize(&mut body_ptr)?; + + self.mempool_query = Some(mempool_body); + if let Some(page_id) = self.get_page_id_query(query) { + self.page_id = Some(page_id); + } + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCMempoolQueryRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.mempool_query = None; + self.page_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let mempool_query = self + .mempool_query + .take() + .ok_or(NetError::SendError("`mempool_query` not set".into()))?; + let page_id = self.page_id.take(); + + let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { + let height = self.get_stacks_chain_tip(&preamble, sortdb, chainstate).map(|blk| blk.height).unwrap_or(0); + let max_txs = network.connection_opts.mempool_max_tx_query; + debug!( + "Begin mempool query"; + "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), + "block_height" => height, + "max_txs" => max_txs + ); + + let mempool_db = match mempool.reopen(false) { + Ok(db) => db, + Err(e) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to open mempool DB: {:?}", &e)))); + } + }; + + Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, height, page_id)) + }); + + let stream = match stream_res { + Ok(stream) => stream, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCMempoolQueryRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +impl StacksHttpRequest { + pub fn new_mempool_query( + host: PeerHost, + query: MemPoolSyncData, + page_id_opt: Option, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + "/v2/mempool/query".into(), + if let Some(page_id) = page_id_opt { + HttpRequestContents::new() + .query_arg("page_id".into(), format!("{}", &page_id)) + .payload_stacks(&query) + } else { + HttpRequestContents::new().payload_stacks(&query) + }, + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + /// Decode an HTTP response body into the transactions and next-page ID returned from + /// /v2/mempool/query. 
+ pub fn decode_mempool_txs_page( + self, + ) -> Result<(Vec, Option), NetError> { + let contents = self.get_http_payload_ok()?; + let raw_bytes: Vec = contents.try_into()?; + let (txs, page_id_opt) = decode_tx_stream(&mut &raw_bytes[..])?; + Ok((txs, page_id_opt)) + } +} diff --git a/stackslib/src/net/api/postmicroblock.rs b/stackslib/src/net/api/postmicroblock.rs new file mode 100644 index 0000000000..0ff17d724e --- /dev/null +++ b/stackslib/src/net/api/postmicroblock.rs @@ -0,0 +1,272 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Write}; + +use clarity::vm::costs::ExecutionCost; +use regex::{Captures, Regex}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{hex_bytes, Hash160, Sha256Sum}; +use stacks_common::util::retry::BoundReader; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as ChainError, StacksBlockHeader, StacksMicroblock, StacksTransaction, TransactionPayload, +}; +use crate::core::mempool::MemPoolDB; +use crate::cost_estimates::FeeRateEstimate; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::relay::Relayer; +use crate::net::{ + Attachment, Error as NetError, MicroblocksData, StacksMessageType, StacksNodeState, TipRequest, +}; + +#[derive(Clone)] +pub struct RPCPostMicroblockRequestHandler { + pub microblock: Option, +} + +impl RPCPostMicroblockRequestHandler { + pub fn new() -> Self { + Self { microblock: None } + } + + /// Decode a bare block from the body + fn parse_postmicroblock_octets(mut body: &[u8]) -> Result { + let mblock = StacksMicroblock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted microblock: {}", msg)) + } else { + e.into() + } + })?; + Ok(mblock) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostMicroblockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn 
path_regex(&self) -> Regex { + Regex::new(r#"^/v2/microblocks$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostMicroblock" + .to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostMicroblock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostMicroblock takes application/octet-stream".to_string(), + )); + } + + let microblock = Self::parse_postmicroblock_octets(&body)?; + self.microblock = Some(microblock); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostMicroblockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.microblock = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let microblock = self + .microblock + .take() + .ok_or(NetError::SendError("`microblock` not set".into()))?; + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let stacks_tip = match StacksChainState::load_staging_block_info(chainstate.db(), &tip) { + Ok(Some(tip_info)) => tip_info, + Ok(None) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpNotFound::new("No such stacks tip".into()))); + }, + Err(e) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)))); + } + }; + + let consensus_hash = &stacks_tip.consensus_hash; + let block_hash = &stacks_tip.anchored_block_hash; + + // make sure we can accept this + let ch_sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) { + Ok(Some(sn)) => sn, + Ok(None) => { + return Err(StacksHttpResponse::new_error(&preamble, &HttpNotFound::new("No such snapshot for Stacks tip consensus hash".to_string()))); + } + Err(e) => { + debug!("No block snapshot for consensus hash {}", &consensus_hash); + return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(ChainError::DBError(e).into_json()))); + } + }; + + let sort_handle = sortdb.index_handle(&ch_sn.sortition_id); + let parent_block_snapshot = Relayer::get_parent_stacks_block_snapshot(&sort_handle, consensus_hash, block_hash) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load parent block for Stacks tip: {:?}", &e))))?; + + let ast_rules = SortitionDB::get_ast_rules(&sort_handle, parent_block_snapshot.block_height) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load AST rules for Bitcoin block height {}: {:?}", parent_block_snapshot.block_height, &e))))?; + + let epoch_id = self.get_stacks_epoch(&preamble, sortdb, 
parent_block_snapshot.block_height)?.epoch_id; + + if !Relayer::static_check_problematic_relayed_microblock( + chainstate.mainnet, + epoch_id, + µblock, + ast_rules, + ) { + info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", µblock.block_hash(), consensus_hash, &block_hash); + + // NOTE: txid is ignored in chainstate error .into_json() + return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(ChainError::ProblematicTransaction(Txid([0x00; 32])).into_json()))); + } + + match chainstate.preprocess_streamed_microblock(consensus_hash, block_hash, µblock) { + Ok(accepted) => { + debug!("{} uploaded microblock {}/{}-{}", + if accepted { "Accepted" } else { "Did not accept" }, + consensus_hash, + block_hash, + µblock.block_hash() + ); + return Ok((accepted, StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash))); + }, + Err(e) => { + debug!("Failed to process microblock {}/{}-{}: {:?}", &consensus_hash, &block_hash, µblock.block_hash(), &e); + return Err(StacksHttpResponse::new_error(&preamble, &HttpBadRequest::new_json(e.into_json()))); + } + } + }); + + let (accepted, parent_block_id, data_resp) = match data_resp { + Ok((accepted, parent_block_id)) => (accepted, parent_block_id, microblock.block_hash()), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + // don't forget to forward this to the p2p network! + if accepted { + node.set_relay_message(StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: parent_block_id, + microblocks: vec![microblock], + })); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostMicroblockRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let mblock_hash: BlockHeaderHash = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(mblock_hash)?) 
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new post-microblock request
+    pub fn new_post_microblock(
+        host: PeerHost,
+        mblock: StacksMicroblock,
+        tip_req: TipRequest,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "POST".into(),
+            "/v2/microblocks".into(),
+            HttpRequestContents::new()
+                .payload_stacks(&mblock)
+                .for_tip(tip_req),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_stacks_microblock_response(self) -> Result<BlockHeaderHash, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let result: BlockHeaderHash = serde_json::from_value(response_json)
+            .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?;
+        Ok(result)
+    }
+}
diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs
new file mode 100644
index 0000000000..190ba1f710
--- /dev/null
+++ b/stackslib/src/net/api/poststackerdbchunk.rs
@@ -0,0 +1,344 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
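For orientation, here is a minimal sketch of how a client could exercise this new chunk-upload endpoint with the new_post_stackerdb_chunk() constructor defined at the end of this file. The contract identifier, slot numbers, payload bytes, and empty MessageSignature are placeholders; a real chunk must carry the slot owner's signature before the node will accept it.

fn example_post_stackerdb_chunk(addr: std::net::SocketAddr) {
    use clarity::vm::types::QualifiedContractIdentifier;
    use stacks_common::util::secp256k1::MessageSignature;

    // Placeholder StackerDB contract; any (address, name) pair the node replicates.
    let contract_id = QualifiedContractIdentifier::parse(
        "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.my-stackerdb",
    )
    .unwrap();

    // Builds POST /v2/stackerdb/{address}/{contract}/chunks for slot 0, version 1.
    // MessageSignature::empty() stands in for a real slot-owner signature.
    let request = StacksHttpRequest::new_post_stackerdb_chunk(
        addr.into(),
        contract_id,
        0,
        1,
        MessageSignature::empty(),
        vec![0x01, 0x02, 0x03],
    );
    let bytes = request.try_serialize().unwrap();
    assert!(!bytes.is_empty());
}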
+ +use std::fs::OpenOptions; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::{ + CLARITY_NAME_REGEX, CONTRACT_NAME_REGEX_STRING, PRINCIPAL_DATA_REGEX_STRING, + STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::{ClarityName, ContractName}; +use libstackerdb::{ + SlotMetadata, StackerDBChunkAckData, StackerDBChunkData, STACKERDB_MAX_CHUNK_SIZE, +}; +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::MessageSignature; +use {serde, serde_json}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainError, StacksBlock}; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{ + Error as NetError, StackerDBPushChunkData, StacksMessageType, StacksNodeState, TipRequest, +}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCPostStackerDBChunkRequestHandler { + pub contract_identifier: Option, + pub chunk: Option, +} +impl RPCPostStackerDBChunkRequestHandler { + pub fn new() -> Self { + Self { + contract_identifier: None, + chunk: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostStackerDBChunkRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^/v2/stackerdb/(?P
{})/(?P{})/chunks$"#, + *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-empty body".to_string(), + )); + } + + if preamble.get_content_length() > MAX_MESSAGE_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostStackerDBChunk body is too big".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + let chunk: StackerDBChunkData = serde_json::from_slice(body).map_err(Error::JsonError)?; + + self.contract_identifier = Some(contract_identifier); + self.chunk = Some(chunk); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum StackerDBErrorCodes { + DataAlreadyExists, + NoSuchSlot, +} + +impl StackerDBErrorCodes { + pub fn code(&self) -> u32 { + match self { + Self::DataAlreadyExists => 0, + Self::NoSuchSlot => 1, + } + } + + pub fn reason(&self) -> &'static str { + match self { + Self::DataAlreadyExists => "Data for this slot and version already exist", + Self::NoSuchSlot => "No such StackerDB slot", + } + } + + pub fn into_json(self) -> serde_json::Value { + json!({ + "code": self.code(), + "message": format!("{:?}", &self), + "reason": self.reason() + }) + } +} + +impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.chunk = None; + } + + /// Make the response. + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self + .contract_identifier + .take() + .ok_or(NetError::SendError("`contract_identifier` not set".into()))?; + let stackerdb_chunk = self + .chunk + .take() + .ok_or(NetError::SendError("`chunk` not set".into()))?; + + let ack_resp = + node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { + let tx = if let Ok(tx) = network.stackerdbs_tx_begin(&contract_identifier) { + tx + } else { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB not found".to_string()), + )); + }; + if let Err(_e) = tx.get_stackerdb_id(&contract_identifier) { + // shouldn't be necessary (this is checked against the peer network's configured DBs), + // but you never know. 
+ return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("StackerDB not found".to_string()), + )); + } + if let Err(_e) = tx.try_replace_chunk( + &contract_identifier, + &stackerdb_chunk.get_slot_metadata(), + &stackerdb_chunk.data, + ) { + let slot_metadata_opt = + match tx.get_slot_metadata(&contract_identifier, stackerdb_chunk.slot_id) { + Ok(slot_opt) => slot_opt, + Err(e) => { + // some other error + error!("Failed to load replaced StackerDB chunk metadata"; + "smart_contract_id" => contract_identifier.to_string(), + "error" => format!("{:?}", &e) + ); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to load StackerDB chunk for {}: {:?}", + &contract_identifier, &e + )), + )); + } + }; + + let (reason, slot_metadata_opt) = if let Some(slot_metadata) = slot_metadata_opt + { + ( + serde_json::to_string( + &StackerDBErrorCodes::DataAlreadyExists.into_json(), + ) + .unwrap_or("(unable to encode JSON)".to_string()), + Some(slot_metadata), + ) + } else { + ( + serde_json::to_string(&StackerDBErrorCodes::NoSuchSlot.into_json()) + .unwrap_or("(unable to encode JSON)".to_string()), + None, + ) + }; + + let ack = StackerDBChunkAckData { + accepted: false, + reason: Some(reason), + metadata: slot_metadata_opt, + }; + return Ok(ack); + } + + let slot_metadata = if let Ok(Some(md)) = + tx.get_slot_metadata(&contract_identifier, stackerdb_chunk.slot_id) + { + md + } else { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new( + "Failed to load slot metadata after storing chunk".to_string(), + ), + )); + }; + + if let Err(e) = tx.commit() { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to commit StackerDB tx: {:?}", &e)), + )); + } + + // success! + let ack = StackerDBChunkAckData { + accepted: true, + reason: None, + metadata: Some(slot_metadata), + }; + + return Ok(ack); + }); + + let ack_resp = match ack_resp { + Ok(ack) => ack, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + if ack_resp.accepted { + let push_chunk_data = StackerDBPushChunkData { + contract_id: contract_identifier, + rc_consensus_hash: node.with_node_state(|network, _, _, _, _| { + network.get_chain_view().rc_consensus_hash.clone() + }), + chunk_data: stackerdb_chunk, + }; + node.set_relay_message(StacksMessageType::StackerDBPushChunk(push_chunk_data)); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&ack_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostStackerDBChunkRequestHandler { + /// Decode this response from a byte stream. This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let ack: StackerDBChunkAckData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(ack)?) 
+    }
+}
+
+impl StacksHttpRequest {
+    pub fn new_post_stackerdb_chunk(
+        host: PeerHost,
+        stackerdb_contract_id: QualifiedContractIdentifier,
+        slot_id: u32,
+        slot_version: u32,
+        sig: MessageSignature,
+        data: Vec<u8>,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "POST".into(),
+            format!(
+                "/v2/stackerdb/{}/{}/chunks",
+                &stackerdb_contract_id.issuer, &stackerdb_contract_id.name
+            ),
+            HttpRequestContents::new().payload_json(
+                serde_json::to_value(StackerDBChunkData {
+                    slot_id,
+                    slot_version,
+                    sig,
+                    data,
+                })
+                .expect("FATAL: failed to construct JSON from infallible structure"),
+            ),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    /// Decode an HTTP response into a chunk
+    /// If it fails, return Self::Error(..)
+    pub fn decode_stackerdb_chunk_ack(self) -> Result<StackerDBChunkAckData, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let data: StackerDBChunkAckData = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(data)
+    }
+}
diff --git a/stackslib/src/net/api/posttransaction.rs b/stackslib/src/net/api/posttransaction.rs
new file mode 100644
index 0000000000..655a3a221a
--- /dev/null
+++ b/stackslib/src/net/api/posttransaction.rs
@@ -0,0 +1,366 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
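Alongside raw application/octet-stream bodies, this handler accepts a JSON body that pairs a hex-encoded transaction with an optional Atlas attachment. A minimal sketch of building that body, mirroring the new_post_transaction_with_attachment() constructor further down in this file; the helper name is illustrative.

// Hypothetical helper: the JSON form of a transaction submission is a
// hex-encoded, consensus-serialized transaction plus an optional
// hex-encoded attachment.
fn example_post_transaction_body(
    tx: &StacksTransaction,
    attachment: Option<Vec<u8>>,
) -> PostTransactionRequestBody {
    PostTransactionRequestBody {
        tx: to_hex(&tx.serialize_to_vec()),
        attachment: attachment.map(|bytes| to_hex(&bytes)),
    }
}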
+ +use std::io::{Read, Write}; + +use clarity::vm::costs::ExecutionCost; +use regex::{Captures, Regex}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha256Sum}; +use stacks_common::util::retry::BoundReader; + +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{StacksTransaction, TransactionPayload}; +use crate::core::mempool::MemPoolDB; +use crate::cost_estimates::FeeRateEstimate; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, + HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::relay::Relayer; +use crate::net::{Attachment, Error as NetError, StacksMessageType, StacksNodeState}; + +#[derive(Serialize, Deserialize)] +pub struct PostTransactionRequestBody { + pub tx: String, + pub attachment: Option, +} + +#[derive(Clone)] +pub struct RPCPostTransactionRequestHandler { + pub tx: Option, + pub attachment: Option, +} +impl RPCPostTransactionRequestHandler { + pub fn new() -> Self { + Self { + tx: None, + attachment: None, + } + } + + /// Decode a bare transaction from the body + fn parse_posttransaction_octets(mut body: &[u8]) -> Result { + let tx = StacksTransaction::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + })?; + Ok(tx) + } + + /// Decode a JSON-encoded transaction and Atlas attachment pair + fn parse_posttransaction_json( + body: &[u8], + ) -> Result<(StacksTransaction, Option), Error> { + let body: PostTransactionRequestBody = serde_json::from_slice(body) + .map_err(|_e| Error::DecodeError("Failed to parse body".into()))?; + + let tx = { + let tx_bytes = hex_bytes(&body.tx) + .map_err(|_e| Error::DecodeError("Failed to parse tx".into()))?; + StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + }) + }?; + + let attachment = match body.attachment { + None => None, + Some(ref attachment_content) => { + let content = hex_bytes(attachment_content) + .map_err(|_e| Error::DecodeError("Failed to parse attachment".into()))?; + Some(Attachment::new(content)) + } + }; + + Ok((tx, attachment)) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostTransactionRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/transactions$"#).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostTransaction" + .to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostTransaction body is too big".to_string(), + )); + } + + match preamble.content_type { + None => { + return Err(Error::DecodeError( + "Missing Content-Type for transaction".to_string(), + )); + } + Some(HttpContentType::Bytes) => { + // expect a bare transaction + let tx = Self::parse_posttransaction_octets(body)?; + self.tx = Some(tx); + self.attachment = None; + } + Some(HttpContentType::JSON) => { + // expect a transaction and an attachment + let (tx, attachment_opt) = Self::parse_posttransaction_json(body)?; + self.tx = Some(tx); + self.attachment = attachment_opt; + } + _ => { + return Err(Error::DecodeError( + "Wrong Content-Type for transaction; expected application/json".to_string(), + )); + } + } + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostTransactionRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.tx = None; + self.attachment = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tx = self + .tx + .take() + .ok_or(NetError::SendError("`tx` not set".into()))?; + let attachment_opt = self.attachment.take(); + + let txid = tx.txid(); + + let data_resp = node.with_node_state(|network, sortdb, chainstate, mempool, rpc_args| { + if mempool.has_tx(&txid) { + // will not accept + debug!("Mempool already has POSTed transaction {}", &txid); + return Ok(false); + } + + let event_observer = rpc_args.event_observer.as_deref(); + let burn_tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, burn_tip.block_height)?; + + // check for defects which can be determined statically + if Relayer::do_static_problematic_checks() + && !Relayer::static_check_problematic_relayed_tx( + chainstate.mainnet, + stacks_epoch.epoch_id, + &tx, + network.ast_rules, + ) + .is_ok() + { + // we statically check the tx for known problems, and it had some. Reject. 
+ debug!( + "Transaction {} is problematic in rules {:?}; will not store or relay", + &tx.txid(), + network.ast_rules + ); + return Ok(false); + } + + let stacks_tip = self.get_stacks_chain_tip(&preamble, sortdb, chainstate)?; + + // accept to mempool + if let Err(e) = mempool.submit( + chainstate, + sortdb, + &stacks_tip.consensus_hash, + &stacks_tip.anchored_block_hash, + &tx, + event_observer, + &stacks_epoch.block_limit, + &stacks_epoch.epoch_id, + ) { + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new_json(e.into_json(&txid)), + )); + }; + + // store attachment as well, if it's part of a contract-call + if let Some(ref attachment) = attachment_opt { + if let TransactionPayload::ContractCall(ref contract_call) = tx.payload { + if network + .get_atlasdb() + .should_keep_attachment(&contract_call.to_clarity_contract_id(), attachment) + { + network + .get_atlasdb_mut() + .insert_uninstantiated_attachment(attachment) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!( + "Failed to store contract-call attachment: {:?}", + &e + )), + ) + })?; + } + } + } + + Ok(true) + }); + + let (accepted, txid) = match data_resp { + Ok(accepted) => (accepted, txid), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + // don't forget to forward this to the p2p network! + if accepted { + node.set_relay_message(StacksMessageType::Transaction(tx)); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&txid)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostTransactionRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let txid: Txid = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(txid)?) 
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new post-transaction request
+    pub fn new_post_transaction(host: PeerHost, tx: StacksTransaction) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "POST".into(),
+            "/v2/transactions".to_string(),
+            HttpRequestContents::new().payload_stacks(&tx),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+
+    /// Make a new post-transaction request with an attachment
+    pub fn new_post_transaction_with_attachment(
+        host: PeerHost,
+        tx: StacksTransaction,
+        attachment: Option<Vec<u8>>,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "POST".into(),
+            "/v2/transactions".to_string(),
+            HttpRequestContents::new().payload_json(
+                serde_json::to_value(PostTransactionRequestBody {
+                    tx: to_hex(&tx.serialize_to_vec()),
+                    attachment: attachment.map(|bytes| to_hex(&bytes)),
+                })
+                .expect("FATAL: failed to construct request from infallible data"),
+            ),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    #[cfg(test)]
+    pub fn new_posttransaction(txid: Txid, with_content_length: bool) -> StacksHttpResponse {
+        let value = serde_json::to_value(txid).expect("FATAL: failed to serialize infallible data");
+        let length = serde_json::to_string(&value)
+            .expect("FATAL: failed to serialize infallible data")
+            .len();
+        let preamble = HttpResponsePreamble::new(
+            HttpVersion::Http11,
+            200,
+            "OK".to_string(),
+            if with_content_length {
+                Some(length as u32)
+            } else {
+                None
+            },
+            HttpContentType::JSON,
+            true,
+        );
+        let body = HttpResponsePayload::JSON(value);
+        StacksHttpResponse::new(preamble, body)
+    }
+
+    pub fn decode_txid(self) -> Result<Txid, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let txid: Txid = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(txid)
+    }
+}
diff --git a/stackslib/src/net/api/tests/callreadonly.rs b/stackslib/src/net/api/tests/callreadonly.rs
new file mode 100644
index 0000000000..577d2e0b12
--- /dev/null
+++ b/stackslib/src/net/api/tests/callreadonly.rs
@@ -0,0 +1,273 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
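The expected results asserted in these tests are hex-encoded Clarity values. A minimal sketch of decoding them, assuming the Value::try_deserialize_hex_untyped() helper from the clarity crate; the function name below is illustrative.

fn example_decode_expected_results() {
    use clarity::vm::types::ResponseData;
    use clarity::vm::Value;

    // type tag 0x01 followed by a 16-byte big-endian integer is u1
    let confirmed =
        Value::try_deserialize_hex_untyped("0x0100000000000000000000000000000001").unwrap();
    assert_eq!(confirmed, Value::UInt(1));

    // type tag 0x07 wraps an inner value in (ok ...); here the inner value is int 1
    let unconfirmed =
        Value::try_deserialize_hex_untyped("0x070000000000000000000000000000000001").unwrap();
    assert_eq!(
        unconfirmed,
        Value::Response(ResponseData {
            committed: true,
            data: Box::new(Value::Int(1)),
        })
    );
}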
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = + callreadonly::RPCCallReadOnlyRequestHandler::new(4096, BLOCK_LIMIT_MAINNET_21); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.function, Some("ro-test".into())); + assert_eq!( + handler.sender, + Some(PrincipalData::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap()) + ); + assert_eq!(handler.sponsor, None); + assert_eq!(handler.arguments, Some(vec![])); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + // restart clears the handler state + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.function.is_none()); + assert!(handler.sender.is_none()); + assert!(handler.sponsor.is_none()); + assert!(handler.arguments.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query confirmed tip + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-confirmed".try_into().unwrap(), + vec![], + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query unconfirmed tip + let request = 
StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent function + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "does-not-exist".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent contract + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-test".try_into().unwrap(), + vec![], + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existent tip + let request = StacksHttpRequest::new_callreadonlyfunction( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + None, + "ro-confirmed".try_into().unwrap(), + vec![], + TipRequest::SpecificTip(StacksBlockId([0x11; 32])), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // confirmed tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(resp.okay); + assert!(resp.result.is_some()); + assert!(resp.cause.is_none()); + + // u1 + assert_eq!(resp.result.unwrap(), "0x0100000000000000000000000000000001"); + + // unconfirmed tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(resp.okay); + assert!(resp.result.is_some()); + assert!(resp.cause.is_none()); + + // (ok 1) + assert_eq!( + resp.result.unwrap(), + "0x070000000000000000000000000000000001" + ); + + // non-existent function + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_call_readonly_response().unwrap(); + + assert!(!resp.okay); + assert!(resp.result.is_none()); + assert!(resp.cause.is_some()); + + assert!(resp.cause.unwrap().find("UndefinedFunction").is_some()); + + // non-existent function + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = 
response.decode_call_readonly_response().unwrap(); + + assert!(!resp.okay); + assert!(resp.result.is_none()); + assert!(resp.cause.is_some()); + + assert!(resp.cause.unwrap().find("NoSuchContract").is_some()); + + // non-existent tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, payload) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getaccount.rs b/stackslib/src/net/api/tests/getaccount.rs new file mode 100644 index 0000000000..996edfd3e2 --- /dev/null +++ b/stackslib/src/net/api/tests/getaccount.rs @@ -0,0 +1,206 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + false, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getaccount::RPCGetAccountRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.account, + Some(PrincipalData::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap()) + ); + + assert_eq!(&preamble, request.preamble()); + + // reset works + handler.restart(); + assert!(handler.account.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = 
vec![]; + + // query existing account + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + false, + ); + requests.push(request); + + // query existing account with proof + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query nonexistant + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST165ZBV86V4NJ0V73F52YZGBMJ0FZAQ1BM43C553") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing account with unconfirmed state + let request = StacksHttpRequest::new_getaccount( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9aca00"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 2); + assert!(resp.balance_proof.is_none()); + assert!(resp.nonce_proof.is_none()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9aca00"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 2); + assert!(resp.balance_proof.is_some()); + assert!(resp.nonce_proof.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x00000000000000000000000000000000"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 0); + assert_eq!(resp.balance_proof, Some("".to_string())); + assert_eq!(resp.nonce_proof, Some("".to_string())); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_account_entry_response().unwrap(); + + assert_eq!(resp.balance, "0x0000000000000000000000003b9ac985"); + assert_eq!(resp.locked, "0x00000000000000000000000000000000"); + assert_eq!(resp.nonce, 4); + assert!(resp.balance_proof.is_some()); + assert!(resp.nonce_proof.is_some()); +} diff --git a/stackslib/src/net/api/tests/getattachment.rs b/stackslib/src/net/api/tests/getattachment.rs new file mode 100644 
index 0000000000..aa409aea42 --- /dev/null +++ b/stackslib/src/net/api/tests/getattachment.rs @@ -0,0 +1,113 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::Hash160; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{Attachment, ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getattachment(addr.into(), Hash160([0x11; 20])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getattachment::RPCGetAttachmentRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.attachment_hash, Some(Hash160([0x11; 20]))); + + assert_eq!(&preamble, request.preamble()); + + // restart works + handler.restart(); + assert!(handler.attachment_hash.is_none()); +} + +#[test] +fn test_try_make_response() { + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + let attachment_hash = attachment.hash(); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing attachment + let request = StacksHttpRequest::new_getattachment(addr.into(), attachment_hash.clone()); + requests.push(request); + + // query non-existant + let request = StacksHttpRequest::new_getattachment(addr.into(), Hash160([0x22; 20])); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = 
response.decode_atlas_get_attachment().unwrap(); + assert_eq!(resp.attachment, attachment); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getattachmentsinv.rs b/stackslib/src/net/api/tests/getattachmentsinv.rs new file mode 100644 index 0000000000..c7dd8e0328 --- /dev/null +++ b/stackslib/src/net/api/tests/getattachmentsinv.rs @@ -0,0 +1,150 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use serde_json; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::{test_rpc, TestRPC}; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{Attachment, ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let mut pages = HashSet::new(); + for i in 0..10 { + pages.insert(i); + } + + let request = + StacksHttpRequest::new_getattachmentsinv(addr.into(), StacksBlockId([0x11; 32]), pages); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.index_block_hash, Some(StacksBlockId([0x11; 32]))); + assert_eq!( + handler.page_indexes, + Some(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.index_block_hash.is_none()); + assert!(handler.page_indexes.is_none()); +} + +#[test] +fn test_try_make_response() { + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + let attachment_hash = attachment.hash(); + + let addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + + let mut requests = vec![]; + let mut pages = HashSet::new(); + pages.insert(1); + + // query existing attachment + let request = StacksHttpRequest::new_getattachmentsinv( + addr.into(), + stacks_chain_tip.clone(), + pages.clone(), + ); + requests.push(request); + + // query non-existant block + let request = StacksHttpRequest::new_getattachmentsinv( + addr.into(), + StacksBlockId([0x11; 32]), + pages.clone(), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_atlas_attachments_inv_response().unwrap(); + + // there should be a bit set in the inventory vector + assert_eq!(resp.block_id, stacks_chain_tip); + assert_eq!(resp.pages.len(), 1); + assert_eq!(resp.pages[0].index, 1); + assert!(resp.pages[0].inventory.iter().find(|&&x| x == 1).is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_atlas_attachments_inv_response().unwrap(); + + // this is a HTTP 200, but no bits are set + assert_eq!(resp.block_id, StacksBlockId([0x11; 32])); + assert_eq!(resp.pages.len(), 1); + assert_eq!(resp.pages[0].index, 1); + assert!(resp.pages[0].inventory.iter().find(|&&x| x == 1).is_none()); +} diff --git a/stackslib/src/net/api/tests/getblock.rs b/stackslib/src/net/api/tests/getblock.rs new file mode 100644 index 0000000000..c873c52620 --- /dev/null +++ b/stackslib/src/net/api/tests/getblock.rs @@ -0,0 +1,189 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
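These tests address blocks by StacksBlockId rather than by anchored block hash alone. A minimal sketch of how the id passed to new_getblock() relates to the (consensus hash, block hash) pair, using arbitrary placeholder hashes; the helper name is illustrative.

fn example_index_block_hash() {
    use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash};

    // The StacksBlockId used to request a block is derived from the consensus
    // hash of its sortition and its anchored block hash.
    let consensus_hash = ConsensusHash([0x01; 20]);
    let block_hash = BlockHeaderHash([0x02; 32]);
    let index_block_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash);
    assert_ne!(index_block_hash, StacksBlockId([0x00; 32]));
}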
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::net::api::getblock::StacksBlockStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getblock(addr.into(), StacksBlockId([0x11; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getblock::RPCBlocksRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.block_id, Some(StacksBlockId([0x11; 32]))); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing block + let request = StacksHttpRequest::new_getblock(addr.into(), stacks_chain_tip.clone()); + requests.push(request); + + // query non-existant block + let request = StacksHttpRequest::new_getblock(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the block + let response = responses.remove(0); + let resp = response.decode_block().unwrap(); + + assert_eq!( + StacksBlockHeader::make_index_block_hash(&consensus_hash, &resp.block_hash()), + stacks_chain_tip + ); + + // no block + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_blocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_16k_block(&privk); + + let consensus_hash = 
ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let index_block_header = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); + + // can't stream a non-existant block + assert!(StacksBlockStream::new(&chainstate, &index_block_header).is_err()); + + // store block to staging + store_staging_block( + &mut chainstate, + &consensus_hash, + &block, + &parent_consensus_hash, + 1, + 2, + ); + + // should succeed now + let mut stream = StacksBlockStream::new(&chainstate, &index_block_header).unwrap(); + + // stream it back + let mut all_block_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.is_empty() { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + all_block_bytes.len() + ); + all_block_bytes.append(&mut next_bytes); + } + + // should decode back into the block + let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); + assert_eq!(staging_block, block); + + // accept it + set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); + + // can still stream it + let mut stream = StacksBlockStream::new(&chainstate, &index_block_header).unwrap(); + + // stream from chunk store + let mut all_block_bytes = vec![]; + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.is_empty() { + break; + } + test_debug!( + "Got {} more bytes from chunkstore; add to {} total", + next_bytes.len(), + all_block_bytes.len() + ); + all_block_bytes.append(&mut next_bytes); + } + + // should decode back into the block + let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); + assert_eq!(staging_block, block); +} diff --git a/stackslib/src/net/api/tests/getconstantval.rs b/stackslib/src/net/api/tests/getconstantval.rs new file mode 100644 index 0000000000..4e03679f1f --- /dev/null +++ b/stackslib/src/net/api/tests/getconstantval.rs @@ -0,0 +1,187 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
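The data fields asserted in this module are hex-encoded Clarity values. Assuming the test contracts define the two constants as 123 and 456, and assuming Value::try_deserialize_hex_untyped() from the clarity crate, they decode as sketched below; the function name is illustrative.

fn example_decode_constant_values() {
    use clarity::vm::Value;

    // confirmed contract constant (assumed to be defined as 123)
    let cst =
        Value::try_deserialize_hex_untyped("0x000000000000000000000000000000007b").unwrap();
    assert_eq!(cst, Value::Int(123));

    // unconfirmed contract constant (assumed to be defined as 456)
    let cst_unconfirmed =
        Value::try_deserialize_hex_untyped("0x00000000000000000000000000000001c8").unwrap();
    assert_eq!(cst_unconfirmed, Value::Int(456));
}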
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-const".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getconstantval::RPCGetConstantValRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.constname, Some("test-const".into())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.constname.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "cst".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "cst-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant data + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getconstantval( + addr.into(), + 
StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "cst".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_constant_val_response().unwrap(); + + assert_eq!(resp.data, "0x000000000000000000000000000000007b"); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_constant_val_response().unwrap(); + + assert_eq!(resp.data, "0x00000000000000000000000000000001c8"); + + // no such data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getcontractabi.rs b/stackslib/src/net/api/tests/getcontractabi.rs new file mode 100644 index 0000000000..a1b3738a92 --- /dev/null +++ b/stackslib/src/net/api/tests/getcontractabi.rs @@ -0,0 +1,158 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
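For the ABI endpoint, the happy-path cases in the test below stop at decode_contract_abi_response().unwrap() without inspecting the decoded interface. A hedged extra check is sketched here; it only assumes the decoded ABI type implements serde::Serialize (it must, since the node emits it as JSON), and the helper name is invented.

// Hypothetical follow-up assertion for the successful cases in the test below:
// the decoded ABI should at least re-serialize to a JSON object.
fn assert_abi_is_json_object<T: serde::Serialize>(abi: &T) {
    let as_json = serde_json::to_value(abi).expect("ABI should re-serialize to JSON");
    assert!(as_json.is_object(), "ABI response should be a JSON object");
}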
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getcontractabi::RPCGetContractAbiRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getcontractabi( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_abi_response().unwrap(); + + 
// unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_abi_response().unwrap(); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getcontractsrc.rs b/stackslib/src/net/api/tests/getcontractsrc.rs new file mode 100644 index 0000000000..cb376b66d2 --- /dev/null +++ b/stackslib/src/net/api/tests/getcontractsrc.rs @@ -0,0 +1,167 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getcontractsrc::RPCGetContractSrcRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + 
QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getcontractsrc( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_src_response().unwrap(); + assert_eq!(resp.publish_height, 1); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_contract_src_response().unwrap(); + assert_eq!(resp.publish_height, 2); + assert!(resp.marf_proof.is_some()); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getdatavar.rs b/stackslib/src/net/api/tests/getdatavar.rs new file mode 100644 index 0000000000..f4ce526fc9 --- /dev/null +++ b/stackslib/src/net/api/tests/getdatavar.rs @@ -0,0 +1,194 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
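The data fields asserted in the data-var (and map-entry) tests below are hex strings of consensus-serialized Clarity values: a 0x00 prefix is an int, 0x01 a uint, 0x09 is none, and 0x0a wraps (some ...). The commented-out legacy map-entry tests near the end of this diff already decode such payloads with Value::try_deserialize_hex_untyped; the sketch below (helper name invented) shows the same idea, so assertions could compare typed Values instead of raw hex.

use clarity::vm::Value;

// Sketch: decode the hex payload returned by the GetDataVar / GetMapEntry endpoints
// back into a Clarity Value, e.g. "0x0100000000000000000000000000000001" -> Value::UInt(1).
fn decode_clarity_hex(hex_data: &str) -> Value {
    Value::try_deserialize_hex_untyped(hex_data)
        .expect("response should be a consensus-serialized Clarity value")
}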
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-var".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getdatavar::RPCGetDataVarRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.varname, Some("test-var".into())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.varname.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "bar-unconfirmed".try_into().unwrap(), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant var + let request = StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = 
StacksHttpRequest::new_getdatavar( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "bar".try_into().unwrap(), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0000000000000000000000000000000000"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_data_var_response().unwrap(); + assert_eq!(resp.data, "0x0100000000000000000000000000000001"); + assert!(resp.marf_proof.is_some()); + + // no such var + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getheaders.rs b/stackslib/src/net/api/tests/getheaders.rs new file mode 100644 index 0000000000..4ea4480082 --- /dev/null +++ b/stackslib/src/net/api/tests/getheaders.rs @@ -0,0 +1,400 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
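The streaming tests in this changeset all drain a chunked stream the same way: call generate_next_chunk() until it returns an empty chunk and concatenate the results. The header tests below wrap this in stream_headers_to_vec(); the block-streaming test earlier does it inline. The sketch below generalizes the same loop, assuming HttpChunkGenerator (imported below) is the trait that supplies generate_next_chunk(); the helper name and its genericity are invented.

// Sketch of a shared drain helper; mirrors stream_headers_to_vec() in this file and
// the inline loops in the block-streaming test earlier in the diff.
fn drain_chunks<G: HttpChunkGenerator>(stream: &mut G) -> Vec<u8> {
    let mut bytes = vec![];
    loop {
        let mut next_bytes = stream.generate_next_chunk().unwrap();
        if next_bytes.is_empty() {
            break;
        }
        bytes.append(&mut next_bytes);
    }
    bytes
}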
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::net::api::getheaders::StacksHeaderStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getheaders::RPCHeadersRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.quantity, Some(2100)); + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.quantity.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing headers + let request = + StacksHttpRequest::new_getheaders(addr.into(), 2100, TipRequest::UseLatestAnchoredTip); + requests.push(request); + + // this fails if we use a microblock tip + let request = + StacksHttpRequest::new_getheaders(addr.into(), 2100, TipRequest::UseLatestUnconfirmedTip); + requests.push(request); + + // query existing headers + let request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(stacks_chain_tip.clone()), + ); + requests.push(request); + + // query non-existant headers + let request = StacksHttpRequest::new_getheaders( + addr.into(), + 2100, + TipRequest::SpecificTip(StacksBlockId([0x11; 32])), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the headers + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + 
+        std::str::from_utf8(&response.try_serialize().unwrap()).unwrap()
+    );
+
+    let resp = response.decode_stacks_headers().unwrap();
+
+    assert_eq!(resp.len(), 1);
+
+    // fails on microblock tip
+    let response = responses.remove(0);
+    debug!(
+        "Response:\n{}\n",
+        std::str::from_utf8(&response.try_serialize().unwrap()).unwrap()
+    );
+
+    let (preamble, body) = response.destruct();
+
+    assert_eq!(preamble.status_code, 404);
+
+    // got the headers
+    let response = responses.remove(0);
+    debug!(
+        "Response:\n{}\n",
+        std::str::from_utf8(&response.try_serialize().unwrap()).unwrap()
+    );
+
+    let resp = response.decode_stacks_headers().unwrap();
+
+    assert_eq!(resp.len(), 1);
+
+    // no headers
+    let response = responses.remove(0);
+    debug!(
+        "Response:\n{}\n",
+        std::str::from_utf8(&response.try_serialize().unwrap()).unwrap()
+    );
+
+    let (preamble, body) = response.destruct();
+
+    assert_eq!(preamble.status_code, 404);
+}
+
+fn stream_headers_to_vec(stream: &mut StacksHeaderStream) -> Vec<u8> {
+    let mut header_bytes = vec![];
+    loop {
+        let mut next_bytes = stream.generate_next_chunk().unwrap();
+        if next_bytes.is_empty() {
+            break;
+        }
+        header_bytes.append(&mut next_bytes);
+    }
+    header_bytes
+}
+
+#[test]
+fn test_stream_getheaders() {
+    let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!());
+    let privk = StacksPrivateKey::from_hex(
+        "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01",
+    )
+    .unwrap();
+
+    let mut blocks: Vec<StacksBlock> = vec![];
+    let mut blocks_index_hashes: Vec<StacksBlockId> = vec![];
+
+    // make a linear stream
+    for i in 0..32 {
+        let mut block = make_empty_coinbase_block(&privk);
+
+        if i == 0 {
+            block.header.total_work.work = 1;
+            block.header.total_work.burn = 1;
+        }
+        if i > 0 {
+            block.header.parent_block = blocks.get(i - 1).unwrap().block_hash();
+            block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1;
+            block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 1;
+        }
+
+        let consensus_hash = ConsensusHash([((i + 1) as u8); 20]);
+        let parent_consensus_hash = ConsensusHash([(i as u8); 20]);
+
+        store_staging_block(
+            &mut chainstate,
+            &consensus_hash,
+            &block,
+            &parent_consensus_hash,
+            i as u64,
+            i as u64,
+        );
+
+        blocks_index_hashes.push(StacksBlockHeader::make_index_block_hash(
+            &consensus_hash,
+            &block.block_hash(),
+        ));
+        blocks.push(block);
+    }
+
+    let mut blocks_fork = blocks[0..16].to_vec();
+    let mut blocks_fork_index_hashes = blocks_index_hashes[0..16].to_vec();
+
+    // make a stream that branches off
+    for i in 16..32 {
+        let mut block = make_empty_coinbase_block(&privk);
+
+        if i == 16 {
+            block.header.parent_block = blocks.get(i - 1).unwrap().block_hash();
+            block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1;
+            block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 2;
+        } else {
+            block.header.parent_block = blocks_fork.get(i - 1).unwrap().block_hash();
+            block.header.total_work.work =
+                blocks_fork.get(i - 1).unwrap().header.total_work.work + 1;
+            block.header.total_work.burn =
+                blocks_fork.get(i - 1).unwrap().header.total_work.burn + 2;
+        }
+
+        let consensus_hash = ConsensusHash([((i + 1) as u8) | 0x80; 20]);
+        let parent_consensus_hash = if i == 16 {
+            ConsensusHash([(i as u8); 20])
+        } else {
+            ConsensusHash([(i as u8) | 0x80; 20])
+        };
+
+        store_staging_block(
+            &mut chainstate,
+            &consensus_hash,
+            &block,
+            &parent_consensus_hash,
+            i as u64,
+            i as u64,
+        );
+
+        blocks_fork_index_hashes.push(StacksBlockHeader::make_index_block_hash(
+            &consensus_hash,
+            &block.block_hash(),
+        ));
+        blocks_fork.push(block);
+    }
+
+    // can't stream a non-existent header
+    assert!(StacksHeaderStream::new(&chainstate, &StacksBlockId([0x11; 32]), 1).is_err());
+
+    // stream back individual headers
+    for i in 0..blocks.len() {
+        let mut stream = StacksHeaderStream::new(&chainstate, &blocks_index_hashes[i], 1).unwrap();
+        let next_header_bytes = stream_headers_to_vec(&mut stream);
+
+        test_debug!("Got {} total bytes", next_header_bytes.len());
+        test_debug!(
+            "bytes: '{}'",
+            std::str::from_utf8(&next_header_bytes).unwrap()
+        );
+        let header: Vec<ExtendedStacksHeader> =
+            serde_json::from_reader(&mut &next_header_bytes[..]).unwrap();
+
+        assert_eq!(header.len(), 1);
+        let header = header[0].clone();
+        assert_eq!(header.consensus_hash, ConsensusHash([(i + 1) as u8; 20]));
+        assert_eq!(header.header, blocks[i].header);
+
+        if i > 0 {
+            assert_eq!(header.parent_block_id, blocks_index_hashes[i - 1]);
+        }
+    }
+
+    // stream back a run of headers
+    let block_expected_headers: Vec<StacksBlockHeader> =
+        blocks.iter().rev().map(|blk| blk.header.clone()).collect();
+
+    let block_expected_index_hashes: Vec<StacksBlockId> = blocks_index_hashes
+        .iter()
+        .rev()
+        .map(|idx| idx.clone())
+        .collect();
+
+    let block_fork_expected_headers: Vec<StacksBlockHeader> = blocks_fork
+        .iter()
+        .rev()
+        .map(|blk| blk.header.clone())
+        .collect();
+
+    let block_fork_expected_index_hashes: Vec<StacksBlockId> = blocks_fork_index_hashes
+        .iter()
+        .rev()
+        .map(|idx| idx.clone())
+        .collect();
+
+    // get them all -- ask for more than there is
+    let mut stream =
+        StacksHeaderStream::new(&chainstate, blocks_index_hashes.last().unwrap(), 4096).unwrap();
+    let header_bytes = stream_headers_to_vec(&mut stream);
+
+    eprintln!(
+        "headers: {}",
+        String::from_utf8(header_bytes.clone()).unwrap()
+    );
+    let headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(headers.len(), block_expected_headers.len());
+    for ((i, h), eh) in headers
+        .iter()
+        .enumerate()
+        .zip(block_expected_headers.iter())
+    {
+        assert_eq!(h.header, *eh);
+        assert_eq!(h.consensus_hash, ConsensusHash([(32 - i) as u8; 20]));
+        if i + 1 < block_expected_index_hashes.len() {
+            assert_eq!(h.parent_block_id, block_expected_index_hashes[i + 1]);
+        }
+    }
+
+    let mut stream =
+        StacksHeaderStream::new(&chainstate, blocks_fork_index_hashes.last().unwrap(), 4096)
+            .unwrap();
+    let header_bytes = stream_headers_to_vec(&mut stream);
+    let fork_headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(fork_headers.len(), block_fork_expected_headers.len());
+    for ((i, h), eh) in fork_headers
+        .iter()
+        .enumerate()
+        .zip(block_fork_expected_headers.iter())
+    {
+        let consensus_hash = if i >= 16 {
+            ConsensusHash([((32 - i) as u8); 20])
+        } else {
+            ConsensusHash([((32 - i) as u8) | 0x80; 20])
+        };
+
+        assert_eq!(h.header, *eh);
+        assert_eq!(h.consensus_hash, consensus_hash);
+        if i + 1 < block_fork_expected_index_hashes.len() {
+            assert_eq!(h.parent_block_id, block_fork_expected_index_hashes[i + 1]);
+        }
+    }
+
+    assert_eq!(fork_headers[16..32], headers[16..32]);
+
+    // ask for only a few
+    let mut stream =
+        StacksHeaderStream::new(&chainstate, blocks_index_hashes.last().unwrap(), 10).unwrap();
+    let header_bytes = stream_headers_to_vec(&mut stream);
+    eprintln!(
+        "header bytes: {}",
+        String::from_utf8(header_bytes.clone()).unwrap()
+    );
+
+    let headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(headers.len(), 10);
+    for (i, hdr) in headers.iter().enumerate() {
+        assert_eq!(hdr.header, block_expected_headers[i]);
+        assert_eq!(hdr.parent_block_id, block_expected_index_hashes[i + 1]);
+    }
+
+    // ask for only a few
+    let mut stream =
+        StacksHeaderStream::new(&chainstate, &blocks_fork_index_hashes.last().unwrap(), 10)
+            .unwrap();
+    let header_bytes = stream_headers_to_vec(&mut stream);
+    let headers: Vec<ExtendedStacksHeader> =
+        serde_json::from_reader(&mut &header_bytes[..]).unwrap();
+
+    assert_eq!(headers.len(), 10);
+    for (i, hdr) in headers.iter().enumerate() {
+        assert_eq!(hdr.header, block_fork_expected_headers[i]);
+        assert_eq!(hdr.parent_block_id, block_fork_expected_index_hashes[i + 1]);
+    }
+}
diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs
new file mode 100644
index 0000000000..da1ca4ba19
--- /dev/null
+++ b/stackslib/src/net/api/tests/getinfo.rs
@@ -0,0 +1,105 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+
+use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions};
+use clarity::vm::{ClarityName, ContractName};
+use serde_json;
+use stacks_common::codec::StacksMessageCodec;
+use stacks_common::types::chainstate::StacksAddress;
+use stacks_common::types::net::PeerHost;
+use stacks_common::types::Address;
+
+use super::test_rpc;
+use crate::net::api::getinfo::RPCPeerInfoData;
+use crate::net::api::*;
+use crate::net::connection::ConnectionOptions;
+use crate::net::httpcore::{
+    HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp,
+    StacksHttpRequest,
+};
+use crate::net::{ProtocolFamily, TipRequest};
+
+#[test]
+fn test_try_parse_request() {
+    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333);
+    let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default());
+
+    let request = StacksHttpRequest::new_getinfo(addr.into(), Some(123));
+
+    let bytes = request.try_serialize().unwrap();
+
+    debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap());
+
+    let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap();
+    let mut parsed_request = http
+        .try_parse_request(&parsed_preamble.expect_request(), &bytes[offset..])
+        .unwrap();
+
+    // parsed request consumes headers that would not be in a constructed request
+    parsed_request.clear_headers();
+    parsed_request.add_header(
+        "X-Canonical-Stacks-Tip-Height".to_string(),
+        "123".to_string(),
+    );
+    let (preamble, contents) = parsed_request.destruct();
+
+    assert_eq!(&preamble, request.preamble());
+}
+
+#[test]
+fn test_getinfo_compat() {
+    let old_getinfo_json =
r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; + let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + + // they all parse + for json_obj in &[ + &old_getinfo_json, 
+ &getinfo_no_pubkey_json, + &getinfo_no_pubkey_hash_json, + &getinfo_full_json, + ] { + let _v: RPCPeerInfoData = serde_json::from_str(json_obj).unwrap(); + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing account + let request = StacksHttpRequest::new_getinfo(addr.into(), Some(123)); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_peer_info().unwrap(); +} diff --git a/stackslib/src/net/api/tests/getistraitimplemented.rs b/stackslib/src/net/api/tests/getistraitimplemented.rs new file mode 100644 index 0000000000..99c1f4be4f --- /dev/null +++ b/stackslib/src/net/api/tests/getistraitimplemented.rs @@ -0,0 +1,233 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
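The trait-conformance test below drives three outcomes: the contract implements the trait, the contract exists but does not conform, and the contract or trait is missing (HTTP 404). A hedged sketch that folds those outcomes into one value follows; the helper name is invented, and it assumes the responses handled here are the same StacksHttpResponse values used by the surrounding tests (exposing preamble() and decode_is_trait_implemented_response() as called below).

// Sketch: Some(true) => implemented, Some(false) => present but non-conforming,
// None => missing contract or trait (the 404 cases asserted below).
fn trait_check_outcome(response: StacksHttpResponse) -> Option<bool> {
    if response.preamble().status_code == 404 {
        return None;
    }
    let resp = response.decode_is_trait_implemented_response().ok()?;
    Some(resp.is_implemented)
}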
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed-trait-def".try_into().unwrap(), + "trait-name".try_into().unwrap(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getistraitimplemented::RPCGetIsTraitImplementedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!( + handler.trait_contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed-trait-def" + ) + .unwrap() + ) + ); + assert_eq!(handler.trait_name, Some("trait-name".into())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.trait_contract_identifier.is_none()); + assert!(handler.trait_name.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing, but does not conform + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + 
StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait-2".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // query non-existant trait + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_get_is_trait_implemented( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-trait".into(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(resp.is_implemented); + + // latest data but not conforming + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(!resp.is_implemented); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_is_trait_implemented_response().unwrap(); + assert!(resp.is_implemented); + + // no such trait + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getmapentry.rs b/stackslib/src/net/api/tests/getmapentry.rs new file mode 100644 index 0000000000..d8e25cfe5f --- /dev/null +++ 
b/stackslib/src/net/api/tests/getmapentry.rs @@ -0,0 +1,406 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-map".into(), + Value::UInt(13), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + false, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), false); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmapentry::RPCGetMapEntryRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!(handler.map_name, Some("test-map".into())); + assert_eq!(handler.key, Some(Value::UInt(13))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.map_name.is_none()); + assert!(handler.key.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing + let request = StacksHttpRequest::new_getmapentry( + 
addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "test-map".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world-unconfirmed".try_into().unwrap(), + "test-map-unconfirmed".try_into().unwrap(), + Value::Int(3), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant map + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "does-not-exist".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getmapentry( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "does-not-exist".try_into().unwrap(), + "test-map".try_into().unwrap(), + Value::UInt(1), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // latest data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x0a0100000000000000000000000000000002"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x0a0000000000000000000000000000000004"); + assert!(resp.marf_proof.is_some()); + + // no such map (this just returns `none`) + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x09"); + assert_eq!(resp.marf_proof, Some("".to_string())); + + // no such contract (this just returns `none`) + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_map_entry_response().unwrap(); + assert_eq!(resp.data, "0x09"); + assert_eq!(resp.marf_proof, Some("".to_string())); +} + +/* +#[test] +#[ignore] +fn test_rpc_get_map_entry() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we don't set any tip parameters, and we expect that querying for map data + // against the canonical Stacks tip will succeed. 
+ test_rpc( + function_name!(), + 40130, + 40131, + 50130, + 50131, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(123))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_get_map_entry_unconfirmed() { + // Test v2/map_entry (aka GetMapEntry) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for map data + // against the unconfirmed state will succeed. + test_rpc( + function_name!(), + 40140, + 40141, + 50140, + 50141, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let unconfirmed_tip = peer_client + .chainstate() + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + TipRequest::SpecificTip(unconfirmed_tip), + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(1))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_get_map_entry_use_latest_tip() { + test_rpc( + function_name!(), + 40142, + 40143, + 50142, + 50143, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let principal = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap() + .to_account_principal(); + convo_client.new_getmapentry( + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") + .unwrap(), + "hello-world".try_into().unwrap(), + "unit-map".try_into().unwrap(), + Value::Tuple( + TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) + .unwrap(), + ), + 
TipRequest::UseLatestAnchoredTip, + false, + ) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match http_response { + HttpResponseType::GetMapEntry(response_md, data) => { + assert_eq!( + Value::try_deserialize_hex_untyped(&data.data).unwrap(), + Value::some(Value::Tuple( + TupleData::from_data(vec![("units".into(), Value::Int(1))]) + .unwrap() + )) + .unwrap() + ); + true + } + _ => { + error!("Invalid response; {:?}", &http_response); + false + } + } + }, + ); +} +*/ diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs new file mode 100644 index 0000000000..a4eb372abf --- /dev/null +++ b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -0,0 +1,290 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = + StacksHttpRequest::new_getmicroblocks_confirmed(addr.into(), StacksBlockId([0x22; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_confirmed::RPCMicroblocksConfirmedRequestHandler::new(); + let mut parsed_request = 
http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.block_id, Some(StacksBlockId([0x22; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + // store an additional block and microblock stream, so we can fetch it. + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let parent_block = make_codec_test_block(25); + let parent_consensus_hash = ConsensusHash([0x02; 20]); + + let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); + mblocks.truncate(15); + + let mut child_block = make_codec_test_block(25); + let child_consensus_hash = ConsensusHash([0x03; 20]); + + child_block.header.parent_block = parent_block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + let child_index_block_hash = + StacksBlockHeader::make_index_block_hash(&child_consensus_hash, &child_block.block_hash()); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block, + &ConsensusHash([0x01; 20]), + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + true, + ); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block, + &parent_consensus_hash, + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + &mblock, + ); + } + + set_microblocks_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + let mut requests = vec![]; + + // query existing microblock stream + let request = StacksHttpRequest::new_getmicroblocks_confirmed( + addr.into(), + child_index_block_hash.clone(), + ); + requests.push(request); + + // query non-existant microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_confirmed(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let mut resp = response.decode_microblocks().unwrap(); + + resp.reverse(); + debug!("microblocks: {:?}", &resp); + assert_eq!(resp, mblocks); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_confirmed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) 
+    .unwrap();
+
+    let block = make_empty_coinbase_block(&privk);
+    let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash());
+    mblocks.truncate(5);
+
+    let mut child_block = make_empty_coinbase_block(&privk);
+    child_block.header.parent_block = block.block_hash();
+    child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash();
+    child_block.header.parent_microblock_sequence =
+        mblocks.last().as_ref().unwrap().header.sequence;
+
+    let consensus_hash = ConsensusHash([2u8; 20]);
+    let parent_consensus_hash = ConsensusHash([1u8; 20]);
+    let child_consensus_hash = ConsensusHash([3u8; 20]);
+
+    // store microblocks to staging
+    for (i, mblock) in mblocks.iter().enumerate() {
+        store_staging_microblock(
+            &mut chainstate,
+            &consensus_hash,
+            &block.block_hash(),
+            mblock,
+        );
+    }
+
+    // store block to staging
+    store_staging_block(
+        &mut chainstate,
+        &consensus_hash,
+        &block,
+        &parent_consensus_hash,
+        1,
+        2,
+    );
+
+    // store child block to staging
+    store_staging_block(
+        &mut chainstate,
+        &child_consensus_hash,
+        &child_block,
+        &consensus_hash,
+        1,
+        2,
+    );
+
+    // accept it
+    set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true);
+    set_block_processed(
+        &mut chainstate,
+        &child_consensus_hash,
+        &child_block.block_hash(),
+        true,
+    );
+
+    for i in 0..mblocks.len() {
+        set_microblocks_processed(
+            &mut chainstate,
+            &child_consensus_hash,
+            &child_block.block_hash(),
+            &mblocks[i].block_hash(),
+        );
+    }
+
+    // verify that we can stream everything
+    let child_block_header =
+        StacksBlockHeader::make_index_block_hash(&child_consensus_hash, &child_block.block_hash());
+
+    let mut stream =
+        StacksIndexedMicroblockStream::new_confirmed(&chainstate, &child_block_header).unwrap();
+
+    let mut confirmed_mblock_bytes = vec![];
+    loop {
+        let mut next_bytes = stream.generate_next_chunk().unwrap();
+        if next_bytes.is_empty() {
+            break;
+        }
+        test_debug!(
+            "Got {} more bytes from staging; add to {} total",
+            next_bytes.len(),
+            confirmed_mblock_bytes.len()
+        );
+        confirmed_mblock_bytes.append(&mut next_bytes);
+    }
+
+    // decode stream (should be length-prefixed)
+    let mut confirmed_mblocks =
+        Vec::<StacksMicroblock>::consensus_deserialize(&mut &confirmed_mblock_bytes[..]).unwrap();
+
+    confirmed_mblocks.reverse();
+
+    assert_eq!(confirmed_mblocks.len(), mblocks.len());
+    for i in 0..mblocks.len() {
+        test_debug!("check {}", i);
+        assert_eq!(confirmed_mblocks[i], mblocks[i])
+    }
+}
diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs
new file mode 100644
index 0000000000..0676ecc497
--- /dev/null
+++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs
@@ -0,0 +1,294 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
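// Illustrative note: the confirmed, indexed, and unconfirmed streaming tests in these
// modules all drain their chunk generator with the same loop. A minimal sketch of that
// shared pattern follows, assuming HttpChunkGenerator is the trait that supplies
// generate_next_chunk() as used above; the helper name is hypothetical and not part of
// this patch.
fn drain_chunks<G: HttpChunkGenerator>(stream: &mut G) -> Vec<u8> {
    let mut all_bytes = vec![];
    loop {
        let mut next_bytes = match stream.generate_next_chunk() {
            Ok(bytes) => bytes,
            // For brevity this sketch treats an error as end-of-stream; the tests unwrap instead.
            Err(_) => break,
        };
        // The streams above return an empty chunk once they are exhausted.
        if next_bytes.is_empty() {
            break;
        }
        all_bytes.append(&mut next_bytes);
    }
    all_bytes
}
// With such a helper, each loop above reduces to draining the stream and then
// consensus-decoding the collected bytes.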
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::getmicroblocks_indexed::StacksIndexedMicroblockStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), StacksBlockId([0x22; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_indexed::RPCMicroblocksIndexedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.tail_microblock_id, Some(StacksBlockId([0x22; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tail_microblock_id.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + // store an additional block and microblock stream, so we can fetch it. 
+ let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + let parent_block = make_codec_test_block(25); + let parent_consensus_hash = ConsensusHash([0x02; 20]); + let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_block.block_hash(), + ); + + let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); + mblocks.truncate(15); + + let mut child_block = make_codec_test_block(25); + let child_consensus_hash = ConsensusHash([0x03; 20]); + + child_block.header.parent_block = parent_block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + store_staging_block( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block, + &ConsensusHash([0x01; 20]), + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + true, + ); + + store_staging_block( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block, + &parent_consensus_hash, + 456, + 123, + ); + set_block_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + true, + ); + + let index_microblock_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &parent_consensus_hash, + &parent_block.block_hash(), + &mblock, + ); + } + + set_microblocks_processed( + rpc_test.peer_2.chainstate(), + &child_consensus_hash, + &child_block.block_hash(), + &mblocks.last().as_ref().unwrap().block_hash(), + ); + + let mut requests = vec![]; + + // query existing microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), index_microblock_hash.clone()); + requests.push(request); + + // query non-existant microblock stream + let request = + StacksHttpRequest::new_getmicroblocks_indexed(addr.into(), StacksBlockId([0x11; 32])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let mut resp = response.decode_microblocks().unwrap(); + + resp.reverse(); + debug!("microblocks: {:?}", &resp); + assert_eq!(resp, mblocks); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_indexed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_empty_coinbase_block(&privk); + let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); + mblocks.truncate(5); + + let mut child_block = make_empty_coinbase_block(&privk); + child_block.header.parent_block = block.block_hash(); + child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); + child_block.header.parent_microblock_sequence = + mblocks.last().as_ref().unwrap().header.sequence; + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let child_consensus_hash = 
ConsensusHash([3u8; 20]);
+
+    // store microblocks to staging
+    for (i, mblock) in mblocks.iter().enumerate() {
+        store_staging_microblock(
+            &mut chainstate,
+            &consensus_hash,
+            &block.block_hash(),
+            mblock,
+        );
+    }
+
+    // store block to staging
+    store_staging_block(
+        &mut chainstate,
+        &consensus_hash,
+        &block,
+        &parent_consensus_hash,
+        1,
+        2,
+    );
+
+    // store child block to staging
+    store_staging_block(
+        &mut chainstate,
+        &child_consensus_hash,
+        &child_block,
+        &consensus_hash,
+        1,
+        2,
+    );
+
+    // accept it
+    set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true);
+    set_block_processed(
+        &mut chainstate,
+        &child_consensus_hash,
+        &child_block.block_hash(),
+        true,
+    );
+
+    for i in 0..mblocks.len() {
+        // set different parts of this stream as confirmed
+        set_microblocks_processed(
+            &mut chainstate,
+            &child_consensus_hash,
+            &child_block.block_hash(),
+            &mblocks[i].block_hash(),
+        );
+
+        // verify that we can stream everything
+        let microblock_index_header =
+            StacksBlockHeader::make_index_block_hash(&consensus_hash, &mblocks[i].block_hash());
+
+        let mut stream =
+            StacksIndexedMicroblockStream::new(&chainstate, &microblock_index_header).unwrap();
+
+        let mut confirmed_mblock_bytes = vec![];
+        loop {
+            let mut next_bytes = stream.generate_next_chunk().unwrap();
+            if next_bytes.is_empty() {
+                break;
+            }
+            test_debug!(
+                "Got {} more bytes from staging; add to {} total",
+                next_bytes.len(),
+                confirmed_mblock_bytes.len()
+            );
+            confirmed_mblock_bytes.append(&mut next_bytes);
+        }
+
+        // decode stream (should be length-prefixed)
+        let mut confirmed_mblocks =
+            Vec::<StacksMicroblock>::consensus_deserialize(&mut &confirmed_mblock_bytes[..])
+                .unwrap();
+
+        confirmed_mblocks.reverse();
+
+        assert_eq!(confirmed_mblocks.len(), mblocks[0..(i + 1)].len());
+        for j in 0..(i + 1) {
+            test_debug!("check {}", j);
+            assert_eq!(confirmed_mblocks[j], mblocks[j])
+        }
+    }
+}
diff --git a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs
new file mode 100644
index 0000000000..f4facf717c
--- /dev/null
+++ b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs
@@ -0,0 +1,244 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::instantiate_chainstate; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::getmicroblocks_unconfirmed::StacksUnconfirmedMicroblockStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getmicroblocks_unconfirmed( + addr.into(), + StacksBlockId([0x22; 32]), + 123, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getmicroblocks_unconfirmed::RPCMicroblocksUnconfirmedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // consumed path args and body + assert_eq!(handler.parent_block_id, Some(StacksBlockId([0x22; 32]))); + assert_eq!(handler.start_sequence, Some(123)); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.parent_block_id.is_none()); + assert!(handler.start_sequence.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut rpc_test = TestRPC::setup(function_name!()); + + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let consensus_hash = ConsensusHash([0x02; 20]); + let anchored_block_hash = BlockHeaderHash([0x03; 32]); + let index_block_hash = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); + + let mut mblocks = make_sample_microblock_stream(&privk, &anchored_block_hash); + mblocks.truncate(15); + + for mblock in mblocks.iter() { + store_staging_microblock( + rpc_test.peer_2.chainstate(), + &consensus_hash, + &anchored_block_hash, + &mblock, + ); + } + + let mut requests = vec![]; + + // get the unconfirmed stream starting at the 5th microblock + let request = + StacksHttpRequest::new_getmicroblocks_unconfirmed(addr.into(), index_block_hash.clone(), 5); + requests.push(request); + + // get an unconfirmed stream for a non-existant block + let 
request = StacksHttpRequest::new_getmicroblocks_unconfirmed( + addr.into(), + StacksBlockId([0x11; 32]), + 5, + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the microblock stream + let response = responses.remove(0); + let resp = response.decode_microblocks_unconfirmed().unwrap(); + + debug!("microblocks: {:?}", &resp); + assert_eq!(resp.len(), 10); + assert_eq!(resp, mblocks[5..].to_vec()); + + // no microblock stream + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +#[test] +fn test_stream_unconfirmed_microblocks() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let privk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let block = make_empty_coinbase_block(&privk); + let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); + mblocks.truncate(15); + + let consensus_hash = ConsensusHash([2u8; 20]); + let parent_consensus_hash = ConsensusHash([1u8; 20]); + let index_block_header = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); + + // can't stream a non-existant microblock + if let Err(chainstate_error::NoSuchBlockError) = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, 0) + { + } else { + panic!("Opened nonexistant microblock"); + } + + // store microblocks to staging and stream them back + for (i, mblock) in mblocks.iter().enumerate() { + store_staging_microblock( + &mut chainstate, + &consensus_hash, + &block.block_hash(), + mblock, + ); + + // read back all the data we have so far, block-by-block + let mut staging_mblocks = vec![]; + for j in 0..(i + 1) { + let mut next_mblock_bytes = vec![]; + let mut stream = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, j as u16) + .unwrap(); + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.is_empty() { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + next_mblock_bytes.len() + ); + next_mblock_bytes.append(&mut next_bytes); + } + test_debug!("Got {} total bytes", next_mblock_bytes.len()); + + // should deserialize to a microblock + let staging_mblock = + StacksMicroblock::consensus_deserialize(&mut &next_mblock_bytes[..]).unwrap(); + staging_mblocks.push(staging_mblock); + } + + assert_eq!(staging_mblocks.len(), mblocks[0..(i + 1)].len()); + for j in 0..(i + 1) { + test_debug!("check {}", j); + assert_eq!(staging_mblocks[j], mblocks[j]) + } + + // can also read partial stream in one shot, from any seq + for k in 0..(i + 1) { + test_debug!("start at seq {}", k); + let mut staging_mblock_bytes = vec![]; + let mut stream = + StacksUnconfirmedMicroblockStream::new(&chainstate, &index_block_header, k as u16) + .unwrap(); + loop { + let mut next_bytes = stream.generate_next_chunk().unwrap(); + if next_bytes.is_empty() { + break; + } + test_debug!( + "Got {} more bytes from staging; add to {} total", + next_bytes.len(), + staging_mblock_bytes.len() + ); + staging_mblock_bytes.append(&mut next_bytes); + } + + test_debug!("Got {} total bytes", staging_mblock_bytes.len()); + + // decode stream + let staging_mblocks = decode_microblock_stream(&staging_mblock_bytes); + + assert_eq!(staging_mblocks.len(), mblocks[k..(i + 1)].len()); + for j in 0..staging_mblocks.len() { + test_debug!("check {}", j); + 
assert_eq!(staging_mblocks[j], mblocks[k + j]) + } + } + } +} diff --git a/stackslib/src/net/api/tests/getneighbors.rs b/stackslib/src/net/api/tests/getneighbors.rs new file mode 100644 index 0000000000..43acd650cc --- /dev/null +++ b/stackslib/src/net/api/tests/getneighbors.rs @@ -0,0 +1,105 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getneighbors(addr.into()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getneighbors::RPCNeighborsRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let request = StacksHttpRequest::new_getneighbors(addr.into()); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_rpc_neighbors().unwrap(); + + // as configured there's one boostrap neighbor + assert_eq!(resp.bootstrap.len(), 1); + + // all neighboring peers (there's one) report stackerdbs + for n in resp.sample.iter() { + assert!(n.stackerdbs.is_some()); + } + + for n in resp.bootstrap.iter() { + assert!(n.stackerdbs.is_some()); + } + + for n in resp.inbound.iter() { + 
assert!(n.stackerdbs.is_some()); + } + + for n in resp.outbound.iter() { + assert!(n.stackerdbs.is_some()); + } +} diff --git a/stackslib/src/net/api/tests/getpoxinfo.rs b/stackslib/src/net/api/tests/getpoxinfo.rs new file mode 100644 index 0000000000..88ec1bda37 --- /dev/null +++ b/stackslib/src/net/api/tests/getpoxinfo.rs @@ -0,0 +1,229 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getpoxinfo( + addr.into(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getpoxinfo::RPCPoxInfoRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let request = StacksHttpRequest::new_getpoxinfo(addr.into(), TipRequest::UseLatestAnchoredTip); + requests.push(request); + + // bad tip + let request = StacksHttpRequest::new_getpoxinfo( + addr.into(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + 
Some(1) + ); + + // this works + let resp = response.decode_rpc_get_pox_info().unwrap(); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + // this fails with 404 + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} + +/* +#[test] +#[ignore] +fn test_rpc_getpoxinfo() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, `tip_req` is set to UseLatestAnchoredTip. + // Thus, the query for pox info will be against the canonical Stacks tip, which we expect to succeed. + let pox_server_info = RefCell::new(None); + test_rpc( + function_name!(), + 40002, + 40003, + 50002, + 50003, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let mut sortdb = peer_server.sortdb.as_mut().unwrap(); + let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; + let stacks_block_id = { + let tip = chainstate.get_stacks_chain_tip(sortdb).unwrap().unwrap(); + StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_block_hash, + ) + }; + let pox_info = RPCPoxInfoData::from_db( + &mut sortdb, + chainstate, + &stacks_block_id, + &peer_client.config.burnchain, + ) + .unwrap(); + *pox_server_info.borrow_mut() = Some(pox_info); + convo_client.new_getpoxinfo(TipRequest::UseLatestAnchoredTip) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + convo_client, + convo_server| { + let req_md = http_request.preamble().clone(); + match (*http_response).clone().decode_rpc_get_pox_info() { + Ok(pox_data) => { + assert_eq!(Some(pox_data.clone()), *pox_server_info.borrow()); + true + } + Err(e) => { + error!("Invalid response: {:?}", &e); + false + } + } + }, + ); +} + +#[test] +#[ignore] +fn test_rpc_getpoxinfo_use_latest_tip() { + // Test v2/pox (aka GetPoxInfo) endpoint. + // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for pox + // info against the unconfirmed state will succeed. 
+ let pox_server_info = RefCell::new(None); + test_rpc( + function_name!(), + 40004, + 40005, + 50004, + 50005, + true, + |ref mut peer_client, + ref mut convo_client, + ref mut peer_server, + ref mut convo_server| { + let mut sortdb = peer_server.sortdb.as_mut().unwrap(); + let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; + let stacks_block_id = chainstate + .unconfirmed_state + .as_ref() + .unwrap() + .unconfirmed_chain_tip + .clone(); + let pox_info = RPCPoxInfoData::from_db( + &mut sortdb, + chainstate, + &stacks_block_id, + &peer_client.config.burnchain, + ) + .unwrap(); + *pox_server_info.borrow_mut() = Some(pox_info); + convo_client.new_getpoxinfo(TipRequest::UseLatestUnconfirmedTip) + }, + |ref http_request, + ref http_response, + ref mut peer_client, + ref mut peer_server, + ref convo_client, + ref convo_server| { + let req_md = http_request.preamble().clone(); + match (*http_response).clone().decode_rpc_get_pox_info() { + Ok(pox_data) => { + assert_eq!(Some(pox_data.clone()), *pox_server_info.borrow()); + true + } + Err(e) => { + error!("Invalid response: {:?}", &e); + false + } + } + }, + ); +} +*/ diff --git a/stackslib/src/net/api/tests/getstackerdbchunk.rs b/stackslib/src/net/api/tests/getstackerdbchunk.rs new file mode 100644 index 0000000000..11284c5bb6 --- /dev/null +++ b/stackslib/src/net/api/tests/getstackerdbchunk.rs @@ -0,0 +1,215 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
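// Illustrative note: the commented-out legacy getpoxinfo tests above also exercised
// TipRequest::UseLatestUnconfirmedTip. Ported to the new harness, that case would look
// roughly like the sketch below (hypothetical test name; not added by this patch, and
// whether the harness's unconfirmed state satisfies it has not been verified here).
#[test]
fn test_getpoxinfo_use_latest_unconfirmed_tip_sketch() {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333);
    // Query PoX info against the unconfirmed microblock tip rather than the anchored tip.
    let request =
        StacksHttpRequest::new_getpoxinfo(addr.into(), TipRequest::UseLatestUnconfirmedTip);
    let mut responses = test_rpc(function_name!(), vec![request]);
    let response = responses.remove(0);
    // Expect a decodable PoX payload when unconfirmed state exists on the server peer.
    let _pox_info = response.decode_rpc_get_pox_info().unwrap();
}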
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use libstackerdb::SlotMetadata; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(); + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(32), + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstackerdbchunk::RPCGetStackerDBChunkRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some(contract_identifier.clone()) + ); + assert_eq!(handler.slot_id, Some(0)); + assert_eq!(handler.slot_version, Some(32)); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.slot_id.is_none()); + assert!(handler.slot_version.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let contract_identifier = + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(); + let none_contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-ext", + ) + .unwrap(); + + // latest chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + None, + ); + requests.push(request); + + // specific chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(1), + ); + requests.push(request); + + // wrong version + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 0, + Some(2), + ); + requests.push(request); + + // no data + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 1, + None, + ); + requests.push(request); + + // no chunk + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + contract_identifier.clone(), + 4093, + None, + ); + requests.push(request); + + // no contract + let request = StacksHttpRequest::new_get_stackerdb_chunk( + addr.into(), + 
none_contract_identifier.clone(), + 0, + None, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(std::str::from_utf8(&resp).unwrap(), "hello world"); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(std::str::from_utf8(&resp).unwrap(), "hello world"); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk().unwrap(); + assert_eq!(resp.len(), 0); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getstackerdbmetadata.rs b/stackslib/src/net/api/tests/getstackerdbmetadata.rs new file mode 100644 index 0000000000..c2e72c3092 --- /dev/null +++ b/stackslib/src/net/api/tests/getstackerdbmetadata.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use super::test_rpc; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(); + let request = + StacksHttpRequest::new_get_stackerdb_metadata(addr.into(), contract_identifier.clone()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some(contract_identifier.clone()) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + let contract_identifier = + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(); + let none_contract_identifier = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-ext", + ) + .unwrap(); + + let request = + StacksHttpRequest::new_get_stackerdb_metadata(addr.into(), contract_identifier.clone()); + requests.push(request); + + // no contract + let request = StacksHttpRequest::new_get_stackerdb_metadata( + addr.into(), + none_contract_identifier.clone(), + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_stackerdb_metadata().unwrap(); + + assert_eq!(resp.len(), 6); + for (i, slot) in resp.iter().enumerate() { + assert_eq!(slot.slot_id, i as u32); + + if i > 0 { + assert_eq!(slot.slot_version, 0); + assert_eq!(slot.data_hash, Sha512Trunc256Sum([0u8; 32])); + assert_eq!(slot.signature, MessageSignature::empty()); + } else { + assert_eq!(slot.slot_version, 1); + assert_eq!( + slot.data_hash, + Sha512Trunc256Sum::from_data("hello world".as_bytes()) + ); + } + 
} + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getstxtransfercost.rs b/stackslib/src/net/api/tests/getstxtransfercost.rs new file mode 100644 index 0000000000..6c4cccc369 --- /dev/null +++ b/stackslib/src/net/api/tests/getstxtransfercost.rs @@ -0,0 +1,84 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_stx_transfer_cost(addr.into()); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getstxtransfercost::RPCGetStxTransferCostRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let request = StacksHttpRequest::new_get_stx_transfer_cost(addr.into()); + + let mut responses = test_rpc(function_name!(), vec![request]); + assert_eq!(responses.len(), 1); + + let response = responses.pop().unwrap(); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let fee_rate = response.decode_stx_transfer_fee().unwrap(); + assert_eq!(fee_rate, 
MINIMUM_TX_FEE_RATE_PER_BYTE); +} diff --git a/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs b/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs new file mode 100644 index 0000000000..5e249d62c6 --- /dev/null +++ b/stackslib/src/net/api/tests/gettransaction_unconfirmed.rs @@ -0,0 +1,125 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::gettransaction_unconfirmed::UnconfirmedTransactionStatus; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_gettransaction_unconfirmed(addr.into(), Txid([0x11; 32])); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.txid, Some(Txid([0x11; 32]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.txid.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let mut requests = vec![]; + + // get mempool txn + let request = StacksHttpRequest::new_gettransaction_unconfirmed( + addr.into(), + rpc_test.mempool_txids[0].clone(), + ); + requests.push(request); + + // get microblock txn + let request = StacksHttpRequest::new_gettransaction_unconfirmed( + addr.into(), + rpc_test.microblock_txids[0].clone(), + ); + requests.push(request); + + // get neither + let request = StacksHttpRequest::new_gettransaction_unconfirmed(addr.into(), Txid([0x21; 32])); + 
requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_gettransaction_unconfirmed().unwrap(); + assert_eq!(resp.status, UnconfirmedTransactionStatus::Mempool); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_gettransaction_unconfirmed().unwrap(); + match resp.status { + UnconfirmedTransactionStatus::Microblock { .. } => {} + _ => { + panic!("Not in microblock"); + } + }; + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs new file mode 100644 index 0000000000..adcd681ae5 --- /dev/null +++ b/stackslib/src/net/api/tests/mod.rs @@ -0,0 +1,927 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
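// Illustrative note: each endpoint module above drives the harness defined in this file
// the same way. A sketch of that round trip, using the getneighbors request from earlier
// as a stand-in for any endpoint (hypothetical test name; assumes the httpcore extension
// traits those modules import are in scope).
#[test]
fn example_endpoint_roundtrip_sketch() {
    let addr: SocketAddr = "127.0.0.1:33333".parse().unwrap();
    // Build the request(s) to replay, in order.
    let requests = vec![StacksHttpRequest::new_getneighbors(addr.into())];
    // test_rpc() stands up a client/server TestPeer pair, relays each request through the
    // ConversationHttp pair managed by TestRPC, and returns the responses in order.
    let mut responses = test_rpc(function_name!(), requests);
    let response = responses.remove(0);
    // The harness reports a canonical Stacks tip height of 1, as the tests above assert.
    assert_eq!(
        response.preamble().get_canonical_stacks_tip_height(),
        Some(1)
    );
}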
+ +use std::net::SocketAddr; + +use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use libstackerdb::SlotMetadata; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, +}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::pipe::Pipe; + +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlock, StacksBlockBuilder, StacksBlockHeader, StacksMicroblock, + StacksTransaction, StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, + TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, +}; +use crate::core::MemPoolDB; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::relay::Relayer; +use crate::net::rpc::ConversationHttp; +use crate::net::test::{TestPeer, TestPeerConfig}; +use crate::net::{ + Attachment, AttachmentInstance, RPCHandlerArgs, StackerDBConfig, StacksNodeState, UrlString, +}; + +mod callreadonly; +mod getaccount; +mod getattachment; +mod getattachmentsinv; +mod getblock; +mod getconstantval; +mod getcontractabi; +mod getcontractsrc; +mod getdatavar; +mod getheaders; +mod getinfo; +mod getistraitimplemented; +mod getmapentry; +mod getmicroblocks_confirmed; +mod getmicroblocks_indexed; +mod getmicroblocks_unconfirmed; +mod getneighbors; +mod getpoxinfo; +mod getstackerdbchunk; +mod getstackerdbmetadata; +mod getstxtransfercost; +mod gettransaction_unconfirmed; +mod postblock; +mod postfeerate; +mod postmempoolquery; +mod postmicroblock; +mod poststackerdbchunk; +mod posttransaction; + +const TEST_CONTRACT: &'static str = " + (define-trait test-trait + ( + (do-test () (response uint uint)) + ) + ) + (define-trait test-trait-2 + ( + (do-test-2 () (response uint uint)) + ) + ) + + (define-constant cst 123) + (define-data-var bar int 0) + (define-map unit-map { account: principal } { units: int }) + (define-map test-map uint uint) + (map-set test-map u1 u2) + (define-public (get-bar) (ok (var-get bar))) + (define-public (set-bar (x int) (y int)) + (begin (var-set bar (/ x y)) (ok (var-get bar)))) + (define-public (add-unit) + (begin + (map-set unit-map { account: tx-sender } { units: 1 } ) + (var-set bar 1) + (ok 1))) + (begin + (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) + + (define-read-only (ro-confirmed) u1) + + (define-public (do-test) (ok u0)) + + ;; stacker DB + (define-read-only (stackerdb-get-signer-slots) + (ok (list + { + signer: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R, + num-slots: u3 + } + { + signer: 'STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW, + num-slots: u3 + }))) + + (define-read-only (stackerdb-get-config) + (ok { + chunk-size: u4096, + write-freq: u0, + max-writes: u4096, + max-neighbors: u32, + hint-replicas: (list ) + })) +"; + +const TEST_CONTRACT_UNCONFIRMED: &'static str = " +(define-read-only (ro-test) (ok 1)) +(define-constant cst-unconfirmed 456) +(define-data-var bar-unconfirmed uint u1) +(define-map test-map-unconfirmed int 
int) +(map-set test-map-unconfirmed 3 4) +(define-public (do-test) (ok u1)) +"; + +/// This helper function drives I/O between a sender and receiver Http conversation. +fn convo_send_recv(sender: &mut ConversationHttp, receiver: &mut ConversationHttp) -> () { + let (mut pipe_read, mut pipe_write) = Pipe::new(); + pipe_read.set_nonblocking(true); + + loop { + sender.try_flush().unwrap(); + receiver.try_flush().unwrap(); + + pipe_write.try_flush().unwrap(); + + let all_relays_flushed = + receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0; + + let nw = sender.send(&mut pipe_write).unwrap(); + let nr = receiver.recv(&mut pipe_read).unwrap(); + + debug!( + "test_rpc: all_relays_flushed = {} ({},{}), nr = {}, nw = {}", + all_relays_flushed, + receiver.num_pending_outbound(), + sender.num_pending_outbound(), + nr, + nw + ); + if all_relays_flushed && nr == 0 && nw == 0 { + debug!("test_rpc: Breaking send_recv"); + break; + } + } +} + +/// TestRPC state +pub struct TestRPC<'a> { + pub privk1: StacksPrivateKey, + pub privk2: StacksPrivateKey, + pub peer_1: TestPeer<'a>, + pub peer_2: TestPeer<'a>, + pub peer_1_indexer: BitcoinIndexer, + pub peer_2_indexer: BitcoinIndexer, + pub convo_1: ConversationHttp, + pub convo_2: ConversationHttp, + /// hash of the chain tip + pub canonical_tip: StacksBlockId, + /// consensus hash of the chain tip + pub consensus_hash: ConsensusHash, + /// hash of last microblock + pub microblock_tip_hash: BlockHeaderHash, + /// list of mempool transactions + pub mempool_txids: Vec, + /// list of microblock transactions + pub microblock_txids: Vec, + /// next block to post, and its consensus hash + pub next_block: (ConsensusHash, StacksBlock), + /// next microblock to post (may already be posted) + pub next_microblock: StacksMicroblock, + /// transactions that can be posted to the mempool + pub sendable_txs: Vec, +} + +impl<'a> TestRPC<'a> { + pub fn setup(test_name: &str) -> TestRPC<'a> { + Self::setup_ex(test_name, true) + } + + pub fn setup_ex(test_name: &str, process_microblock: bool) -> TestRPC<'a> { + // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R + let privk1 = StacksPrivateKey::from_hex( + "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", + ) + .unwrap(); + + // STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW + let privk2 = StacksPrivateKey::from_hex( + "94c319327cc5cd04da7147d32d836eb2e4c44f4db39aa5ede7314a761183d0c701", + ) + .unwrap(); + let microblock_privkey = StacksPrivateKey::new(); + let microblock_pubkeyhash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); + + let addr1 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk1)], + ) + .unwrap(); + let addr2 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk2)], + ) + .unwrap(); + + let mut peer_1_config = TestPeerConfig::new(&format!("{}-peer1", test_name), 0, 0); + let mut peer_2_config = TestPeerConfig::new(&format!("{}-peer2", test_name), 0, 0); + + peer_1_config.connection_opts.read_only_call_limit = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 1500, + read_count: 3, + runtime: 1500000, + }; + peer_1_config.connection_opts.maximum_call_argument_size = 4096; + + peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 1500, + read_count: 3, 
+ runtime: 1500000, + }; + peer_2_config.connection_opts.maximum_call_argument_size = 4096; + + // stacker DBs get initialized thru reconfiguration when the above block gets processed + peer_1_config.add_stacker_db( + QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), + StackerDBConfig::noop(), + ); + peer_2_config.add_stacker_db( + QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), + StackerDBConfig::noop(), + ); + + let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); + let peer_2_indexer = BitcoinIndexer::new_unit_test(&peer_2_config.burnchain.working_dir); + + peer_1_config.initial_balances = vec![ + (addr1.to_account_principal(), 1000000000), + (addr2.to_account_principal(), 1000000000), + ]; + + peer_2_config.initial_balances = vec![ + (addr1.to_account_principal(), 1000000000), + (addr2.to_account_principal(), 1000000000), + ]; + + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + // mine one block with a contract in it + // first the coinbase + // make a coinbase for this miner + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + + // next the contract + let contract = TEST_CONTRACT.clone(); + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_smart_contract( + &format!("hello-world"), + &contract.to_string(), + None, + ) + .unwrap(), + ); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(1); + tx_contract.set_tx_fee(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_contract_signed = tx_signer.get_tx().unwrap(); + + // update account and state in a microblock that will be unconfirmed + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_contract_call(addr1.clone(), "hello-world", "add-unit", vec![]) + .unwrap(), + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(2); + tx_cc.set_tx_fee(123); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_cc_signed = tx_signer.get_tx().unwrap(); + let tx_cc_len = { + let mut bytes = vec![]; + tx_cc_signed.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + // make an unconfirmed contract + let unconfirmed_contract = TEST_CONTRACT_UNCONFIRMED.clone(); + let mut tx_unconfirmed_contract = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_smart_contract( + &format!("hello-world-unconfirmed"), + &unconfirmed_contract.to_string(), + None, + ) + .unwrap(), + ); + + tx_unconfirmed_contract.chain_id = 0x80000000; + tx_unconfirmed_contract.auth.set_origin_nonce(3); + 
tx_unconfirmed_contract.set_tx_fee(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx_unconfirmed_contract); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_unconfirmed_contract_signed = tx_signer.get_tx().unwrap(); + let tx_unconfirmed_contract_len = { + let mut bytes = vec![]; + tx_unconfirmed_contract_signed + .consensus_serialize(&mut bytes) + .unwrap(); + bytes.len() as u64 + }; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_size = 0; + + // make a block + // Put the coinbase and smart-contract in the anchored block. + // Put the contract-call in the microblock + let (burn_ops, stacks_block, microblocks) = peer_1.make_tenure( + |ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, _| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &parent_tip, + vrf_proof, + tip.total_burn, + microblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, anchored_block_size, anchored_block_cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![tx_coinbase_signed.clone(), tx_contract_signed.clone()], + ) + .unwrap(); + + anchor_size = anchored_block_size; + anchor_cost = anchored_block_cost; + + (anchored_block, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, &vec![]); + peer_2.process_stacks_epoch_at_tip(&stacks_block, &vec![]); + + // build 1-block microblock stream with the contract-call and the unconfirmed contract + let microblock = { + let sortdb = peer_1.sortdb.take().unwrap(); + Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); + let mblock = { + let sort_iconn = sortdb.index_conn(); + let mut microblock_builder = StacksMicroblockBuilder::new( + stacks_block.block_hash(), + consensus_hash.clone(), + peer_1.chainstate(), + &sort_iconn, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + let microblock = microblock_builder + .mine_next_microblock_from_txs( + vec![ + (tx_cc_signed, tx_cc_len), + (tx_unconfirmed_contract_signed, tx_unconfirmed_contract_len), + ], + µblock_privkey, + ) + .unwrap(); + microblock + }; + peer_1.sortdb = Some(sortdb); + mblock + }; + + let microblock_txids = microblock.txs.iter().map(|tx| tx.txid()).collect(); + let canonical_tip = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + + if process_microblock { + // store microblock stream + peer_1 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + µblock, + ) + .unwrap(); + peer_2 + .chainstate() + .preprocess_streamed_microblock( + &consensus_hash, + &stacks_block.block_hash(), + µblock, + ) + .unwrap(); + + // process microblock stream to generate unconfirmed state + let 
sortdb1 = peer_1.sortdb.take().unwrap(); + let sortdb2 = peer_2.sortdb.take().unwrap(); + peer_1 + .chainstate() + .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_2 + .chainstate() + .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .unwrap(); + peer_1.sortdb = Some(sortdb1); + peer_2.sortdb = Some(sortdb2); + } + + let mut mempool_txids = vec![]; + + // stuff some transactions into peer_2's mempool + // (relates to mempool query tests) + // Also, create some transactions that could be sent + let mut mempool = peer_2.mempool.take().unwrap(); + let mut mempool_tx = mempool.tx_begin().unwrap(); + let mut sendable_txs = vec![]; + for i in 0..20 { + let pk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&privk2).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(i); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk2).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + if i < 10 { + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &consensus_hash, + &stacks_block.block_hash(), + txid.clone(), + tx_bytes, + tx_fee, + stacks_block.header.total_work.work, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + mempool_txids.push(txid); + } else { + sendable_txs.push(tx); + } + } + mempool_tx.commit().unwrap(); + peer_2.mempool.replace(mempool); + + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let _ = peer_1 + .network + .refresh_burnchain_view( + &peer_1_indexer, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + false, + ) + .unwrap(); + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + + let peer_2_sortdb = peer_2.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let _ = peer_2 + .network + .refresh_burnchain_view( + &peer_2_indexer, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + false, + ) + .unwrap(); + peer_2.sortdb = Some(peer_2_sortdb); + peer_2.stacks_node = Some(peer_2_stacks_node); + + // insert some fake Atlas attachment data + let attachment = Attachment { + content: vec![0, 1, 2, 3, 4], + }; + + let attachment_instance = AttachmentInstance { + content_hash: attachment.hash(), + attachment_index: 123, + stacks_block_height: 1, + index_block_hash: canonical_tip.clone(), + metadata: "000102030405".to_string(), + contract_id: QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") + .unwrap(), + tx_id: Txid([0x22; 32]), + 
canonical_stacks_tip_height: Some(1), + }; + + peer_1 + .network + .get_atlasdb_mut() + .insert_initial_attachment_instance(&attachment_instance) + .unwrap(); + peer_2 + .network + .get_atlasdb_mut() + .insert_initial_attachment_instance(&attachment_instance) + .unwrap(); + + peer_1 + .network + .get_atlasdb_mut() + .insert_instantiated_attachment(&attachment) + .unwrap(); + peer_2 + .network + .get_atlasdb_mut() + .insert_instantiated_attachment(&attachment) + .unwrap(); + + // next tip, coinbase + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(4); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + + // make another block for the test framework to POST + let (next_burn_ops, next_stacks_block, _) = peer_1.make_tenure( + |ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, _| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &parent_tip, + vrf_proof, + tip.total_burn, + microblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, anchored_block_size, anchored_block_cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![tx_coinbase_signed.clone()], + ) + .unwrap(); + + anchor_size = anchored_block_size; + anchor_cost = anchored_block_cost; + + (anchored_block, vec![]) + }, + ); + + let (_, _, next_consensus_hash) = peer_1.next_burnchain_block(next_burn_ops.clone()); + peer_2.next_burnchain_block(next_burn_ops.clone()); + + let view_1 = peer_1.get_burnchain_view().unwrap(); + let view_2 = peer_2.get_burnchain_view().unwrap(); + + // extract ports allocated to us + let peer_1_http = peer_1.config.http_port; + let peer_2_http = peer_2.config.http_port; + + debug!("test_rpc: Peer 1 HTTP port: {}", &peer_1_http); + debug!("test_rpc: Peer 2 HTTP port: {}", &peer_2_http); + + // store a chunk in the peers' stackerdb + let data = "hello world".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); + slot_metadata.sign(&privk1).unwrap(); + + for peer_server in [&mut peer_1, &mut peer_2] { + let contract_id = QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", + ) + .unwrap(); + let tx = peer_server + .network + .stackerdbs + .tx_begin(StackerDBConfig::noop()) + .unwrap(); + tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) + .unwrap(); + tx.commit().unwrap(); + } + + let convo_1 = ConversationHttp::new( 
+ format!("127.0.0.1:{}", peer_1_http) + .parse::() + .unwrap(), + Some(UrlString::try_from(format!("http://peer1.com")).unwrap()), + peer_1.to_peer_host(), + &peer_1.config.connection_opts, + 0, + 32, + ); + + let convo_2 = ConversationHttp::new( + format!("127.0.0.1:{}", peer_2_http) + .parse::() + .unwrap(), + Some(UrlString::try_from(format!("http://peer2.com")).unwrap()), + peer_2.to_peer_host(), + &peer_2.config.connection_opts, + 1, + 32, + ); + + TestRPC { + privk1, + privk2, + peer_1, + peer_2, + peer_1_indexer, + peer_2_indexer, + convo_1, + convo_2, + canonical_tip, + consensus_hash, + microblock_tip_hash: microblock.block_hash(), + mempool_txids, + microblock_txids, + next_block: (next_consensus_hash, next_stacks_block), + next_microblock: microblock, + sendable_txs, + } + } + + /// Run zero or more HTTP requests on this setup RPC test harness. + /// Return the list of responses. + pub fn run(self, requests: Vec) -> Vec { + let mut peer_1 = self.peer_1; + let mut peer_2 = self.peer_2; + let peer_1_indexer = self.peer_1_indexer; + let peer_2_indexer = self.peer_2_indexer; + let mut convo_1 = self.convo_1; + let mut convo_2 = self.convo_2; + + let mut responses = vec![]; + for request in requests.into_iter() { + convo_1.send_request(request.clone()).unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let peer_2_mempool = peer_2.mempool.take().unwrap(); + + debug!("test_rpc: Peer 1 sends to Peer 2"); + convo_send_recv(&mut convo_1, &mut convo_2); + + // hack around the borrow-checker + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + ); + convo_1.chat(&mut node_state).unwrap(); + } + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + peer_2.mempool = Some(peer_2_mempool); + + debug!("test_rpc: Peer 2 sends to Peer 1"); + + // hack around the borrow-checker + let peer_2_sortdb = peer_2.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + + let _ = peer_2 + .network + .refresh_burnchain_view( + &peer_2_indexer, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + false, + ) + .unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_2_stacks_node.chainstate, &peer_2_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_2.network, + &peer_2_sortdb, + &mut peer_2_stacks_node.chainstate, + &mut peer_2_mempool, + &rpc_args, + ); + convo_2.chat(&mut node_state).unwrap(); + } + + peer_2.sortdb = Some(peer_2_sortdb); + peer_2.stacks_node = Some(peer_2_stacks_node); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + + convo_send_recv(&mut convo_2, &mut convo_1); + + debug!("test_rpc: Peer 1 flush"); + + // hack around the borrow-checker + convo_send_recv(&mut convo_1, &mut convo_2); + + peer_2.mempool = Some(peer_2_mempool); + + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + + let _ = peer_1 + .network + .refresh_burnchain_view( + &peer_1_indexer, + 
&peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + false, + ) + .unwrap(); + + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) + .unwrap(); + + { + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + ); + convo_1.chat(&mut node_state).unwrap(); + } + + convo_1.try_flush().unwrap(); + + peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + + // should have gotten a reply + let resp_opt = convo_1.try_get_response(); + assert!(resp_opt.is_some()); + + let resp = resp_opt.unwrap(); + responses.push(resp); + } + + return responses; + } +} + +/// General testing function to test RPC calls. +/// This function sets up two TestPeers and their respective chainstates, and loads them up with +/// some sample blocks and microblocks. The blocks will contain a smart contract transaction +/// called `hello-world` with the code `TEST_CONTRACT` above. In addition, a microblock will be +/// created off of the block with a contract-call to `add-unit`. The second TestPeer will also +/// have a populated mempool, while the first will not. +/// +/// This function causes the first peer to send `request` to the second peer from the first peer, +/// and will return the `StacksHttpResponse` generated by the second peer. +pub fn test_rpc(test_name: &str, requests: Vec) -> Vec { + let test = TestRPC::setup(test_name); + test.run(requests) +} diff --git a/stackslib/src/net/api/tests/postblock.rs b/stackslib/src/net/api/tests/postblock.rs new file mode 100644 index 0000000000..c3d1f29359 --- /dev/null +++ b/stackslib/src/net/api/tests/postblock.rs @@ -0,0 +1,159 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
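The convo_send_recv helper and TestRPC::run above pump bytes between the two peers until both conversations had nothing pending before the pass and the pass itself moved no data in either direction. The following std-only toy sketches that termination condition; Endpoint, pump, and the shared byte queue standing in for the real Pipe are all made-up names, not this crate's API.

    use std::collections::VecDeque;

    // Toy endpoint: pending outbound bytes plus everything received so far.
    struct Endpoint {
        outbound: VecDeque<u8>,
        inbound: Vec<u8>,
    }

    impl Endpoint {
        // Flush all pending outbound bytes into the shared queue; return how many moved.
        fn send(&mut self, pipe: &mut VecDeque<u8>) -> usize {
            let n = self.outbound.len();
            pipe.extend(self.outbound.drain(..));
            n
        }
        // Drain everything currently in the shared queue; return how many bytes arrived.
        fn recv(&mut self, pipe: &mut VecDeque<u8>) -> usize {
            let n = pipe.len();
            self.inbound.extend(pipe.drain(..));
            n
        }
    }

    // Keep pumping until both sides were already flushed before the pass
    // and the pass moved zero bytes in both directions.
    fn pump(sender: &mut Endpoint, receiver: &mut Endpoint) {
        let mut pipe = VecDeque::new();
        loop {
            let all_flushed = sender.outbound.is_empty() && receiver.outbound.is_empty();
            let nw = sender.send(&mut pipe);
            let nr = receiver.recv(&mut pipe);
            if all_flushed && nw == 0 && nr == 0 {
                break;
            }
        }
    }

    fn main() {
        let mut requester = Endpoint {
            outbound: b"request bytes".iter().copied().collect(),
            inbound: vec![],
        };
        let mut responder = Endpoint {
            outbound: VecDeque::new(),
            inbound: vec![],
        };
        pump(&mut requester, &mut responder);
        assert_eq!(responder.inbound, b"request bytes".to_vec());
    }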
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let block = make_codec_test_block(3); + let request = + StacksHttpRequest::new_post_block(addr.into(), ConsensusHash([0x11; 20]), block.clone()); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock::RPCPostBlockRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.consensus_hash, Some(ConsensusHash([0x11; 20]))); + assert_eq!(handler.block, Some(block.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.consensus_hash.is_none()); + assert!(handler.block.is_none()); + + // try to deal with an invalid block + let mut bad_block = block.clone(); + bad_block.txs.clear(); + + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let request = StacksHttpRequest::new_post_block( + addr.into(), + ConsensusHash([0x11; 20]), + bad_block.clone(), + ); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock::RPCPostBlockRequestHandler::new(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad block"); + } + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let stacks_block_id = StacksBlockHeader::make_index_block_hash( + &rpc_test.next_block.0, + &rpc_test.next_block.1.block_hash(), + ); + let mut requests = vec![]; + + // post the block + let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + // idempotent + let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + // fails if the consensus hash is not recognized + let request = StacksHttpRequest::new_post_block( + addr.into(), + ConsensusHash([0x11; 20]), + rpc_test.next_block.1.clone(), + ); + requests.push(request); + + let mut responses = 
rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, stacks_block_id); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, stacks_block_id); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/postfeerate.rs b/stackslib/src/net/api/tests/postfeerate.rs new file mode 100644 index 0000000000..b34109b5e5 --- /dev/null +++ b/stackslib/src/net/api/tests/postfeerate.rs @@ -0,0 +1,112 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
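The postblock parse test above exercises the handler lifecycle that most of these API tests follow: a successful parse populates the handler's fields, restart() must clear them before the handler is reused, and an undecodable body is rejected without setting any state. Below is a minimal std-only sketch of that contract, using a hypothetical ToyPostBlockHandler rather than the real RPCPostBlockRequestHandler; the field types and error shape are assumptions made only for illustration.

    // Hypothetical handler modeling the parse / restart lifecycle checked above.
    #[derive(Default)]
    struct ToyPostBlockHandler {
        consensus_hash: Option<[u8; 20]>,
        block_bytes: Option<Vec<u8>>,
    }

    impl ToyPostBlockHandler {
        fn try_parse(&mut self, consensus_hash: [u8; 20], body: &[u8]) -> Result<(), String> {
            if body.is_empty() {
                // Mirrors the "bad block" case: a body that fails to decode is
                // rejected before any handler state is populated.
                return Err("decode error: empty block body".into());
            }
            self.consensus_hash = Some(consensus_hash);
            self.block_bytes = Some(body.to_vec());
            Ok(())
        }

        // restart() must leave the handler indistinguishable from a fresh one.
        fn restart(&mut self) {
            *self = Self::default();
        }
    }

    fn main() {
        let mut handler = ToyPostBlockHandler::default();
        assert!(handler.try_parse([0x11; 20], b"block payload").is_ok());
        assert!(handler.consensus_hash.is_some() && handler.block_bytes.is_some());

        handler.restart();
        assert!(handler.consensus_hash.is_none() && handler.block_bytes.is_none());

        assert!(handler.try_parse([0x11; 20], b"").is_err());
    }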
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::to_hex; + +use super::test_rpc; +use crate::chainstate::stacks::TransactionPayload; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let sender_addr = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(); + let tx_payload = + TransactionPayload::new_contract_call(sender_addr, "hello-world", "add-unit", vec![]) + .unwrap(); + + let request = StacksHttpRequest::new_post_fee_rate( + addr.into(), + postfeerate::FeeRateEstimateRequestBody { + estimated_len: Some(123), + transaction_payload: to_hex(&tx_payload.serialize_to_vec()), + }, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postfeerate::RPCPostFeeRateRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.estimated_len, Some(123)); + assert_eq!(handler.transaction_payload, Some(tx_payload.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.estimated_len.is_none()); + assert!(handler.transaction_payload.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let sender_addr = + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(); + let tx_payload = + TransactionPayload::new_contract_call(sender_addr, "hello-world", "add-unit", vec![]) + .unwrap(); + + let mut requests = vec![]; + let request = StacksHttpRequest::new_post_fee_rate( + addr.into(), + postfeerate::FeeRateEstimateRequestBody { + estimated_len: Some(123), + transaction_payload: to_hex(&tx_payload.serialize_to_vec()), + }, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs new file mode 100644 index 0000000000..1f528c57c5 --- /dev/null +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -0,0 +1,439 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is 
free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashSet; +use std::io; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::{read_next, Error as CodecError, StacksMessageCodec}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::{to_hex, Hash160}; + +use super::TestRPC; +use crate::burnchains::Txid; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::test::{chainstate_path, instantiate_chainstate}; +use crate::chainstate::stacks::db::{ExtendedStacksHeader, StacksChainState}; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksTransaction, TokenTransferMemo, TransactionAnchorMode, + TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, +}; +use crate::core::mempool::{decode_tx_stream, MemPoolSyncData, TxTag, MAX_BLOOM_COUNTER_TXS}; +use crate::core::{MemPoolDB, BLOCK_LIMIT_MAINNET_21}; +use crate::net::api::postmempoolquery::StacksMemPoolStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{Error as NetError, ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_mempool_query( + addr.into(), + MemPoolSyncData::TxTags([0x11; 32], vec![TxTag([0x22; 8])]), + Some(Txid([0x33; 32])), + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmempoolquery::RPCMempoolQueryRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.page_id, Some(Txid([0x33; 32]))); + assert_eq!( + handler.mempool_query, + Some(MemPoolSyncData::TxTags([0x11; 32], vec![TxTag([0x22; 8])])) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.page_id.is_none()); + assert!(handler.mempool_query.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_rpc = TestRPC::setup(function_name!()); + let mempool_txids = test_rpc.mempool_txids.clone(); + let mempool_txids: HashSet<_> = mempool_txids.iter().map(|txid| txid.clone()).collect(); + + let sync_data = test_rpc + .peer_1 + .mempool + .as_ref() + .unwrap() + .make_mempool_sync_data() + .unwrap(); + + let mut requests = vec![]; + let request = StacksHttpRequest::new_mempool_query( + addr.into(), + MemPoolSyncData::TxTags([0x00; 32], vec![]), + Some(Txid([0x00; 32])), + ); + requests.push(request); + + let mut responses = test_rpc.run(requests); + + let response = responses.remove(0); + + let (txs, page) = response.decode_mempool_txs_page().unwrap(); + let received_txids: HashSet<_> = txs.iter().map(|tx| tx.txid()).collect(); + + assert_eq!(received_txids, mempool_txids); + assert!(page.is_none()); +} + +#[test] +fn test_stream_mempool_txs() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + let block_height = 10; + let mut total_len = 0; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + total_len += tx_bytes.len(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &ConsensusHash([0x1 + (block_height as u8); 20]), + &BlockHeaderHash([0x2 + (block_height as u8); 32]), + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + txs.push(tx); + } + mempool_tx.commit().unwrap(); + + let mut buf = vec![]; + let mut tx_stream_data = StacksMemPoolStream::new( + mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], vec![]), + MAX_BLOOM_COUNTER_TXS.into(), + block_height, + Some(Txid([0u8; 32])), + ); + + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.is_empty() { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + eprintln!("Read {} bytes of tx data", buf.len()); + + // buf decodes to the list of txs we have + let mut decoded_txs = vec![]; + let mut ptr = &buf[..]; + loop { + let tx: StacksTransaction = match read_next::(&mut ptr) { + Ok(tx) => tx, + Err(e) => match e { + CodecError::ReadError(ref ioe) => match ioe.kind() { + io::ErrorKind::UnexpectedEof => { + eprintln!("out of transactions"); + break; + } + _ => { + panic!("IO error: {:?}", &e); + } + }, + _ => { + panic!("other error: 
{:?}", &e); + } + }, + }; + decoded_txs.push(tx); + } + + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with an empty tx tags + let mut page_id = Txid([0u8; 32]); + let mut decoded_txs = vec![]; + loop { + let mut tx_stream_data = StacksMemPoolStream::new( + mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], vec![]), + 1, + block_height, + Some(page_id), + ); + + let mut buf = vec![]; + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.is_empty() { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + // buf decodes to the list of txs we have, plus page ids + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (mut next_txs, next_page) = decode_tx_stream(&mut ptr).unwrap(); + + decoded_txs.append(&mut next_txs); + + // for fun, use a page ID that is actually a well-formed prefix of a transaction + if let Some(ref tx) = decoded_txs.last() { + let mut evil_buf = tx.serialize_to_vec(); + let mut evil_page_id = [0u8; 32]; + evil_page_id.copy_from_slice(&evil_buf[0..32]); + evil_buf.extend_from_slice(&evil_page_id); + + test_debug!("Decode evil buf {}", &to_hex(&evil_buf)); + + let (evil_next_txs, evil_next_page) = decode_tx_stream(&mut &evil_buf[..]).unwrap(); + + // should still work + assert_eq!(evil_next_txs.len(), 1); + assert_eq!(evil_next_txs[0].txid(), tx.txid()); + assert_eq!(evil_next_page.unwrap().0[0..32], evil_buf[0..32]); + } + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } + + // make sure we got them all + let mut tx_set = HashSet::new(); + for tx in txs.iter() { + tx_set.insert(tx.txid()); + } + + // the order won't be preserved + assert_eq!(tx_set.len(), decoded_txs.len()); + for tx in decoded_txs { + assert!(tx_set.contains(&tx.txid())); + } + + // verify that we can stream through pagination, with a full bloom filter + let mut page_id = Txid([0u8; 32]); + let all_txs_tags: Vec<_> = txs + .iter() + .map(|tx| TxTag::from(&[0u8; 32], &tx.txid())) + .collect(); + loop { + let mut tx_stream_data = StacksMemPoolStream::new( + mempool.reopen(false).unwrap(), + MemPoolSyncData::TxTags([0u8; 32], all_txs_tags.clone()), + 1, + block_height, + Some(page_id), + ); + + let mut buf = vec![]; + loop { + let chunk = tx_stream_data.generate_next_chunk().unwrap(); + if chunk.is_empty() { + break; + } + buf.extend_from_slice(&chunk[..]); + } + + // buf decodes to an empty list of txs, plus page ID + let mut ptr = &buf[..]; + test_debug!("Decode {}", to_hex(ptr)); + let (next_txs, next_page) = decode_tx_stream(&mut ptr).unwrap(); + + assert_eq!(next_txs.len(), 0); + + if let Some(next_page) = next_page { + page_id = next_page; + } else { + break; + } + } +} + +#[test] +fn test_decode_tx_stream() { + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + for _i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 
34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + txs.push(tx); + } + + // valid empty tx stream + let empty_stream = [0x11u8; 32]; + let (next_txs, next_page) = decode_tx_stream(&mut empty_stream.as_ref()).unwrap(); + assert_eq!(next_txs.len(), 0); + assert_eq!(next_page, Some(Txid([0x11; 32]))); + + // valid tx stream with a page id at the end + let mut tx_stream: Vec = vec![]; + for tx in txs.iter() { + tx.consensus_serialize(&mut tx_stream).unwrap(); + } + tx_stream.extend_from_slice(&[0x22; 32]); + + let (next_txs, next_page) = decode_tx_stream(&mut &tx_stream[..]).unwrap(); + assert_eq!(next_txs, txs); + assert_eq!(next_page, Some(Txid([0x22; 32]))); + + // valid tx stream with _no_ page id at the end + let mut partial_stream: Vec = vec![]; + txs[0].consensus_serialize(&mut partial_stream).unwrap(); + let (next_txs, next_page) = decode_tx_stream(&mut &partial_stream[..]).unwrap(); + assert_eq!(next_txs.len(), 1); + assert_eq!(next_txs[0], txs[0]); + assert!(next_page.is_none()); + + // garbage tx stream + let garbage_stream = [0xff; 256]; + let err = decode_tx_stream(&mut garbage_stream.as_ref()); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream that is too short + let short_stream = [0x33u8; 33]; + let err = decode_tx_stream(&mut short_stream.as_ref()); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } + + // tx stream has a tx, a page ID, and then another tx + let mut interrupted_stream = vec![]; + txs[0].consensus_serialize(&mut interrupted_stream).unwrap(); + interrupted_stream.extend_from_slice(&[0x00u8; 32]); + txs[1].consensus_serialize(&mut interrupted_stream).unwrap(); + + let err = decode_tx_stream(&mut &interrupted_stream[..]); + match err { + Err(NetError::ExpectedEndOfStream) => {} + x => { + error!("did not fail: {:?}", &x); + panic!(); + } + } +} diff --git a/stackslib/src/net/api/tests/postmicroblock.rs b/stackslib/src/net/api/tests/postmicroblock.rs new file mode 100644 index 0000000000..9688b4a3fc --- /dev/null +++ b/stackslib/src/net/api/tests/postmicroblock.rs @@ -0,0 +1,147 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
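The pagination loops above re-issue the mempool query with the page ID returned by the previous chunk until no page ID comes back, and decode_tx_stream treats a bare 32-byte tail as that page ID. The sketch below illustrates the same convention with a deliberately made-up framing (a 1-byte length prefix per record); it is not the real transaction wire format, and decode_toy_stream is a hypothetical helper, not this crate's decode_tx_stream.

    // Toy decoder: records are length-prefixed blobs; a bare 32-byte tail is
    // interpreted as the page ID for the next query; a truncated record is an error.
    fn decode_toy_stream(mut buf: &[u8]) -> Result<(Vec<Vec<u8>>, Option<[u8; 32]>), String> {
        let mut records = Vec::new();
        loop {
            if buf.is_empty() {
                // Stream ended cleanly with no page ID: this was the last page.
                return Ok((records, None));
            }
            if buf.len() == 32 {
                // A bare 32-byte tail names the page to request next.
                let mut page = [0u8; 32];
                page.copy_from_slice(buf);
                return Ok((records, Some(page)));
            }
            // Toy framing: one length byte followed by that many record bytes.
            let len = buf[0] as usize;
            if buf.len() < 1 + len {
                return Err("expected end of stream".into());
            }
            records.push(buf[1..1 + len].to_vec());
            buf = &buf[1 + len..];
        }
    }

    fn main() {
        // Two records followed by a page ID: caller would query again with that ID.
        let mut stream = vec![3, 1, 2, 3, 2, 9, 9];
        stream.extend_from_slice(&[0x22; 32]);
        let (records, page) = decode_toy_stream(&stream).unwrap();
        assert_eq!(records.len(), 2);
        assert_eq!(page, Some([0x22; 32]));

        // A page ID alone decodes to zero records, ending the tx portion of the stream.
        let (records, page) = decode_toy_stream(&[0x11; 32]).unwrap();
        assert!(records.is_empty());
        assert_eq!(page, Some([0x11; 32]));
    }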
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::test::make_codec_test_microblock; +use crate::chainstate::stacks::StacksMicroblock; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let mblock = make_codec_test_microblock(3); + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmicroblock::RPCPostMicroblockRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.microblock, Some(mblock.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.microblock.is_none()); + + // try to decode a bad microblock + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut bad_mblock = mblock.clone(); + bad_mblock.txs.clear(); + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + bad_mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postmicroblock::RPCPostMicroblockRequestHandler::new(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad microblock"); + } + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_rpc = TestRPC::setup_ex(function_name!(), false); + let mblock = test_rpc.next_microblock.clone(); + + let mut requests = vec![]; + + // fails due to bad tip + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + requests.push(request); + + // succeeds + let request = StacksHttpRequest::new_post_microblock( + addr.into(), + mblock.clone(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc.run(requests); + + // fails due to bad tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + 
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // succeeds + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let mblock_hash = response.decode_stacks_microblock_response().unwrap(); + assert_eq!(mblock_hash, mblock.block_hash()); +} diff --git a/stackslib/src/net/api/tests/poststackerdbchunk.rs b/stackslib/src/net/api/tests/poststackerdbchunk.rs new file mode 100644 index 0000000000..4ab5cac6eb --- /dev/null +++ b/stackslib/src/net/api/tests/poststackerdbchunk.rs @@ -0,0 +1,275 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use libstackerdb::{SlotMetadata, StackerDBChunkData}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use super::TestRPC; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed", + ) + .unwrap(), + 0, + 1, + MessageSignature::empty(), + vec![0, 1, 2, 3, 4], + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed" + ) + .unwrap() + ) + ); + assert_eq!( + handler.chunk, + Some(StackerDBChunkData { + slot_id: 0, + slot_version: 1, + data: vec![0, 1, 2, 3, 4], + sig: MessageSignature::empty() + }) + ); + + // parsed request consumes headers that would not 
be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.contract_identifier.is_none()); + assert!(handler.chunk.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let mut requests = vec![]; + + // try to write a new chunk + let data = "try make response".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 1, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to overwrite a new chunk + let data = "try make response 2".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 2, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to overwrite a new chunk, with the same version (should fail) + let data = "try make response 3".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 2, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write with the wrong key (should fail) + let data = "try make response 4".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 3, data_hash); + slot_metadata.sign(&rpc_test.privk2).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write to a bad slot (should fail) + let data = "try make response 5".as_bytes(); + let data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(4093, 3, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world") + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + // try to write to a bad contract (should fail) + let data = "try make response 6".as_bytes(); + let 
data_hash = Sha512Trunc256Sum::from_data(data); + let mut slot_metadata = SlotMetadata::new_unsigned(1, 3, data_hash); + slot_metadata.sign(&rpc_test.privk1).unwrap(); + + let request = StacksHttpRequest::new_post_stackerdb_chunk( + addr.into(), + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist", + ) + .unwrap(), + slot_metadata.slot_id, + slot_metadata.slot_version, + slot_metadata.signature.clone(), + data.to_vec(), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 1); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 2); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 2); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.metadata.as_ref().unwrap().slot_id, 1); + assert_eq!(resp.metadata.as_ref().unwrap().slot_version, 2); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stackerdb_chunk_ack().unwrap(); + assert_eq!(resp.accepted, false); + assert!(resp.metadata.is_none()); + assert!(resp.reason.is_some()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/posttransaction.rs b/stackslib/src/net/api/tests/posttransaction.rs new file mode 100644 index 0000000000..fd1c1e7e37 --- /dev/null +++ b/stackslib/src/net/api/tests/posttransaction.rs @@ -0,0 +1,241 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::stacks::{ + StacksTransaction, StacksTransactionSigner, TransactionAuth, TransactionPayload, + TransactionVersion, +}; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::{Attachment, ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R + let privk1 = StacksPrivateKey::from_hex( + "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", + ) + .unwrap(); + + let addr1 = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk1)], + ) + .unwrap(); + + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&privk1).unwrap(), + TransactionPayload::new_contract_call(addr1.clone(), "hello-world", "add-unit", vec![]) + .unwrap(), + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(2); + tx_cc.set_tx_fee(123); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + tx_signer.sign_origin(&privk1).unwrap(); + let tx_cc_signed = tx_signer.get_tx().unwrap(); + + // Test without an attachment + let request = StacksHttpRequest::new_post_transaction(addr.into(), tx_cc_signed.clone()); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + assert!(handler.attachment.is_none()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); + + // Test with a null attachment + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + tx_cc_signed.clone(), + None, + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + 
assert_eq!(handler.attachment, None); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // Test with an attachment + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + tx_cc_signed.clone(), + Some(vec![0, 1, 2, 3, 4]), + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = posttransaction::RPCPostTransactionRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.tx, Some(tx_cc_signed.clone())); + assert_eq!( + handler.attachment, + Some(Attachment::new(vec![0, 1, 2, 3, 4])) + ); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.tx.is_none()); + assert!(handler.attachment.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let rpc_test = TestRPC::setup(function_name!()); + let sendable_txs = rpc_test.sendable_txs.clone(); + + let mut requests = vec![]; + + // send a tx (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[0].clone(), + None, + ); + requests.push(request); + + // send a tx with an attachment (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[1].clone(), + Some(vec![0, 1, 2, 3, 4]), + ); + requests.push(request); + + // send the same tx (should succeed) + let request = StacksHttpRequest::new_post_transaction_with_attachment( + addr.into(), + sendable_txs[0].clone(), + None, + ); + requests.push(request); + + // send a bad tx (should fail) + let mut bad_tx = sendable_txs[2].clone(); + bad_tx.version = TransactionVersion::Mainnet; + let request = + StacksHttpRequest::new_post_transaction_with_attachment(addr.into(), bad_tx.clone(), None); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[0].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[1].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let txid = response.decode_txid().unwrap(); + assert_eq!(txid, sendable_txs[0].txid()); + + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index edcf66d29f..f38c6c54d4 100644 --- a/stackslib/src/net/asn.rs +++ 
b/stackslib/src/net/asn.rs @@ -18,9 +18,10 @@ use std::fs::File; use std::io::{BufRead, BufReader}; use regex::{Captures, Regex}; +use stacks_common::types::net::PeerAddress; use stacks_common::util::log; -use crate::net::{Error as net_error, PeerAddress}; +use crate::net::Error as net_error; // IPv4 prefix to ASN/org map entry #[derive(Debug, Clone, PartialEq)] diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index fab672c8eb..10f48a6114 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -644,10 +644,7 @@ impl AtlasDB { Ok(rows) } - pub fn find_attachment( - &mut self, - content_hash: &Hash160, - ) -> Result, db_error> { + pub fn find_attachment(&self, content_hash: &Hash160) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index e92638ba24..489050bcbd 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -29,17 +29,14 @@ use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use super::{AtlasDB, Attachment, AttachmentInstance, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::stacks::db::StacksChainState; -use crate::core::mempool::MemPoolDB; -use crate::net::atlas::MAX_RETRY_DELAY; +use crate::net::atlas::{GetAttachmentResponse, GetAttachmentsInvResponse, MAX_RETRY_DELAY}; use crate::net::connection::ConnectionOptions; use crate::net::dns::*; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::p2p::PeerNetwork; use crate::net::server::HttpPeer; -use crate::net::{ - Error as net_error, GetAttachmentResponse, GetAttachmentsInvResponse, HttpRequestMetadata, - HttpRequestType, HttpResponseType, NeighborKey, PeerHost, Requestable, -}; +use crate::net::{Error as net_error, NeighborKey, PeerHost, Requestable}; use crate::util_lib::db::Error as DBError; use crate::util_lib::strings; use crate::util_lib::strings::UrlString; @@ -99,8 +96,6 @@ impl AttachmentsDownloader { pub fn run( &mut self, dns_client: &mut DNSClient, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, network: &mut PeerNetwork, ) -> Result<(Vec<(AttachmentInstance, Attachment)>, Vec), net_error> { let mut resolved_attachments = vec![]; @@ -154,13 +149,8 @@ impl AttachmentsDownloader { } }; - let mut progress = AttachmentsBatchStateMachine::try_proceed( - ongoing_fsm, - dns_client, - network, - mempool, - chainstate, - ); + let mut progress = + AttachmentsBatchStateMachine::try_proceed(ongoing_fsm, dns_client, network); match progress { AttachmentsBatchStateMachine::Done(ref mut context) => { @@ -516,7 +506,15 @@ impl AttachmentsBatchStateContext { .peers .get_mut(request.get_url()) .expect("Atlas: unable to retrieve reliability report for peer"); - if let Some(HttpResponseType::GetAttachmentsInv(_, response)) = response { + + let response = if let Some(r) = response { + r + } else { + report.bump_failed_requests(); + continue; + }; + + if let Ok(response) = response.decode_atlas_attachments_inv_response() { let peer_url = request.get_url().clone(); match self.inventories.entry(request.key()) { Entry::Occupied(responses) => { @@ -552,7 +550,15 @@ impl AttachmentsBatchStateContext { .peers .get_mut(request.get_url()) .expect("Atlas: unable to retrieve reliability 
report for peer"); - if let Some(HttpResponseType::GetAttachment(_, response)) = response { + + let response = if let Some(r) = response { + r + } else { + report.bump_failed_requests(); + continue; + }; + + if let Ok(response) = response.decode_atlas_get_attachment() { self.attachments.insert(response.attachment); report.bump_successful_requests(); } else { @@ -601,8 +607,6 @@ impl AttachmentsBatchStateMachine { fsm: AttachmentsBatchStateMachine, dns_client: &mut DNSClient, network: &mut PeerNetwork, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> AttachmentsBatchStateMachine { match fsm { AttachmentsBatchStateMachine::Initialized(context) => { @@ -637,8 +641,6 @@ impl AttachmentsBatchStateMachine { attachments_invs_requests, &context.dns_lookups, network, - mempool, - chainstate, &context.connection_options, ) { BatchedRequestsState::Done(ref mut results) => { @@ -662,8 +664,6 @@ impl AttachmentsBatchStateMachine { attachments_requests, &context.dns_lookups, network, - mempool, - chainstate, &context.connection_options, ) { BatchedRequestsState::Done(ref mut results) => { @@ -839,8 +839,6 @@ impl BatchedRequestsState fsm: BatchedRequestsState, dns_lookups: &HashMap>>, network: &mut PeerNetwork, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, connection_options: &ConnectionOptions, ) -> BatchedRequestsState { let mut fsm = fsm; @@ -862,13 +860,8 @@ impl BatchedRequestsState if let Some(requestable) = queue.pop() { let mut requestables = VecDeque::new(); requestables.push_back(requestable); - let res = PeerNetwork::begin_request( - network, - dns_lookups, - &mut requestables, - mempool, - chainstate, - ); + let res = + PeerNetwork::begin_request(network, dns_lookups, &mut requestables); if let Some((request, event_id)) = res { results.remaining.insert(event_id, request); } @@ -923,14 +916,13 @@ impl BatchedRequestsState } Some(response) => { let peer_url = request.get_url().clone(); - - if let HttpResponseType::NotFound(_, _) = response { + if response.preamble().status_code == 404 { state.faulty_peers.insert(event_id, peer_url); continue; } debug!( - "Atlas: Request {} (event_id: {}) received response {:?}", - request, event_id, response + "Atlas: Request {} (event_id: {}) received HTTP 200", + request, event_id ); state.succeeded.insert(request, Some(response)); } @@ -984,7 +976,7 @@ struct BatchedRequestsInitializedState { #[derive(Debug, Default)] pub struct BatchedRequestsResult { pub remaining: HashMap, - pub succeeded: HashMap>, + pub succeeded: HashMap>, pub errors: HashMap, pub faulty_peers: HashMap, } @@ -1056,16 +1048,28 @@ impl Requestable for AttachmentsInventoryRequest { &self.url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { - let mut pages_indexes = HashSet::new(); + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { + let mut page_indexes = HashSet::new(); for page in self.pages.iter() { - pages_indexes.insert(*page); + page_indexes.insert(*page); } - HttpRequestType::GetAttachmentsInv( - HttpRequestMetadata::from_host(peer_host, self.canonical_stacks_tip_height), - self.index_block_hash, - pages_indexes, + let mut page_list: Vec = page_indexes + .into_iter() + .map(|i| format!("{}", &i)) + .collect(); + page_list.sort(); + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + "/v2/attachments/inv".into(), + HttpRequestContents::new() + .query_arg( + "index_block_hash".into(), + format!("{}", &self.index_block_hash), + ) + .query_arg("pages_indexes".into(), page_list[..].join(",")), ) + 
.expect("FATAL: failed to create an HTTP request for infallible data") } } @@ -1121,11 +1125,14 @@ impl Requestable for AttachmentRequest { url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { - HttpRequestType::GetAttachment( - HttpRequestMetadata::from_host(peer_host, self.canonical_stacks_tip_height), - self.content_hash, + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".to_string(), + format!("/v2/attachments/{}", &self.content_hash), + HttpRequestContents::new(), ) + .expect("FATAL: failed to create an HTTP request for infallible data") } } diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index fbb3848624..441802bff2 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -20,9 +20,11 @@ use std::hash::{Hash, Hasher}; use clarity::vm::types::{QualifiedContractIdentifier, SequenceData, TupleData, Value}; use regex::Regex; +use serde::de::{Deserialize, Error as de_Error}; +use serde::ser::Serialize; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; -use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc}; +use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, MerkleHashFunc}; pub use self::db::AtlasDB; pub use self::download::AttachmentsDownloader; @@ -54,6 +56,40 @@ const MAX_UNINSTANTIATED_ATTACHMENTS_MIN: u32 = 50_000; const UNINSTANTIATED_ATTACHMENTS_EXPIRE_AFTER_MIN: u32 = 86_400; const UNRESOLVED_ATTACHMENT_INSTANCES_EXPIRE_AFTER_MIN: u32 = 172_800; +#[derive(Debug, Clone, PartialEq)] +pub struct GetAttachmentResponse { + pub attachment: Attachment, +} + +impl Serialize for GetAttachmentResponse { + fn serialize(&self, s: S) -> Result { + let hex_encoded = to_hex(&self.attachment.content[..]); + s.serialize_str(hex_encoded.as_str()) + } +} + +impl<'de> Deserialize<'de> for GetAttachmentResponse { + fn deserialize>(d: D) -> Result { + let payload = String::deserialize(d)?; + let hex_encoded = payload.parse::().map_err(de_Error::custom)?; + let bytes = hex_bytes(&hex_encoded).map_err(de_Error::custom)?; + let attachment = Attachment::new(bytes); + Ok(GetAttachmentResponse { attachment }) + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GetAttachmentsInvResponse { + pub block_id: StacksBlockId, + pub pages: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AttachmentPage { + pub index: u32, + pub inventory: Vec, +} + #[derive(Debug, Clone)] pub struct AtlasConfig { pub contracts: HashSet, diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 42cf3de126..567d49fe61 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -20,21 +20,23 @@ use std::{thread, time}; use clarity::vm::types::QualifiedContractIdentifier; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::Hash160; use super::download::{ AttachmentRequest, AttachmentsBatch, AttachmentsBatchStateContext, AttachmentsInventoryRequest, BatchedRequestsResult, ReliabilityReport, }; -use super::{AtlasConfig, AtlasDB, Attachment, AttachmentInstance}; +use super::{ + AtlasConfig, AtlasDB, Attachment, AttachmentInstance, AttachmentPage, GetAttachmentsInvResponse, +}; use crate::burnchains::Txid; use crate::chainstate::burn::ConsensusHash; use 
crate::chainstate::stacks::db::StacksChainState; use crate::net::connection::ConnectionOptions; -use crate::net::{ - AttachmentPage, GetAttachmentsInvResponse, HttpResponseMetadata, HttpResponseType, HttpVersion, - PeerHost, Requestable, -}; +use crate::net::http::{HttpResponsePayload, HttpResponsePreamble, HttpVersion}; +use crate::net::httpcore::StacksHttpResponse; +use crate::net::Requestable; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::u64_to_sql; use crate::util_lib::strings::UrlString; @@ -129,8 +131,7 @@ fn new_attachments_inventory_request( } } -fn new_attachments_inventory_response(pages: Vec<(u32, Vec)>) -> HttpResponseType { - let md = HttpResponseMetadata::new(HttpVersion::Http11, 1, None, true, None); +fn new_attachments_inventory_response(pages: Vec<(u32, Vec)>) -> StacksHttpResponse { let pages = pages .into_iter() .map(|(index, inventory)| AttachmentPage { index, inventory }) @@ -139,7 +140,14 @@ fn new_attachments_inventory_response(pages: Vec<(u32, Vec)>) -> HttpRespons block_id: StacksBlockId([0u8; 32]), pages, }; - HttpResponseType::GetAttachmentsInv(md, response) + + let response_json = serde_json::to_value(&response).unwrap(); + let body = HttpResponsePayload::try_from_json(response_json).unwrap(); + + StacksHttpResponse::new( + HttpResponsePreamble::raw_ok_json(HttpVersion::Http11, false), + body, + ) } #[test] @@ -613,25 +621,28 @@ fn test_downloader_context_attachment_inventories_requests() { let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:30443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == "/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:20443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == "/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); let request = request_queue.pop().unwrap(); let request_type = request.make_request_type(localhost.clone()); assert_eq!(&**request.get_url(), "http://localhost:40443"); - assert_eq!( - request_type.request_path(), - "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1,2" + debug!("request path = {}", request_type.request_path()); + assert!( + request_type.request_path() == "/v2/attachments/inv?index_block_hash=0101010101010101010101010101010101010101010101010101010101010101&pages_indexes=1%2C2" || + request_type.request_path() == 
"/v2/attachments/inv?pages_indexes=1%2C2&index_block_hash=0101010101010101010101010101010101010101010101010101010101010101" ); } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 1189087afe..664ab52c30 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -24,6 +24,7 @@ use clarity::vm::types::QualifiedContractIdentifier; use rand; use rand::{thread_rng, Rng}; use stacks_common::types::chainstate::PoxId; +use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; @@ -45,8 +46,8 @@ use crate::net::p2p::PeerNetwork; use crate::net::relay::*; use crate::net::stackerdb::StackerDBs; use crate::net::{ - Error as net_error, GetBlocksInv, GetPoxInv, Neighbor, NeighborKey, PeerAddress, StacksMessage, - StacksP2P, GETPOXINV_MAX_BITLEN, *, + Error as net_error, GetBlocksInv, GetPoxInv, Neighbor, NeighborKey, StacksMessage, StacksP2P, + GETPOXINV_MAX_BITLEN, *, }; use crate::util_lib::db::{DBConn, Error as db_error}; @@ -701,7 +702,7 @@ impl ConversationP2P { return false; } }; - if *bhh != *their_burn_header_hash { + if bhh != their_burn_header_hash { test_debug!( "Burn header hash mismatch in preamble: {} != {}", bhh, @@ -3323,7 +3324,6 @@ mod test { } #[test] - #[ignore] fn convo_handshake_accept() { with_timeout(100, || { let conn_opts = ConnectionOptions::default(); diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index 32b5f2756a..a9de074061 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -30,6 +30,7 @@ use stacks_common::codec::{ StacksMessageCodec, MAX_MESSAGE_LEN, MAX_RELAYERS_LEN, PREAMBLE_ENCODED_SIZE, }; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; +use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, DoubleSha256, Hash160, MerkleHashFunc}; use stacks_common::util::log; @@ -703,41 +704,6 @@ impl StacksMessageCodec for NatPunchData { } } -impl StacksMessageCodec for MemPoolSyncData { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - match *self { - MemPoolSyncData::BloomFilter(ref bloom_filter) => { - write_next(fd, &MemPoolSyncDataID::BloomFilter.to_u8())?; - write_next(fd, bloom_filter)?; - } - MemPoolSyncData::TxTags(ref seed, ref tags) => { - write_next(fd, &MemPoolSyncDataID::TxTags.to_u8())?; - write_next(fd, seed)?; - write_next(fd, tags)?; - } - } - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let data_id: u8 = read_next(fd)?; - match MemPoolSyncDataID::from_u8(data_id).ok_or(codec_error::DeserializeError(format!( - "Unrecognized MemPoolSyncDataID {}", - &data_id - )))? 
{ - MemPoolSyncDataID::BloomFilter => { - let bloom_filter: BloomFilter = read_next(fd)?; - Ok(MemPoolSyncData::BloomFilter(bloom_filter)) - } - MemPoolSyncDataID::TxTags => { - let seed: [u8; 32] = read_next(fd)?; - let txtags: Vec = read_next(fd)?; - Ok(MemPoolSyncData::TxTags(seed, txtags)) - } - } - } -} - fn contract_id_consensus_serialize( fd: &mut W, cid: &QualifiedContractIdentifier, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index fb8211cd31..522a0f6343 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -29,6 +29,7 @@ use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; use mio; use mio::net as mio_net; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; use stacks_common::util::pipe::*; use stacks_common::util::secp256k1::Secp256k1PublicKey; @@ -46,8 +47,7 @@ use crate::net::neighbors::{ WALK_STATE_TIMEOUT, }; use crate::net::{ - Error as net_error, HttpRequestPreamble, HttpResponsePreamble, MessageSequence, PeerAddress, - Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, + Error as net_error, MessageSequence, Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, }; /// Receiver notification handle. @@ -71,7 +71,7 @@ impl ReceiverNotify
<P: ProtocolFamily>
{ /// Send this message to the waiting receiver, consuming this notification handle. /// May fail silently. - pub fn send(self, msg: P::Message) -> () { + pub fn send(self, msg: P::Message) { let msg_name = msg.get_message_name().to_string(); let msg_id = msg.request_id(); match self.receiver_input.send(msg) { @@ -223,25 +223,45 @@ impl NetworkReplyHandle
<P: ProtocolFamily>
{ } } - /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe. - /// Only call this once you're done sending -- this is just to move the data along. - /// Return true if we're done sending; false if we need to call this again. - pub fn try_flush(&mut self) -> Result { + /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe if + /// `drop_on_success` is true. Returns `true` if we drained the write end, `false` if not. + pub fn try_flush_ex(&mut self, drop_on_success: bool) -> Result { + let ret; let fd_opt = match self.request_pipe_write.take() { Some(mut fd) => { - let res = fd.try_flush().map_err(net_error::WriteError)?; - if res { - // all data flushed! + ret = fd.try_flush().map_err(net_error::WriteError)?; + if ret && drop_on_success { + // all data flushed, and we won't send more. None } else { - // still have data to send + // still have data to send, or we will send more. + test_debug!( + "Still have data to send, drop_on_success = {}, ret = {}", + drop_on_success, + ret + ); Some(fd) } } - None => None, + None => { + ret = true; + None + } }; self.request_pipe_write = fd_opt; - Ok(self.request_pipe_write.is_none()) + Ok(ret) + } + + /// Try to flush the inner pipe writer. If we succeed, drop the inner pipe. + /// Only call this once you're done sending -- this is just to move the data along. + /// Return true if we're done sending; false if we need to call this again. + pub fn try_flush(&mut self) -> Result { + self.try_flush_ex(true) + } + + /// Get a mutable reference to the inner pipe, if we have it + pub fn inner_pipe_out(&mut self) -> Option<&mut PipeWrite> { + self.request_pipe_write.as_mut() } } @@ -363,6 +383,10 @@ pub struct ConnectionOptions { pub mempool_max_tx_query: u64, /// how long a mempool sync is allowed to take, in total, before timing out pub mempool_sync_timeout: u64, + /// socket read buffer size + pub socket_recv_buffer_size: u32, + /// socket write buffer size + pub socket_send_buffer_size: u32, // fault injection pub disable_neighbor_walk: bool, @@ -452,6 +476,8 @@ impl std::default::Default for ConnectionOptions { mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) + socket_recv_buffer_size: 131072, // Linux default + socket_send_buffer_size: 16384, // Linux default // no faults on by default disable_neighbor_walk: false, @@ -1039,6 +1065,7 @@ impl ConnectionOutbox
<P: ProtocolFamily>
{ let _nr_input = match self.pending_message_fd { Some(ref mut message_fd) => { // consume from message-writer until we're out of data + // TODO: make this configurable let mut buf = [0u8; 8192]; let nr_input = match message_fd.read(&mut buf) { Ok(0) => { diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 206f48a88e..25c4ed7e62 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -25,6 +25,7 @@ use rand::seq::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::ToSql; use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util; use stacks_common::util::hash::{ bin_bytes, hex_bytes, to_bin, to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum, @@ -37,7 +38,7 @@ use crate::burnchains::{PrivateKey, PublicKey}; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::core::NETWORK_P2P_PORT; use crate::net::asn::ASEntry4; -use crate::net::{Neighbor, NeighborAddress, NeighborKey, PeerAddress, ServiceFlags}; +use crate::net::{Neighbor, NeighborAddress, NeighborKey, ServiceFlags}; use crate::util_lib::db::{ query_count, query_row, query_rows, sqlite_open, tx_begin_immediate, tx_busy_handler, u64_to_sql, DBConn, Error as db_error, FromColumn, FromRow, @@ -48,12 +49,6 @@ pub const PEERDB_VERSION: &'static str = "2"; const NUM_SLOTS: usize = 8; -impl PeerAddress { - pub fn to_bin(&self) -> String { - to_bin(&self.0) - } -} - impl FromColumn for PeerAddress { fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { let addrbytes_bin: String = row.get_unwrap(column_name); @@ -1332,7 +1327,7 @@ impl PeerDB { } /// Get a peer's advertized stacker DBs - fn static_get_peer_stacker_dbs( + pub fn static_get_peer_stacker_dbs( conn: &Connection, neighbor: &Neighbor, ) -> Result, db_error> { @@ -1805,10 +1800,11 @@ impl PeerDB { mod test { use clarity::vm::types::{StacksAddressExtensions, StandardPrincipalData}; use stacks_common::types::chainstate::StacksAddress; + use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::Hash160; use super::*; - use crate::net::{Neighbor, NeighborKey, PeerAddress}; + use crate::net::{Neighbor, NeighborKey}; /// Test storage, retrieval, and mutation of LocalPeer, including its stacker DB contract IDs #[test] diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 7f36d88d67..aedb73bd62 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -21,12 +21,13 @@ use std::sync::mpsc::{ sync_channel, Receiver, RecvError, RecvTimeoutError, SyncSender, TryRecvError, TrySendError, }; +use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log, sleep_ms}; use crate::net::asn::ASEntry4; use crate::net::codec::*; -use crate::net::{Error as net_error, Neighbor, NeighborKey, PeerAddress, *}; +use crate::net::{Error as net_error, Neighbor, NeighborKey, *}; use crate::util_lib::db::Error as db_error; /// In Rust, there's no easy way to do non-blocking DNS lookups (I blame getaddrinfo), so do it in diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 6b9bd8723b..5957b9818a 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -26,6 +26,7 @@ use std::sync::mpsc::{ use rand::seq::SliceRandom; use rand::{thread_rng, RngCore}; use stacks_common::types::chainstate::{BlockHeaderHash, PoxId, SortitionId, 
StacksBlockId}; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; @@ -44,14 +45,15 @@ use crate::net::codec::*; use crate::net::connection::{ConnectionOptions, ReplyHandleHttp}; use crate::net::db::{PeerDB, *}; use crate::net::dns::*; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::inv::InvState; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::net::p2p::PeerNetwork; use crate::net::rpc::*; use crate::net::server::HttpPeer; use crate::net::{ - Error as net_error, GetBlocksInv, Neighbor, NeighborKey, PeerAddress, StacksMessage, StacksP2P, - *, + Error as net_error, GetBlocksInv, Neighbor, NeighborKey, StacksMessage, StacksP2P, *, }; use crate::util_lib::db::{DBConn, Error as db_error}; @@ -124,6 +126,28 @@ impl BlockRequestKey { canonical_stacks_tip_height, } } + + /// Make a request for a block + fn make_getblock_request(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + format!("/v2/blocks/{}", &self.index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to create HTTP request for infallible data") + } + + /// Make a request for a stream of confirmed microblocks + fn make_confirmed_microblocks_request(&self, peer_host: PeerHost) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + peer_host, + "GET".into(), + format!("/v2/microblocks/confirmed/{}", &self.index_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to create HTTP request for infallible data") + } } impl Requestable for BlockRequestKey { @@ -131,20 +155,11 @@ impl Requestable for BlockRequestKey { &self.data_url } - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType { + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest { match self.kind { - BlockRequestKeyKind::Block => HttpRequestType::GetBlock( - HttpRequestMetadata::from_host(peer_host, Some(self.canonical_stacks_tip_height)), - self.index_block_hash, - ), + BlockRequestKeyKind::Block => self.make_getblock_request(peer_host), BlockRequestKeyKind::ConfirmedMicroblockStream => { - HttpRequestType::GetMicroblocksConfirmed( - HttpRequestMetadata::from_host( - peer_host, - Some(self.canonical_stacks_tip_height), - ), - self.index_block_hash, - ) + self.make_confirmed_microblocks_request(peer_host) } } } @@ -476,47 +491,44 @@ impl BlockDownloader { debug!("Event {} ({:?}, {:?} for block {}) is still waiting for a response", event_id, &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); pending_block_requests.insert(block_key, event_id); } - Some(http_response) => match http_response { - HttpResponseType::Block(_md, block) => { - if StacksBlockHeader::make_index_block_hash( - &block_key.consensus_hash, - &block.block_hash(), - ) != block_key.index_block_hash - { - info!("Invalid block from {:?} ({:?}): did not ask for block {}/{}", &block_key.neighbor, &block_key.data_url, block_key.consensus_hash, block.block_hash()); + Some(http_response) => { + match StacksHttpResponse::decode_block(http_response) { + Ok(block) => { + if StacksBlockHeader::make_index_block_hash( + &block_key.consensus_hash, + &block.block_hash(), + ) != block_key.index_block_hash + { + info!("Invalid block from {:?} ({:?}): did not ask for block {}/{}", 
&block_key.neighbor, &block_key.data_url, block_key.consensus_hash, block.block_hash()); + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } else { + // got the block + debug!( + "Got block {}: {}/{}", + &block_key.sortition_height, + &block_key.consensus_hash, + block.block_hash() + ); + self.blocks.insert(block_key, block); + } + } + Err(net_error::NotFoundError) => { + // remote peer didn't have the block + info!("Remote neighbor {:?} ({:?}) does not actually have block {} indexed at {} ({})", &block_key.neighbor, &block_key.data_url, block_key.sortition_height, &block_key.index_block_hash, &block_key.consensus_hash); + + // the fact that we asked this peer means that it's block inv indicated + // it was present, so the absence is the mark of a broken peer + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } + Err(e) => { + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); - } else { - // got the block - debug!( - "Got block {}: {}/{}", - &block_key.sortition_height, - &block_key.consensus_hash, - block.block_hash() - ); - self.blocks.insert(block_key, block); } } - // TODO: redirect? - HttpResponseType::NotFound(_, _) => { - // remote peer didn't have the block - info!("Remote neighbor {:?} ({:?}) does not actually have block {} indexed at {} ({})", &block_key.neighbor, &block_key.data_url, block_key.sortition_height, &block_key.index_block_hash, &block_key.consensus_hash); - - // the fact that we asked this peer means that it's block inv indicated - // it was present, so the absence is the mark of a broken peer - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - _ => { - // wrong message response - info!( - "Got bad HTTP response from {:?}: {:?}", - &block_key.data_url, &http_response - ); - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - }, + } } } } @@ -610,45 +622,45 @@ impl BlockDownloader { debug!("Event {} ({:?}, {:?} for microblocks built by {:?}) is still waiting for a response", event_id, &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); pending_microblock_requests.insert(rh_block_key, event_id); } - Some(http_response) => match http_response { - HttpResponseType::Microblocks(_md, microblocks) => { - if microblocks.len() == 0 { - // we wouldn't have asked for a 0-length stream - info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + Some(http_response) => { + match StacksHttpResponse::decode_microblocks(http_response) { + Ok(microblocks) => { + if microblocks.len() == 0 { + // we wouldn't have asked for a 0-length stream + info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + self.broken_peers.push(event_id); + self.broken_neighbors.push(block_key.neighbor.clone()); + } else { + // have microblocks (but we don't know yet if they're well-formed) + debug!( + "Got (tentative) microblocks {}: {}/{}-{}", + block_key.sortition_height, + &block_key.consensus_hash, + &block_key.index_block_hash, + microblocks[0].block_hash() + ); + self.microblocks.insert(block_key, microblocks); + } + } + Err(net_error::NotFoundError) => { + // remote peer didn't have the microblock, even though their blockinv said 
+ // they did. + info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); + + // the fact that we asked this peer means that it's block inv indicated + // it was present, so the absence is the mark of a broken peer. + // HOWEVER, there has been some bugs recently about nodes reporting + // invalid microblock streams as present, even though they are + // truly absent. Don't punish these peers with a ban; just don't + // talk to them for a while. + } + Err(e) => { + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); - } else { - // have microblocks (but we don't know yet if they're well-formed) - debug!( - "Got (tentative) microblocks {}: {}/{}-{}", - block_key.sortition_height, - &block_key.consensus_hash, - &block_key.index_block_hash, - microblocks[0].block_hash() - ); - self.microblocks.insert(block_key, microblocks); } } - // TODO: redirect? - HttpResponseType::NotFound(_, _) => { - // remote peer didn't have the microblock, even though their blockinv said - // they did. - info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); - - // the fact that we asked this peer means that it's block inv indicated - // it was present, so the absence is the mark of a broken peer. - // HOWEVER, there has been some bugs recently about nodes reporting - // invalid microblock streams as present, even though they are - // truly absent. Don't punish these peers with a ban; just don't - // talk to them for a while. - } - _ => { - // wrong message response - info!("Got bad HTTP response from {:?}", &block_key.data_url); - self.broken_peers.push(event_id); - self.broken_neighbors.push(block_key.neighbor.clone()); - } - }, + } } } } @@ -1894,47 +1906,6 @@ impl PeerNetwork { }) } - /// Send a (non-blocking) HTTP request to a remote peer. - /// Returns the event ID on success. - pub fn connect_or_send_http_request( - &mut self, - data_url: UrlString, - addr: SocketAddr, - request: HttpRequestType, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result { - PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { - PeerNetwork::with_http(network, |ref mut network, ref mut http| { - match http.connect_http( - network_state, - network, - data_url.clone(), - addr.clone(), - Some(request.clone()), - ) { - Ok(event_id) => Ok(event_id), - Err(net_error::AlreadyConnected(event_id, _)) => { - match http.get_conversation_and_socket(event_id) { - (Some(ref mut convo), Some(ref mut socket)) => { - convo.send_request(request)?; - HttpPeer::saturate_http_socket(socket, convo, mempool, chainstate)?; - Ok(event_id) - } - (_, _) => { - debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); - Err(net_error::PeerNotConnected) - } - } - } - Err(e) => { - return Err(e); - } - } - }) - }) - } - /// Start a request, given the list of request keys to consider. Use the given request_factory to /// create the HTTP request. Pops requests off the front of request_keys, and returns once it successfully /// sends out a request via the HTTP peer. 
Returns the event ID in the http peer that's @@ -1943,8 +1914,6 @@ impl PeerNetwork { network: &mut PeerNetwork, dns_lookups: &HashMap>>, requestables: &mut VecDeque, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> Option<(T, usize)> { loop { match requestables.pop_front() { @@ -1966,8 +1935,6 @@ impl PeerNetwork { requestable.get_url().clone(), addr.clone(), request, - mempool, - chainstate, ) { Ok(handle) => { debug!( @@ -2006,11 +1973,7 @@ impl PeerNetwork { } /// Start fetching blocks - pub fn block_getblocks_begin( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { + pub fn block_getblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { let mut priority = PeerNetwork::prioritize_requests(&downloader.blocks_to_try); @@ -2018,13 +1981,7 @@ impl PeerNetwork { for sortition_height in priority.drain(..) { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request( - network, - &downloader.dns_lookups, - keys, - mempool, - chainstate, - ) { + match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { Some((key, handle)) => { requests.insert(key.clone(), handle); } @@ -2054,11 +2011,7 @@ impl PeerNetwork { } /// Proceed to get microblocks - pub fn block_getmicroblocks_begin( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { + pub fn block_getmicroblocks_begin(&mut self) -> Result<(), net_error> { test_debug!("{:?}: block_getmicroblocks_begin", &self.local_peer); PeerNetwork::with_downloader_state(self, |ref mut network, ref mut downloader| { let mut priority = PeerNetwork::prioritize_requests(&downloader.microblocks_to_try); @@ -2066,13 +2019,7 @@ impl PeerNetwork { for sortition_height in priority.drain(..) 
{ match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request( - network, - &downloader.dns_lookups, - keys, - mempool, - chainstate, - ) { + match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { Some((key, handle)) => { requests.insert(key.clone(), handle); } @@ -2391,7 +2338,6 @@ impl PeerNetwork { pub fn download_blocks( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2493,13 +2439,13 @@ impl PeerNetwork { self.block_dns_lookups_try_finish(dns_client)?; } BlockDownloaderState::GetBlocksBegin => { - self.block_getblocks_begin(mempool, chainstate)?; + self.block_getblocks_begin()?; } BlockDownloaderState::GetBlocksFinish => { self.block_getblocks_try_finish()?; } BlockDownloaderState::GetMicroblocksBegin => { - self.block_getmicroblocks_begin(mempool, chainstate)?; + self.block_getmicroblocks_begin()?; } BlockDownloaderState::GetMicroblocksFinish => { self.block_getmicroblocks_try_finish()?; diff --git a/stackslib/src/net/http.rs b/stackslib/src/net/http.rs deleted file mode 100644 index 9859a4ef35..0000000000 --- a/stackslib/src/net/http.rs +++ /dev/null @@ -1,6722 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
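// Editor's note (not part of the diff): the download.rs hunks above drop the old
// HttpResponseType matching (provided by the http.rs being deleted below) in favor of
// typed request builders and decoders. A minimal sketch of that pattern, assuming only
// the items the diff itself imports (StacksHttpRequest, StacksHttpResponse,
// HttpRequestContents, PeerHost, net_error, StacksBlockId, StacksBlock); the function
// name here is illustrative, not new API.
fn fetch_block_sketch(
    peer_host: PeerHost,
    index_block_hash: StacksBlockId,
    http_response: StacksHttpResponse,
) -> Option<StacksBlock> {
    // build GET /v2/blocks/{index_block_hash}, as make_getblock_request() does above
    let _request = StacksHttpRequest::new_for_peer(
        peer_host,
        "GET".into(),
        format!("/v2/blocks/{}", &index_block_hash),
        HttpRequestContents::new(),
    )
    .expect("FATAL: failed to create HTTP request for infallible data");

    // decode the reply the same way the downloader hunk above does
    match StacksHttpResponse::decode_block(http_response) {
        Ok(block) => Some(block),
        // HTTP 404: the peer did not have the block after all
        Err(net_error::NotFoundError) => None,
        // anything else: malformed or unexpected payload
        Err(_) => None,
    }
}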
- -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::io::prelude::*; -use std::io::{Read, Write}; -use std::net::SocketAddr; -use std::str::FromStr; -use std::time::SystemTime; -use std::{fmt, io, mem, str}; - -use clarity::vm::ast::parser::v1::CLARITY_NAME_REGEX; -use clarity::vm::representations::{ - CONTRACT_NAME_REGEX_STRING, MAX_STRING_LEN, PRINCIPAL_DATA_REGEX_STRING, - STANDARD_PRINCIPAL_REGEX_STRING, -}; -use clarity::vm::types::{ - PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TraitIdentifier, - BOUND_VALUE_SERIALIZATION_HEX, -}; -use clarity::vm::{ClarityName, ContractName, Value}; -use libstackerdb::STACKERDB_MAX_CHUNK_SIZE; -use percent_encoding::percent_decode_str; -use regex::{Captures, Regex}; -use serde::{Deserialize, Serialize}; -use stacks_common::codec::{ - read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, - MAX_PAYLOAD_LEN, -}; -use stacks_common::deps_common::httparse; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; -use stacks_common::util::chunked_encoding::*; -use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; -use stacks_common::util::log; -use stacks_common::util::retry::{BoundReader, RetryReader}; -use url::{form_urlencoded, Url}; -use {serde_json, time}; - -use super::FeeRateEstimateRequestBody; -use crate::burnchains::{Address, Txid}; -use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::stacks::{ - StacksBlock, StacksBlockHeader, StacksMicroblock, StacksPublicKey, StacksTransaction, - TransactionPayload, -}; -use crate::net::atlas::Attachment; -use crate::net::Error::ClarityError; -use crate::net::{ - CallReadOnlyRequestBody, ClientError, Error as net_error, ExtendedStacksHeader, - GetAttachmentResponse, GetAttachmentsInvResponse, HttpContentType, HttpRequestMetadata, - HttpRequestPreamble, HttpRequestType, HttpResponseMetadata, HttpResponsePreamble, - HttpResponseType, HttpVersion, MemPoolSyncData, MessageSequence, NeighborAddress, PeerAddress, - PeerHost, PostTransactionRequestBody, ProtocolFamily, StackerDBChunkData, StacksHttpMessage, - StacksHttpPreamble, TipRequest, UnconfirmedTransactionResponse, UnconfirmedTransactionStatus, - HTTP_PREAMBLE_MAX_ENCODED_SIZE, HTTP_PREAMBLE_MAX_NUM_HEADERS, HTTP_REQUEST_ID_RESERVED, - MAX_HEADERS, MAX_MICROBLOCKS_UNCONFIRMED, -}; - -lazy_static! 
{ - static ref PATH_GETINFO: Regex = Regex::new(r#"^/v2/info$"#).unwrap(); - static ref PATH_GETPOXINFO: Regex = Regex::new(r#"^/v2/pox$"#).unwrap(); - static ref PATH_GETNEIGHBORS: Regex = Regex::new(r#"^/v2/neighbors$"#).unwrap(); - static ref PATH_GETHEADERS: Regex = Regex::new(r#"^/v2/headers/([0-9]+)$"#).unwrap(); - static ref PATH_GETBLOCK: Regex = Regex::new(r#"^/v2/blocks/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_INDEXED: Regex = - Regex::new(r#"^/v2/microblocks/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_CONFIRMED: Regex = - Regex::new(r#"^/v2/microblocks/confirmed/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_GETMICROBLOCKS_UNCONFIRMED: Regex = - Regex::new(r#"^/v2/microblocks/unconfirmed/([0-9a-f]{64})/([0-9]{1,5})$"#).unwrap(); - static ref PATH_GETTRANSACTION_UNCONFIRMED: Regex = - Regex::new(r#"^/v2/transactions/unconfirmed/([0-9a-f]{64})$"#).unwrap(); - static ref PATH_POSTTRANSACTION: Regex = Regex::new(r#"^/v2/transactions$"#).unwrap(); - static ref PATH_POST_FEE_RATE_ESIMATE: Regex = Regex::new(r#"^/v2/fees/transaction$"#).unwrap(); - static ref PATH_POSTBLOCK: Regex = Regex::new(r#"^/v2/blocks/upload/([0-9a-f]{40})$"#).unwrap(); - static ref PATH_POSTMICROBLOCK: Regex = Regex::new(r#"^/v2/microblocks$"#).unwrap(); - static ref PATH_GET_ACCOUNT: Regex = Regex::new(&format!( - "^/v2/accounts/(?P{})$", - *PRINCIPAL_DATA_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_DATA_VAR: Regex = Regex::new(&format!( - "^/v2/data_var/(?P
<principal>
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONSTANT_VAL: Regex = Regex::new(&format!( - "^/v2/constant_val/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_MAP_ENTRY: Regex = Regex::new(&format!( - "^/v2/map_entry/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_POST_CALL_READ_ONLY: Regex = Regex::new(&format!( - "^/v2/contracts/call-read/(?P
{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONTRACT_SRC: Regex = Regex::new(&format!( - "^/v2/contracts/source/(?P
{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_IS_TRAIT_IMPLEMENTED: Regex = Regex::new(&format!( - "^/v2/traits/(?P
{})/(?P{})/(?P{})/(?P{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING, *CLARITY_NAME_REGEX - )) - .unwrap(); - static ref PATH_GET_CONTRACT_ABI: Regex = Regex::new(&format!( - "^/v2/contracts/interface/(?P
{})/(?P{})$", - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )) - .unwrap(); - static ref PATH_GET_TRANSFER_COST: Regex = Regex::new("^/v2/fees/transfer$").unwrap(); - static ref PATH_GET_ATTACHMENTS_INV: Regex = Regex::new("^/v2/attachments/inv$").unwrap(); - static ref PATH_GET_ATTACHMENT: Regex = - Regex::new(r#"^/v2/attachments/([0-9a-f]{40})$"#).unwrap(); - static ref PATH_POST_MEMPOOL_QUERY: Regex = - Regex::new(r#"^/v2/mempool/query$"#).unwrap(); - static ref PATH_GET_STACKERDB_METADATA: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_GET_STACKERDB_CHUNK: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_GET_STACKERDB_VERSIONED_CHUNK: Regex = - Regex::new(&format!( - r#"^/v2/stackerdb/(?P
{})/(?P{})/(?P[0-9]+)/(?P[0-9]+)$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_POST_STACKERDB_CHUNK: Regex = - Regex::new(&format!( - r#"/v2/stackerdb/(?P
{})/(?P{})/chunks$"#, - *STANDARD_PRINCIPAL_REGEX_STRING, *CONTRACT_NAME_REGEX_STRING - )).unwrap(); - static ref PATH_OPTIONS_WILDCARD: Regex = Regex::new("^/v2/.{0,4096}$").unwrap(); -} - -/// HTTP headers that we really care about -#[derive(Debug, Clone, PartialEq)] -pub(crate) enum HttpReservedHeader { - ContentLength(u32), - ContentType(HttpContentType), - XRequestID(u32), - Host(PeerHost), - CanonicalStacksTipHeight(u64), -} - -/// Stacks block accepted struct -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct StacksBlockAcceptedData { - pub stacks_block_id: StacksBlockId, - pub accepted: bool, -} - -impl FromStr for PeerHost { - type Err = net_error; - - fn from_str(header: &str) -> Result { - // we're looser than the RFC allows for DNS names -- anything that doesn't parse to an IP - // address will be parsed to a DNS name. - // try as IP:port - match header.parse::() { - Ok(socketaddr) => Ok(PeerHost::IP( - PeerAddress::from_socketaddr(&socketaddr), - socketaddr.port(), - )), - Err(_) => { - // maybe missing :port - let hostport = format!("{}:80", header); - match hostport.parse::() { - Ok(socketaddr) => Ok(PeerHost::IP( - PeerAddress::from_socketaddr(&socketaddr), - socketaddr.port(), - )), - Err(_) => { - // try as DNS-name:port - let host; - let port; - let parts: Vec<&str> = header.split(":").collect(); - if parts.len() == 0 { - return Err(net_error::DeserializeError( - "Failed to parse PeerHost: no parts".to_string(), - )); - } else if parts.len() == 1 { - // no port - host = Some(parts[0].to_string()); - port = Some(80); - } else { - let np = parts.len(); - if parts[np - 1].chars().all(char::is_numeric) { - // ends in :port - let host_str = parts[0..np - 1].join(":"); - if host_str.len() == 0 { - return Err(net_error::DeserializeError( - "Empty host".to_string(), - )); - } - host = Some(host_str); - - let port_res = parts[np - 1].parse::(); - port = match port_res { - Ok(p) => Some(p), - Err(_) => { - return Err(net_error::DeserializeError( - "Failed to parse PeerHost: invalid port".to_string(), - )); - } - }; - } else { - // only host - host = Some(header.to_string()); - port = Some(80); - } - } - - match (host, port) { - (Some(h), Some(p)) => Ok(PeerHost::DNS(h, p)), - (_, _) => Err(net_error::DeserializeError( - "Failed to parse PeerHost: failed to extract host and/or port" - .to_string(), - )), // I don't think this is reachable - } - } - } - } - } - } -} - -impl HttpReservedHeader { - pub fn is_reserved(header: &str) -> bool { - let hdr = header.to_string(); - match hdr.as_str() { - "content-length" - | "content-type" - | "x-request-id" - | "host" - | "x-canonical-stacks-tip-height" => true, - _ => false, - } - } - - pub fn try_from_str(header: &str, value: &str) -> Option { - let hdr = header.to_string().to_lowercase(); - match hdr.as_str() { - "content-length" => match value.parse::() { - Ok(cl) => Some(HttpReservedHeader::ContentLength(cl)), - Err(_) => None, - }, - "content-type" => match value.parse::() { - Ok(ct) => Some(HttpReservedHeader::ContentType(ct)), - Err(_) => None, - }, - "x-request-id" => match value.parse::() { - Ok(rid) => Some(HttpReservedHeader::XRequestID(rid)), - Err(_) => None, - }, - "host" => match value.parse::() { - Ok(ph) => Some(HttpReservedHeader::Host(ph)), - Err(_) => None, - }, - "x-canonical-stacks-tip-height" => match value.parse::() { - Ok(h) => Some(HttpReservedHeader::CanonicalStacksTipHeight(h)), - Err(_) => None, - }, - _ => None, - } - } -} - -impl HttpRequestPreamble { - pub fn new( - version: HttpVersion, 
- verb: String, - path: String, - hostname: String, - port: u16, - keep_alive: bool, - ) -> HttpRequestPreamble { - HttpRequestPreamble { - version: version, - verb: verb, - path: path, - host: PeerHost::from_host_port(hostname, port), - content_type: None, - content_length: None, - keep_alive: keep_alive, - headers: HashMap::new(), - } - } - - pub fn new_serialized( - fd: &mut W, - version: &HttpVersion, - verb: &str, - path: &str, - host: &PeerHost, - keep_alive: bool, - content_length: Option, - content_type: Option<&HttpContentType>, - mut write_headers: F, - ) -> Result<(), codec_error> - where - F: FnMut(&mut W) -> Result<(), codec_error>, - { - // "$verb $path HTTP/1.${version}\r\n" - fd.write_all(verb.as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(" ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(path.as_bytes()) - .map_err(codec_error::WriteError)?; - - match *version { - HttpVersion::Http10 => { - fd.write_all(" HTTP/1.0\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - HttpVersion::Http11 => { - fd.write_all(" HTTP/1.1\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - - // "User-Agent: $agent\r\nHost: $host\r\n" - fd.write_all("User-Agent: stacks/2.0\r\nHost: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", host).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - - // content-type - match content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(c.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => {} - } - - // content-length - match content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => {} - } - - match *version { - HttpVersion::Http10 => { - if keep_alive { - fd.write_all("Connection: keep-alive\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - HttpVersion::Http11 => { - if !keep_alive { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - } - - // headers - write_headers(fd)?; - - // end-of-headers - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - Ok(()) - } - - #[cfg(test)] - pub fn from_headers( - version: HttpVersion, - verb: String, - path: String, - hostname: String, - port: u16, - keep_alive: bool, - mut keys: Vec, - values: Vec, - ) -> HttpRequestPreamble { - assert_eq!(keys.len(), values.len()); - let mut req = HttpRequestPreamble::new(version, verb, path, hostname, port, keep_alive); - - for (k, v) in keys.drain(..).zip(values) { - req.add_header(k, v); - } - req - } - - pub fn add_header(&mut self, key: String, value: String) -> () { - let hdr = key.to_lowercase(); - if HttpReservedHeader::is_reserved(&hdr) { - match HttpReservedHeader::try_from_str(&hdr, &value) { - Some(h) => match h { - HttpReservedHeader::Host(ph) => { - self.host = ph; - return; - } - HttpReservedHeader::ContentType(ct) => { - self.content_type = Some(ct); - return; - } - _ => {} // can just fall through and insert - }, - None => { - return; - } - } - } - - self.headers.insert(hdr, value); - } - - /// Content-Length for this request. 
- /// If there is no valid Content-Length header, then - /// the Content-Length is 0 - pub fn get_content_length(&self) -> u32 { - self.content_length.unwrap_or(0) - } - - /// Set the content-length for this request - pub fn set_content_length(&mut self, len: u32) -> () { - self.content_length = Some(len); - } - - /// Set the content-type for this request - pub fn set_content_type(&mut self, content_type: HttpContentType) -> () { - self.content_type = Some(content_type) - } -} - -fn empty_headers(_fd: &mut W) -> Result<(), codec_error> { - Ok(()) -} - -fn stacks_height_headers( - fd: &mut W, - md: &HttpRequestMetadata, -) -> Result<(), codec_error> { - match md.canonical_stacks_tip_height { - Some(height) => { - fd.write_all(format!("X-Canonical-Stacks-Tip-Height: {}\r\n", height).as_bytes()) - .map_err(codec_error::WriteError)?; - } - _ => {} - } - Ok(()) -} - -fn keep_alive_headers(fd: &mut W, md: &HttpResponseMetadata) -> Result<(), codec_error> { - match md.client_version { - HttpVersion::Http10 => { - // client expects explicit keep-alive - if md.client_keep_alive { - fd.write_all("Connection: keep-alive\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } else { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - HttpVersion::Http11 => { - // only need "connection: close" if we're explicitly _not_ doing keep-alive - if !md.client_keep_alive { - fd.write_all("Connection: close\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - } - match md.canonical_stacks_tip_height { - Some(height) => { - fd.write_all(format!("X-Canonical-Stacks-Tip-Height: {}\r\n", height).as_bytes()) - .map_err(codec_error::WriteError)?; - } - _ => {} - } - Ok(()) -} - -fn write_headers( - fd: &mut W, - headers: &HashMap, -) -> Result<(), codec_error> { - for (ref key, ref value) in headers.iter() { - fd.write_all(key.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(": ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(value.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - } - Ok(()) -} - -fn default_accept_header() -> String { - format!( - "Accept: {}, {}, {}", - HttpContentType::Bytes, - HttpContentType::JSON, - HttpContentType::Text - ) -} - -/// Read from a stream until we see '\r\n\r\n', with the purpose of reading an HTTP preamble. -/// It's gonna be important here that R does some bufferring, since this reads byte by byte. -/// EOF if we read 0 bytes. 
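
Editorial aside (not part of the diff): `read_to_crlf2`, defined just below, reads one byte at a time until it sees the blank line that terminates an HTTP preamble, and the `consensus_deserialize` impls then hand that buffer to `httparse`. A simplified, self-contained sketch of the flow, assuming the `httparse` crate (which the removed code already depends on) and a hypothetical 4096-byte cap in place of `HTTP_PREAMBLE_MAX_ENCODED_SIZE`:

```rust
use std::io::{Cursor, Read};

// Read byte-by-byte until "\r\n\r\n", bounding the total size (sketch of read_to_crlf2).
fn read_preamble<R: Read>(fd: &mut R, max: usize) -> std::io::Result<Vec<u8>> {
    let mut buf = Vec::with_capacity(max);
    let mut b = [0u8; 1];
    while buf.len() < max {
        fd.read_exact(&mut b)?;
        buf.push(b[0]);
        if buf.ends_with(b"\r\n\r\n") {
            break;
        }
    }
    Ok(buf)
}

fn main() -> std::io::Result<()> {
    let mut stream = Cursor::new(b"GET /v2/info HTTP/1.1\r\nHost: local\r\n\r\nBODY".to_vec());
    let preamble = read_preamble(&mut stream, 4096)?;

    // Hand the buffered preamble to httparse, as the preamble deserializers do.
    let mut headers = [httparse::EMPTY_HEADER; 32];
    let mut req = httparse::Request::new(&mut headers);
    match req.parse(&preamble) {
        Ok(httparse::Status::Complete(_body_offset)) => {
            assert_eq!(req.method, Some("GET"));
            assert_eq!(req.path, Some("/v2/info"));
            assert_eq!(req.version, Some(1)); // 1 => HTTP/1.1
        }
        Ok(httparse::Status::Partial) => panic!("incomplete preamble"),
        Err(e) => panic!("parse error: {:?}", e),
    }
    Ok(())
}
```

The removed code additionally rejects oversized, non-ASCII, and duplicate headers, and maps the `Partial` case to an underflow error so the caller can wait for more bytes.
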
-fn read_to_crlf2(fd: &mut R) -> Result, codec_error> { - let mut ret = Vec::with_capacity(HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize); - while ret.len() < HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - let mut b = [0u8]; - fd.read_exact(&mut b).map_err(codec_error::ReadError)?; - ret.push(b[0]); - - if ret.len() > 4 { - let last_4 = &ret[(ret.len() - 4)..ret.len()]; - - // '\r\n\r\n' is [0x0d, 0x0a, 0x0d, 0x0a] - if last_4 == &[0x0d, 0x0a, 0x0d, 0x0a] { - break; - } - } - } - Ok(ret) -} - -impl StacksMessageCodec for HttpRequestPreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - HttpRequestPreamble::new_serialized( - fd, - &self.version, - &self.verb, - &self.path, - &self.host, - self.keep_alive, - self.content_length.clone(), - self.content_type.as_ref(), - |ref mut fd| write_headers(fd, &self.headers), - ) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // realistically, there won't be more than HTTP_PREAMBLE_MAX_NUM_HEADERS headers - let mut headers = [httparse::EMPTY_HEADER; HTTP_PREAMBLE_MAX_NUM_HEADERS]; - let mut req = httparse::Request::new(&mut headers); - - let buf_read = read_to_crlf2(fd)?; - - // consume request - match req.parse(&buf_read).map_err(|e| { - codec_error::DeserializeError(format!("Failed to parse HTTP request: {:?}", &e)) - })? { - httparse::Status::Partial => { - // partial - return Err(codec_error::UnderflowError( - "Not enough bytes to form a HTTP request preamble".to_string(), - )); - } - httparse::Status::Complete(_) => { - // consumed all headers. body_offset points to the start of the request body - let version = match req - .version - .ok_or(codec_error::DeserializeError("No HTTP version".to_string()))? - { - 0 => HttpVersion::Http10, - 1 => HttpVersion::Http11, - _ => { - return Err(codec_error::DeserializeError( - "Invalid HTTP version".to_string(), - )); - } - }; - - let verb = req - .method - .ok_or(codec_error::DeserializeError("No HTTP method".to_string()))? - .to_string(); - let path = req - .path - .ok_or(codec_error::DeserializeError("No HTTP path".to_string()))? 
- .to_string(); - - let mut peerhost = None; - let mut content_type = None; - let mut content_length = None; - let mut keep_alive = match version { - HttpVersion::Http10 => false, - HttpVersion::Http11 => true, - }; - - let mut headers: HashMap = HashMap::new(); - let mut all_headers: HashSet = HashSet::new(); - - for i in 0..req.headers.len() { - let value = String::from_utf8(req.headers[i].value.to_vec()).map_err(|_e| { - codec_error::DeserializeError( - "Invalid HTTP header value: not utf-8".to_string(), - ) - })?; - if !value.is_ascii() { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is not ASCII-US" - ))); - } - if value.len() > HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is too big" - ))); - } - - let key = req.headers[i].name.to_string().to_lowercase(); - if headers.contains_key(&key) || all_headers.contains(&key) { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: duplicate header \"{}\"", - key - ))); - } - all_headers.insert(key.clone()); - - if key == "host" { - peerhost = match value.parse::() { - Ok(ph) => Some(ph), - Err(_) => None, - }; - } else if key == "content-type" { - // parse - let ctype = value.to_lowercase().parse::()?; - content_type = Some(ctype); - } else if key == "content-length" { - // parse - content_length = match value.parse::() { - Ok(len) => Some(len), - Err(_) => None, - } - } else if key == "connection" { - // parse - if value.to_lowercase() == "close" { - keep_alive = false; - } else if value.to_lowercase() == "keep-alive" { - keep_alive = true; - } else { - return Err(codec_error::DeserializeError( - "Inavlid HTTP request: invalid Connection: header".to_string(), - )); - } - } else { - headers.insert(key, value); - } - } - - if peerhost.is_none() { - return Err(codec_error::DeserializeError( - "Missing Host header".to_string(), - )); - }; - - Ok(HttpRequestPreamble { - version: version, - verb: verb, - path: path, - host: peerhost.unwrap(), - content_type: content_type, - content_length: content_length, - keep_alive: keep_alive, - headers: headers, - }) - } - } - } -} - -impl HttpResponsePreamble { - pub fn new( - status_code: u16, - reason: String, - content_length_opt: Option, - content_type: HttpContentType, - keep_alive: bool, - request_id: u32, - ) -> HttpResponsePreamble { - HttpResponsePreamble { - status_code: status_code, - reason: reason, - keep_alive: keep_alive, - content_length: content_length_opt, - content_type: content_type, - request_id: request_id, - headers: HashMap::new(), - } - } - - pub fn ok_JSON_from_md( - fd: &mut W, - md: &HttpResponseMetadata, - ) -> Result<(), codec_error> { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - ) - } - - pub fn new_serialized( - fd: &mut W, - status_code: u16, - reason: &str, - content_length: Option, - content_type: &HttpContentType, - request_id: u32, - mut write_headers: F, - ) -> Result<(), codec_error> - where - F: FnMut(&mut W) -> Result<(), codec_error>, - { - fd.write_all("HTTP/1.1 ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{} {}\r\n", status_code, reason).as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("Server: stacks/2.0\r\nDate: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(rfc7231_now().as_bytes()) - 
.map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Origin: *".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Headers: origin, content-type".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nAccess-Control-Allow-Methods: POST, GET, OPTIONS".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\nContent-Type: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(content_type.as_str().as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - - match content_length { - Some(len) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}", len).as_bytes()) - .map_err(codec_error::WriteError)?; - } - None => { - fd.write_all("Transfer-Encoding: chunked".as_bytes()) - .map_err(codec_error::WriteError)?; - } - } - - fd.write_all("\r\nX-Request-Id: ".as_bytes()) - .map_err(codec_error::WriteError)?; - fd.write_all(format!("{}\r\n", request_id).as_bytes()) - .map_err(codec_error::WriteError)?; - - write_headers(fd)?; - - fd.write_all("\r\n".as_bytes()) - .map_err(codec_error::WriteError)?; - Ok(()) - } - - pub fn new_error( - status_code: u16, - request_id: u32, - error_message: Option, - ) -> HttpResponsePreamble { - HttpResponsePreamble { - status_code: status_code, - keep_alive: true, - reason: HttpResponseType::error_reason(status_code).to_string(), - content_length: Some(error_message.unwrap_or("".to_string()).len() as u32), - content_type: HttpContentType::Text, - request_id: request_id, - headers: HashMap::new(), - } - } - - #[cfg(test)] - pub fn from_headers( - status_code: u16, - reason: String, - keep_alive: bool, - content_length: Option, - content_type: HttpContentType, - request_id: u32, - mut keys: Vec, - values: Vec, - ) -> HttpResponsePreamble { - assert_eq!(keys.len(), values.len()); - let mut res = HttpResponsePreamble::new( - status_code, - reason, - content_length, - content_type, - keep_alive, - request_id, - ); - - for (k, v) in keys.drain(..).zip(values) { - res.add_header(k, v); - } - res.set_request_id(request_id); - res - } - - pub fn add_header(&mut self, key: String, value: String) -> () { - let hdr = key.to_lowercase(); - if HttpReservedHeader::is_reserved(&hdr) { - match HttpReservedHeader::try_from_str(&hdr, &value) { - Some(h) => match h { - HttpReservedHeader::XRequestID(rid) => { - self.request_id = rid; - return; - } - HttpReservedHeader::ContentLength(cl) => { - self.content_length = Some(cl); - return; - } - HttpReservedHeader::ContentType(ct) => { - self.content_type = ct; - return; - } - _ => {} // can just fall through and insert - }, - None => { - return; - } - } - } - - self.headers.insert(hdr, value); - } - - pub fn set_request_id(&mut self, request_id: u32) -> () { - self.request_id = request_id; - } - - pub fn add_CORS_headers(&mut self) -> () { - self.headers - .insert("Access-Control-Allow-Origin".to_string(), "*".to_string()); - } - - // do we have Transfer-Encoding: chunked? 
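
Editorial aside (not part of the diff): in the response serializer above, the body-framing header is chosen from the optional content length, and `is_chunked()` just below is the inverse of that same check. A tiny sketch of the rule:

```rust
// Sketch: a known length gets "Content-Length"; an unknown length falls back to
// chunked transfer encoding, which is also what is_chunked() keys off of.
fn framing_header(content_length: Option<u32>) -> String {
    match content_length {
        Some(len) => format!("Content-Length: {}\r\n", len),
        None => "Transfer-Encoding: chunked\r\n".to_string(),
    }
}

fn main() {
    assert_eq!(framing_header(Some(42)), "Content-Length: 42\r\n");
    assert_eq!(framing_header(None), "Transfer-Encoding: chunked\r\n");
}
```
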
- pub fn is_chunked(&self) -> bool { - self.content_length.is_none() - } -} - -/// Get an RFC 7231 date that represents the current time -fn rfc7231_now() -> String { - let now = time::PrimitiveDateTime::from(SystemTime::now()); - now.format("%a, %b %-d %-Y %-H:%M:%S GMT") -} - -impl StacksMessageCodec for HttpResponsePreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - HttpResponsePreamble::new_serialized( - fd, - self.status_code, - &self.reason, - self.content_length, - &self.content_type, - self.request_id, - |ref mut fd| write_headers(fd, &self.headers), - ) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // realistically, there won't be more than HTTP_PREAMBLE_MAX_NUM_HEADERS headers - let mut headers = [httparse::EMPTY_HEADER; HTTP_PREAMBLE_MAX_NUM_HEADERS]; - let mut resp = httparse::Response::new(&mut headers); - - let buf_read = read_to_crlf2(fd)?; - - // consume response - match resp.parse(&buf_read).map_err(|e| { - codec_error::DeserializeError(format!("Failed to parse HTTP response: {:?}", &e)) - })? { - httparse::Status::Partial => { - // try again - return Err(codec_error::UnderflowError( - "Not enough bytes to form a HTTP response preamble".to_string(), - )); - } - httparse::Status::Complete(_) => { - // consumed all headers. body_offset points to the start of the response body - let _ = resp - .version - .ok_or(codec_error::DeserializeError("No HTTP version".to_string()))?; - let status_code = resp.code.ok_or(codec_error::DeserializeError( - "No HTTP status code".to_string(), - ))?; - let reason = resp - .reason - .ok_or(codec_error::DeserializeError( - "No HTTP status reason".to_string(), - ))? - .to_string(); - - let mut headers: HashMap = HashMap::new(); - let mut all_headers: HashSet = HashSet::new(); - - let mut content_type = None; - let mut content_length = None; - let mut request_id = None; - let mut chunked_encoding = false; - let mut keep_alive = true; - - for i in 0..resp.headers.len() { - let value = - String::from_utf8(resp.headers[i].value.to_vec()).map_err(|_e| { - codec_error::DeserializeError( - "Invalid HTTP header value: not utf-8".to_string(), - ) - })?; - if !value.is_ascii() { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is not ASCII-US" - ))); - } - if value.len() > HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: header value is too big" - ))); - } - - let key = resp.headers[i].name.to_string().to_lowercase(); - if headers.contains_key(&key) || all_headers.contains(&key) { - return Err(codec_error::DeserializeError(format!( - "Invalid HTTP request: duplicate header \"{}\"", - key - ))); - } - all_headers.insert(key.clone()); - - if key == "content-type" { - let ctype = value.to_lowercase().parse::()?; - content_type = Some(ctype); - } else if key == "content-length" { - let len = value.parse::().map_err(|_e| { - codec_error::DeserializeError( - "Invalid Content-Length header value".to_string(), - ) - })?; - content_length = Some(len); - } else if key == "x-request-id" { - match value.parse::() { - Ok(i) => { - request_id = Some(i); - } - Err(_) => {} - } - } else if key == "connection" { - // parse - if value.to_lowercase() == "close" { - keep_alive = false; - } else if value.to_lowercase() == "keep-alive" { - keep_alive = true; - } else { - return Err(codec_error::DeserializeError( - "Inavlid HTTP request: invalid Connection: header".to_string(), - )); - } - } else if key == 
"transfer-encoding" { - if value.to_lowercase() == "chunked" { - chunked_encoding = true; - } else { - return Err(codec_error::DeserializeError(format!( - "Unsupported transfer-encoding '{}'", - value - ))); - } - } else { - headers.insert(key, value); - } - } - - if content_length.is_some() && chunked_encoding { - return Err(codec_error::DeserializeError( - "Invalid HTTP response: incompatible transfer-encoding and content-length" - .to_string(), - )); - } - - if content_type.is_none() || (content_length.is_none() && !chunked_encoding) { - return Err(codec_error::DeserializeError( - "Invalid HTTP response: missing Content-Type, Content-Length".to_string(), - )); - } - - Ok(HttpResponsePreamble { - status_code: status_code, - reason: reason, - keep_alive: keep_alive, - content_type: content_type.unwrap(), - content_length: content_length, - request_id: request_id.unwrap_or(HTTP_REQUEST_ID_RESERVED), - headers: headers, - }) - } - } - } -} - -impl HttpRequestType { - fn try_parse( - protocol: &mut StacksHttp, - verb: &str, - regex: &Regex, - preamble: &HttpRequestPreamble, - path: &str, - query: Option<&str>, - fd: &mut R, - parser: F, - ) -> Result, net_error> - where - F: Fn( - &mut StacksHttp, - &HttpRequestPreamble, - &Captures, - Option<&str>, - &mut R, - ) -> Result, - { - if preamble.verb == verb { - if let Some(ref captures) = regex.captures(path) { - let payload = parser(protocol, preamble, captures, query, fd)?; - return Ok(Some(payload)); - } - } - - Ok(None) - } - - pub fn parse( - protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - // TODO: make this static somehow - let REQUEST_METHODS: &[( - &str, - &Regex, - &dyn Fn( - &mut StacksHttp, - &HttpRequestPreamble, - &Captures, - Option<&str>, - &mut R, - ) -> Result, - )] = &[ - ("GET", &PATH_GETINFO, &HttpRequestType::parse_getinfo), - ("GET", &PATH_GETPOXINFO, &HttpRequestType::parse_getpoxinfo), - ( - "GET", - &PATH_GETNEIGHBORS, - &HttpRequestType::parse_getneighbors, - ), - ("GET", &PATH_GETHEADERS, &HttpRequestType::parse_getheaders), - ("GET", &PATH_GETBLOCK, &HttpRequestType::parse_getblock), - ( - "GET", - &PATH_GETMICROBLOCKS_INDEXED, - &HttpRequestType::parse_getmicroblocks_indexed, - ), - ( - "GET", - &PATH_GETMICROBLOCKS_CONFIRMED, - &HttpRequestType::parse_getmicroblocks_confirmed, - ), - ( - "GET", - &PATH_GETMICROBLOCKS_UNCONFIRMED, - &HttpRequestType::parse_getmicroblocks_unconfirmed, - ), - ( - "GET", - &PATH_GETTRANSACTION_UNCONFIRMED, - &HttpRequestType::parse_gettransaction_unconfirmed, - ), - ( - "POST", - &PATH_POST_FEE_RATE_ESIMATE, - &HttpRequestType::parse_post_fee_rate_estimate, - ), - ( - "POST", - &PATH_POSTTRANSACTION, - &HttpRequestType::parse_posttransaction, - ), - ("POST", &PATH_POSTBLOCK, &HttpRequestType::parse_postblock), - ( - "POST", - &PATH_POSTMICROBLOCK, - &HttpRequestType::parse_postmicroblock, - ), - ( - "GET", - &PATH_GET_ACCOUNT, - &HttpRequestType::parse_get_account, - ), - ( - "GET", - &PATH_GET_DATA_VAR, - &HttpRequestType::parse_get_data_var, - ), - ( - "GET", - &PATH_GET_CONSTANT_VAL, - &HttpRequestType::parse_get_constant_val, - ), - ( - "POST", - &PATH_GET_MAP_ENTRY, - &HttpRequestType::parse_get_map_entry, - ), - ( - "GET", - &PATH_GET_TRANSFER_COST, - &HttpRequestType::parse_get_transfer_cost, - ), - ( - "GET", - &PATH_GET_CONTRACT_SRC, - &HttpRequestType::parse_get_contract_source, - ), - ( - "GET", - &PATH_GET_IS_TRAIT_IMPLEMENTED, - &HttpRequestType::parse_get_is_trait_implemented, - ), - ( - "GET", - &PATH_GET_CONTRACT_ABI, - 
&HttpRequestType::parse_get_contract_abi, - ), - ( - "POST", - &PATH_POST_CALL_READ_ONLY, - &HttpRequestType::parse_call_read_only, - ), - ( - "OPTIONS", - &PATH_OPTIONS_WILDCARD, - &HttpRequestType::parse_options_preflight, - ), - ( - "GET", - &PATH_GET_ATTACHMENT, - &HttpRequestType::parse_get_attachment, - ), - ( - "GET", - &PATH_GET_ATTACHMENTS_INV, - &HttpRequestType::parse_get_attachments_inv, - ), - ( - "POST", - &PATH_POST_MEMPOOL_QUERY, - &HttpRequestType::parse_post_mempool_query, - ), - ( - "GET", - &PATH_GET_STACKERDB_METADATA, - &HttpRequestType::parse_get_stackerdb_metadata, - ), - ( - "GET", - &PATH_GET_STACKERDB_CHUNK, - &HttpRequestType::parse_get_stackerdb_chunk, - ), - ( - "GET", - &PATH_GET_STACKERDB_VERSIONED_CHUNK, - &HttpRequestType::parse_get_stackerdb_versioned_chunk, - ), - ( - "POST", - &PATH_POST_STACKERDB_CHUNK, - &HttpRequestType::parse_post_stackerdb_chunk, - ), - ]; - - // use url::Url to parse path and query string - // Url will refuse to parse just a path, so create a dummy URL - let local_url = format!("http://local{}", &preamble.path); - let url = Url::parse(&local_url).map_err(|_e| { - net_error::DeserializeError("Http request path could not be parsed".to_string()) - })?; - - let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { - net_error::DeserializeError( - "Http request path could not be parsed as UTF-8".to_string(), - ) - })?; - - for (verb, regex, parser) in REQUEST_METHODS.iter() { - match HttpRequestType::try_parse( - protocol, - verb, - regex, - preamble, - &decoded_path, - url.query(), - fd, - parser, - )? { - Some(request) => { - let query = if let Some(q) = url.query() { - format!("?{}", q) - } else { - "".to_string() - }; - info!("Handle HTTPRequest"; "verb" => %verb, "peer_addr" => %protocol.peer_addr, "path" => %decoded_path, "query" => %query); - return Ok(request); - } - None => { - continue; - } - } - } - - let _path = preamble.path.clone(); - test_debug!("Failed to parse '{}'", &_path); - Err(net_error::ClientError(ClientError::NotFound( - preamble.path.clone(), - ))) - } - - fn parse_getinfo( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetInfo".to_string(), - )); - } - Ok(HttpRequestType::GetInfo( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - fn parse_getpoxinfo( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetPoxInfo".to_string(), - )); - } - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetPoxInfo( - HttpRequestMetadata::from_preamble(preamble), - tip, - )) - } - - fn parse_getneighbors( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetNeighbors".to_string(), - )); - } - - Ok(HttpRequestType::GetNeighbors( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - fn parse_get_transfer_cost( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - 
_query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetTransferCost".to_string(), - )); - } - - Ok(HttpRequestType::GetTransferCost( - HttpRequestMetadata::from_preamble(preamble), - )) - } - - /// Check whether the given option query string sets proof=0 (setting proof to false). - /// Defaults to true. - fn get_proof_query(query: Option<&str>) -> bool { - let no_proof = if let Some(query_string) = query { - form_urlencoded::parse(query_string.as_bytes()) - .find(|(key, _v)| key == "proof") - .map(|(_k, value)| value == "0") - .unwrap_or(false) - } else { - false - }; - - !no_proof - } - - /// get the chain tip optional query argument (`tip`) - /// Take the first value we can parse. - fn get_chain_tip_query(query: Option<&str>) -> TipRequest { - match query { - Some(query_string) => { - for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { - if key != "tip" { - continue; - } - - if value == "latest" { - return TipRequest::UseLatestUnconfirmedTip; - } - if let Ok(tip) = StacksBlockId::from_hex(&value) { - return TipRequest::SpecificTip(tip); - } - } - return TipRequest::UseLatestAnchoredTip; - } - None => { - return TipRequest::UseLatestAnchoredTip; - } - } - } - - /// get the mempool page ID optional query argument (`page_id`) - /// Take the first value we can parse. - fn get_mempool_page_id_query(query: Option<&str>) -> Option { - match query { - Some(query_string) => { - for (key, value) in form_urlencoded::parse(query_string.as_bytes()) { - if key != "page_id" { - continue; - } - if let Ok(page_id) = Txid::from_hex(&value) { - return Some(page_id); - } - } - return None; - } - None => { - return None; - } - } - } - - fn parse_get_account( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetAccount".to_string(), - )); - } - - let principal = PrincipalData::parse(&captures["principal"]).map_err(|_e| { - net_error::DeserializeError("Failed to parse account principal".into()) - })?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetAccount( - HttpRequestMetadata::from_preamble(preamble), - principal, - tip, - with_proof, - )) - } - - fn parse_get_data_var( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if content_len != 0 { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetDataVar ({})", - content_len - ))); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let var_name = ClarityName::try_from(captures["varname"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse data var name".into()))?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - 
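
Editorial aside (not part of the diff): `get_proof_query` and `get_chain_tip_query` above both scan the query string with `form_urlencoded` and take the first recognizable value. A standalone sketch of the `proof` flag, assuming the `form_urlencoded` crate that the removed code already imports:

```rust
// Sketch of the removed get_proof_query(): `proof` defaults to true and is only
// disabled by an explicit `proof=0` in the query string.
fn proof_requested(query: Option<&str>) -> bool {
    let no_proof = query
        .map(|q| {
            form_urlencoded::parse(q.as_bytes())
                .find(|(key, _)| key == "proof")
                .map(|(_, value)| value == "0")
                .unwrap_or(false)
        })
        .unwrap_or(false);
    !no_proof
}

fn main() {
    assert!(proof_requested(None));
    assert!(proof_requested(Some("tip=latest")));
    assert!(proof_requested(Some("proof=1")));
    assert!(!proof_requested(Some("proof=0&tip=latest")));
}
```

`get_chain_tip_query` does the same scan for the `tip` key, mapping `latest` to the unconfirmed tip and any parseable block-id hex to a specific tip; anything else falls back to the latest anchored tip.
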
Ok(HttpRequestType::GetDataVar( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - var_name, - tip, - with_proof, - )) - } - - fn parse_get_constant_val( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if content_len != 0 { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetConstantVal ({})", - content_len - ))); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let const_name = - ClarityName::try_from(captures["constname"].to_string()).map_err(|_e| { - net_error::DeserializeError("Failed to parse constant value name".into()) - })?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetConstantVal( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - const_name, - tip, - )) - } - - fn parse_get_map_entry( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < (BOUND_VALUE_SERIALIZATION_HEX)) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for GetMapEntry ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".into(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let map_name = ClarityName::try_from(captures["map"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse map name".into()))?; - - let value_hex: String = serde_json::from_reader(fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse JSON body".into()))?; - - let value = Value::try_deserialize_hex_untyped(&value_hex) - .map_err(|_e| net_error::DeserializeError("Failed to deserialize key value".into()))?; - - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetMapEntry( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - map_name, - value, - tip, - with_proof, - )) - } - - fn parse_call_read_only( - protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < protocol.maximum_call_argument_size) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for CallReadOnly ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected 
application/json".to_string(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let func_name = ClarityName::try_from(captures["function"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - let body: CallReadOnlyRequestBody = serde_json::from_reader(fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse JSON body".into()))?; - - let sender = PrincipalData::parse(&body.sender) - .map_err(|_e| net_error::DeserializeError("Failed to parse sender principal".into()))?; - - let sponsor = if let Some(sponsor) = body.sponsor { - Some(PrincipalData::parse(&sponsor).map_err(|_e| { - net_error::DeserializeError("Failed to parse sponsor principal".into()) - })?) - } else { - None - }; - - let arguments = body - .arguments - .into_iter() - .map(|hex| Value::try_deserialize_hex_untyped(&hex).ok()) - .collect::>>() - .ok_or_else(|| { - net_error::DeserializeError("Failed to deserialize argument value".into()) - })?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::CallReadOnlyFunction( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - sender, - sponsor, - func_name, - arguments, - tip, - )) - } - - fn parse_get_contract_arguments( - preamble: &HttpRequestPreamble, - captures: &Captures, - ) -> Result<(HttpRequestMetadata, StacksAddress, ContractName), net_error> { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - Ok(( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - )) - } - - fn parse_get_contract_abi( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let tip = HttpRequestType::get_chain_tip_query(query); - HttpRequestType::parse_get_contract_arguments(preamble, captures).map( - |(preamble, addr, name)| HttpRequestType::GetContractABI(preamble, addr, name, tip), - ) - } - - fn parse_get_contract_source( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let with_proof = HttpRequestType::get_proof_query(query); - let tip = HttpRequestType::get_chain_tip_query(query); - HttpRequestType::parse_get_contract_arguments(preamble, captures).map( - |(preamble, addr, name)| { - HttpRequestType::GetContractSrc(preamble, addr, name, tip, with_proof) - }, - ) - } - - fn parse_get_is_trait_implemented( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - let tip = HttpRequestType::get_chain_tip_query(query); - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - 
)); - } - - let contract_addr = StacksAddress::from_string(&captures["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(captures["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - let trait_name = ClarityName::try_from(captures["traitName"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse trait name".into()))?; - let trait_contract_addr = StacksAddress::from_string(&captures["traitContractAddr"]) - .ok_or_else(|| net_error::DeserializeError("Failed to parse contract address".into()))? - .into(); - let trait_contract_name = ContractName::try_from(captures["traitContractName"].to_string()) - .map_err(|_e| { - net_error::DeserializeError("Failed to parse trait contract name".into()) - })?; - let trait_id = TraitIdentifier::new(trait_contract_addr, trait_contract_name, trait_name); - - Ok(HttpRequestType::GetIsTraitImplemented( - HttpRequestMetadata::from_preamble(preamble), - contract_addr, - contract_name, - trait_id, - tip, - )) - } - - fn parse_getheaders( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetBlock".to_string(), - )); - } - - let quantity_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to reward cycle group".to_string(), - ))? - .as_str(); - - let quantity: u64 = quantity_str - .parse() - .map_err(|_| net_error::DeserializeError("Failed to parse reward cycle".to_string()))?; - - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::GetHeaders( - HttpRequestMetadata::from_preamble(preamble), - quantity, - tip, - )) - } - - fn parse_getblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetBlock".to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to block hash group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str) - .map_err(|_e| net_error::DeserializeError("Failed to parse block hash".to_string()))?; - - Ok(HttpRequestType::GetBlock( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_indexed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicroblocksIndexed" - .to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? 
- .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksIndexed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_confirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocks".to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksConfirmed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - )) - } - - fn parse_getmicroblocks_unconfirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocksUnconfirmed" - .to_string(), - )); - } - - let block_hash_str = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock hash group".to_string(), - ))? - .as_str(); - - let min_seq_str = captures - .get(2) - .ok_or(net_error::DeserializeError( - "Failed to match path to microblock minimum sequence group".to_string(), - ))? - .as_str(); - - let block_hash = StacksBlockId::from_hex(block_hash_str).map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock hash".to_string()) - })?; - - let min_seq = min_seq_str.parse::().map_err(|_e| { - net_error::DeserializeError("Failed to parse microblock minimum sequence".to_string()) - })?; - - Ok(HttpRequestType::GetMicroblocksUnconfirmed( - HttpRequestMetadata::from_preamble(preamble), - block_hash, - min_seq, - )) - } - - fn parse_gettransaction_unconfirmed( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body for GetMicrolocksUnconfirmed" - .to_string(), - )); - } - - let txid_hex = regex - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to txid group".to_string(), - ))? 
- .as_str(); - - if txid_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid txid: expected 64 bytes".to_string(), - )); - } - - let txid = Txid::from_hex(&txid_hex) - .map_err(|_e| net_error::DeserializeError("Failed to decode txid hex".to_string()))?; - - Ok(HttpRequestType::GetTransactionUnconfirmed( - HttpRequestMetadata::from_preamble(preamble), - txid, - )) - } - - fn parse_post_fee_rate_estimate( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - let content_len = preamble.get_content_length(); - if !(content_len > 0 && content_len < MAX_PAYLOAD_LEN) { - return Err(net_error::DeserializeError(format!( - "Invalid Http request: invalid body length for FeeRateEstimate ({})", - content_len - ))); - } - - if preamble.content_type != Some(HttpContentType::JSON) { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".to_string(), - )); - } - - let bound_fd = BoundReader::from_reader(fd, content_len as u64); - - let body: FeeRateEstimateRequestBody = serde_json::from_reader(bound_fd).map_err(|e| { - net_error::DeserializeError(format!("Failed to parse JSON body: {}", e)) - })?; - - let payload_hex = if body.transaction_payload.starts_with("0x") { - &body.transaction_payload[2..] - } else { - &body.transaction_payload - }; - - let payload_data = hex_bytes(payload_hex).map_err(|_e| { - net_error::DeserializeError("Bad hex string supplied for transaction payload".into()) - })?; - - let payload = TransactionPayload::consensus_deserialize(&mut payload_data.as_slice()) - .map_err(|e| { - net_error::DeserializeError(format!( - "Failed to deserialize transaction payload: {}", - e - )) - })?; - - let estimated_len = - std::cmp::max(body.estimated_len.unwrap_or(0), payload_data.len() as u64); - - Ok(HttpRequestType::FeeRateEstimate( - HttpRequestMetadata::from_preamble(preamble), - payload, - estimated_len, - )) - } - - fn parse_posttransaction( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostTransaction" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostTransaction body is too big".to_string(), - )); - } - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for transaction".to_string(), - )); - } - Some(HttpContentType::Bytes) => { - HttpRequestType::parse_posttransaction_octets(preamble, &mut bound_fd) - } - Some(HttpContentType::JSON) => { - HttpRequestType::parse_posttransaction_json(preamble, &mut bound_fd) - } - _ => { - return Err(net_error::DeserializeError( - "Wrong Content-Type for transaction; expected application/json".to_string(), - )); - } - } - } - - fn parse_posttransaction_octets( - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - let tx = StacksTransaction::consensus_deserialize(fd).map_err(|e| { - if let codec_error::DeserializeError(msg) = e { - net_error::ClientError(ClientError::Message(format!( - "Failed to deserialize posted transaction: {}", - msg - ))) - } else { - e.into() - } - })?; - Ok(HttpRequestType::PostTransaction( - 
HttpRequestMetadata::from_preamble(preamble), - tx, - None, - )) - } - - fn parse_posttransaction_json( - preamble: &HttpRequestPreamble, - fd: &mut R, - ) -> Result { - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let body: PostTransactionRequestBody = serde_json::from_reader(&mut bound_fd) - .map_err(|_e| net_error::DeserializeError("Failed to parse body".into()))?; - - let tx = { - let tx_bytes = hex_bytes(&body.tx) - .map_err(|_e| net_error::DeserializeError("Failed to parse tx".into()))?; - StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|e| { - if let codec_error::DeserializeError(msg) = e { - net_error::ClientError(ClientError::Message(format!( - "Failed to deserialize posted transaction: {}", - msg - ))) - } else { - e.into() - } - }) - }?; - - let attachment = match body.attachment { - None => None, - Some(attachment_content) => { - let content = hex_bytes(&attachment_content).map_err(|_e| { - net_error::DeserializeError("Failed to parse attachment".into()) - })?; - Some(Attachment::new(content)) - } - }; - - Ok(HttpRequestType::PostTransaction( - HttpRequestMetadata::from_preamble(preamble), - tx, - attachment, - )) - } - - fn parse_postblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostBlock body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for Stacks block".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for Stacks block; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let consensus_hash_str = regex - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match consensus hash in path group".to_string(), - ))? 
- .as_str(); - - let consensus_hash: ConsensusHash = - ConsensusHash::from_hex(consensus_hash_str).map_err(|_| { - net_error::DeserializeError("Failed to parse consensus hash".to_string()) - })?; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let stacks_block = StacksBlock::consensus_deserialize(&mut bound_fd)?; - - Ok(HttpRequestType::PostBlock( - HttpRequestMetadata::from_preamble(preamble), - consensus_hash, - stacks_block, - )) - } - - fn parse_postmicroblock( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostMicroblock" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostMicroblock body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for microblock".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for microblock; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - - let mb = StacksMicroblock::consensus_deserialize(&mut bound_fd)?; - let tip = HttpRequestType::get_chain_tip_query(query); - - Ok(HttpRequestType::PostMicroblock( - HttpRequestMetadata::from_preamble(preamble), - mb, - tip, - )) - } - - fn parse_get_attachment( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - captures: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - let hex_content_hash = captures - .get(1) - .ok_or(net_error::DeserializeError( - "Failed to match path to attachment hash group".to_string(), - ))? 
- .as_str(); - - let content_hash = Hash160::from_hex(&hex_content_hash).map_err(|_| { - net_error::DeserializeError("Failed to construct hash160 from inputs".to_string()) - })?; - - Ok(HttpRequestType::GetAttachment( - HttpRequestMetadata::from_preamble(preamble), - content_hash, - )) - } - - fn parse_get_attachments_inv( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _captures: &Captures, - query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let (index_block_hash, pages_indexes) = match query { - None => { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting index_block_hash and pages_indexes" - .to_string(), - )); - } - Some(query) => { - let mut index_block_hash = None; - let mut pages_indexes = HashSet::new(); - - for (key, value) in form_urlencoded::parse(query.as_bytes()) { - if key == "index_block_hash" { - index_block_hash = match StacksBlockId::from_hex(&value) { - Ok(index_block_hash) => Some(index_block_hash), - _ => None, - }; - } else if key == "pages_indexes" { - if let Ok(pages_indexes_value) = value.parse::() { - for entry in pages_indexes_value.split(",") { - if let Ok(page_index) = entry.parse::() { - pages_indexes.insert(page_index); - } - } - } - } - } - - let index_block_hash = match index_block_hash { - None => { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting index_block_hash".to_string(), - )); - } - Some(index_block_hash) => index_block_hash, - }; - - if pages_indexes.is_empty() { - return Err(net_error::DeserializeError( - "Invalid Http request: expecting pages_indexes".to_string(), - )); - } - - (index_block_hash, pages_indexes) - } - }; - - Ok(HttpRequestType::GetAttachmentsInv( - HttpRequestMetadata::from_preamble(preamble), - index_block_hash, - pages_indexes, - )) - } - - fn parse_post_mempool_query( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-empty body".to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: MemPoolQuery body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/octet-stream - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for MemPoolQuery".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Wrong Content-Type for MemPoolQuery; expected application/octet-stream" - .to_string(), - )); - } - } - }; - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let mempool_query = MemPoolSyncData::consensus_deserialize(&mut bound_fd)?; - let page_id_opt = HttpRequestType::get_mempool_page_id_query(query); - - Ok(HttpRequestType::MemPoolQuery( - HttpRequestMetadata::from_preamble(preamble), - mempool_query, - page_id_opt, - )) - } - - fn parse_get_stackerdb_metadata( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: 
expected 0-length body".to_string(), - )); - } - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBMetadata(preamble, contract_id) - }, - ) - } - - fn parse_get_stackerdb_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let slot_id: u32 = regex - .name("slot_id") - .ok_or(net_error::DeserializeError( - "Failed to match slot ID".to_string(), - ))? - .as_str() - .parse() - .map_err(|_| net_error::DeserializeError("Failed to decode slot ID".to_string()))?; - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBChunk(preamble, contract_id, slot_id, None) - }, - ) - } - - fn parse_get_stackerdb_versioned_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let slot_id: u32 = regex - .name("slot_id") - .ok_or(net_error::DeserializeError( - "Failed to match slot ID".to_string(), - ))? - .as_str() - .parse() - .map_err(|_| net_error::DeserializeError("Failed to decode slot ID".to_string()))?; - - let version: u32 = regex - .name("slot_version") - .ok_or(net_error::DeserializeError( - "Failed to match slot version".to_string(), - ))? 
- .as_str() - .parse() - .map_err(|_| { - net_error::DeserializeError("Failed to decode slot version".to_string()) - })?; - - HttpRequestType::parse_get_contract_arguments(preamble, regex).map( - |(preamble, addr, name)| { - let contract_id = QualifiedContractIdentifier::new(addr.into(), name); - HttpRequestType::GetStackerDBChunk(preamble, contract_id, slot_id, Some(version)) - }, - ) - } - - fn parse_post_stackerdb_chunk( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - regex: &Captures, - _query: Option<&str>, - fd: &mut R, - ) -> Result { - if preamble.get_content_length() == 0 { - return Err(net_error::DeserializeError( - "Invalid Http request: expected non-zero-length body for PostStackerDBChunk" - .to_string(), - )); - } - - if preamble.get_content_length() > MAX_PAYLOAD_LEN { - return Err(net_error::DeserializeError( - "Invalid Http request: PostStackerDBChunk body is too big".to_string(), - )); - } - - // content-type must be given, and must be application/json - match preamble.content_type { - None => { - return Err(net_error::DeserializeError( - "Missing Content-Type for stackerdb chunk".to_string(), - )); - } - Some(ref c) => { - if *c != HttpContentType::JSON { - return Err(net_error::DeserializeError( - "Wrong Content-Type for stackerdb; expected application/json".to_string(), - )); - } - } - }; - - let contract_addr = StacksAddress::from_string(®ex["address"]).ok_or_else(|| { - net_error::DeserializeError("Failed to parse contract address".into()) - })?; - let contract_name = ContractName::try_from(regex["contract"].to_string()) - .map_err(|_e| net_error::DeserializeError("Failed to parse contract name".into()))?; - - let contract_id = QualifiedContractIdentifier::new(contract_addr.into(), contract_name); - - let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); - let chunk_data: StackerDBChunkData = - serde_json::from_reader(&mut bound_fd).map_err(|_e| { - net_error::DeserializeError("Failed to parse StackerDB chunk body".into()) - })?; - - Ok(HttpRequestType::PostStackerDBChunk( - HttpRequestMetadata::from_preamble(preamble), - contract_id, - chunk_data, - )) - } - - fn parse_options_preflight( - _protocol: &mut StacksHttp, - preamble: &HttpRequestPreamble, - _regex: &Captures, - _query: Option<&str>, - _fd: &mut R, - ) -> Result { - Ok(HttpRequestType::OptionsPreflight( - HttpRequestMetadata::from_preamble(preamble), - preamble.path.to_string(), - )) - } - - pub fn metadata(&self) -> &HttpRequestMetadata { - match *self { - HttpRequestType::GetInfo(ref md) => md, - HttpRequestType::GetPoxInfo(ref md, ..) => md, - HttpRequestType::GetNeighbors(ref md) => md, - HttpRequestType::GetHeaders(ref md, ..) => md, - HttpRequestType::GetBlock(ref md, _) => md, - HttpRequestType::GetMicroblocksIndexed(ref md, _) => md, - HttpRequestType::GetMicroblocksConfirmed(ref md, _) => md, - HttpRequestType::GetMicroblocksUnconfirmed(ref md, _, _) => md, - HttpRequestType::GetTransactionUnconfirmed(ref md, _) => md, - HttpRequestType::PostTransaction(ref md, _, _) => md, - HttpRequestType::PostBlock(ref md, ..) => md, - HttpRequestType::PostMicroblock(ref md, ..) => md, - HttpRequestType::GetAccount(ref md, ..) => md, - HttpRequestType::GetDataVar(ref md, ..) => md, - HttpRequestType::GetConstantVal(ref md, ..) => md, - HttpRequestType::GetMapEntry(ref md, ..) => md, - HttpRequestType::GetTransferCost(ref md) => md, - HttpRequestType::GetContractABI(ref md, ..) => md, - HttpRequestType::GetContractSrc(ref md, ..) 
=> md, - HttpRequestType::GetIsTraitImplemented(ref md, ..) => md, - HttpRequestType::CallReadOnlyFunction(ref md, ..) => md, - HttpRequestType::OptionsPreflight(ref md, ..) => md, - HttpRequestType::GetAttachmentsInv(ref md, ..) => md, - HttpRequestType::GetAttachment(ref md, ..) => md, - HttpRequestType::MemPoolQuery(ref md, ..) => md, - HttpRequestType::FeeRateEstimate(ref md, _, _) => md, - HttpRequestType::GetStackerDBMetadata(ref md, ..) => md, - HttpRequestType::GetStackerDBChunk(ref md, ..) => md, - HttpRequestType::PostStackerDBChunk(ref md, ..) => md, - HttpRequestType::ClientError(ref md, ..) => md, - } - } - - pub fn metadata_mut(&mut self) -> &mut HttpRequestMetadata { - match *self { - HttpRequestType::GetInfo(ref mut md) => md, - HttpRequestType::GetPoxInfo(ref mut md, ..) => md, - HttpRequestType::GetNeighbors(ref mut md) => md, - HttpRequestType::GetHeaders(ref mut md, ..) => md, - HttpRequestType::GetBlock(ref mut md, _) => md, - HttpRequestType::GetMicroblocksIndexed(ref mut md, _) => md, - HttpRequestType::GetMicroblocksConfirmed(ref mut md, _) => md, - HttpRequestType::GetMicroblocksUnconfirmed(ref mut md, _, _) => md, - HttpRequestType::GetTransactionUnconfirmed(ref mut md, _) => md, - HttpRequestType::PostTransaction(ref mut md, _, _) => md, - HttpRequestType::PostBlock(ref mut md, ..) => md, - HttpRequestType::PostMicroblock(ref mut md, ..) => md, - HttpRequestType::GetAccount(ref mut md, ..) => md, - HttpRequestType::GetDataVar(ref mut md, ..) => md, - HttpRequestType::GetConstantVal(ref mut md, ..) => md, - HttpRequestType::GetMapEntry(ref mut md, ..) => md, - HttpRequestType::GetTransferCost(ref mut md) => md, - HttpRequestType::GetContractABI(ref mut md, ..) => md, - HttpRequestType::GetContractSrc(ref mut md, ..) => md, - HttpRequestType::GetIsTraitImplemented(ref mut md, ..) => md, - HttpRequestType::CallReadOnlyFunction(ref mut md, ..) => md, - HttpRequestType::OptionsPreflight(ref mut md, ..) => md, - HttpRequestType::GetAttachmentsInv(ref mut md, ..) => md, - HttpRequestType::GetAttachment(ref mut md, ..) => md, - HttpRequestType::MemPoolQuery(ref mut md, ..) => md, - HttpRequestType::FeeRateEstimate(ref mut md, _, _) => md, - HttpRequestType::GetStackerDBMetadata(ref mut md, ..) => md, - HttpRequestType::GetStackerDBChunk(ref mut md, ..) => md, - HttpRequestType::PostStackerDBChunk(ref mut md, ..) => md, - HttpRequestType::ClientError(ref mut md, ..) 
=> md, - } - } - - fn make_tip_query_string(tip_req: &TipRequest, with_proof: bool) -> String { - match tip_req { - TipRequest::UseLatestUnconfirmedTip => { - format!("?tip=latest{}", if with_proof { "" } else { "&proof=0" }) - } - TipRequest::SpecificTip(tip) => { - format!("?tip={}{}", tip, if with_proof { "" } else { "&proof=0" }) - } - TipRequest::UseLatestAnchoredTip => { - if !with_proof { - format!("?proof=0") - } else { - "".to_string() - } - } - } - } - - pub fn request_path(&self) -> String { - match self { - HttpRequestType::GetInfo(_md) => "/v2/info".to_string(), - HttpRequestType::GetPoxInfo(_md, tip_req) => format!( - "/v2/pox{}", - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetNeighbors(_md) => "/v2/neighbors".to_string(), - HttpRequestType::GetHeaders(_md, quantity, tip_req) => format!( - "/v2/headers/{}{}", - quantity, - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetBlock(_md, block_hash) => { - format!("/v2/blocks/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksIndexed(_md, block_hash) => { - format!("/v2/microblocks/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksConfirmed(_md, block_hash) => { - format!("/v2/microblocks/confirmed/{}", block_hash.to_hex()) - } - HttpRequestType::GetMicroblocksUnconfirmed(_md, block_hash, min_seq) => format!( - "/v2/microblocks/unconfirmed/{}/{}", - block_hash.to_hex(), - min_seq - ), - HttpRequestType::GetTransactionUnconfirmed(_md, txid) => { - format!("/v2/transactions/unconfirmed/{}", txid) - } - HttpRequestType::PostTransaction(_md, ..) => "/v2/transactions".to_string(), - HttpRequestType::PostBlock(_md, ch, ..) => format!("/v2/blocks/upload/{}", &ch), - HttpRequestType::PostMicroblock(_md, _, tip_req) => format!( - "/v2/microblocks{}", - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetAccount(_md, principal, tip_req, with_proof) => { - format!( - "/v2/accounts/{}{}", - &principal.to_string(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof,) - ) - } - HttpRequestType::GetDataVar( - _md, - contract_addr, - contract_name, - var_name, - tip_req, - with_proof, - ) => format!( - "/v2/data_var/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - var_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - HttpRequestType::GetConstantVal( - _md, - contract_addr, - contract_name, - const_name, - tip_req, - ) => format!( - "/v2/constant_val/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - const_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::GetMapEntry( - _md, - contract_addr, - contract_name, - map_name, - _key, - tip_req, - with_proof, - ) => format!( - "/v2/map_entry/{}/{}/{}{}", - &contract_addr.to_string(), - contract_name.as_str(), - map_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - HttpRequestType::GetTransferCost(_md) => "/v2/fees/transfer".into(), - HttpRequestType::GetContractABI(_, contract_addr, contract_name, tip_req) => format!( - "/v2/contracts/interface/{}/{}{}", - contract_addr, - contract_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true,) - ), - HttpRequestType::GetContractSrc( - _, - contract_addr, - contract_name, - tip_req, - with_proof, - ) => format!( - "/v2/contracts/source/{}/{}{}", - contract_addr, - contract_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, *with_proof) - ), - 
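
The TipRequest handling in make_tip_query_string above reduces to three query-string shapes. A minimal sketch of the same logic, using a simplified stand-in enum rather than the node's TipRequest type, purely for illustration:

enum TipReq<'a> {
    LatestUnconfirmed,
    Specific(&'a str),
    LatestAnchored,
}

fn tip_query_string(tip_req: &TipReq<'_>, with_proof: bool) -> String {
    // suffix appended when with_proof is false
    let proof = if with_proof { "" } else { "&proof=0" };
    match tip_req {
        TipReq::LatestUnconfirmed => format!("?tip=latest{}", proof),
        TipReq::Specific(tip) => format!("?tip={}{}", tip, proof),
        TipReq::LatestAnchored => {
            if with_proof {
                String::new()
            } else {
                "?proof=0".to_string()
            }
        }
    }
}
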
HttpRequestType::GetIsTraitImplemented( - _, - contract_addr, - contract_name, - trait_id, - tip_req, - ) => format!( - "/v2/traits/{}/{}/{}/{}/{}{}", - contract_addr, - contract_name.as_str(), - trait_id.name.to_string(), - StacksAddress::from(trait_id.clone().contract_identifier.issuer), - trait_id.contract_identifier.name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::CallReadOnlyFunction( - _, - contract_addr, - contract_name, - _, - _, - func_name, - _, - tip_req, - ) => format!( - "/v2/contracts/call-read/{}/{}/{}{}", - contract_addr, - contract_name.as_str(), - func_name.as_str(), - HttpRequestType::make_tip_query_string(tip_req, true) - ), - HttpRequestType::OptionsPreflight(_md, path) => path.to_string(), - HttpRequestType::GetAttachmentsInv(_md, index_block_hash, pages_indexes) => { - let pages_query = match pages_indexes.len() { - 0 => format!(""), - _n => { - let mut indexes = pages_indexes - .iter() - .map(|i| format!("{}", i)) - .collect::>(); - indexes.sort(); - format!("&pages_indexes={}", indexes.join(",")) - } - }; - let index_block_hash = format!("index_block_hash={}", index_block_hash); - format!("/v2/attachments/inv?{}{}", index_block_hash, pages_query,) - } - HttpRequestType::GetAttachment(_, content_hash) => { - format!("/v2/attachments/{}", to_hex(&content_hash.0[..])) - } - HttpRequestType::MemPoolQuery(_, _, page_id_opt) => match page_id_opt { - Some(page_id) => { - format!("/v2/mempool/query?page_id={}", page_id) - } - None => "/v2/mempool/query".to_string(), - }, - HttpRequestType::GetStackerDBMetadata(_, contract_id) => format!( - "/v2/stackerdb/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name - ), - HttpRequestType::GetStackerDBChunk(_, contract_id, slot_id, slot_version_opt) => { - if let Some(version) = slot_version_opt { - format!( - "/v2/stackerdb/{}/{}/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name, - slot_id, - version - ) - } else { - format!( - "/v2/stackerdb/{}/{}/{}", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name, - slot_id - ) - } - } - HttpRequestType::PostStackerDBChunk(_, contract_id, ..) => { - format!( - "/v2/stackerdb/{}/{}/chunks", - StacksAddress::from(contract_id.issuer.clone()), - &contract_id.name - ) - } - HttpRequestType::FeeRateEstimate(_, _, _) => self.get_path().to_string(), - HttpRequestType::ClientError(_md, e) => match e { - ClientError::NotFound(path) => path.to_string(), - _ => "error path unknown".into(), - }, - } - } - - pub fn get_path(&self) -> &'static str { - match self { - HttpRequestType::GetInfo(..) => "/v2/info", - HttpRequestType::GetPoxInfo(..) => "/v2/pox", - HttpRequestType::GetNeighbors(..) => "/v2/neighbors", - HttpRequestType::GetHeaders(..) => "/v2/headers/:height", - HttpRequestType::GetBlock(..) => "/v2/blocks/:hash", - HttpRequestType::GetMicroblocksIndexed(..) => "/v2/microblocks/:hash", - HttpRequestType::GetMicroblocksConfirmed(..) => "/v2/microblocks/confirmed/:hash", - HttpRequestType::GetMicroblocksUnconfirmed(..) => { - "/v2/microblocks/unconfirmed/:hash/:seq" - } - HttpRequestType::GetTransactionUnconfirmed(..) => "/v2/transactions/unconfirmed/:txid", - HttpRequestType::PostTransaction(..) => "/v2/transactions", - HttpRequestType::PostBlock(..) => "/v2/blocks/upload/:block", - HttpRequestType::PostMicroblock(..) => "/v2/microblocks", - HttpRequestType::GetAccount(..) => "/v2/accounts/:principal", - HttpRequestType::GetDataVar(..) 
=> "/v2/data_var/:principal/:contract_name/:var_name", - HttpRequestType::GetConstantVal(..) => { - "/v2/constant_val/:principal/:contract_name/:const_name" - } - HttpRequestType::GetMapEntry(..) => "/v2/map_entry/:principal/:contract_name/:map_name", - HttpRequestType::GetTransferCost(..) => "/v2/fees/transfer", - HttpRequestType::GetContractABI(..) => { - "/v2/contracts/interface/:principal/:contract_name" - } - HttpRequestType::GetContractSrc(..) => "/v2/contracts/source/:principal/:contract_name", - HttpRequestType::CallReadOnlyFunction(..) => { - "/v2/contracts/call-read/:principal/:contract_name/:func_name" - } - HttpRequestType::GetAttachmentsInv(..) => "/v2/attachments/inv", - HttpRequestType::GetAttachment(..) => "/v2/attachments/:hash", - HttpRequestType::GetIsTraitImplemented(..) => "/v2/traits/:principal/:contract_name", - HttpRequestType::MemPoolQuery(..) => "/v2/mempool/query", - HttpRequestType::FeeRateEstimate(_, _, _) => "/v2/fees/transaction", - HttpRequestType::GetStackerDBMetadata(..) => "/v2/stackerdb/:principal/:contract_name", - HttpRequestType::GetStackerDBChunk(..) => { - "/v2/stackerdb/:principal/:contract_name/:slot_id(/:slot_version)?" - } - HttpRequestType::PostStackerDBChunk(..) => { - "/v2/stackerdb/:principal/:contract_name/chunks" - } - HttpRequestType::OptionsPreflight(..) | HttpRequestType::ClientError(..) => "/", - } - } - - pub fn send(&self, _protocol: &mut StacksHttp, fd: &mut W) -> Result<(), net_error> { - match self { - HttpRequestType::PostTransaction(md, tx, attachment) => { - let mut tx_bytes = vec![]; - write_next(&mut tx_bytes, tx)?; - let tx_hex = to_hex(&tx_bytes[..]); - - let (content_type, request_body_bytes) = match attachment { - None => { - // Transaction does not include an attachment: HttpContentType::Bytes (more compressed) - (Some(&HttpContentType::Bytes), tx_bytes) - } - Some(attachment) => { - // Transaction is including an attachment: HttpContentType::JSON - let request_body = PostTransactionRequestBody { - tx: tx_hex, - attachment: Some(to_hex(&attachment.content[..])), - }; - - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, &request_body).map_err( - |e| { - net_error::SerializeError(format!( - "Failed to serialize read-only call to JSON: {:?}", - &e - )) - }, - )?; - (Some(&HttpContentType::JSON), request_body_bytes) - } - }; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - content_type, - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::PostBlock(md, _ch, block) => { - let mut block_bytes = vec![]; - write_next(&mut block_bytes, block)?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(block_bytes.len() as u32), - Some(&HttpContentType::Bytes), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&block_bytes).map_err(net_error::WriteError)?; - } - HttpRequestType::PostMicroblock(md, mb, ..) 
=> { - let mut mb_bytes = vec![]; - write_next(&mut mb_bytes, mb)?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(mb_bytes.len() as u32), - Some(&HttpContentType::Bytes), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&mb_bytes).map_err(net_error::WriteError)?; - } - HttpRequestType::GetMapEntry( - md, - _contract_addr, - _contract_name, - _map_name, - key, - .., - ) => { - let mut request_bytes = vec![]; - key.serialize_write(&mut request_bytes) - .map_err(net_error::WriteError)?; - let request_json = format!("\"{}\"", to_hex(&request_bytes)); - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_json.as_bytes().len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_json.as_bytes()) - .map_err(net_error::WriteError)?; - } - HttpRequestType::CallReadOnlyFunction( - md, - _contract_addr, - _contract_name, - sender, - sponsor, - _func_name, - func_args, - .., - ) => { - let mut args = vec![]; - for arg in func_args.iter() { - let mut arg_bytes = vec![]; - arg.serialize_write(&mut arg_bytes) - .map_err(net_error::WriteError)?; - args.push(to_hex(&arg_bytes)); - } - - let request_body = CallReadOnlyRequestBody { - sender: sender.to_string(), - sponsor: sponsor.as_ref().map(|sp| sp.to_string()), - arguments: args, - }; - - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, &request_body).map_err(|e| { - net_error::SerializeError(format!( - "Failed to serialize read-only call to JSON: {:?}", - &e - )) - })?; - - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::MemPoolQuery(md, query, ..) 
=> { - let request_body_bytes = query.serialize_to_vec(); - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::Bytes), - empty_headers, - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - HttpRequestType::PostStackerDBChunk(md, _, request) => { - let mut request_body_bytes = vec![]; - serde_json::to_writer(&mut request_body_bytes, request).map_err(|e| { - net_error::SerializeError(format!( - "Failed to serialize StackerDB POST chunk to JSON: {:?}", - &e - )) - })?; - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "POST", - &self.request_path(), - &md.peer, - md.keep_alive, - Some(request_body_bytes.len() as u32), - Some(&HttpContentType::JSON), - |fd| stacks_height_headers(fd, md), - )?; - fd.write_all(&request_body_bytes) - .map_err(net_error::WriteError)?; - } - other_type => { - let md = other_type.metadata(); - let request_path = other_type.request_path(); - HttpRequestPreamble::new_serialized( - fd, - &md.version, - "GET", - &request_path, - &md.peer, - md.keep_alive, - None, - None, - |fd| stacks_height_headers(fd, md), - )?; - } - } - Ok(()) - } -} - -impl HttpResponseType { - fn try_parse( - protocol: &mut StacksHttp, - regex: &Regex, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - request_path: &str, - fd: &mut R, - len_hint: Option, - parser: F, - ) -> Result, net_error> - where - F: Fn( - &mut StacksHttp, - HttpVersion, - &HttpResponsePreamble, - &mut R, - Option, - ) -> Result, - { - if regex.is_match(request_path) { - let payload = parser(protocol, request_version, preamble, fd, len_hint)?; - Ok(Some(payload)) - } else { - Ok(None) - } - } - - fn parse_error( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - ) -> Result { - if preamble.status_code < 400 || preamble.status_code > 599 { - return Err(net_error::DeserializeError( - "Inavlid response: not an error".to_string(), - )); - } - - if preamble.content_type != HttpContentType::Text - && preamble.content_type != HttpContentType::JSON - { - return Err(net_error::DeserializeError(format!( - "Invalid error response: expected text/plain or application/json, got {:?}", - &preamble.content_type - ))); - } - - let mut error_text = String::new(); - fd.read_to_string(&mut error_text) - .map_err(net_error::ReadError)?; - - let md = HttpResponseMetadata::from_preamble(request_version, preamble); - let resp = match preamble.status_code { - 400 => HttpResponseType::BadRequest(md, error_text), - 401 => HttpResponseType::Unauthorized(md, error_text), - 402 => HttpResponseType::PaymentRequired(md, error_text), - 403 => HttpResponseType::Forbidden(md, error_text), - 404 => HttpResponseType::NotFound(md, error_text), - 500 => HttpResponseType::ServerError(md, error_text), - 503 => HttpResponseType::ServiceUnavailable(md, error_text), - _ => HttpResponseType::Error(md, preamble.status_code, error_text), - }; - Ok(resp) - } - - /// Parse a SIP-003 bytestream. 
The first 4 bytes are a big-endian length prefix - fn parse_bytestream( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result { - // content-type has to be Bytes - if preamble.content_type != HttpContentType::Bytes { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/octet-stream".to_string(), - )); - } - - let item: T = if preamble.is_chunked() && len_hint.is_none() { - let mut chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - read_next(&mut chunked_fd)? - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(content_length) = content_length_opt { - if (content_length as u64) > max_len { - return Err(net_error::DeserializeError( - "Invalid Content-Length header: too long".to_string(), - )); - } - - let mut bound_fd = BoundReader::from_reader(fd, content_length as u64); - read_next(&mut bound_fd)? - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - Ok(item) - } - - fn parse_json( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result { - // content-type has to be JSON - if preamble.content_type != HttpContentType::JSON { - return Err(net_error::DeserializeError( - "Invalid content-type: expected application/json".to_string(), - )); - } - - let item_result: Result = if preamble.is_chunked() - && len_hint.is_none() - { - let chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - serde_json::from_reader(chunked_fd) - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(content_length) = content_length_opt { - if (content_length as u64) > max_len { - return Err(net_error::DeserializeError( - "Invalid Content-Length header: too long".to_string(), - )); - } - let bound_fd = BoundReader::from_reader(fd, content_length as u64); - serde_json::from_reader(bound_fd) - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - item_result.map_err(|e| { - if e.is_eof() { - net_error::UnderflowError(format!("Not enough bytes to parse JSON")) - } else { - net_error::DeserializeError(format!("Failed to parse JSON: {:?}", &e)) - } - }) - } - - fn parse_raw_bytes( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - expected_content_type: HttpContentType, - ) -> Result, net_error> { - if preamble.content_type != expected_content_type { - return Err(net_error::DeserializeError(format!( - "Invalid content-type: expected {}", - expected_content_type - ))); - } - let buf = if preamble.is_chunked() && len_hint.is_none() { - let mut chunked_fd = HttpChunkedTransferReader::from_reader(fd, max_len); - let mut buf = vec![]; - chunked_fd - .read_to_end(&mut buf) - .map_err(net_error::ReadError)?; - buf - } else { - let content_length_opt = match (preamble.content_length, len_hint) { - (Some(l), _) => Some(l as u32), - (None, Some(l)) => Some(l as u32), - (None, None) => None, - }; - if let Some(len) = content_length_opt { - let mut buf = 
vec![0u8; len as usize]; - fd.read_exact(&mut buf).map_err(net_error::ReadError)?; - buf - } else { - // unsupported headers - trace!("preamble: {:?}", preamble); - return Err(net_error::DeserializeError( - "Invalid headers: need either Transfer-Encoding or Content-Length".to_string(), - )); - } - }; - - Ok(buf) - } - - fn parse_text( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result, net_error> { - Self::parse_raw_bytes(preamble, fd, len_hint, max_len, HttpContentType::Text) - } - - fn parse_bytes( - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - max_len: u64, - ) -> Result, net_error> { - Self::parse_raw_bytes(preamble, fd, len_hint, max_len, HttpContentType::Bytes) - } - - // len_hint is given by the StacksHttp protocol implementation - pub fn parse( - protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - request_path: String, - fd: &mut R, - len_hint: Option, - ) -> Result { - if preamble.status_code >= 400 { - return HttpResponseType::parse_error(protocol, request_version, preamble, fd); - } - - // TODO: make this static somehow - let RESPONSE_METHODS: &[( - &Regex, - &dyn Fn( - &mut StacksHttp, - HttpVersion, - &HttpResponsePreamble, - &mut R, - Option, - ) -> Result, - )] = &[ - (&PATH_GETINFO, &HttpResponseType::parse_peerinfo), - (&PATH_GETPOXINFO, &HttpResponseType::parse_poxinfo), - (&PATH_GETNEIGHBORS, &HttpResponseType::parse_neighbors), - (&PATH_GETHEADERS, &HttpResponseType::parse_headers), - (&PATH_GETBLOCK, &HttpResponseType::parse_block), - (&PATH_GET_DATA_VAR, &HttpResponseType::parse_get_data_var), - ( - &PATH_GET_CONSTANT_VAL, - &HttpResponseType::parse_get_constant_val, - ), - (&PATH_GET_MAP_ENTRY, &HttpResponseType::parse_get_map_entry), - ( - &PATH_GETMICROBLOCKS_INDEXED, - &HttpResponseType::parse_microblocks, - ), - ( - &PATH_GETMICROBLOCKS_CONFIRMED, - &HttpResponseType::parse_microblocks, - ), - ( - &PATH_GETMICROBLOCKS_UNCONFIRMED, - &HttpResponseType::parse_microblocks_unconfirmed, - ), - ( - &PATH_GETTRANSACTION_UNCONFIRMED, - &HttpResponseType::parse_transaction_unconfirmed, - ), - (&PATH_POSTTRANSACTION, &HttpResponseType::parse_txid), - ( - &PATH_POSTBLOCK, - &HttpResponseType::parse_stacks_block_accepted, - ), - ( - &PATH_POSTMICROBLOCK, - &HttpResponseType::parse_microblock_hash, - ), - (&PATH_GET_ACCOUNT, &HttpResponseType::parse_get_account), - ( - &PATH_GET_CONTRACT_SRC, - &HttpResponseType::parse_get_contract_src, - ), - ( - &PATH_GET_IS_TRAIT_IMPLEMENTED, - &HttpResponseType::parse_get_is_trait_implemented, - ), - ( - &PATH_GET_CONTRACT_ABI, - &HttpResponseType::parse_get_contract_abi, - ), - ( - &PATH_POST_CALL_READ_ONLY, - &HttpResponseType::parse_call_read_only, - ), - ( - &PATH_GET_ATTACHMENT, - &HttpResponseType::parse_get_attachment, - ), - ( - &PATH_GET_ATTACHMENTS_INV, - &HttpResponseType::parse_get_attachments_inv, - ), - ( - &PATH_POST_MEMPOOL_QUERY, - &HttpResponseType::parse_post_mempool_query, - ), - ( - &PATH_GET_STACKERDB_METADATA, - &HttpResponseType::parse_get_stackerdb_metadata, - ), - ( - &PATH_GET_STACKERDB_CHUNK, - &HttpResponseType::parse_get_stackerdb_chunk, - ), - ( - &PATH_GET_STACKERDB_VERSIONED_CHUNK, - &HttpResponseType::parse_get_stackerdb_chunk, - ), - ( - &PATH_POST_STACKERDB_CHUNK, - &HttpResponseType::parse_stackerdb_chunk_response, - ), - ]; - - // use url::Url to parse path and query string - // Url will refuse to parse just a path, so create a dummy URL - let local_url = format!("http://local{}", 
&request_path); - let url = Url::parse(&local_url).map_err(|_e| { - net_error::DeserializeError("Http request path could not be parsed".to_string()) - })?; - - let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { - net_error::DeserializeError( - "Http response path could not be parsed as UTF-8".to_string(), - ) - })?; - - for (regex, parser) in RESPONSE_METHODS.iter() { - match HttpResponseType::try_parse( - protocol, - regex, - request_version, - preamble, - &decoded_path.to_string(), - fd, - len_hint, - parser, - ) { - Ok(Some(request)) => { - return Ok(request); - } - Ok(None) => { - continue; - } - Err(e) => { - test_debug!("Failed to parse {}: {:?}", &request_path, &e); - return Err(e); - } - } - } - - test_debug!( - "Failed to match request path '{}' to a handler", - &request_path - ); - return Err(net_error::DeserializeError( - "Http response could not be parsed".to_string(), - )); - } - - fn parse_peerinfo( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let peer_info = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::PeerInfo( - HttpResponseMetadata::from_preamble(request_version, preamble), - peer_info, - )) - } - - fn parse_poxinfo( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let pox_info = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::PoxInfo( - HttpResponseMetadata::from_preamble(request_version, preamble), - pox_info, - )) - } - - fn parse_neighbors( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let neighbors_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Neighbors( - HttpResponseMetadata::from_preamble(request_version, preamble), - neighbors_data, - )) - } - - fn parse_headers( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let headers: Vec = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Headers( - HttpResponseMetadata::from_preamble(request_version, preamble), - headers, - )) - } - - fn parse_block( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let block: StacksBlock = - HttpResponseType::parse_bytestream(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Block( - HttpResponseMetadata::from_preamble(request_version, preamble), - block, - )) - } - - fn parse_microblocks( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let microblocks: Vec = - HttpResponseType::parse_bytestream(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::Microblocks( - HttpResponseMetadata::from_preamble(request_version, preamble), - microblocks, - )) - } - - fn parse_get_account( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let account_entry = - HttpResponseType::parse_json(preamble, fd, len_hint, 
MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetAccount( - HttpResponseMetadata::from_preamble(request_version, preamble), - account_entry, - )) - } - - fn parse_get_data_var( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let data_var = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetDataVar( - HttpResponseMetadata::from_preamble(request_version, preamble), - data_var, - )) - } - - fn parse_get_constant_val( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let constant_val = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetConstantVal( - HttpResponseMetadata::from_preamble(request_version, preamble), - constant_val, - )) - } - - fn parse_get_map_entry( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let map_entry = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetMapEntry( - HttpResponseMetadata::from_preamble(request_version, preamble), - map_entry, - )) - } - - fn parse_get_contract_src( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let src_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetContractSrc( - HttpResponseMetadata::from_preamble(request_version, preamble), - src_data, - )) - } - - fn parse_get_is_trait_implemented( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let src_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetIsTraitImplemented( - HttpResponseMetadata::from_preamble(request_version, preamble), - src_data, - )) - } - - fn parse_get_contract_abi( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let abi = HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::GetContractABI( - HttpResponseMetadata::from_preamble(request_version, preamble), - abi, - )) - } - - fn parse_call_read_only( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let call_data = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::CallReadOnlyFunction( - HttpResponseMetadata::from_preamble(request_version, preamble), - call_data, - )) - } - - fn parse_microblocks_unconfirmed( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - // NOTE: there will be no length prefix on this, but we won't ever get more than - // MAX_MICROBLOCKS_UNCONFIRMED microblocks - let mut microblocks = vec![]; - let max_len = len_hint.unwrap_or(MAX_MESSAGE_LEN as usize) as u64; - let mut bound_reader = BoundReader::from_reader(fd, max_len); - loop { - let mblock: StacksMicroblock = match read_next(&mut bound_reader) { - Ok(mblock) => 
Ok(mblock), - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - // end of stream -- this is fine - break; - } - _ => Err(e), - }, - _ => Err(e), - }, - }?; - - microblocks.push(mblock); - if microblocks.len() == MAX_MICROBLOCKS_UNCONFIRMED { - break; - } - } - Ok(HttpResponseType::Microblocks( - HttpResponseMetadata::from_preamble(request_version, preamble), - microblocks, - )) - } - - fn parse_transaction_unconfirmed( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let unconfirmed_status: UnconfirmedTransactionResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - // tx payload must decode to a transaction - let tx_bytes = hex_bytes(&unconfirmed_status.tx).map_err(|_| { - net_error::DeserializeError("Unconfirmed transaction is not hex-encoded".to_string()) - })?; - let _ = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).map_err(|_| { - net_error::DeserializeError( - "Unconfirmed transaction is not a well-formed Stacks transaction".to_string(), - ) - })?; - - Ok(HttpResponseType::UnconfirmedTransaction( - HttpResponseMetadata::from_preamble(request_version, preamble), - unconfirmed_status, - )) - } - - fn parse_txid( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let txid_hex: String = HttpResponseType::parse_json(preamble, fd, len_hint, 66)?; - if txid_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid txid: expected 64 bytes".to_string(), - )); - } - - let txid = Txid::from_hex(&txid_hex) - .map_err(|_e| net_error::DeserializeError("Failed to decode txid hex".to_string()))?; - Ok(HttpResponseType::TransactionID( - HttpResponseMetadata::from_preamble(request_version, preamble), - txid, - )) - } - - fn parse_get_attachment( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let res: GetAttachmentResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - Ok(HttpResponseType::GetAttachment( - HttpResponseMetadata::from_preamble(request_version, preamble), - res, - )) - } - - fn parse_get_attachments_inv( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let res: GetAttachmentsInvResponse = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - - Ok(HttpResponseType::GetAttachmentsInv( - HttpResponseMetadata::from_preamble(request_version, preamble), - res, - )) - } - - fn parse_stacks_block_accepted( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let stacks_block_accepted: StacksBlockAcceptedData = - HttpResponseType::parse_json(preamble, fd, len_hint, 128)?; - Ok(HttpResponseType::StacksBlockAccepted( - HttpResponseMetadata::from_preamble(request_version, preamble), - stacks_block_accepted.stacks_block_id, - stacks_block_accepted.accepted, - )) - } - - fn parse_microblock_hash( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let mblock_hex: String = HttpResponseType::parse_json(preamble, fd, 
len_hint, 66)?; - if mblock_hex.len() != 64 { - return Err(net_error::DeserializeError( - "Invalid microblock hash: expected 64 bytes".to_string(), - )); - } - - let mblock_hash = BlockHeaderHash::from_hex(&mblock_hex).map_err(|_e| { - net_error::DeserializeError("Failed to decode microblock hash hex".to_string()) - })?; - Ok(HttpResponseType::MicroblockHash( - HttpResponseMetadata::from_preamble(request_version, preamble), - mblock_hash, - )) - } - - /// Read the trailing page ID from a transaction stream - fn parse_mempool_query_page_id( - pos: usize, - retry_reader: &mut RetryReader<'_, R>, - ) -> Result, net_error> { - // possibly end-of-transactions, in which case, the last 32 bytes should be - // a page ID. Expect end-of-stream after this. - retry_reader.set_position(pos); - let next_page: Txid = match read_next(retry_reader) { - Ok(txid) => txid, - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - if pos == retry_reader.position() { - // this is fine -- the node didn't get another page - return Ok(None); - } else { - // partial data -- corrupt stream - test_debug!("Unexpected EOF: {} != {}", pos, retry_reader.position()); - return Err(e.into()); - } - } - _ => { - return Err(e.into()); - } - }, - e => { - return Err(e.into()); - } - }, - }; - - test_debug!("Read page_id {:?}", &next_page); - Ok(Some(next_page)) - } - - /// Decode a transaction stream, returned from /v2/mempool/query. - /// The wire format is a list of transactions (no SIP-003 length prefix), followed by an - /// optional 32-byte page ID. Obtain both the transactions and page ID, if it exists. - pub fn decode_tx_stream( - fd: &mut R, - len_hint: Option, - ) -> Result<(Vec, Option), net_error> { - // The wire format is `tx, tx, tx, tx, .., tx, txid`. - // The last 32 bytes are the page ID for the next mempool query. - // NOTE: there will be no length prefix on this. - let mut txs: Vec = vec![]; - let max_len = len_hint.unwrap_or(MAX_MESSAGE_LEN as usize) as u64; - let mut bound_reader = BoundReader::from_reader(fd, max_len); - let mut retry_reader = RetryReader::new(&mut bound_reader); - let mut page_id = None; - let mut expect_eof = false; - - loop { - let pos = retry_reader.position(); - let next_msg: Result = read_next(&mut retry_reader); - match next_msg { - Ok(tx) => { - if expect_eof { - // this should have failed - test_debug!("Expected EOF; got transaction {}", tx.txid()); - return Err(net_error::ExpectedEndOfStream); - } - - test_debug!("Read transaction {}", tx.txid()); - txs.push(tx); - Ok(()) - } - Err(e) => match e { - codec_error::ReadError(ref ioe) => match ioe.kind() { - io::ErrorKind::UnexpectedEof => { - if expect_eof { - if pos != retry_reader.position() { - // read partial data. The stream is corrupt. - test_debug!( - "Expected EOF; stream advanced from {} to {}", - pos, - retry_reader.position() - ); - return Err(net_error::ExpectedEndOfStream); - } - } else { - // couldn't read a full transaction. This is possibly a page ID, whose - // 32 bytes decode to the prefix of a well-formed transaction. - test_debug!("Try to read page ID trailer after ReadError"); - page_id = HttpResponseType::parse_mempool_query_page_id( - pos, - &mut retry_reader, - )?; - } - break; - } - _ => Err(e), - }, - codec_error::DeserializeError(_msg) => { - if expect_eof { - // this should have failed due to EOF - test_debug!("Expected EOF; got DeserializeError '{}'", &_msg); - return Err(net_error::ExpectedEndOfStream); - } - - // failed to parse a transaction. 
This is possibly a page ID. - test_debug!("Try to read page ID trailer after ReadError"); - page_id = - HttpResponseType::parse_mempool_query_page_id(pos, &mut retry_reader)?; - - // do one more pass to make sure we're actually end-of-stream. - // otherwise, the stream itself was corrupt, since any 32 bytes is a valid - // txid and the presence of more bytes means that we simply got a bad tx - // that we couldn't decode. - expect_eof = true; - Ok(()) - } - _ => Err(e), - }, - }?; - } - - Ok((txs, page_id)) - } - - fn parse_post_mempool_query( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let (txs, page_id) = HttpResponseType::decode_tx_stream(fd, len_hint)?; - Ok(HttpResponseType::MemPoolTxs( - HttpResponseMetadata::from_preamble(request_version, preamble), - page_id, - txs, - )) - } - - fn parse_get_stackerdb_metadata( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let slot_metadata = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::StackerDBMetadata( - HttpResponseMetadata::from_preamble(request_version, preamble), - slot_metadata, - )) - } - - fn parse_get_stackerdb_chunk( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let chunk = - HttpResponseType::parse_bytes(preamble, fd, len_hint, STACKERDB_MAX_CHUNK_SIZE as u64)?; - Ok(HttpResponseType::StackerDBChunk( - HttpResponseMetadata::from_preamble(request_version, preamble), - chunk, - )) - } - - fn parse_stackerdb_chunk_response( - _protocol: &mut StacksHttp, - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - fd: &mut R, - len_hint: Option, - ) -> Result { - let slot_ack = - HttpResponseType::parse_json(preamble, fd, len_hint, MAX_MESSAGE_LEN as u64)?; - Ok(HttpResponseType::StackerDBChunkAck( - HttpResponseMetadata::from_preamble(request_version, preamble), - slot_ack, - )) - } - - fn error_reason(code: u16) -> &'static str { - match code { - 400 => "Bad Request", - 401 => "Unauthorized", - 402 => "Payment Required", - 403 => "Forbidden", - 404 => "Not Found", - 500 => "Internal Server Error", - 503 => "Service Temporarily Unavailable", - _ => "Error", - } - } - - fn error_response( - &self, - fd: &mut W, - code: u16, - message: &str, - ) -> Result<(), net_error> { - let md = self.metadata(); - HttpResponsePreamble::new_serialized( - fd, - code, - HttpResponseType::error_reason(code), - Some(message.len() as u32), - &HttpContentType::Text, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - fd.write_all(message.as_bytes()) - .map_err(net_error::WriteError)?; - Ok(()) - } - - pub fn metadata(&self) -> &HttpResponseMetadata { - match *self { - HttpResponseType::PeerInfo(ref md, _) => md, - HttpResponseType::PoxInfo(ref md, _) => md, - HttpResponseType::Neighbors(ref md, _) => md, - HttpResponseType::HeaderStream(ref md) => md, - HttpResponseType::Headers(ref md, _) => md, - HttpResponseType::Block(ref md, _) => md, - HttpResponseType::BlockStream(ref md) => md, - HttpResponseType::Microblocks(ref md, _) => md, - HttpResponseType::MicroblockStream(ref md) => md, - HttpResponseType::TransactionID(ref md, _) => md, - HttpResponseType::StacksBlockAccepted(ref md, ..) 
=> md, - HttpResponseType::MicroblockHash(ref md, _) => md, - HttpResponseType::TokenTransferCost(ref md, _) => md, - HttpResponseType::GetDataVar(ref md, _) => md, - HttpResponseType::GetConstantVal(ref md, _) => md, - HttpResponseType::GetMapEntry(ref md, _) => md, - HttpResponseType::GetAccount(ref md, _) => md, - HttpResponseType::GetContractABI(ref md, _) => md, - HttpResponseType::GetContractSrc(ref md, _) => md, - HttpResponseType::GetIsTraitImplemented(ref md, _) => md, - HttpResponseType::CallReadOnlyFunction(ref md, _) => md, - HttpResponseType::UnconfirmedTransaction(ref md, _) => md, - HttpResponseType::GetAttachment(ref md, _) => md, - HttpResponseType::GetAttachmentsInv(ref md, _) => md, - HttpResponseType::MemPoolTxStream(ref md) => md, - HttpResponseType::MemPoolTxs(ref md, ..) => md, - HttpResponseType::OptionsPreflight(ref md) => md, - HttpResponseType::TransactionFeeEstimation(ref md, _) => md, - HttpResponseType::StackerDBMetadata(ref md, ..) => md, - HttpResponseType::StackerDBChunk(ref md, ..) => md, - HttpResponseType::StackerDBChunkAck(ref md, ..) => md, - // errors - HttpResponseType::BadRequestJSON(ref md, _) => md, - HttpResponseType::BadRequest(ref md, _) => md, - HttpResponseType::Unauthorized(ref md, _) => md, - HttpResponseType::PaymentRequired(ref md, _) => md, - HttpResponseType::Forbidden(ref md, _) => md, - HttpResponseType::NotFound(ref md, _) => md, - HttpResponseType::ServerError(ref md, _) => md, - HttpResponseType::ServiceUnavailable(ref md, _) => md, - HttpResponseType::Error(ref md, _, _) => md, - } - } - - fn send_bytestream( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - message: &T, - ) -> Result<(), codec_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - write_next(fd, message) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - write_next(&mut encoder, message)?; - encoder.flush().map_err(codec_error::WriteError)?; - Ok(()) - } - } - - fn send_text( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - text: &[u8], - ) -> Result<(), net_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - fd.write_all(text).map_err(net_error::WriteError) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - encoder.write_all(text).map_err(net_error::WriteError)?; - encoder.flush().map_err(net_error::WriteError)?; - Ok(()) - } - } - - fn send_json( - protocol: &mut StacksHttp, - md: &HttpResponseMetadata, - fd: &mut W, - message: &T, - ) -> Result<(), net_error> { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - serde_json::to_writer(fd, message) - .map_err(|e| net_error::SerializeError(format!("Failed to send as JSON: {:?}", &e))) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - serde_json::to_writer(&mut encoder, message).map_err(|e| { - net_error::SerializeError(format!("Failed to send as chunk-encoded JSON: 
{:?}", &e)) - })?; - encoder.flush().map_err(net_error::WriteError)?; - Ok(()) - } - } - - pub fn send(&self, protocol: &mut StacksHttp, fd: &mut W) -> Result<(), net_error> { - match *self { - HttpResponseType::GetAccount(ref md, ref account_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, account_data)?; - } - HttpResponseType::TransactionFeeEstimation(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetContractABI(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetContractSrc(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetIsTraitImplemented(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::TokenTransferCost(ref md, ref cost) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, cost)?; - } - HttpResponseType::CallReadOnlyFunction(ref md, ref data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::GetDataVar(ref md, ref var_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, var_data)?; - } - HttpResponseType::GetConstantVal(ref md, ref constant_val) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, constant_val)?; - } - HttpResponseType::GetMapEntry(ref md, ref map_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, map_data)?; - } - HttpResponseType::PeerInfo(ref md, ref peer_info) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, peer_info)?; - } - HttpResponseType::PoxInfo(ref md, ref pox_info) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, pox_info)?; - } - HttpResponseType::Neighbors(ref md, ref neighbor_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, neighbor_data)?; - } - HttpResponseType::GetAttachment(ref md, ref zonefile_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, zonefile_data)?; - } - HttpResponseType::GetAttachmentsInv(ref md, ref zonefile_data) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, zonefile_data)?; - } - HttpResponseType::Headers(ref md, ref headers) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, headers)?; - } - HttpResponseType::HeaderStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the headers data itself. 
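
The send_bytestream/send_text/send_json helpers above all follow one rule: write the body verbatim when the response metadata carries an explicit content length, otherwise wrap it in HTTP/1.1 chunked transfer encoding. A rough sketch of that rule over a plain std::io::Write sink, standing in for the node's HttpChunkedTransferWriter (the real writer splits large bodies into chunk_size-sized pieces; this toy emits a single chunk):

use std::io::{self, Write};

fn write_body<W: Write>(fd: &mut W, body: &[u8], content_length: Option<u32>) -> io::Result<()> {
    match content_length {
        // explicit Content-Length: the body goes out exactly as given
        Some(_) => fd.write_all(body),
        // no Content-Length: frame the body as one chunk plus the zero-length terminator
        None => {
            write!(fd, "{:x}\r\n", body.len())?;
            fd.write_all(body)?;
            fd.write_all(b"\r\n0\r\n\r\n")
        }
    }
}
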
- HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::Block(ref md, ref block) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_bytestream(protocol, md, fd, block)?; - } - HttpResponseType::BlockStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the block data itself. - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::Microblocks(ref md, ref microblocks) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_bytestream(protocol, md, fd, microblocks)?; - } - HttpResponseType::MicroblockStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the microblock data itself. - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::TransactionID(ref md, ref txid) => { - let txid_bytes = txid.to_hex(); - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &txid_bytes)?; - } - HttpResponseType::StacksBlockAccepted(ref md, ref stacks_block_id, ref accepted) => { - let accepted_data = StacksBlockAcceptedData { - stacks_block_id: stacks_block_id.clone(), - accepted: *accepted, - }; - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &accepted_data)?; - } - HttpResponseType::MicroblockHash(ref md, ref mblock_hash) => { - let mblock_bytes = mblock_hash.to_hex(); - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, &mblock_bytes)?; - } - HttpResponseType::UnconfirmedTransaction(ref md, ref unconfirmed_status) => { - HttpResponsePreamble::ok_JSON_from_md(fd, md)?; - HttpResponseType::send_json(protocol, md, fd, unconfirmed_status)?; - } - HttpResponseType::MemPoolTxStream(ref md) => { - // only send the preamble. The caller will need to figure out how to send along - // the tx data itself. 
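
The MemPoolTxStream arm here and the MemPoolTxs arm that follows, together with decode_tx_stream earlier in this diff, describe the /v2/mempool/query body as consensus-encoded transactions written back to back, followed by an optional 32-byte page-ID txid trailer, with no SIP-003 length prefix. A hypothetical framing helper illustrating that layout (the byte slices stand in for real StacksTransaction encodings):

fn mempool_stream_frame(encoded_txs: &[Vec<u8>], page_id: Option<[u8; 32]>) -> Vec<u8> {
    let mut out = Vec::new();
    // consensus-encoded transactions, back to back, no length prefix
    for tx_bytes in encoded_txs {
        out.extend_from_slice(tx_bytes);
    }
    // optional trailer: the 32-byte page-ID txid for the next query
    if let Some(pid) = page_id {
        out.extend_from_slice(&pid);
    }
    out
}
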
- HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - } - HttpResponseType::MemPoolTxs(ref md, ref page_id, ref txs) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - match page_id { - Some(txid) => { - if md.content_length.is_some() { - // have explicit content-length, so we can send as-is - write_next(fd, txs)?; - write_next(fd, txid)?; - Ok(()) - } else { - // no content-length, so send as chunk-encoded - let mut write_state = - HttpChunkedTransferWriterState::new(protocol.chunk_size as usize); - let mut encoder = - HttpChunkedTransferWriter::from_writer_state(fd, &mut write_state); - write_next(&mut encoder, txs)?; - write_next(&mut encoder, txid)?; - encoder.flush().map_err(codec_error::WriteError)?; - Ok(()) - } - } - None => HttpResponseType::send_bytestream(protocol, md, fd, txs), - }?; - } - HttpResponseType::StackerDBMetadata(ref md, ref slot_metadata) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, slot_metadata)?; - } - HttpResponseType::StackerDBChunk(ref md, ref chunk, ..) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::Bytes, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_text(protocol, md, fd, chunk)?; - } - HttpResponseType::StackerDBChunkAck(ref md, ref ack_data) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, ack_data)?; - } - HttpResponseType::OptionsPreflight(ref md) => { - HttpResponsePreamble::new_serialized( - fd, - 200, - "OK", - None, - &HttpContentType::Text, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_text(protocol, md, fd, "".as_bytes())?; - } - HttpResponseType::BadRequestJSON(ref md, ref data) => { - HttpResponsePreamble::new_serialized( - fd, - 400, - HttpResponseType::error_reason(400), - md.content_length.clone(), - &HttpContentType::JSON, - md.request_id, - |ref mut fd| keep_alive_headers(fd, md), - )?; - HttpResponseType::send_json(protocol, md, fd, data)?; - } - HttpResponseType::BadRequest(_, ref msg) => self.error_response(fd, 400, msg)?, - HttpResponseType::Unauthorized(_, ref msg) => self.error_response(fd, 401, msg)?, - HttpResponseType::PaymentRequired(_, ref msg) => self.error_response(fd, 402, msg)?, - HttpResponseType::Forbidden(_, ref msg) => self.error_response(fd, 403, msg)?, - HttpResponseType::NotFound(_, ref msg) => self.error_response(fd, 404, msg)?, - HttpResponseType::ServerError(_, ref msg) => self.error_response(fd, 500, msg)?, - HttpResponseType::ServiceUnavailable(_, ref msg) => { - self.error_response(fd, 503, msg)? - } - HttpResponseType::Error(_, ref error_code, ref msg) => { - self.error_response(fd, *error_code, msg)? 
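
For reference, the StackerDB URLs assembled by request_path earlier in this diff come in two shapes for chunk reads (with and without an explicit slot version) plus a /chunks suffix for writes. An illustrative helper, not part of the patch, mirroring those format strings:

fn stackerdb_chunk_path(addr: &str, contract: &str, slot_id: u32, version: Option<u32>) -> String {
    match version {
        // pin a specific slot version
        Some(v) => format!("/v2/stackerdb/{}/{}/{}/{}", addr, contract, slot_id, v),
        // fetch the latest version of the slot
        None => format!("/v2/stackerdb/{}/{}/{}", addr, contract, slot_id),
    }
}

fn stackerdb_post_chunks_path(addr: &str, contract: &str) -> String {
    format!("/v2/stackerdb/{}/{}/chunks", addr, contract)
}
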
- } - }; - Ok(()) - } -} - -impl StacksMessageCodec for StacksHttpPreamble { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - match *self { - StacksHttpPreamble::Request(ref req) => req.consensus_serialize(fd), - StacksHttpPreamble::Response(ref res) => res.consensus_serialize(fd), - } - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let mut retry_fd = RetryReader::new(fd); - - // the byte stream can decode to a http request or a http response, but not both. - match HttpRequestPreamble::consensus_deserialize(&mut retry_fd) { - Ok(request) => Ok(StacksHttpPreamble::Request(request)), - Err(e_request) => { - // maybe a http response? - retry_fd.set_position(0); - match HttpResponsePreamble::consensus_deserialize(&mut retry_fd) { - Ok(response) => Ok(StacksHttpPreamble::Response(response)), - Err(e) => { - // underflow? - match (e_request, e) { - (codec_error::ReadError(ref ioe1), codec_error::ReadError(ref ioe2)) => { - if ioe1.kind() == io::ErrorKind::UnexpectedEof && ioe2.kind() == io::ErrorKind::UnexpectedEof { - // out of bytes - Err(codec_error::UnderflowError("Not enough bytes to form a HTTP request or response".to_string())) - } - else { - Err(codec_error::DeserializeError(format!("Neither a HTTP request ({:?}) or HTTP response ({:?})", ioe1, ioe2))) - } - }, - (e_req, e_res) => Err(codec_error::DeserializeError(format!("Failed to decode HTTP request or HTTP response (request error: {:?}; response error: {:?})", &e_req, &e_res))) - } - } - } - } - } - } -} - -impl MessageSequence for StacksHttpMessage { - fn request_id(&self) -> u32 { - // there is at most one in-flight HTTP request, as far as a Connection
is concerned - HTTP_REQUEST_ID_RESERVED - } - - fn get_message_name(&self) -> &'static str { - match *self { - StacksHttpMessage::Request(ref req) => match req { - HttpRequestType::GetInfo(_) => "HTTP(GetInfo)", - HttpRequestType::GetPoxInfo(_, _) => "HTTP(GetPoxInfo)", - HttpRequestType::GetNeighbors(_) => "HTTP(GetNeighbors)", - HttpRequestType::GetHeaders(..) => "HTTP(GetHeaders)", - HttpRequestType::GetBlock(_, _) => "HTTP(GetBlock)", - HttpRequestType::GetMicroblocksIndexed(_, _) => "HTTP(GetMicroblocksIndexed)", - HttpRequestType::GetMicroblocksConfirmed(_, _) => "HTTP(GetMicroblocksConfirmed)", - HttpRequestType::GetMicroblocksUnconfirmed(_, _, _) => { - "HTTP(GetMicroblocksUnconfirmed)" - } - HttpRequestType::GetTransactionUnconfirmed(_, _) => { - "HTTP(GetTransactionUnconfirmed)" - } - HttpRequestType::PostTransaction(_, _, _) => "HTTP(PostTransaction)", - HttpRequestType::PostBlock(..) => "HTTP(PostBlock)", - HttpRequestType::PostMicroblock(..) => "HTTP(PostMicroblock)", - HttpRequestType::GetAccount(..) => "HTTP(GetAccount)", - HttpRequestType::GetDataVar(..) => "HTTP(GetDataVar)", - HttpRequestType::GetConstantVal(..) => "HTTP(GetConstantVal)", - HttpRequestType::GetMapEntry(..) => "HTTP(GetMapEntry)", - HttpRequestType::GetTransferCost(_) => "HTTP(GetTransferCost)", - HttpRequestType::GetContractABI(..) => "HTTP(GetContractABI)", - HttpRequestType::GetContractSrc(..) => "HTTP(GetContractSrc)", - HttpRequestType::GetIsTraitImplemented(..) => "HTTP(GetIsTraitImplemented)", - HttpRequestType::CallReadOnlyFunction(..) => "HTTP(CallReadOnlyFunction)", - HttpRequestType::GetAttachment(..) => "HTTP(GetAttachment)", - HttpRequestType::GetAttachmentsInv(..) => "HTTP(GetAttachmentsInv)", - HttpRequestType::MemPoolQuery(..) => "HTTP(MemPoolQuery)", - HttpRequestType::OptionsPreflight(..) => "HTTP(OptionsPreflight)", - HttpRequestType::GetStackerDBMetadata(..) => "HTTP(GetStackerDBMetadata)", - HttpRequestType::GetStackerDBChunk(..) => "HTTP(GetStackerDBChunk)", - HttpRequestType::PostStackerDBChunk(..) => "HTTP(PostStackerDBChunk)", - HttpRequestType::ClientError(..) => "HTTP(ClientError)", - HttpRequestType::FeeRateEstimate(_, _, _) => "HTTP(FeeRateEstimate)", - }, - StacksHttpMessage::Response(ref res) => match res { - HttpResponseType::TokenTransferCost(_, _) => "HTTP(TokenTransferCost)", - HttpResponseType::GetDataVar(_, _) => "HTTP(GetDataVar)", - HttpResponseType::GetConstantVal(..) => "HTTP(GetConstantVal)", - HttpResponseType::GetMapEntry(_, _) => "HTTP(GetMapEntry)", - HttpResponseType::GetAccount(_, _) => "HTTP(GetAccount)", - HttpResponseType::GetContractABI(..) => "HTTP(GetContractABI)", - HttpResponseType::GetContractSrc(..) => "HTTP(GetContractSrc)", - HttpResponseType::GetIsTraitImplemented(..) => "HTTP(GetIsTraitImplemented)", - HttpResponseType::CallReadOnlyFunction(..) => "HTTP(CallReadOnlyFunction)", - HttpResponseType::GetAttachment(_, _) => "HTTP(GetAttachment)", - HttpResponseType::GetAttachmentsInv(_, _) => "HTTP(GetAttachmentsInv)", - HttpResponseType::PeerInfo(_, _) => "HTTP(PeerInfo)", - HttpResponseType::PoxInfo(_, _) => "HTTP(PeerInfo)", - HttpResponseType::Neighbors(_, _) => "HTTP(Neighbors)", - HttpResponseType::Headers(..) => "HTTP(Headers)", - HttpResponseType::HeaderStream(..) 
=> "HTTP(HeaderStream)", - HttpResponseType::Block(_, _) => "HTTP(Block)", - HttpResponseType::BlockStream(_) => "HTTP(BlockStream)", - HttpResponseType::Microblocks(_, _) => "HTTP(Microblocks)", - HttpResponseType::MicroblockStream(_) => "HTTP(MicroblockStream)", - HttpResponseType::TransactionID(_, _) => "HTTP(Transaction)", - HttpResponseType::StacksBlockAccepted(..) => "HTTP(StacksBlockAccepted)", - HttpResponseType::MicroblockHash(_, _) => "HTTP(MicroblockHash)", - HttpResponseType::UnconfirmedTransaction(_, _) => "HTTP(UnconfirmedTransaction)", - HttpResponseType::MemPoolTxStream(..) => "HTTP(MemPoolTxStream)", - HttpResponseType::MemPoolTxs(..) => "HTTP(MemPoolTxs)", - HttpResponseType::StackerDBMetadata(..) => "HTTP(StackerDBMetadata)", - HttpResponseType::StackerDBChunk(..) => "HTTP(StackerDBChunk)", - HttpResponseType::StackerDBChunkAck(..) => "HTTP(StackerDBChunkAck)", - HttpResponseType::OptionsPreflight(_) => "HTTP(OptionsPreflight)", - HttpResponseType::BadRequestJSON(..) | HttpResponseType::BadRequest(..) => { - "HTTP(400)" - } - HttpResponseType::Unauthorized(_, _) => "HTTP(401)", - HttpResponseType::PaymentRequired(_, _) => "HTTP(402)", - HttpResponseType::Forbidden(_, _) => "HTTP(403)", - HttpResponseType::NotFound(_, _) => "HTTP(404)", - HttpResponseType::ServerError(_, _) => "HTTP(500)", - HttpResponseType::ServiceUnavailable(_, _) => "HTTP(503)", - HttpResponseType::Error(_, _, _) => "HTTP(other)", - HttpResponseType::TransactionFeeEstimation(_, _) => { - "HTTP(TransactionFeeEstimation)" - } - }, - } - } -} - -/// A partially-decoded, streamed HTTP message (response) being received. -/// Internally used by StacksHttp to keep track of chunk-decoding state. -#[derive(Debug, Clone, PartialEq)] -struct HttpRecvStream { - state: HttpChunkedTransferReaderState, - data: Vec, - total_consumed: usize, // number of *encoded* bytes consumed -} - -impl HttpRecvStream { - pub fn new(max_size: u64) -> HttpRecvStream { - HttpRecvStream { - state: HttpChunkedTransferReaderState::new(max_size), - data: vec![], - total_consumed: 0, - } - } - - /// Feed data into our chunked transfer reader state. If we finish reading a stream, return - /// the decoded bytes (as Some(Vec) and the total number of encoded bytes consumed). - /// Always returns the number of bytes consumed. - pub fn consume_data( - &mut self, - fd: &mut R, - ) -> Result<(Option<(Vec, usize)>, usize), net_error> { - let mut consumed = 0; - let mut blocked = false; - while !blocked { - let mut decoded_buf = vec![0u8; 8192]; - let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { - Ok((0, num_consumed)) => { - trace!( - "consume_data blocked on 0 decoded bytes ({} consumed)", - num_consumed - ); - blocked = true; - (0, num_consumed) - } - Ok((num_read, num_consumed)) => (num_read, num_consumed), - Err(e) => match e.kind() { - io::ErrorKind::WouldBlock | io::ErrorKind::TimedOut => { - trace!("consume_data blocked on read error"); - blocked = true; - (0, 0) - } - _ => { - return Err(net_error::ReadError(e)); - } - }, - }; - - consumed += consumed_pass; - if read_pass > 0 { - self.data.extend_from_slice(&decoded_buf[0..read_pass]); - } - } - - self.total_consumed += consumed; - - // did we get a message? 
- if self.state.is_eof() { - // reset - let message_data = mem::replace(&mut self.data, vec![]); - let total_consumed = self.total_consumed; - - self.state = HttpChunkedTransferReaderState::new(self.state.max_size); - self.total_consumed = 0; - - Ok((Some((message_data, total_consumed)), consumed)) - } else { - Ok((None, consumed)) - } - } -} - -/// Information about an in-flight request -#[derive(Debug, Clone, PartialEq)] -struct HttpReplyData { - request_id: u32, - stream: HttpRecvStream, -} - -/// Stacks HTTP implementation, for bufferring up data. -/// One of these exists per Connection. -/// There can be at most one HTTP request in-flight (i.e. we don't do pipelining) -#[derive(Debug, Clone, PartialEq)] -pub struct StacksHttp { - /// Address of peer - peer_addr: SocketAddr, - /// Version of client - request_version: Option, - /// Path we requested - request_path: Option, - /// Incoming reply - reply: Option, - /// Size of HTTP chunks to write - chunk_size: usize, - /// Maximum size of call arguments - pub maximum_call_argument_size: u32, -} - -impl StacksHttp { - pub fn new(peer_addr: SocketAddr) -> StacksHttp { - StacksHttp { - peer_addr, - reply: None, - request_version: None, - request_path: None, - chunk_size: 8192, - maximum_call_argument_size: 20 * BOUND_VALUE_SERIALIZATION_HEX, - } - } - - pub fn set_chunk_size(&mut self, size: usize) -> () { - self.chunk_size = size; - } - - pub fn num_pending(&self) -> usize { - if self.reply.is_some() { - 1 - } else { - 0 - } - } - - pub fn has_pending_reply(&self) -> bool { - self.reply.is_some() - } - - pub fn set_pending(&mut self, preamble: &HttpResponsePreamble) -> bool { - if self.reply.is_some() { - // already pending - return false; - } - self.reply = Some(HttpReplyData { - request_id: preamble.request_id, - stream: HttpRecvStream::new(MAX_MESSAGE_LEN as u64), - }); - true - } - - pub fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), net_error> { - // if we already have a pending message, then this preamble cannot be processed (indicates an un-compliant client) - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // request path must have been set - if self.request_path.is_none() { - return Err(net_error::DeserializeError( - "Possible bug: did not set the request path".to_string(), - )); - } - - if http_response_preamble.is_chunked() { - // will stream this. Make sure we're not doing so already (no collisions - // allowed on in-flight request IDs!) - if self.has_pending_reply() { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - - // mark as pending -- we can stream this - if !self.set_pending(http_response_preamble) { - test_debug!("Have pending reply already"); - return Err(net_error::InProgress); - } - } - } - _ => {} - } - Ok(()) - } - - pub fn begin_request(&mut self, client_version: HttpVersion, request_path: String) -> () { - self.request_version = Some(client_version); - self.request_path = Some(request_path); - } - - pub fn reset(&mut self) -> () { - self.request_version = None; - self.request_path = None; - self.reply = None; - } - - /// Used for processing chunk-encoded streams. - /// Given the preamble and a Read, stream the bytes into a chunk-decoder. Return the decoded - /// bytes if we decode an entire stream. Always return the number of bytes consumed. 
- /// Returns Ok((Some(request path, decoded bytes we got, total number of encoded bytes), number of bytes gotten in this call)) - pub fn consume_data( - &mut self, - preamble: &HttpResponsePreamble, - fd: &mut R, - ) -> Result<(Option<(HttpVersion, String, Vec, usize)>, usize), net_error> { - assert!(preamble.is_chunked()); - assert!(self.reply.is_some()); - assert!(self.request_path.is_some()); - assert!(self.request_version.is_some()); - - let mut finished = false; - let res = match self.reply { - Some(ref mut reply) => { - assert_eq!(reply.request_id, preamble.request_id); - match reply.stream.consume_data(fd) { - Ok(res) => { - match res { - (None, sz) => Ok((None, sz)), - (Some((byte_vec, bytes_total)), sz) => { - // done receiving - finished = true; - Ok(( - Some(( - self.request_version.clone().unwrap(), - self.request_path.clone().unwrap(), - byte_vec, - bytes_total, - )), - sz, - )) - } - } - } - Err(e) => { - // broken stream - finished = true; - Err(e) - } - } - } - None => { - unreachable!(); - } - }; - - if finished { - // if we fetch the whole message, or encounter an error, then we're done -- we can free - // up this stream. - self.reset(); - } - res - } - - /// Given a HTTP request, serialize it out - #[cfg(test)] - pub fn serialize_request(req: &HttpRequestType) -> Result, net_error> { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let mut ret = vec![]; - req.send(&mut http, &mut ret)?; - Ok(ret) - } - - /// Given a fully-formed single HTTP response, parse it (used by clients). - #[cfg(test)] - pub fn parse_response( - request_path: &str, - response_buf: &[u8], - ) -> Result { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.reset(); - http.begin_request(HttpVersion::Http11, request_path.to_string()); - - let (preamble, message_offset) = http.read_preamble(response_buf)?; - let is_chunked = match preamble { - StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), - _ => { - return Err(net_error::DeserializeError( - "Invalid HTTP message: did not get a Response preamble".to_string(), - )); - } - }; - - let mut message_bytes = &response_buf[message_offset..]; - - if is_chunked { - match http.stream_payload(&preamble, &mut message_bytes) { - Ok((Some((message, _)), _)) => Ok(message), - Ok((None, _)) => Err(net_error::UnderflowError( - "Not enough bytes to form a streamed HTTP response".to_string(), - )), - Err(e) => Err(e), - } - } else { - let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; - Ok(message) - } - } -} - -impl ProtocolFamily for StacksHttp { - type Preamble = StacksHttpPreamble; - type Message = StacksHttpMessage; - - /// how big can a preamble get? - fn preamble_size_hint(&mut self) -> usize { - HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize - } - - /// how big is this message? Might not know if we're dealing with chunked encoding. 
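// --- Illustrative sketch (not part of the patch, and not the crate's
// HttpChunkedTransferReaderState API): responses that arrive without a
// Content-Length, as handled by consume_data() above, are framed with HTTP/1.1
// chunked transfer-encoding. A minimal standalone decoder over a complete
// buffer looks like this; the real reader additionally works incrementally and
// tracks how many encoded bytes it consumed.
use std::io::{self, BufRead, BufReader, Read};

/// Decode a chunked body: repeated "<hex length>\r\n<payload>\r\n" frames,
/// terminated by a zero-length chunk.
fn decode_chunked(encoded: &[u8], max_size: usize) -> io::Result<Vec<u8>> {
    let mut rd = BufReader::new(encoded);
    let mut out = Vec::new();
    loop {
        // chunk-size line, e.g. "37\r\n"
        let mut size_line = String::new();
        rd.read_line(&mut size_line)?;
        let len = usize::from_str_radix(size_line.trim(), 16)
            .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "bad chunk size"))?;
        if len == 0 {
            // terminal chunk; a real decoder would also consume optional trailers
            return Ok(out);
        }
        if out.len() + len > max_size {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "body too big"));
        }
        // chunk payload plus its trailing CRLF
        let mut chunk = vec![0u8; len + 2];
        rd.read_exact(&mut chunk)?;
        if &chunk[len..] != b"\r\n" {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid chunk trailer"));
        }
        out.extend_from_slice(&chunk[..len]);
    }
}

fn main() -> io::Result<()> {
    let body = decode_chunked(b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n", 1 << 20)?;
    assert_eq!(body, b"hello world".to_vec());
    Ok(())
}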
- fn payload_len(&mut self, preamble: &StacksHttpPreamble) -> Option { - match *preamble { - StacksHttpPreamble::Request(ref http_request_preamble) => { - Some(http_request_preamble.get_content_length() as usize) - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - match http_response_preamble.content_length { - Some(len) => Some(len as usize), - None => None, - } - } - } - } - - /// StacksHttpMessage deals with HttpRequestPreambles and HttpResponsePreambles - fn read_preamble(&mut self, buf: &[u8]) -> Result<(StacksHttpPreamble, usize), net_error> { - let mut cursor = io::Cursor::new(buf); - - let preamble = { - let mut rd = BoundReader::from_reader(&mut cursor, 4096); - let preamble: StacksHttpPreamble = read_next(&mut rd)?; - preamble - }; - - let preamble_len = cursor.position() as usize; - - self.set_preamble(&preamble)?; - - Ok((preamble, preamble_len)) - } - - /// Stream a payload of unknown length. Only gets called if payload_len() returns None. - /// Returns the message if we get enough data to form one. - /// Always returns the number of bytes consumed. - fn stream_payload( - &mut self, - preamble: &StacksHttpPreamble, - fd: &mut R, - ) -> Result<(Option<(StacksHttpMessage, usize)>, usize), net_error> { - assert!(self.payload_len(preamble).is_none()); - match preamble { - StacksHttpPreamble::Request(_) => { - // HTTP requests can't be chunk-encoded, so this should never be reached - unreachable!() - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(http_response_preamble.is_chunked()); - assert!(self.request_path.is_some()); - - // message of unknown length. Buffer up and maybe we can parse it. - let (message_bytes_opt, num_read) = - self.consume_data(http_response_preamble, fd).map_err(|e| { - self.reset(); - e - })?; - - match message_bytes_opt { - Some((request_version, request_path, message_bytes, total_bytes_consumed)) => { - // can parse! - test_debug!( - "read http response payload of {} bytes (just buffered {}) for {}", - message_bytes.len(), - num_read, - &request_path - ); - - // we now know the content-length, so pass it into the parser. - let len_hint = message_bytes.len(); - let parse_res = HttpResponseType::parse( - self, - request_version, - http_response_preamble, - request_path, - &mut &message_bytes[..], - Some(len_hint), - ); - - // done parsing - self.reset(); - match parse_res { - Ok(data_response) => Ok(( - Some(( - StacksHttpMessage::Response(data_response), - total_bytes_consumed, - )), - num_read, - )), - Err(e) => { - info!("Failed to parse HTTP response: {:?}", &e); - Err(e) - } - } - } - None => { - // need more data - trace!( - "did not read http response payload, but buffered {}", - num_read - ); - Ok((None, num_read)) - } - } - } - } - } - - /// Parse a payload of known length. - /// Only gets called if payload_len() returns Some(...) 
- fn read_payload( - &mut self, - preamble: &StacksHttpPreamble, - buf: &[u8], - ) -> Result<(StacksHttpMessage, usize), net_error> { - match preamble { - StacksHttpPreamble::Request(ref http_request_preamble) => { - // all requests have a known length - let len = http_request_preamble.get_content_length() as usize; - assert!(len <= buf.len(), "{} > {}", len, buf.len()); - - trace!("read http request payload of {} bytes", len); - - let mut cursor = io::Cursor::new(buf); - match HttpRequestType::parse(self, http_request_preamble, &mut cursor) { - Ok(data_request) => Ok(( - StacksHttpMessage::Request(data_request), - cursor.position() as usize, - )), - Err(e) => { - info!("Failed to parse HTTP request: {:?}", &e); - if let net_error::ClientError(client_err) = e { - let req = HttpRequestType::ClientError( - HttpRequestMetadata::from_preamble(http_request_preamble), - client_err, - ); - // consume any remaining HTTP request content by returning bytes read = len - Ok((StacksHttpMessage::Request(req), len)) - } else { - Err(e) - } - } - } - } - StacksHttpPreamble::Response(ref http_response_preamble) => { - assert!(!http_response_preamble.is_chunked()); - assert!(self.request_path.is_some()); - assert!(self.request_version.is_some()); - - let request_path = self.request_path.take().unwrap(); - let request_version = self.request_version.take().unwrap(); - - // message of known length - test_debug!( - "read http response payload of {} bytes for {}", - buf.len(), - &request_path - ); - - let mut cursor = io::Cursor::new(buf); - match HttpResponseType::parse( - self, - request_version, - http_response_preamble, - request_path, - &mut cursor, - None, - ) { - Ok(data_response) => Ok(( - StacksHttpMessage::Response(data_response), - cursor.position() as usize, - )), - Err(e) => Err(e), - } - } - } - } - - fn verify_payload_bytes( - &mut self, - _key: &StacksPublicKey, - _preamble: &StacksHttpPreamble, - _bytes: &[u8], - ) -> Result<(), net_error> { - // not defined for HTTP messages, but maybe we could add a signature header at some point - // in the future if needed. 
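// --- Illustrative sketch (hypothetical names, not the crate's types):
// read_preamble() above hands the buffered bytes to
// StacksHttpPreamble::consensus_deserialize, which tries to parse a request
// preamble first and, if that fails, rewinds and retries as a response
// preamble, reporting underflow only when both attempts ran out of bytes.
// The same rewind-and-retry pattern in miniature:
use std::io::{self, Cursor, Read, Seek, SeekFrom};

#[derive(Debug)]
enum Frame {
    Request(u8),
    Response(u8),
}

fn read_tagged(fd: &mut impl Read, expect_tag: u8) -> io::Result<u8> {
    let mut buf = [0u8; 2];
    fd.read_exact(&mut buf)?; // UnexpectedEof if the stream is too short
    if buf[0] == expect_tag {
        Ok(buf[1])
    } else {
        Err(io::Error::new(io::ErrorKind::InvalidData, "wrong tag"))
    }
}

fn decode(bytes: &[u8]) -> io::Result<Frame> {
    let mut cursor = Cursor::new(bytes);
    match read_tagged(&mut cursor, b'Q') {
        Ok(v) => Ok(Frame::Request(v)),
        Err(e_req) => {
            // maybe the other message kind? rewind and retry from the start
            cursor.seek(SeekFrom::Start(0))?;
            match read_tagged(&mut cursor, b'S') {
                Ok(v) => Ok(Frame::Response(v)),
                Err(e_res) => {
                    if e_req.kind() == io::ErrorKind::UnexpectedEof
                        && e_res.kind() == io::ErrorKind::UnexpectedEof
                    {
                        // both parses hit end-of-buffer: the caller must buffer more
                        Err(io::Error::new(io::ErrorKind::UnexpectedEof, "underflow"))
                    } else {
                        Err(io::Error::new(io::ErrorKind::InvalidData, "neither kind"))
                    }
                }
            }
        }
    }
}

fn main() {
    assert!(matches!(decode(b"Q\x07"), Ok(Frame::Request(7))));
    assert!(matches!(decode(b"S\x09"), Ok(Frame::Response(9))));
    assert_eq!(decode(b"Q").unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
}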
- Ok(()) - } - - fn write_message( - &mut self, - fd: &mut W, - message: &StacksHttpMessage, - ) -> Result<(), net_error> { - match *message { - StacksHttpMessage::Request(ref req) => { - if self.request_path.is_some() { - test_debug!("Have pending request already"); - return Err(net_error::InProgress); - } - req.send(self, fd)?; - - self.reset(); - self.begin_request(req.metadata().version, req.request_path()); - Ok(()) - } - StacksHttpMessage::Response(ref resp) => resp.send(self, fd), - } - } -} - -#[cfg(test)] -mod test { - use std::error::Error; - - use rand; - use rand::RngCore; - use stacks_common::types::chainstate::StacksAddress; - use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum}; - - use super::*; - use crate::burnchains::Txid; - use crate::chainstate::stacks::db::blocks::test::make_sample_microblock_stream; - use crate::chainstate::stacks::test::make_codec_test_block; - use crate::chainstate::stacks::{ - StacksBlock, StacksMicroblock, StacksPrivateKey, StacksTransaction, TokenTransferMemo, - TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, - }; - use crate::net::codec::test::check_codec_and_corruption; - use crate::net::test::*; - use crate::net::{RPCNeighbor, RPCNeighborsInfo}; - - #[test] - fn test_parse_reserved_header() { - let tests = vec![ - ( - "Content-Length", - "123", - Some(HttpReservedHeader::ContentLength(123)), - ), - ( - "Content-Type", - "text/plain", - Some(HttpReservedHeader::ContentType(HttpContentType::Text)), - ), - ( - "Content-Type", - "application/octet-stream", - Some(HttpReservedHeader::ContentType(HttpContentType::Bytes)), - ), - ( - "Content-Type", - "application/json", - Some(HttpReservedHeader::ContentType(HttpContentType::JSON)), - ), - ( - "X-Request-Id", - "123", - Some(HttpReservedHeader::XRequestID(123)), - ), - ( - "Host", - "foo:123", - Some(HttpReservedHeader::Host(PeerHost::DNS( - "foo".to_string(), - 123, - ))), - ), - ( - "Host", - "1.2.3.4:123", - Some(HttpReservedHeader::Host(PeerHost::IP( - PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x01, 0x02, 0x03, 0x04, - ]), - 123, - ))), - ), - // errors - ("Content-Length", "-1", None), - ("Content-Length", "asdf", None), - ("Content-Length", "4294967296", None), - ("Content-Type", "blargh", None), - ("X-Request-Id", "-1", None), - ("X-Request-Id", "asdf", None), - ("X-Request-Id", "4294967296", None), - ("Unrecognized", "header", None), - ]; - - for (key, value, expected_result) in tests { - let result = HttpReservedHeader::try_from_str(key, value); - assert_eq!(result, expected_result); - } - } - - #[test] - fn test_parse_http_request_preamble_ok() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, true, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("GET /foo HTTP/1.1\r\nConnection: close\r\nHost: localhost:6270\r\n\r\n", - 
HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, false, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, false, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\nConnection: close\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, false, vec!["foo".to_string()], vec!["Bar".to_string()])) - ]; - - for (data, request) in tests.iter() { - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(req.is_ok(), "{:?}", &req); - assert_eq!(req.unwrap(), *request); - - let sreq = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sreq.is_ok(), "{:?}", &sreq); - assert_eq!( - sreq.unwrap(), - StacksHttpPreamble::Request((*request).clone()) - ); - } - } - - #[test] - fn test_parse_http_request_options() { - let data = "OPTIONS /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n"; - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - let preamble = HttpRequestPreamble::from_headers( - HttpVersion::Http11, - "OPTIONS".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - true, - vec![], - vec![], - ); - assert_eq!(req.unwrap(), preamble); - } - - #[test] - fn test_parse_http_request_preamble_case_ok() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\nhOsT: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, true, vec![], vec![])), - ("GET /foo HTTP/1.1\r\ncOnNeCtIoN: cLoSe\r\nhOsT: localhost:6270\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "GET".to_string(), "/foo".to_string(), "localhost".to_string(), 6270, false, vec![], vec![])), - ("POST asdf HTTP/1.1\r\nhOsT: core.blockstack.org\r\nCOnNeCtIoN: kEeP-aLiVE\r\nFoo: Bar\r\n\r\n", - HttpRequestPreamble::from_headers(HttpVersion::Http11, "POST".to_string(), "asdf".to_string(), "core.blockstack.org".to_string(), 80, true, vec!["foo".to_string()], vec!["Bar".to_string()])), - ]; - - for (data, request) in tests.iter() { - let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(req.is_ok(), "{:?}", &req); - assert_eq!(req.unwrap(), *request); - - let sreq = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sreq.is_ok(), "{:?}", &sreq); - assert_eq!( - sreq.unwrap(), - StacksHttpPreamble::Request((*request).clone()) - ); - } - } - - #[test] - fn test_parse_http_request_preamble_err() { - let tests = vec![ - ("GET /foo HTTP/1.1\r\n", "failed to fill whole buffer"), - ("GET /foo HTTP/1.1\r\n\r\n", "Missing Host header"), - ( - "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", - "Missing Host header", - ), - ("GET /foo HTTP/\r\n\r\n", "Failed to parse HTTP request"), - ("GET /foo HTTP/1.1\r\nHost:", "failed to fill whole buffer"), - ( - "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", - "duplicate header", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", - "header value is not ASCII-US", - ), - ( - "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", - "Missing Host header", - ), - ( - "GET /foo HTTP/1.1\r\nHost: 
localhost:8080\r\nConnection: foo\r\n\r\n", - "invalid Connection: header", - ), - ]; - - for (data, errstr) in tests.iter() { - let res = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}'", errstr); - assert!(res.is_err(), "{:?}", &res); - assert!( - res.as_ref().unwrap_err().to_string().find(errstr).is_some(), - "{:?}", - &res - ); - } - } - - #[test] - fn test_parse_stacks_http_preamble_request_err() { - let tests = vec![ - ( - "GET /foo HTTP/1.1\r\n", - "Not enough bytes to form a HTTP request or response", - ), - ( - "GET /foo HTTP/1.1\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost:", - "Not enough bytes to form a HTTP request or response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ( - "GET /foo HTTP/1.1\r\nHost: localhost:8080\r\nConnection: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response", - ), - ]; - - for (data, errstr) in tests.iter() { - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}'", errstr); - assert!(sres.is_err(), "{:?}", &sres); - assert!( - sres.as_ref() - .unwrap_err() - .to_string() - .find(errstr) - .is_some(), - "{:?}", - &sres - ); - } - } - - #[test] - fn test_http_request_preamble_headers() { - let mut req = HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - true, - ); - let req_11 = HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - false, - ); - let req_10 = HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/foo".to_string(), - "localhost".to_string(), - 6270, - false, - ); - - req.add_header("foo".to_string(), "bar".to_string()); - - assert_eq!(req.content_type, None); - req.set_content_type(HttpContentType::JSON); - assert_eq!(req.content_type, Some(HttpContentType::JSON)); - - req.add_header( - "content-type".to_string(), - "application/octet-stream".to_string(), - ); - assert_eq!(req.content_type, Some(HttpContentType::Bytes)); - - let mut bytes = vec![]; - req.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - - test_debug!("headers:\n{}", txt); - - assert!(txt.find("HTTP/1.1").is_some(), "HTTP version is missing"); - assert!( - txt.find("User-Agent: stacks/2.0\r\n").is_some(), - "User-Agnet header is missing" - ); - assert!( - txt.find("Host: localhost:6270\r\n").is_some(), - "Host header is missing" - ); - assert!(txt.find("foo: bar\r\n").is_some(), "foo header is missing"); - assert!( - txt.find("Content-Type: application/octet-stream\r\n") - .is_some(), - "content-type is missing" - ); - assert!(txt.find("Connection: ").is_none()); // not sent if keep_alive is true (for HTTP/1.1) - - let mut bytes_10 = vec![]; - req_10.consensus_serialize(&mut bytes_10).unwrap(); - let txt_10 = String::from_utf8(bytes_10).unwrap(); - - 
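// --- Illustrative sketch (a hypothetical helper with a local stand-in enum,
// not the crate's HttpVersion or preamble code): the assertions here and the
// keep-alive tests later in this module pin down when an explicit Connection
// header is written. HTTP/1.1 defaults to keep-alive, so only
// "Connection: close" is ever emitted; HTTP/1.0 defaults to close, so only
// "Connection: keep-alive" is ever emitted.
#[derive(Clone, Copy)]
enum Version {
    Http10,
    Http11,
}

/// Return the explicit Connection header to write, if the non-default behavior
/// was requested; None means the version's default already applies.
fn connection_header(version: Version, keep_alive: bool) -> Option<&'static str> {
    match (version, keep_alive) {
        (Version::Http11, true) => None, // keep-alive is the HTTP/1.1 default
        (Version::Http11, false) => Some("Connection: close\r\n"),
        (Version::Http10, true) => Some("Connection: keep-alive\r\n"),
        (Version::Http10, false) => None, // close is the HTTP/1.0 default
    }
}

fn main() {
    assert_eq!(connection_header(Version::Http11, true), None);
    assert_eq!(connection_header(Version::Http11, false), Some("Connection: close\r\n"));
    assert_eq!(connection_header(Version::Http10, true), Some("Connection: keep-alive\r\n"));
    assert_eq!(connection_header(Version::Http10, false), None);
}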
assert!(txt_10.find("HTTP/1.0").is_some(), "HTTP version is missing"); - - let mut bytes_11 = vec![]; - req_11.consensus_serialize(&mut bytes_11).unwrap(); - let txt_11 = String::from_utf8(bytes_11).unwrap(); - - assert!(txt_11.find("HTTP/1.1").is_some(), "HTTP version is wrong"); - assert!( - txt_11.find("Connection: close").is_some(), - "Explicit Connection: close is missing" - ); - } - - #[test] - fn test_parse_http_response_preamble_ok() { - let tests = vec![ - ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), true, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), true, Some(456), HttpContentType::JSON, 0, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nX-Request-Id: 123\r\nFoo: Bar\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), true, Some(456), HttpContentType::JSON, 123, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 200 Ok\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nConnection: close\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), false, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nConnection: close\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), false, Some(456), HttpContentType::JSON, 0, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Length: 456\r\nX-Request-Id: 123\r\nFoo: Bar\r\n\r\n", - HttpResponsePreamble::from_headers(400, "Bad Request".to_string(), false, Some(456), HttpContentType::JSON, 123, vec!["foo".to_string()], vec!["Bar".to_string()])), - ("HTTP/1.1 200 Ok\r\nConnection: close\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), false, None, HttpContentType::Bytes, 0, vec![], vec![])), - ]; - - for (data, response) in tests.iter() { - test_debug!("Try parsing:\n{}\n", data); - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(res.is_ok(), "{:?}", &res); - assert_eq!(res.unwrap(), *response); - - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sres.is_ok(), "{:?}", &sres); - assert_eq!( - sres.unwrap(), - StacksHttpPreamble::Response((*response).clone()) - ); - } - } - - #[test] - fn test_parse_http_response_case_ok() { - let tests = vec![ - ("HTTP/1.1 200 OK\r\ncOnTeNt-TyPe: aPpLiCaTiOn/oCtEt-StReAm\r\ncOnTeNt-LeNgTh: 123\r\nx-ReQuEsT-iD: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "OK".to_string(), true, Some(123), HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnTeNt-tYpE: aPpLiCaTiOn/OcTeT-sTrEaM\r\ntRaNsFeR-eNcOdInG: 
cHuNkEd\r\nX-rEqUeSt-Id: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnNeCtIoN: cLoSe\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), false, None, HttpContentType::Bytes, 0, vec![], vec![])), - ("HTTP/1.1 200 Ok\r\ncOnNeCtIoN: kEeP-AlIvE\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", - HttpResponsePreamble::from_headers(200, "Ok".to_string(), true, None, HttpContentType::Bytes, 0, vec![], vec![])), - ]; - - for (data, response) in tests.iter() { - test_debug!("Try parsing:\n{}\n", data); - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(res.is_ok(), "{:?}", &res); - assert_eq!(res.unwrap(), *response); - - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - assert!(sres.is_ok(), "{:?}", &sres); - assert_eq!( - sres.unwrap(), - StacksHttpPreamble::Response((*response).clone()) - ); - } - } - - #[test] - fn test_http_response_preamble_headers() { - let mut res = HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(123), - HttpContentType::JSON, - true, - 123, - ); - assert_eq!(res.request_id, 123); - - res.set_request_id(456); - assert_eq!(res.request_id, 456); - - res.add_header("foo".to_string(), "bar".to_string()); - res.add_CORS_headers(); - - let mut bytes = vec![]; - res.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - assert!( - txt.find("Server: stacks/2.0\r\n").is_some(), - "Server header is missing" - ); - assert!( - txt.find("Content-Length: 123\r\n").is_some(), - "Content-Length is missing" - ); - assert!( - txt.find("Content-Type: application/json\r\n").is_some(), - "Content-Type is missing" - ); - assert!(txt.find("Date: ").is_some(), "Date header is missing"); - assert!(txt.find("foo: bar\r\n").is_some(), "foo header is missing"); - assert!( - txt.find("X-Request-Id: 456\r\n").is_some(), - "X-Request-Id is missing" - ); - assert!( - txt.find("Access-Control-Allow-Origin: *\r\n").is_some(), - "CORS header is missing" - ); - assert!( - txt.find("Access-Control-Allow-Headers: origin, content-type\r\n") - .is_some(), - "CORS header is missing" - ); - assert!( - txt.find("Access-Control-Allow-Methods: POST, GET, OPTIONS\r\n") - .is_some(), - "CORS header is missing" - ); - assert!(txt.find("Connection: ").is_none()); // not sent if keep_alive is true - } - - #[test] - fn test_parse_http_response_preamble_err() { - let tests = vec![ - ("HTTP/1.1 200", - "failed to fill whole buffer"), - ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", - "header value is not ASCII-US"), - ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", - "duplicate header"), - ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", - "Unsupported HTTP content type"), - ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", - "Invalid Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "missing Content-Type, Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", - "missing Content-Type, Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", - "incompatible transfer-encoding and content-length"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", - "invalid Connection: header"), 
- ]; - - for (data, errstr) in tests.iter() { - let res = HttpResponsePreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}', got: {:?}", errstr, &res); - assert!(res.is_err(), "{:?}", &res); - assert!(res.unwrap_err().to_string().find(errstr).is_some()); - } - } - - #[test] - fn test_parse_stacks_http_preamble_response_err() { - let tests = vec![ - ("HTTP/1.1 200", - "Not enough bytes to form a HTTP request or response"), - ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), - ]; - - for (data, errstr) in tests.iter() { - let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); - test_debug!("Expect '{}', got: {:?}", errstr, &sres); - assert!(sres.is_err(), "{:?}", &sres); - assert!( - sres.as_ref() - .unwrap_err() - .to_string() - .find(errstr) - .is_some(), - "{:?}", - &sres - ); - } - } - - fn make_test_transaction() -> StacksTransaction { - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); - let addr = auth.origin().address_testnet(); - let recv_addr = StacksAddress { - version: 1, - bytes: Hash160([0xff; 20]), - }; - - let mut tx_stx_transfer = StacksTransaction::new( - TransactionVersion::Testnet, - auth.clone(), - TransactionPayload::TokenTransfer( - recv_addr.clone().into(), - 123, - TokenTransferMemo([0u8; 34]), - ), - ); - tx_stx_transfer.chain_id = 0x80000000; - tx_stx_transfer.post_condition_mode = TransactionPostConditionMode::Allow; - tx_stx_transfer.set_tx_fee(0); - tx_stx_transfer - } - - #[test] - fn test_http_parse_host_header_value() { - let hosts = vec![ - "1.2.3.4", - "1.2.3.4:5678", - "[1:203:405:607:809:a0b:c0d:e0f]", - "[1:203:405:607:809:a0b:c0d:e0f]:12345", - "www.foo.com", - "www.foo.com:12345", - // invalid IP addresses will be parsed to DNS names - "1.2.3.4.5", - "[1:203:405:607:809:a0b:c0d:e0f:1011]", - // these won't parse at all, since the port is invalid - "1.2.3.4:1234567", - "1.2.3.4.5:1234567", - "[1:203:405:607:809:a0b:c0d:e0f]:1234567", - "[1:203:405:607:809:a0b:c0d:e0f:1011]:1234567", - "www.foo.com:1234567", - ":", - ":123", - ]; - - let peerhosts = vec![ - Some(PeerHost::IP( - PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 1, 2, 3, 4]), - 80, - )), - Some(PeerHost::IP( - PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 1, 2, 3, 4]), - 5678, - )), - Some(PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 80, - )), - Some(PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 12345, - )), - 
Some(PeerHost::DNS("www.foo.com".to_string(), 80)), - Some(PeerHost::DNS("www.foo.com".to_string(), 12345)), - Some(PeerHost::DNS("1.2.3.4.5".to_string(), 80)), - Some(PeerHost::DNS( - "[1:203:405:607:809:a0b:c0d:e0f:1011]".to_string(), - 80, - )), - None, - None, - None, - None, - None, - None, - None, - ]; - - for (host, expected_host) in hosts.iter().zip(peerhosts.iter()) { - let peerhost = match host.parse::() { - Ok(ph) => Some(ph), - Err(_) => None, - }; - - match (peerhost, expected_host) { - (Some(ref ph), Some(ref expected_ph)) => assert_eq!(*ph, *expected_ph), - (None, None) => {} - (Some(ph), None) => { - eprintln!( - "Parsed {} successfully to {:?}, but expected error", - host, ph - ); - assert!(false); - } - (None, Some(expected_ph)) => { - eprintln!("Failed to parse {} successfully", host); - assert!(false); - } - } - } - } - - #[test] - fn test_http_request_type_codec() { - let http_request_metadata_ip = HttpRequestMetadata { - version: HttpVersion::Http11, - peer: PeerHost::IP( - PeerAddress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), - 12345, - ), - keep_alive: true, - canonical_stacks_tip_height: None, - }; - let http_request_metadata_dns = HttpRequestMetadata { - version: HttpVersion::Http11, - peer: PeerHost::DNS("www.foo.com".to_string(), 80), - keep_alive: true, - canonical_stacks_tip_height: None, - }; - - let tests = vec![ - HttpRequestType::GetNeighbors(http_request_metadata_ip.clone()), - HttpRequestType::GetBlock(http_request_metadata_dns.clone(), StacksBlockId([2u8; 32])), - HttpRequestType::GetMicroblocksIndexed( - http_request_metadata_ip.clone(), - StacksBlockId([3u8; 32]), - ), - HttpRequestType::PostTransaction( - http_request_metadata_dns.clone(), - make_test_transaction(), - None, - ), - HttpRequestType::OptionsPreflight(http_request_metadata_ip.clone(), "/".to_string()), - ]; - - let mut tx_body = vec![]; - make_test_transaction() - .consensus_serialize(&mut tx_body) - .unwrap(); - - let mut post_transaction_preamble = HttpRequestPreamble::new( - HttpVersion::Http11, - "POST".to_string(), - "/v2/transactions".to_string(), - http_request_metadata_dns.peer.hostname(), - http_request_metadata_dns.peer.port(), - http_request_metadata_dns.keep_alive, - ); - post_transaction_preamble.set_content_type(HttpContentType::Bytes); - post_transaction_preamble.set_content_length(tx_body.len() as u32); - - // all of these should parse - let expected_http_preambles = vec![ - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/v2/neighbors".to_string(), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - format!("/v2/blocks/{}", StacksBlockId([2u8; 32]).to_hex()), - http_request_metadata_dns.peer.hostname(), - http_request_metadata_dns.peer.port(), - http_request_metadata_dns.keep_alive, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - format!("/v2/microblocks/{}", StacksBlockId([3u8; 32]).to_hex()), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - post_transaction_preamble, - HttpRequestPreamble::new( - HttpVersion::Http11, - "OPTIONS".to_string(), - format!("/"), - http_request_metadata_ip.peer.hostname(), - http_request_metadata_ip.peer.port(), - http_request_metadata_ip.keep_alive, - ), - ]; - - let expected_http_bodies = vec![vec![], vec![], vec![], tx_body]; - - for (test, 
(expected_http_preamble, expected_http_body)) in tests.iter().zip( - expected_http_preambles - .iter() - .zip(expected_http_bodies.iter()), - ) { - let mut expected_bytes = vec![]; - expected_http_preamble - .consensus_serialize(&mut expected_bytes) - .unwrap(); - - test_debug!( - "Expected preamble:\n{}", - str::from_utf8(&expected_bytes).unwrap() - ); - - if expected_http_preamble.content_type.is_none() - || expected_http_preamble.content_type != Some(HttpContentType::Bytes) - { - test_debug!( - "Expected http body:\n{}", - str::from_utf8(&expected_http_body).unwrap() - ); - } else { - test_debug!("Expected http body (hex):\n{}", to_hex(&expected_http_body)); - } - - expected_bytes.append(&mut expected_http_body.clone()); - - let mut bytes = vec![]; - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.write_message(&mut bytes, &StacksHttpMessage::Request(test.clone())) - .unwrap(); - - assert_eq!(bytes, expected_bytes); - } - } - - #[test] - fn test_http_request_type_codec_err() { - let bad_content_lengths = vec![ - "GET /v2/neighbors HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "GET /v2/blocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "GET /v2/microblocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 0\r\n\r\n", - ]; - for bad_content_length in bad_content_lengths { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let (preamble, offset) = http.read_preamble(bad_content_length.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &bad_content_length.as_bytes()[offset..]); - - assert!(e.is_err(), "{:?}", &e); - assert!( - e.as_ref() - .unwrap_err() - .to_string() - .find("-length body for") - .is_some(), - "{:?}", - &e - ); - } - - let bad_content_types = vec![ - "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", - ]; - for bad_content_type in bad_content_types { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let (preamble, offset) = http.read_preamble(bad_content_type.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &bad_content_type.as_bytes()[offset..]); - assert!(e.is_err()); - assert!(e.unwrap_err().to_string().find("Content-Type").is_some()); - } - } - - #[test] - fn test_http_response_type_codec() { - let test_neighbors_info = RPCNeighborsInfo { - bootstrap: vec![], - sample: vec![ - RPCNeighbor { - network_id: 1, - peer_version: 2, - addrbytes: PeerAddress([ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, - ]), - port: 12345, - public_key_hash: Hash160::from_bytes( - &hex_bytes("1111111111111111111111111111111111111111").unwrap(), - ) - .unwrap(), - authenticated: true, - stackerdbs: Some(vec![]), - }, - RPCNeighbor { - network_id: 3, - peer_version: 4, - addrbytes: PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x01, 0x02, 0x03, 0x04, - ]), - port: 23456, - public_key_hash: Hash160::from_bytes( - &hex_bytes("2222222222222222222222222222222222222222").unwrap(), - ) - .unwrap(), - authenticated: false, - stackerdbs: Some(vec![]), - }, - ], - inbound: vec![], - outbound: vec![], - }; - - let privk = 
StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let test_block_info = make_codec_test_block(5); - let test_microblock_info = - make_sample_microblock_stream(&privk, &test_block_info.block_hash()); - - let mut test_block_info_bytes = vec![]; - test_block_info - .consensus_serialize(&mut test_block_info_bytes) - .unwrap(); - - let mut test_microblock_info_bytes = vec![]; - test_microblock_info - .consensus_serialize(&mut test_microblock_info_bytes) - .unwrap(); - - let tests = vec![ - // length is known - ( - HttpResponseType::Neighbors( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), - true, - None, - ), - test_neighbors_info.clone(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Block( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(test_block_info_bytes.len() as u32), - true, - None, - ), - test_block_info.clone(), - ), - format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), - ), - ( - HttpResponseType::Microblocks( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some(test_microblock_info_bytes.len() as u32), - true, - None, - ), - test_microblock_info.clone(), - ), - format!( - "/v2/microblocks/{}", - test_microblock_info[0].block_hash().to_hex() - ), - ), - ( - HttpResponseType::TransactionID( - HttpResponseMetadata::new( - HttpVersion::Http11, - 123, - Some((Txid([0x1; 32]).to_hex().len() + 2) as u32), - true, - None, - ), - Txid([0x1; 32]), - ), - "/v2/transactions".to_string(), - ), - // length is unknown - ( - HttpResponseType::Neighbors( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_neighbors_info.clone(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Block( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_block_info.clone(), - ), - format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), - ), - ( - HttpResponseType::Microblocks( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - test_microblock_info.clone(), - ), - format!( - "/v2/microblocks/{}", - test_microblock_info[0].block_hash().to_hex() - ), - ), - ( - HttpResponseType::TransactionID( - HttpResponseMetadata::new(HttpVersion::Http11, 123, None, true, None), - Txid([0x1; 32]), - ), - "/v2/transactions".to_string(), - ), - // errors without error messages - ( - HttpResponseType::BadRequest( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Unauthorized( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::PaymentRequired( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Forbidden( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::NotFound( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServerError( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - 
HttpResponseType::ServiceUnavailable( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Error( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(0), true, None), - 502, - "".to_string(), - ), - "/v2/neighbors".to_string(), - ), - // errors with specific messages - ( - HttpResponseType::BadRequest( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Unauthorized( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::PaymentRequired( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Forbidden( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::NotFound( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServerError( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::ServiceUnavailable( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ( - HttpResponseType::Error( - HttpResponseMetadata::new(HttpVersion::Http11, 123, Some(3), true, None), - 502, - "foo".to_string(), - ), - "/v2/neighbors".to_string(), - ), - ]; - - let expected_http_preambles = vec![ - // length is known - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), - HttpContentType::JSON, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(test_block_info_bytes.len() as u32), - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some(test_microblock_info_bytes.len() as u32), - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - Some((Txid([0x1; 32]).to_hex().len() + 2) as u32), - HttpContentType::JSON, - true, - 123, - ), - // length is unknown - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::JSON, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::Bytes, - true, - 123, - ), - HttpResponsePreamble::new( - 200, - "OK".to_string(), - None, - HttpContentType::JSON, - true, - 123, - ), - // errors - HttpResponsePreamble::new_error(400, 123, None), - HttpResponsePreamble::new_error(401, 123, None), - HttpResponsePreamble::new_error(402, 123, None), - HttpResponsePreamble::new_error(403, 123, None), - HttpResponsePreamble::new_error(404, 123, None), - HttpResponsePreamble::new_error(500, 123, None), - HttpResponsePreamble::new_error(503, 123, None), - // generic error - HttpResponsePreamble::new_error(502, 123, None), - // errors with messages - HttpResponsePreamble::new_error(400, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(401, 123, Some("foo".to_string())), - 
HttpResponsePreamble::new_error(402, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(403, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(404, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(500, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(503, 123, Some("foo".to_string())), - HttpResponsePreamble::new_error(502, 123, Some("foo".to_string())), - ]; - - let expected_http_bodies = vec![ - // with content-length - serde_json::to_string(&test_neighbors_info) - .unwrap() - .as_bytes() - .to_vec(), - test_block_info_bytes.clone(), - test_microblock_info_bytes.clone(), - Txid([0x1; 32]).to_hex().as_bytes().to_vec(), - // with transfer-encoding: chunked - serde_json::to_string(&test_neighbors_info) - .unwrap() - .as_bytes() - .to_vec(), - test_block_info_bytes, - test_microblock_info_bytes, - Txid([0x1; 32]).to_hex().as_bytes().to_vec(), - // errors - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - // errors with messages - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - "foo".as_bytes().to_vec(), - ]; - - for ((test, request_path), (expected_http_preamble, _expected_http_body)) in - tests.iter().zip( - expected_http_preambles - .iter() - .zip(expected_http_bodies.iter()), - ) - { - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - let mut bytes = vec![]; - test_debug!("write body:\n{:?}\n", test); - - http.begin_request(HttpVersion::Http11, request_path.to_string()); - http.write_message(&mut bytes, &StacksHttpMessage::Response((*test).clone())) - .unwrap(); - - let (mut preamble, offset) = match http.read_preamble(&bytes) { - Ok((p, o)) => (p, o), - Err(e) => { - test_debug!("first 4096 bytes:\n{:?}\n", &bytes[0..].to_vec()); - test_debug!("error: {:?}", &e); - assert!(false); - unreachable!(); - } - }; - - test_debug!("read preamble of {} bytes\n{:?}\n", offset, preamble); - - test_debug!("read http body\n{:?}\n", &bytes[offset..].to_vec()); - - let (message, _total_len) = if expected_http_preamble.is_chunked() { - let (msg_opt, len) = http - .stream_payload(&preamble, &mut &bytes[offset..]) - .unwrap(); - (msg_opt.unwrap().0, len) - } else { - http.read_payload(&preamble, &bytes[offset..]).unwrap() - }; - - test_debug!("got message\n{:?}\n", &message); - - // check everything in the parsed preamble except for the extra headers - match preamble { - StacksHttpPreamble::Response(ref mut req) => { - assert_eq!(req.headers.len(), 5); - assert!(req.headers.get("access-control-allow-headers").is_some()); - assert!(req.headers.get("access-control-allow-methods").is_some()); - assert!(req.headers.get("access-control-allow-origin").is_some()); - assert!(req.headers.get("server").is_some()); - assert!(req.headers.get("date").is_some()); - req.headers.clear(); - } - StacksHttpPreamble::Request(_) => { - panic!("parsed a request"); - } - } - - assert_eq!( - preamble, - StacksHttpPreamble::Response((*expected_http_preamble).clone()) - ); - assert_eq!(message, StacksHttpMessage::Response((*test).clone())); - assert_eq!(http.num_pending(), 0); - } - } - - #[test] - fn test_http_response_type_codec_err() { - let request_paths = vec![ - "/v2/blocks/1111111111111111111111111111111111111111111111111111111111111111", - "/v2/transactions", - "/v2/neighbors", - "/v2/neighbors", - "/v2/neighbors", - ]; - let bad_request_payloads = vec![ - 
"HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 2\r\n\r\nab", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 4\r\n\r\n\"ab\"", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\n{", - "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\na", - "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/octet-stream\r\nContent-length: 2\r\n\r\n{}", - ]; - let expected_bad_request_payload_errors = vec![ - "Invalid content-type", - "Invalid txid:", - "Not enough bytes", - "Failed to parse", - "expected text/plain", - ]; - for (test, (expected_error, request_path)) in bad_request_payloads.iter().zip( - expected_bad_request_payload_errors - .iter() - .zip(request_paths), - ) { - test_debug!( - "Expect failure:\n{}\nExpected error: '{}'", - test, - expected_error - ); - - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - http.begin_request(HttpVersion::Http11, request_path.to_string()); - - let (preamble, offset) = http.read_preamble(test.as_bytes()).unwrap(); - let e = http.read_payload(&preamble, &test.as_bytes()[offset..]); - let errstr = format!("{:?}", &e); - assert!(e.is_err()); - assert!( - e.unwrap_err().to_string().find(expected_error).is_some(), - "{}", - errstr - ); - } - } - - #[test] - fn test_http_headers_too_big() { - let bad_header_value = std::iter::repeat("A") - .take(HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize) - .collect::(); - let bad_request_preamble = format!( - "GET /v2/neighbors HTTP/1.1\r\nHost: localhost:1234\r\nBad-Header: {}\r\n\r\n", - &bad_header_value - ); - let bad_response_preamble = format!("HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-ID: 123\r\nContent-Type: text/plain\r\nContent-Length: 64\r\nBad-Header: {}\r\n\r\n", &bad_header_value); - - let request_err = - HttpRequestPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let response_err = - HttpResponsePreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - let protocol_request_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let protocol_response_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - eprintln!("request_err: {:?}", &request_err); - eprintln!("response_err: {:?}", &response_err); - - eprintln!("protocol_request_err: {:?}", &protocol_request_err); - eprintln!("protocol_response_err: {:?}", &protocol_response_err); - - assert!(request_err - .to_string() - .find("Not enough bytes to form a HTTP request preamble") - .is_some()); - assert!(response_err - .to_string() - .find("Not enough bytes to form a HTTP response preamble") - .is_some()); - assert!(protocol_request_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - assert!(protocol_response_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - } - - #[test] - fn test_http_headers_too_many() { - let mut too_many_headers_list = vec![]; - for i in 0..HTTP_PREAMBLE_MAX_NUM_HEADERS { - too_many_headers_list.push(format!("H{}: {}\r\n", i + 1, i + 1)); - } - let too_many_headers = too_many_headers_list.join(""); - let bad_request_preamble = format!( - 
"GET /v2/neighbors HTTP/1.1\r\nHost: localhost:1234\r\n{}\r\n", - &too_many_headers - ); - let bad_response_preamble = format!("HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-ID: 123\r\nContent-Type: text/plain\r\nContent-Length: 64\r\n{}\r\n", &too_many_headers); - - let request_err = - HttpRequestPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let response_err = - HttpResponsePreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - let protocol_request_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_request_preamble.as_bytes()) - .unwrap_err(); - let protocol_response_err = - StacksHttpPreamble::consensus_deserialize(&mut bad_response_preamble.as_bytes()) - .unwrap_err(); - - eprintln!("request_err: {:?}", &request_err); - eprintln!("response_err: {:?}", &response_err); - - eprintln!("protocol_request_err: {:?}", &protocol_request_err); - eprintln!("protocol_response_err: {:?}", &protocol_response_err); - - assert!(request_err - .to_string() - .find("Failed to parse HTTP request: TooManyHeaders") - .is_some()); - assert!(response_err - .to_string() - .find("Failed to parse HTTP response: TooManyHeaders") - .is_some()); - assert!(protocol_request_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - assert!(protocol_response_err - .to_string() - .find("Failed to decode HTTP request or HTTP response") - .is_some()); - } - - #[test] - fn test_http_duplicate_concurrent_streamed_response_fails() { - // do not permit multiple in-flight chunk-encoded HTTP responses with the same request ID. - let valid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n37\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; - let invalid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n10\r\nxxxxxxxxxxxxxxxx\r\n0\r\n\r\n"; - let invalid_chunked_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n38\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; - - let mut http = StacksHttp::new("127.0.0.1:20443".parse().unwrap()); - - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(valid_neighbors_response.as_bytes()) - .unwrap(); - assert_eq!(http.num_pending(), 1); - - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // finish reading the body - let msg = http - .stream_payload( - &preamble, - &mut &valid_neighbors_response.as_bytes()[offset..], - ) - .unwrap(); - match msg { - ( - Some(( - StacksHttpMessage::Response(HttpResponseType::Neighbors(_, neighbors_data)), - _, - )), - _, - ) => assert_eq!( - neighbors_data, - RPCNeighborsInfo { - bootstrap: vec![], - sample: vec![], - inbound: vec![], - outbound: vec![] - } - ), - _ => { - error!("Got {:?}", &msg); - assert!(false); - } - } - assert_eq!(http.num_pending(), 0); - - // can read the preamble again, but only once - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(invalid_neighbors_response.as_bytes()) - .unwrap(); - 
assert_eq!(http.num_pending(), 1); - - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // reading a corrupt body unlocks the ability to read the preamble again - let res = http.stream_payload( - &preamble, - &mut &invalid_neighbors_response.as_bytes()[offset..], - ); - assert!(res.unwrap_err().to_string().find("JSON").is_some()); - assert_eq!(http.num_pending(), 0); - - // can read the premable again, but only once - http.begin_request(HttpVersion::Http11, "/v2/neighbors".to_string()); - let (preamble, offset) = http - .read_preamble(invalid_chunked_response.as_bytes()) - .unwrap(); - let res = http.read_preamble(valid_neighbors_response.as_bytes()); - - assert!(res.is_err()); - assert!(res.unwrap_err().to_string().find("in progress").is_some()); - - // reading a corrupt chunk stream unlocks the ability to read the preamble again - let res = http.stream_payload( - &preamble, - &mut &invalid_chunked_response.as_bytes()[offset..], - ); - assert!(res - .unwrap_err() - .to_string() - .find("Invalid chunk trailer") - .is_some()); - assert_eq!(http.num_pending(), 0); - } - - #[test] - fn test_http_request_version_keep_alive() { - let requests = vec![ - HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - true, - ), - HttpRequestPreamble::new( - HttpVersion::Http10, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - false, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - true, - ), - HttpRequestPreamble::new( - HttpVersion::Http11, - "GET".to_string(), - "/v2/info".to_string(), - "localhost".to_string(), - 8080, - false, - ), - ]; - - // (have 'connection' header?, have 'keep-alive' value?) - let requests_connection_expected = - vec![(true, true), (false, false), (false, false), (true, false)]; - - for (r, (has_connection, is_keep_alive)) in - requests.iter().zip(requests_connection_expected.iter()) - { - let mut bytes = vec![]; - r.consensus_serialize(&mut bytes).unwrap(); - let txt = String::from_utf8(bytes).unwrap(); - - eprintln!( - "has_connection: {}, is_keep_alive: {}\n{}", - *has_connection, *is_keep_alive, &txt - ); - if *has_connection { - if *is_keep_alive { - assert!(txt.find("Connection: keep-alive\r\n").is_some()); - } else { - assert!(txt.find("Connection: close\r\n").is_some()); - } - } else { - assert!(txt.find("Connection: ").is_none()); - } - } - } - - #[test] - fn test_http_response_version_keep_alive() { - // (version, explicit keep-alive?) 
- let responses_args = vec![ - (HttpVersion::Http10, true), - (HttpVersion::Http10, false), - (HttpVersion::Http11, true), - (HttpVersion::Http11, false), - ]; - - let mut responses = vec![]; - for res in responses_args.iter() { - let mut bytes = vec![]; - let md = HttpResponseMetadata::new(res.0.clone(), 123, None, res.1, None); - HttpResponsePreamble::new_serialized( - &mut bytes, - 200, - "OK", - None, - &HttpContentType::JSON, - 123, - |ref mut fd| keep_alive_headers(fd, &md), - ) - .unwrap(); - responses.push(String::from_utf8(bytes).unwrap()); - } - - for (response, (version, sent_keep_alive)) in responses.iter().zip(responses_args.iter()) { - test_debug!( - "version: {:?}, sent keep-alive: {}, response:\n{}", - version, - sent_keep_alive, - response - ); - match version { - HttpVersion::Http10 => { - // be explicit about Connection: with http/1.0 clients - if *sent_keep_alive { - assert!(response.find("Connection: keep-alive\r\n").is_some()); - } else { - assert!(response.find("Connection: close\r\n").is_some()); - } - } - HttpVersion::Http11 => { - if *sent_keep_alive { - // we don't send connection: keep-alive if the client is 1.1 and it didn't - // send its own connection:

is concerned + HTTP_REQUEST_ID_RESERVED + } + + fn get_message_name(&self) -> &'static str { + "StacksHttpMessage" + } +} + +/// A partially-decoded, streamed HTTP message (response) being received. +/// Internally used by StacksHttp to keep track of chunk-decoding state. +#[derive(Debug, Clone, PartialEq)] +struct StacksHttpRecvStream { + state: HttpChunkedTransferReaderState, + data: Vec, + total_consumed: usize, // number of *encoded* bytes consumed +} + +impl StacksHttpRecvStream { + pub fn new(max_size: u64) -> StacksHttpRecvStream { + StacksHttpRecvStream { + state: HttpChunkedTransferReaderState::new(max_size), + data: vec![], + total_consumed: 0, + } + } + + /// Feed data into our chunked transfer reader state. If we finish reading a stream, return + /// the decoded bytes (as Some(Vec) and the total number of encoded bytes consumed). + /// Always returns the number of bytes consumed. + pub fn consume_data( + &mut self, + fd: &mut R, + ) -> Result<(Option<(Vec, usize)>, usize), NetError> { + let mut consumed = 0; + let mut blocked = false; + while !blocked { + let mut decoded_buf = vec![0u8; CHUNK_BUF_LEN]; + let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { + Ok((0, num_consumed)) => { + trace!( + "consume_data blocked on 0 decoded bytes ({} consumed)", + num_consumed + ); + blocked = true; + (0, num_consumed) + } + Ok((num_read, num_consumed)) => (num_read, num_consumed), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut + { + trace!("consume_data blocked on read error"); + blocked = true; + (0, 0) + } else { + return Err(NetError::ReadError(e)); + } + } + }; + + consumed += consumed_pass; + if read_pass > 0 { + self.data.extend_from_slice(&decoded_buf[0..read_pass]); + } + } + + self.total_consumed += consumed; + + // did we get a message? + if self.state.is_eof() { + // reset + let message_data = mem::replace(&mut self.data, vec![]); + let total_consumed = self.total_consumed; + + self.state = HttpChunkedTransferReaderState::new(self.state.max_size); + self.total_consumed = 0; + + Ok((Some((message_data, total_consumed)), consumed)) + } else { + Ok((None, consumed)) + } + } +} + +/// Information about an in-flight request +#[derive(Debug, Clone, PartialEq)] +struct StacksHttpReplyData { + request_id: u32, + stream: StacksHttpRecvStream, +} + +/// Stacks HTTP state machine implementation, for buffering up data. +/// One of these exists per Connection. +/// There can be at most one HTTP request in-flight (i.e. we don't do pipelining). +/// +/// This state machine gets used for both clients and servers. A client issues an HTTP request, +/// and must receive a follow-up HTTP reply (or the state machine errors out). A server receives +/// an HTTP request, and sends an HTTP reply. +#[derive(Clone)] +pub struct StacksHttp { + /// Address of peer + peer_addr: SocketAddr, + /// offset body after '\r\n\r\n' if known + body_start: Option, + /// number of preamble bytes seen so far + num_preamble_bytes: usize, + /// last 4 bytes of the preamble we've seen, just in case the \r\n\r\n straddles two calls to + /// read_preamble() + last_four_preamble_bytes: [u8; 4], + /// Incoming reply state + reply: Option, + /// Size of HTTP chunks to write + chunk_size: usize, + /// Which request handler is active. + /// This is only used if this state-machine is used by a client to issue a request and then + /// parse a reply.
If instead this state-machine is used by the server to parse a request and + /// send a reply, it will be unused. + request_handler_index: Option, + /// HTTP request handlers (verb, regex, request-handler, response-handler) + request_handlers: Vec<(String, Regex, Box)>, + /// Maximum size of call arguments + pub maximum_call_argument_size: u32, + /// Maximum execution budget of a read-only call + pub read_only_call_limit: ExecutionCost, +} + +impl StacksHttp { + pub fn new(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { + let mut http = StacksHttp { + peer_addr, + body_start: None, + num_preamble_bytes: 0, + last_four_preamble_bytes: [0u8; 4], + reply: None, + chunk_size: 8192, + request_handler_index: None, + request_handlers: vec![], + maximum_call_argument_size: conn_opts.maximum_call_argument_size, + read_only_call_limit: conn_opts.read_only_call_limit.clone(), + }; + http.register_rpc_methods(); + http + } + + /// Register an API RPC endpoint + pub fn register_rpc_endpoint( + &mut self, + handler: Handler, + ) { + self.request_handlers.push(( + handler.verb().to_string(), + handler.path_regex(), + Box::new(handler), + )); + } + + /// Find the HTTP request handler to use to process the reply, given the request path. + /// Returns the index into the list of handlers + fn find_response_handler(&self, request_verb: &str, request_path: &str) -> Option { + for (i, (verb, regex, _)) in self.request_handlers.iter().enumerate() { + if request_verb != verb { + continue; + } + let _captures = if let Some(caps) = regex.captures(request_path) { + caps + } else { + continue; + }; + + return Some(i); + } + None + } + + /// Force the state machine to expect a response + #[cfg(test)] + pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { + let handler_index = self + .find_response_handler(request_verb, request_path) + .expect(&format!( + "FATAL: could not find handler for '{}' '{}'", + request_verb, request_path + )); + self.request_handler_index = Some(handler_index); + } + + /// Try to parse an inbound HTTP request using a given handler, preamble, and body + #[cfg(test)] + pub fn handle_try_parse_request( + &self, + handler: &mut dyn RPCRequestHandler, + preamble: &HttpRequestPreamble, + body: &[u8], + ) -> Result { + let (decoded_path, query) = decode_request_path(&preamble.path_and_query_str)?; + let captures = if let Some(caps) = handler.path_regex().captures(&decoded_path) { + caps + } else { + return Err(NetError::NotFoundError); + }; + + let payload = match handler.try_parse_request( + preamble, + &captures, + if query.len() > 0 { Some(&query) } else { None }, + body, + ) { + Ok(p) => p, + Err(e) => { + handler.restart(); + return Err(e.into()); + } + }; + + let request = StacksHttpRequest::new(preamble.clone(), payload); + Ok(request) + } + + /// Try to parse an inbound HTTP request, given its decoded HTTP preamble. + /// The body will be in the `fd`. + /// Returns the parsed HTTP request if successful. + pub fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + body: &[u8], + ) -> Result { + let (decoded_path, query) = decode_request_path(&preamble.path_and_query_str)?; + test_debug!("decoded_path: '{}', query: '{}'", &decoded_path, &query); + + // NOTE: This loop starts out like `find_response_handler()`, but `captures`'s lifetime is + // bound to `regex` so we can't just return it from `find_response_handler()`. Thus, it's + // duplicated here. 
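Editorial aside, not part of the patch: the loop below (and find_response_handler() above it) implements a plain dispatch table of (verb, path-regex, handler) triples, matched by verb first and then by regex captures over the decoded path. A minimal, self-contained sketch of that pattern follows; it uses the regex crate, which the surrounding code already depends on, and the route, capture name, and handler here are made up for illustration rather than taken from the patch.

use regex::Regex;

/// Stand-in for a request handler: build a response body from the path captures.
type Handler = fn(&regex::Captures) -> String;

/// Hypothetical handler for a made-up route; the real handlers implement a trait.
fn get_block(caps: &regex::Captures) -> String {
    format!("requested block {}", &caps["block_id"])
}

struct Dispatcher {
    /// (HTTP verb, compiled path regex, handler), mirroring the handler table above.
    handlers: Vec<(String, Regex, Handler)>,
}

impl Dispatcher {
    /// Match by verb first, then by path regex; the first hit wins, otherwise a 404 would follow.
    fn dispatch(&self, verb: &str, decoded_path: &str) -> Option<String> {
        for (v, re, handler) in self.handlers.iter() {
            if v != verb {
                continue;
            }
            // The captures borrow from `re`, which is why the patch repeats this loop in
            // try_parse_request() instead of returning captures from find_response_handler().
            let caps = match re.captures(decoded_path) {
                Some(caps) => caps,
                None => continue,
            };
            return Some(handler(&caps));
        }
        None
    }
}

fn main() {
    let dispatcher = Dispatcher {
        handlers: vec![(
            "GET".to_string(),
            Regex::new(r"^/v2/blocks/(?P<block_id>[0-9a-f]{64})$").unwrap(),
            get_block as Handler,
        )],
    };
    assert!(dispatcher.dispatch("GET", "/v2/neighbors").is_none());
    assert!(dispatcher
        .dispatch("GET", &format!("/v2/blocks/{}", "ab".repeat(32)))
        .is_some());
}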
+ for (verb, regex, request) in self.request_handlers.iter_mut() { + if &preamble.verb != verb { + continue; + } + let captures = if let Some(caps) = regex.captures(&decoded_path) { + caps + } else { + continue; + }; + + let payload = match request.try_parse_request( + preamble, + &captures, + if query.len() > 0 { Some(&query) } else { None }, + body, + ) { + Ok(p) => p, + Err(e) => { + request.restart(); + return Err(e.into()); + } + }; + + info!("Handle StacksHttpRequest"; "verb" => %verb, "peer_addr" => %self.peer_addr, "path" => %decoded_path, "query" => %query); + let request = StacksHttpRequest::new(preamble.clone(), payload); + return Ok(request); + } + + test_debug!("Failed to parse '{}'", &preamble.path_and_query_str); + Err(NetError::Http(HttpError::Http( + 404, + "No such file or directory".into(), + ))) + } + + /// Parse out an HTTP response error message + pub fn try_parse_error_response( + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + if preamble.status_code < 400 || preamble.status_code > 599 { + return Err(NetError::DeserializeError( + "Invalid response: not an error".to_string(), + )); + } + + let payload = if preamble.content_type == HttpContentType::Text { + let mut error_text = String::new(); + let mut ioc = io::Cursor::new(body); + let mut bound_fd = BoundReader::from_reader(&mut ioc, MAX_MESSAGE_LEN as u64); + bound_fd + .read_to_string(&mut error_text) + .map_err(NetError::ReadError)?; + + HttpResponsePayload::Text(error_text) + } else if preamble.content_type == HttpContentType::JSON { + let mut ioc = io::Cursor::new(body); + let mut bound_fd = BoundReader::from_reader(&mut ioc, MAX_MESSAGE_LEN as u64); + let json_val = serde_json::from_reader(&mut bound_fd).map_err(|_| { + NetError::DeserializeError("Failed to decode JSON value".to_string()) + })?; + + HttpResponsePayload::JSON(json_val) + } else { + return Err(NetError::DeserializeError(format!( + "Invalid error response: expected text/plain or application/json, got {:?}", + &preamble.content_type + ))); + }; + + Ok(StacksHttpResponse::new(preamble.clone(), payload)) + } + + /// Try to parse an inbound HTTP response, given its decoded HTTP preamble, and the HTTP + /// version and request path that had originally been sent. The body will be read from `fd`. + pub fn try_parse_response( + &mut self, + request_handler_index: usize, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + if preamble.status_code >= 400 { + return Self::try_parse_error_response(preamble, body); + } + + let (_, _, parser) = self + .request_handlers + .get(request_handler_index) + .expect("FATAL: tried to use nonexistent response handler"); + let payload = parser.try_parse_response(preamble, body)?; + let response = StacksHttpResponse::new(preamble.clone(), payload); + return Ok(response); + } + + /// Handle an HTTP request by generating an HTTP response. + /// Returns Ok((preamble, contents)) on success. Note that this could be an HTTP error + /// message. + /// Returns Err(..) on failure to decode or generate the response.
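Another illustrative aside: the error path mentioned in the doc comment above ultimately flows through try_parse_error_response() earlier in this hunk, which accepts only 4xx/5xx statuses and decodes the body as plain text or JSON depending on the Content-Type. A rough, self-contained sketch of that decision over plain inputs; it assumes serde_json (already a workspace dependency) and uses simplified stand-in types rather than the patch's HttpResponsePayload.

use serde_json::Value;

/// Simplified stand-in for the patch's HttpResponsePayload.
#[derive(Debug)]
enum ErrorPayload {
    Text(String),
    Json(Value),
}

/// Decode an HTTP error body the way try_parse_error_response() does, but over plain inputs.
fn parse_error_body(status: u16, content_type: &str, body: &[u8]) -> Result<ErrorPayload, String> {
    if !(400..=599).contains(&status) {
        return Err("not an error response".to_string());
    }
    match content_type {
        "text/plain" => Ok(ErrorPayload::Text(
            String::from_utf8(body.to_vec()).map_err(|e| e.to_string())?,
        )),
        "application/json" => Ok(ErrorPayload::Json(
            serde_json::from_slice(body).map_err(|e| e.to_string())?,
        )),
        other => Err(format!(
            "expected text/plain or application/json, got {}",
            other
        )),
    }
}

fn main() {
    let payload = parse_error_body(404, "application/json", br#"{"err":"no such block"}"#).unwrap();
    println!("{:?}", payload);
    assert!(parse_error_body(200, "text/plain", b"ok").is_err());
}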
+ pub fn try_handle_request( + &mut self, + request: StacksHttpRequest, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let (decoded_path, _) = decode_request_path(&request.preamble().path_and_query_str)?; + let response_handler_index = + if let Some(i) = self.find_response_handler(&request.preamble().verb, &decoded_path) { + i + } else { + // method not found + return StacksHttpResponse::new_error( + &request.preamble, + &HttpNotFound::new(format!( + "No such API endpoint '{} {}'", + &request.preamble().verb, + &decoded_path + )), + ) + .try_into_contents(); + }; + + let (_, _, request_handler) = self + .request_handlers + .get_mut(response_handler_index) + .expect("FATAL: request points to a nonexistent handler"); + let request_preamble = request.preamble.clone(); + let request_result = + request_handler.try_handle_request(request.preamble, request.contents, node); + request_handler.restart(); + + let (response_preamble, response_contents) = match request_result { + Ok((rp, rc)) => (rp, rc), + Err(NetError::Http(e)) => { + return StacksHttpResponse::new_error(&request_preamble, &*e.into_http_error()) + .try_into_contents() + } + Err(e) => { + warn!("Irrecoverable error when handling request"; "path" => %request_preamble.path_and_query_str, "error" => %e); + return Err(e); + } + }; + Ok((response_preamble, response_contents)) + } + + #[cfg(test)] + pub fn num_pending(&self) -> usize { + self.reply.as_ref().map(|_| 1).unwrap_or(0) + } + + /// Set up the pending response + /// Called indirectly from ProtocolFamily::read_preamble() when handling an HTTP response + /// Used for dealing with streaming data + fn set_pending(&mut self, preamble: &HttpResponsePreamble) { + self.reply = Some(StacksHttpReplyData { + request_id: preamble + .get_request_id() + .unwrap_or(HTTP_REQUEST_ID_RESERVED), + stream: StacksHttpRecvStream::new(MAX_MESSAGE_LEN as u64), + }); + } + + /// Set the preamble. This is only relevant for receiving an HTTP response to a request that we + /// already sent. It gets called from ProtocolFamily::read_preamble(). + /// + /// This method will set up this state machine to consume the message associated with this + /// premable, if the response is chunked. + fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { + match preamble { + StacksHttpPreamble::Response(ref http_response_preamble) => { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); + } + + self.set_pending(http_response_preamble); + } + } + _ => {} + } + Ok(()) + } + + /// Clear any pending response state -- i.e. due to a failed request. + fn reset(&mut self) -> () { + self.request_handler_index = None; + self.reply = None; + } + + /// Used for processing chunk-encoded streams. + /// Given the preamble and a Read, stream the bytes into a chunk-decoder. Return the decoded + /// bytes if we decode an entire stream. Always return the number of bytes consumed. + /// Returns Ok((Some(decoded bytes we got, total number of encoded bytes), number of bytes gotten in this call)) if we're done decoding. 
+ /// Returns Ok((None, number of bytes gotten in this call)) if there's more to decode. + pub fn consume_data( + &mut self, + preamble: &HttpResponsePreamble, + fd: &mut R, + ) -> Result<(Option<(Vec, usize)>, usize), NetError> { + if !preamble.is_chunked() { + return Err(NetError::InvalidState); + } + if let Some(reply) = self.reply.as_mut() { + match reply.stream.consume_data(fd).map_err(|e| { + self.reset(); + e + })? { + (Some((byte_vec, bytes_total)), sz) => { + // done receiving + self.reply = None; + Ok((Some((byte_vec, bytes_total)), sz)) + } + res => Ok(res), + } + } else { + return Err(NetError::InvalidState); + } + } + + /// Calculate the search window for \r\n\r\n in the preamble stream. + /// + /// As we are streaming the preamble, we're looking for the pattern `\r\n\r\n`. The last four + /// bytes of the encoded preamble are always stored in `self.last_four_preamble_bytes`; this + /// gets updated as the preamble data is streamed in. So, given these last four bytes, and the + /// next chunk of data streamed in from the request (in `buf`), determine the 4-byte sequence + /// to check for `\r\n\r\n`. + /// + /// `i` is the offset into the chunk `buf` being searched. If `i < 4`, then we must check the + /// last `4 - i` bytes of `self.last_four_preamble_bytes` as well as the first `i` bytes of + /// `buf`. Otherwise, we just check `buf[i-4..i]`. + fn body_start_search_window(&self, i: usize, buf: &[u8]) -> [u8; 4] { + let window = match i { + 0 => [ + self.last_four_preamble_bytes[0], + self.last_four_preamble_bytes[1], + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + ], + 1 => [ + self.last_four_preamble_bytes[1], + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + buf[0], + ], + 2 => [ + self.last_four_preamble_bytes[2], + self.last_four_preamble_bytes[3], + buf[0], + buf[1], + ], + 3 => [self.last_four_preamble_bytes[3], buf[0], buf[1], buf[2]], + _ => [buf[i - 4], buf[i - 3], buf[i - 2], buf[i - 1]], + }; + window + } + + /// Given a fully-formed single HTTP response, parse it (used by clients). + #[cfg(test)] + pub fn parse_response( + verb: &str, + request_path: &str, + response_buf: &[u8], + ) -> Result { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + + let response_handler_index = + http.find_response_handler(verb, request_path) + .ok_or(NetError::SendError(format!( + "No such handler for '{} {}'", + verb, request_path + )))?; + http.request_handler_index = Some(response_handler_index); + + let (preamble, message_offset) = http.read_preamble(response_buf)?; + let is_chunked = match preamble { + StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), + _ => { + return Err(NetError::DeserializeError( + "Invalid HTTP message: did not get a Response preamble".to_string(), + )); + } + }; + + let mut message_bytes = &response_buf[message_offset..]; + + if is_chunked { + match http.stream_payload(&preamble, &mut message_bytes)? { + (Some((message, _)), _) => Ok(message), + (None, _) => Err(NetError::UnderflowError( + "Not enough bytes to form a streamed HTTP response".to_string(), + )), + } + } else { + let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; + Ok(message) + } + } +} + +impl ProtocolFamily for StacksHttp { + type Preamble = StacksHttpPreamble; + type Message = StacksHttpMessage; + + /// how big can a preamble get? 
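Aside: body_start_search_window() above (and read_preamble() below, which uses it) detects the end of the HTTP headers by remembering the last four bytes seen, so a "\r\n\r\n" that straddles two successive reads is still found. A minimal, std-only model of that bookkeeping; the names here are made up, and the real code additionally caps the preamble at HTTP_PREAMBLE_MAX_ENCODED_SIZE.

/// Track the last four bytes across reads and record the absolute offset at which
/// the HTTP header terminator "\r\n\r\n" ends, if it has been seen.
struct PreambleScanner {
    last_four: [u8; 4],
    total_seen: usize,
    body_start: Option<usize>,
}

impl PreambleScanner {
    fn new() -> Self {
        PreambleScanner { last_four: [0u8; 4], total_seen: 0, body_start: None }
    }

    /// Feed the next chunk of bytes; the terminator may straddle chunk boundaries.
    fn feed(&mut self, buf: &[u8]) {
        for &b in buf {
            self.last_four = [self.last_four[1], self.last_four[2], self.last_four[3], b];
            self.total_seen += 1;
            if self.body_start.is_none() && self.last_four == *b"\r\n\r\n" {
                self.body_start = Some(self.total_seen);
            }
        }
    }
}

fn main() {
    let mut scanner = PreambleScanner::new();
    // The terminator is split across the two reads.
    scanner.feed(b"GET /v2/info HTTP/1.1\r\nHost: localhost\r");
    assert_eq!(scanner.body_start, None);
    scanner.feed(b"\n\r\nbody-bytes");
    assert_eq!(scanner.body_start, Some(42));
}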
+ fn preamble_size_hint(&mut self) -> usize { + HTTP_PREAMBLE_MAX_ENCODED_SIZE as usize + } + + /// how big is this message? Might not know if we're dealing with chunked encoding. + fn payload_len(&mut self, preamble: &StacksHttpPreamble) -> Option { + match *preamble { + StacksHttpPreamble::Request(ref http_request_preamble) => { + Some(http_request_preamble.get_content_length() as usize) + } + StacksHttpPreamble::Response(ref http_response_preamble) => http_response_preamble + .content_length + .map(|len| len as usize), + } + } + + /// Read the next HTTP preamble (be it a request or a response), and return the preamble and + /// the number of bytes consumed while reading it. + fn read_preamble(&mut self, buf: &[u8]) -> Result<(StacksHttpPreamble, usize), NetError> { + // does this contain end-of-headers marker, including the last four bytes of preamble we + // saw? + if self.body_start.is_none() { + for i in 0..=buf.len() { + let window = self.body_start_search_window(i, buf); + if window == [b'\r', b'\n', b'\r', b'\n'] { + self.body_start = Some(self.num_preamble_bytes + i); + } + } + } + if self.body_start.is_none() { + // haven't found the body yet, so update `last_four_preamble_bytes` + // and report underflow + let len = buf.len(); + let last_four_preamble_bytes = self.body_start_search_window(len, buf); + self.num_preamble_bytes += len; + self.last_four_preamble_bytes = last_four_preamble_bytes; + return Err(NetError::UnderflowError( + "Not enough bytes to form HTTP preamble".into(), + )); + } + + let mut cursor = io::Cursor::new(buf); + + let preamble = { + let mut rd = + BoundReader::from_reader(&mut cursor, HTTP_PREAMBLE_MAX_ENCODED_SIZE as u64); + let preamble: StacksHttpPreamble = read_next(&mut rd)?; + preamble + }; + + let preamble_len = cursor.position() as usize; + self.set_preamble(&preamble)?; + + Ok((preamble, preamble_len)) + } + + /// Stream a payload of unknown length. Only gets called if payload_len() returns None. + /// + /// Returns Ok((Some((message, num-bytes-consumed)), num-bytes-read)) if we read enough data to + /// form a message. `num-bytes-consumed` is the number of bytes required to parse the message, + /// and `num-bytes-read` is the number of bytes read in this call. + /// + /// Returns Ok((None, num-bytes-read)) if we consumed data (i.e. `num-bytes-read` bytes), but + /// did not yet have enough of the message to parse it. The caller should try again. + /// + /// Returns Error on irrecoverable error. + fn stream_payload( + &mut self, + preamble: &StacksHttpPreamble, + fd: &mut R, + ) -> Result<(Option<(StacksHttpMessage, usize)>, usize), NetError> { + if self.payload_len(preamble).is_some() { + return Err(NetError::InvalidState); + } + match preamble { + StacksHttpPreamble::Request(_) => { + // HTTP requests can't be chunk-encoded, so this should never be reached + return Err(NetError::InvalidState); + } + StacksHttpPreamble::Response(ref http_response_preamble) => { + if !http_response_preamble.is_chunked() { + return Err(NetError::InvalidState); + } + + // sanity check -- if we're receiving a response, then we must have earlier issued + // a request. Thus, we must already know which response handler to use. + // Otherwise, someone sent us malforemd data. + if self.request_handler_index.is_none() { + self.reset(); + return Err(NetError::DeserializeError( + "Unsolicited HTTP response".to_string(), + )); + } + + // message of unknown length. Buffer up and maybe we can parse it. 
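Aside: the comment above is the heart of streamed responses. A chunk-encoded body carries no Content-Length, so decoded bytes have to be accumulated until the terminating zero-length chunk arrives, and only then can the payload be parsed. A rough, self-contained sketch of that idea over an in-memory buffer; unlike the patch's HttpChunkedTransferReaderState it ignores chunk extensions, trailers, and maximum-size enforcement.

/// Try to decode a chunk-encoded body from `buf`.
/// Returns Ok(Some(decoded)) once the terminating 0-length chunk has arrived,
/// Ok(None) if more data is still needed, and Err on malformed framing.
fn try_decode_chunked(buf: &[u8]) -> Result<Option<Vec<u8>>, String> {
    let mut decoded = Vec::new();
    let mut pos = 0;
    loop {
        // Find the CRLF that ends the chunk-size line.
        let line_end = match buf[pos..].windows(2).position(|w| w == b"\r\n") {
            Some(off) => pos + off,
            None => return Ok(None), // size line not complete yet
        };
        let size_str = std::str::from_utf8(&buf[pos..line_end]).map_err(|e| e.to_string())?;
        let size = usize::from_str_radix(size_str.trim(), 16).map_err(|e| e.to_string())?;
        let data_start = line_end + 2;
        if size == 0 {
            // terminating chunk; a trailing CRLF is still expected
            return if buf.len() >= data_start + 2 { Ok(Some(decoded)) } else { Ok(None) };
        }
        if buf.len() < data_start + size + 2 {
            return Ok(None); // chunk payload (plus its CRLF) not fully buffered yet
        }
        decoded.extend_from_slice(&buf[data_start..data_start + size]);
        if &buf[data_start + size..data_start + size + 2] != b"\r\n" {
            return Err("invalid chunk trailer".to_string());
        }
        pos = data_start + size + 2;
    }
}

fn main() {
    let body = b"1c\r\n{\"bootstrap\":[],\"sample\":[]}\r\n0\r\n\r\n";
    assert_eq!(try_decode_chunked(&body[..10]).unwrap(), None);
    let full = try_decode_chunked(body).unwrap().unwrap();
    assert_eq!(&full[..], &b"{\"bootstrap\":[],\"sample\":[]}"[..]);
}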
+ let (message_bytes_opt, num_read) = + self.consume_data(http_response_preamble, fd).map_err(|e| { + self.reset(); + e + })?; + + match message_bytes_opt { + Some((message_bytes, total_bytes_consumed)) => { + // can parse! + test_debug!( + "read http response payload of {} bytes (just buffered {})", + message_bytes.len(), + num_read, + ); + + // we now know the content-length, so pass it into the parser. + let handler_index = + self.request_handler_index + .ok_or(NetError::DeserializeError( + "Unknown HTTP response handler".to_string(), + ))?; + + let parse_res = self.try_parse_response( + handler_index, + http_response_preamble, + &message_bytes[..], + ); + + // done parsing + self.reset(); + match parse_res { + Ok(data_response) => Ok(( + Some(( + StacksHttpMessage::Response(data_response), + total_bytes_consumed, + )), + num_read, + )), + Err(e) => { + info!("Failed to parse HTTP response: {:?}", &e); + Err(e) + } + } + } + None => { + // need more data + trace!( + "did not read http response payload, but buffered {}", + num_read + ); + Ok((None, num_read)) + } + } + } + } + } + + /// Parse a payload of known length. + /// Only gets called if payload_len() returns Some(...). + /// + /// Return Ok(message, num-bytes-consumed) if we decoded a message. The message will + /// have consumed `num-bytes-consumed` bytes. + /// + /// Return Err(..) if we failed to decode the message. + fn read_payload( + &mut self, + preamble: &StacksHttpPreamble, + buf: &[u8], + ) -> Result<(StacksHttpMessage, usize), NetError> { + match preamble { + StacksHttpPreamble::Request(ref http_request_preamble) => { + // all requests have a known length + let len = http_request_preamble.get_content_length() as usize; + if len > buf.len() { + return Err(NetError::InvalidState); + } + + trace!("read http request payload of {} bytes", len); + + match self.try_parse_request(http_request_preamble, &buf[0..len]) { + Ok(data_request) => Ok((StacksHttpMessage::Request(data_request), len)), + Err(NetError::Http(http_error)) => { + // convert into a response + let resp = StacksHttpResponse::new_error( + http_request_preamble, + &*http_error.into_http_error(), + ); + self.reset(); + return Ok(( + StacksHttpMessage::Error( + http_request_preamble.path_and_query_str.clone(), + resp, + ), + len, + )); + } + Err(e) => { + info!("Failed to parse HTTP request: {:?}", &e); + self.reset(); + Err(e) + } + } + } + StacksHttpPreamble::Response(ref http_response_preamble) => { + if http_response_preamble.is_chunked() { + return Err(NetError::InvalidState); + } + + // message of known length + test_debug!("read http response payload of {} bytes", buf.len(),); + + // sanity check -- if we're receiving a response, then we must have earlier issued + // a request. Thus, we must already know which response handler to use. + // Otherwise, someone sent us malformed data. + let handler_index = self.request_handler_index.ok_or_else(|| { + self.reset(); + NetError::DeserializeError("Unsolicited HTTP response".to_string()) + })?; + + let res = self.try_parse_response(handler_index, http_response_preamble, buf); + self.reset(); + res.map(|data_response| (StacksHttpMessage::Response(data_response), buf.len())) + } + } + } + + fn verify_payload_bytes( + &mut self, + _key: &StacksPublicKey, + _preamble: &StacksHttpPreamble, + _bytes: &[u8], + ) -> Result<(), NetError> { + // not defined for HTTP messages, but maybe we could add a signature header at some point + // in the future if needed. + Ok(()) + } + + /// Write out a message to `fd`. 
+ /// + /// NOTE: If we're sending a StacksHttpMessage::Request(..), then the next preamble and payload + /// received _must be_ a StacksHttpMessage::Response(..) in response to the request. + /// If it is not, then that decode will fail. + fn write_message( + &mut self, + fd: &mut W, + message: &StacksHttpMessage, + ) -> Result<(), NetError> { + match *message { + StacksHttpMessage::Request(ref req) => { + // client cannot send more than one request in parallel + if self.request_handler_index.is_some() { + test_debug!("Have pending request already"); + return Err(NetError::InProgress); + } + + // find the response handler we'll use + let (decoded_path, _) = decode_request_path(&req.preamble().path_and_query_str)?; + let handler_index = self + .find_response_handler(&req.preamble().verb, &decoded_path) + .ok_or(NetError::SendError(format!( + "No response handler found for `{} {}`", + &req.preamble().verb, + &decoded_path + )))?; + + req.send(fd)?; + + // remember this so we'll know how to decode the response. + // The next preamble and message we'll read _must be_ a response! + self.request_handler_index = Some(handler_index); + Ok(()) + } + StacksHttpMessage::Response(ref resp) => resp.send(fd), + StacksHttpMessage::Error(_, ref resp) => resp.send(fd), + } + } +} + +impl PeerNetwork { + /// Send a (non-blocking) HTTP request to a remote peer. + /// Returns the event ID on success. + pub fn connect_or_send_http_request( + &mut self, + data_url: UrlString, + addr: SocketAddr, + request: StacksHttpRequest, + ) -> Result { + PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { + PeerNetwork::with_http(network, |ref mut network, ref mut http| { + match http.connect_http( + network_state, + network, + data_url.clone(), + addr.clone(), + Some(request.clone()), + ) { + Ok(event_id) => Ok(event_id), + Err(NetError::AlreadyConnected(event_id, _)) => { + if let (Some(ref mut convo), Some(ref mut socket)) = + http.get_conversation_and_socket(event_id) + { + convo.send_request(request)?; + HttpPeer::saturate_http_socket(socket, convo)?; + Ok(event_id) + } else { + debug!("HTTP failed to connect to {:?}, {:?}", &data_url, &addr); + Err(NetError::PeerNotConnected) + } + } + Err(e) => { + return Err(e); + } + } + }) + }) + } +} + +/// Given a raw path, decode it (i.e. 
if it's url-encoded) +/// Return the (decoded-path, query-string) on success +pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { + let local_url = format!("http://local{}", path); + let url = Url::parse(&local_url).map_err(|_e| { + NetError::DeserializeError("Http request path could not be parsed".to_string()) + })?; + + let decoded_path = percent_decode_str(url.path()).decode_utf8().map_err(|_e| { + NetError::DeserializeError("Http request path could not be parsed as UTF-8".to_string()) + })?; + + let query_str = url.query(); + Ok(( + decoded_path.to_string(), + query_str.unwrap_or("").to_string(), + )) +} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 49498dafdf..90698e126e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -49,6 +49,7 @@ use stacks_common::codec::{ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, }; +use stacks_common::types::net::{Error as AddrError, PeerAddress, PeerHost}; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{ hex_bytes, to_hex, Hash160, Sha256Sum, DOUBLE_SHA256_ENCODED_SIZE, HASH160_ENCODED_SIZE, @@ -59,13 +60,13 @@ use stacks_common::util::secp256k1::{ use stacks_common::util::{get_epoch_time_secs, log}; use {rusqlite, serde_json, url}; -use self::dns::*; -pub use self::http::StacksHttp; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::{Error as burnchain_error, Txid}; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::coordinator::Error as coordinator_error; use crate::chainstate::stacks::db::blocks::MemPoolRejection; +use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::index::Error as marf_error; use crate::chainstate::stacks::{ Error as chainstate_error, Error as chain_error, StacksBlock, StacksBlockHeader, @@ -74,15 +75,25 @@ use crate::chainstate::stacks::{ use crate::clarity_vm::clarity::Error as clarity_error; use crate::core::mempool::*; use crate::core::{StacksEpoch, POX_REWARD_CYCLE_LENGTH}; -use crate::cost_estimates::FeeRateEstimate; +use crate::cost_estimates::metrics::CostMetric; +use crate::cost_estimates::{CostEstimator, FeeEstimator, FeeRateEstimate}; use crate::net::atlas::{Attachment, AttachmentInstance}; -use crate::net::http::HttpReservedHeader; -pub use crate::net::http::StacksBlockAcceptedData; +use crate::net::dns::*; +use crate::net::http::error::{HttpNotFound, HttpServerError}; +use crate::net::http::{ + Error as HttpErr, HttpRequestContents, HttpRequestPreamble, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, StacksHttpResponse, TipRequest, +}; +use crate::net::p2p::PeerNetwork; use crate::util_lib::bloom::{BloomFilter, BloomNodeHasher}; use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; use crate::util_lib::strings::UrlString; +/// Implements RPC API +pub mod api; /// Implements `ASEntry4` object, which is used in db.rs to store the AS number of an IP address. pub mod asn; /// Implements the Atlas network. 
This network uses the infrastructure created in `src/net` to @@ -106,6 +117,8 @@ pub mod db; pub mod dns; pub mod download; pub mod http; +/// Links http crate to Stacks +pub mod httpcore; pub mod inv; pub mod neighbors; pub mod p2p; @@ -119,11 +132,9 @@ pub mod relay; pub mod rpc; pub mod server; pub mod stackerdb; -pub mod stream; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; -pub use crate::net::stream::StreamCursor; #[cfg(test)] pub mod tests; @@ -222,8 +233,6 @@ pub enum Error { ClarityError(clarity_error), /// Catch-all for chainstate errors that don't map cleanly into network errors ChainstateError(String), - /// Catch-all for errors that a client should receive more information about - ClientError(ClientError), /// Coordinator hung up CoordinatorClosed, /// view of state is stale (e.g. from the sortition db) @@ -264,6 +273,10 @@ pub enum Error { StepTimeout, /// stacker DB chunk is too big StackerDBChunkTooBig(usize), + /// HTTP error + Http(HttpErr), + /// Invalid state machine state reached + InvalidState, } impl From for Error { @@ -291,33 +304,23 @@ impl From for Error { } } -/// Enum for passing data for ClientErrors -#[derive(Debug, Clone, PartialEq)] -pub enum ClientError { - /// Catch-all - Message(String), - /// 404 - NotFound(String), -} - -impl error::Error for ClientError { - fn cause(&self) -> Option<&dyn error::Error> { - None +impl From for Error { + fn from(e: HttpErr) -> Error { + Error::Http(e) } } -impl fmt::Display for ClientError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ClientError::Message(s) => write!(f, "{}", s), - ClientError::NotFound(s) => write!(f, "HTTP path not matched: {}", s), +impl From for Error { + fn from(e: AddrError) -> Error { + match e { + AddrError::DecodeError(s) => Error::DeserializeError(s), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { + match self { Error::SerializeError(ref s) => fmt::Display::fmt(s, f), Error::DeserializeError(ref s) => fmt::Display::fmt(s, f), Error::ReadError(ref io) => fmt::Display::fmt(io, f), @@ -366,7 +369,6 @@ impl fmt::Display for Error { Error::ChainstateError(ref s) => fmt::Display::fmt(s, f), Error::ClarityError(ref e) => fmt::Display::fmt(e, f), Error::MARFError(ref e) => fmt::Display::fmt(e, f), - Error::ClientError(ref e) => write!(f, "ClientError: {}", e), Error::CoordinatorClosed => write!(f, "Coordinator hung up"), Error::StaleView => write!(f, "State view is stale"), Error::ConnectionCycle => write!(f, "Tried to connect to myself"), @@ -420,6 +422,8 @@ impl fmt::Display for Error { Error::StackerDBChunkTooBig(ref sz) => { write!(f, "StackerDB chunk size is too big ({})", sz) } + Error::Http(e) => fmt::Display::fmt(&e, f), + Error::InvalidState => write!(f, "Invalid state-machine state reached"), } } } @@ -471,7 +475,6 @@ impl error::Error for Error { Error::PeerThrottled => None, Error::LookupError(ref _s) => None, Error::ChainstateError(ref _s) => None, - Error::ClientError(ref e) => Some(e), Error::ClarityError(ref e) => Some(e), Error::MARFError(ref e) => Some(e), Error::CoordinatorClosed => None, @@ -491,6 +494,8 @@ impl error::Error for Error { Error::InvalidStackerDBContract(..) => None, Error::StepTimeout => None, Error::StackerDBChunkTooBig(..) 
=> None, + Error::Http(ref e) => Some(e), + Error::InvalidState => None, } } } @@ -555,254 +560,251 @@ impl PartialEq for Error { } } -/// A container for an IPv4 or IPv6 address. -/// Rules: -/// -- If this is an IPv6 address, the octets are in network byte order -/// -- If this is an IPv4 address, the octets must encode an IPv6-to-IPv4-mapped address -pub struct PeerAddress([u8; 16]); -impl_array_newtype!(PeerAddress, u8, 16); -impl_array_hexstring_fmt!(PeerAddress); -impl_byte_array_newtype!(PeerAddress, u8, 16); - -impl Serialize for PeerAddress { - fn serialize(&self, s: S) -> Result { - let inst = format!("{}", self.to_socketaddr(0).ip()); - s.serialize_str(inst.as_str()) - } +/// Extension trait for PeerHost to decode it from a UrlString +pub trait PeerHostExtensions { + fn try_from_url(url_str: &UrlString) -> Option; } -impl<'de> Deserialize<'de> for PeerAddress { - fn deserialize>(d: D) -> Result { - let inst = String::deserialize(d)?; - let ip = inst.parse::().map_err(de_Error::custom)?; - - Ok(PeerAddress::from_ip(&ip)) - } -} - -impl PeerAddress { - pub fn from_slice(bytes: &[u8]) -> Option { - if bytes.len() != 16 { - return None; - } - - let mut bytes16 = [0u8; 16]; - bytes16.copy_from_slice(&bytes[0..16]); - Some(PeerAddress(bytes16)) - } - - /// Is this an IPv4 address? - pub fn is_ipv4(&self) -> bool { - self.ipv4_octets().is_some() - } - - /// Get the octet representation of this peer address as an IPv4 address. - /// The last 4 bytes of the list contain the IPv4 address. - /// This method returns None if the bytes don't encode a valid IPv4-mapped address (i.e. ::ffff:0:0/96) - pub fn ipv4_octets(&self) -> Option<[u8; 4]> { - if self.0[0..12] - != [ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - ] - { - return None; - } - let mut ret = [0u8; 4]; - ret.copy_from_slice(&self.0[12..16]); - Some(ret) - } - - /// Return the bit representation of this peer address as an IPv4 address, in network byte - /// order. Return None if this is not an IPv4 address. 
- pub fn ipv4_bits(&self) -> Option { - let octets_opt = self.ipv4_octets(); - if octets_opt.is_none() { - return None; - } +impl PeerHostExtensions for PeerHost { + fn try_from_url(url_str: &UrlString) -> Option { + let url = match url_str.parse_to_block_url() { + Ok(url) => url, + Err(_e) => { + return None; + } + }; - let octets = octets_opt.unwrap(); - Some( - ((octets[0] as u32) << 24) - | ((octets[1] as u32) << 16) - | ((octets[2] as u32) << 8) - | (octets[3] as u32), - ) - } + let port = match url.port_or_known_default() { + Some(port) => port, + None => { + return None; + } + }; - /// Convert to SocketAddr - pub fn to_socketaddr(&self, port: u16) -> SocketAddr { - if self.is_ipv4() { - SocketAddr::new( - IpAddr::V4(Ipv4Addr::new( - self.0[12], self.0[13], self.0[14], self.0[15], - )), + match url.host() { + Some(url::Host::Domain(name)) => Some(PeerHost::DNS(name.to_string(), port)), + Some(url::Host::Ipv4(addr)) => Some(PeerHost::from_socketaddr(&SocketAddr::new( + IpAddr::V4(addr), port, - ) - } else { - let addr_words: [u16; 8] = [ - ((self.0[0] as u16) << 8) | (self.0[1] as u16), - ((self.0[2] as u16) << 8) | (self.0[3] as u16), - ((self.0[4] as u16) << 8) | (self.0[5] as u16), - ((self.0[6] as u16) << 8) | (self.0[7] as u16), - ((self.0[8] as u16) << 8) | (self.0[9] as u16), - ((self.0[10] as u16) << 8) | (self.0[11] as u16), - ((self.0[12] as u16) << 8) | (self.0[13] as u16), - ((self.0[14] as u16) << 8) | (self.0[15] as u16), - ]; - - SocketAddr::new( - IpAddr::V6(Ipv6Addr::new( - addr_words[0], - addr_words[1], - addr_words[2], - addr_words[3], - addr_words[4], - addr_words[5], - addr_words[6], - addr_words[7], - )), + ))), + Some(url::Host::Ipv6(addr)) => Some(PeerHost::from_socketaddr(&SocketAddr::new( + IpAddr::V6(addr), port, - ) - } - } - - /// Convert from socket address - pub fn from_socketaddr(addr: &SocketAddr) -> PeerAddress { - PeerAddress::from_ip(&addr.ip()) - } - - /// Convert from IP address - pub fn from_ip(addr: &IpAddr) -> PeerAddress { - match addr { - IpAddr::V4(ref addr) => { - let octets = addr.octets(); - PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - octets[0], octets[1], octets[2], octets[3], - ]) - } - IpAddr::V6(ref addr) => { - let words = addr.segments(); - PeerAddress([ - (words[0] >> 8) as u8, - (words[0] & 0xff) as u8, - (words[1] >> 8) as u8, - (words[1] & 0xff) as u8, - (words[2] >> 8) as u8, - (words[2] & 0xff) as u8, - (words[3] >> 8) as u8, - (words[3] & 0xff) as u8, - (words[4] >> 8) as u8, - (words[4] & 0xff) as u8, - (words[5] >> 8) as u8, - (words[5] & 0xff) as u8, - (words[6] >> 8) as u8, - (words[6] & 0xff) as u8, - (words[7] >> 8) as u8, - (words[7] & 0xff) as u8, - ]) - } - } - } - - /// Convert from ipv4 octets - pub fn from_ipv4(o1: u8, o2: u8, o3: u8, o4: u8) -> PeerAddress { - PeerAddress([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, o1, o2, o3, o4, - ]) - } - - /// Is this the any-network address? i.e. 0.0.0.0 (v4) or :: (v6)? - pub fn is_anynet(&self) -> bool { - self.0 == [0x00; 16] || self == &PeerAddress::from_ipv4(0, 0, 0, 0) - } - - /// Is this a private IP address? 
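Aside: the PeerAddress code being removed from this module stores every peer as 16 bytes, with IPv4 peers kept in the IPv4-mapped form ::ffff:a.b.c.d, so the private-range check below only needs to look at the last four octets once the ::ffff:0:0/96 prefix has been confirmed. A small std-only sketch of that convention with simplified, hypothetical helper names, not the moved code itself.

use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

/// Store any peer as 16 bytes; IPv4 peers use the IPv4-mapped form ::ffff:a.b.c.d.
fn to_peer_bytes(addr: &IpAddr) -> [u8; 16] {
    match addr {
        IpAddr::V4(v4) => v4.to_ipv6_mapped().octets(),
        IpAddr::V6(v6) => v6.octets(),
    }
}

/// Recover the IPv4 octets if the 16 bytes encode an IPv4-mapped address.
fn ipv4_octets(bytes: &[u8; 16]) -> Option<[u8; 4]> {
    if bytes[0..12] != [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff] {
        return None;
    }
    Some([bytes[12], bytes[13], bytes[14], bytes[15]])
}

/// Simplified private-range test (10/8, 172.16/12, 192.168/16), as in the check below.
fn is_private_v4(bytes: &[u8; 16]) -> bool {
    match ipv4_octets(bytes) {
        Some([a, b, _, _]) => {
            a == 10 || (a == 172 && (16..=31).contains(&b)) || (a == 192 && b == 168)
        }
        None => false,
    }
}

fn main() {
    let peer = to_peer_bytes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)));
    assert_eq!(ipv4_octets(&peer), Some([192, 168, 1, 10]));
    assert!(is_private_v4(&peer));

    let v6 = to_peer_bytes(&IpAddr::V6(Ipv6Addr::LOCALHOST));
    assert_eq!(ipv4_octets(&v6), None);
}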
- pub fn is_in_private_range(&self) -> bool { - if self.is_ipv4() { - // 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16 - self.0[12] == 10 - || (self.0[12] == 172 && self.0[13] >= 16 && self.0[13] <= 31) - || (self.0[12] == 192 && self.0[13] == 168) - } else { - self.0[0] >= 0xfc + ))), + None => None, } } } -pub const STACKS_PUBLIC_KEY_ENCODED_SIZE: u32 = 33; - -/// supported HTTP content types -#[derive(Debug, Clone, PartialEq)] -pub enum HttpContentType { - Bytes, - Text, - JSON, +/// Runtime arguments to an RPC handler +#[derive(Default)] +pub struct RPCHandlerArgs<'a> { + /// What height at which this node will terminate (testnet only) + pub exit_at_block_height: Option, + /// What's the hash of the genesis chainstate? + pub genesis_chainstate_hash: Sha256Sum, + /// event observer for the mempool + pub event_observer: Option<&'a dyn MemPoolEventDispatcher>, + /// tx runtime cost estimator + pub cost_estimator: Option<&'a dyn CostEstimator>, + /// tx fee estimator + pub fee_estimator: Option<&'a dyn FeeEstimator>, + /// tx runtime cost metric + pub cost_metric: Option<&'a dyn CostMetric>, } -impl fmt::Display for HttpContentType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.as_str()) +impl<'a> RPCHandlerArgs<'a> { + pub fn get_estimators_ref( + &self, + ) -> Option<(&dyn CostEstimator, &dyn FeeEstimator, &dyn CostMetric)> { + match (self.cost_estimator, self.fee_estimator, self.cost_metric) { + (Some(a), Some(b), Some(c)) => Some((a, b, c)), + _ => None, + } } } -impl HttpContentType { - pub fn as_str(&self) -> &'static str { - match *self { - HttpContentType::Bytes => "application/octet-stream", - HttpContentType::Text => "text/plain", - HttpContentType::JSON => "application/json", - } - } +/// Wrapper around Stacks chainstate data that an HTTP request handler might need +pub struct StacksNodeState<'a> { + inner_network: Option<&'a mut PeerNetwork>, + inner_sortdb: Option<&'a SortitionDB>, + inner_chainstate: Option<&'a mut StacksChainState>, + inner_mempool: Option<&'a mut MemPoolDB>, + inner_rpc_args: Option<&'a RPCHandlerArgs<'a>>, + relay_message: Option, } -impl FromStr for HttpContentType { - type Err = codec_error; - - fn from_str(header: &str) -> Result { - let s = header.to_string().to_lowercase(); - if s == "application/octet-stream" { - Ok(HttpContentType::Bytes) - } else if s == "text/plain" { - Ok(HttpContentType::Text) - } else if s == "application/json" { - Ok(HttpContentType::JSON) - } else { - Err(codec_error::DeserializeError( - "Unsupported HTTP content type".to_string(), - )) - } +impl<'a> StacksNodeState<'a> { + pub fn new( + inner_network: &'a mut PeerNetwork, + inner_sortdb: &'a SortitionDB, + inner_chainstate: &'a mut StacksChainState, + inner_mempool: &'a mut MemPoolDB, + inner_rpc_args: &'a RPCHandlerArgs<'a>, + ) -> StacksNodeState<'a> { + StacksNodeState { + inner_network: Some(inner_network), + inner_sortdb: Some(inner_sortdb), + inner_chainstate: Some(inner_chainstate), + inner_mempool: Some(inner_mempool), + inner_rpc_args: Some(inner_rpc_args), + relay_message: None, + } + } + + /// Run func() with the inner state + pub fn with_node_state(&mut self, func: F) -> R + where + F: FnOnce( + &mut PeerNetwork, + &SortitionDB, + &mut StacksChainState, + &mut MemPoolDB, + &RPCHandlerArgs<'a>, + ) -> R, + { + let network = self + .inner_network + .take() + .expect("FATAL: network not restored"); + let sortdb = self + .inner_sortdb + .take() + .expect("FATAL: sortdb not restored"); + let chainstate = self + .inner_chainstate + 
.take() + .expect("FATAL: chainstate not restored"); + let mempool = self + .inner_mempool + .take() + .expect("FATAL: mempool not restored"); + let rpc_args = self + .inner_rpc_args + .take() + .expect("FATAL: rpc args not restored"); + + let res = func(network, sortdb, chainstate, mempool, rpc_args); + + self.inner_network = Some(network); + self.inner_sortdb = Some(sortdb); + self.inner_chainstate = Some(chainstate); + self.inner_mempool = Some(mempool); + self.inner_rpc_args = Some(rpc_args); + + res + } + + pub fn canonical_stacks_tip_height(&mut self) -> u32 { + self.with_node_state(|network, _, _, _, _| { + network.burnchain_tip.canonical_stacks_tip_height as u32 + }) } -} -/// HTTP request preamble -#[derive(Debug, Clone, PartialEq)] -pub struct HttpRequestPreamble { - pub version: HttpVersion, - pub verb: String, - pub path: String, - pub host: PeerHost, - pub content_type: Option, - pub content_length: Option, - pub keep_alive: bool, - pub headers: HashMap, -} + pub fn set_relay_message(&mut self, msg: StacksMessageType) { + self.relay_message = Some(msg); + } + + pub fn take_relay_message(&mut self) -> Option { + self.relay_message.take() + } + + /// Load up the canonical Stacks chain tip. Note that this is subject to both burn chain block + /// Stacks block availability -- different nodes with different partial replicas of the Stacks chain state + /// will return different values here. + /// + /// # Warn + /// - There is a potential race condition. If this function is loading the latest unconfirmed + /// tip, that tip may get invalidated by the time it is used in `maybe_read_only_clarity_tx`, + /// which is used to load clarity state at a particular tip (which would lead to a 404 error). + /// If this race condition occurs frequently, we can modify `maybe_read_only_clarity_tx` to + /// re-load the unconfirmed chain tip. Refer to issue #2997. + /// + /// # Inputs + /// - `tip_req` is given by the HTTP request as the optional query parameter for the chain tip + /// hash. It will be UseLatestAnchoredTip if there was no parameter given. If it is set to + /// `latest`, the parameter will be set to UseLatestUnconfirmedTip. + /// + /// Returns the requested chain tip on success. 
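Aside: the doc comment here describes three ways a request can select a chain tip (the latest anchored tip, the latest unconfirmed tip with a fall-back, or an explicit tip hash), which the body of load_stacks_chain_tip below turns into a decision tree. A compact sketch of that decision tree with simplified stand-ins; the TipSelector enum and the lookup functions are hypothetical, and the real code returns HTTP 404/500 responses rather than strings.

/// Simplified stand-ins for the patch's types: a 32-byte block id and the three ways a
/// request can name a chain tip (no parameter, `?tip=latest`, or `?tip=<hash>`).
type BlockId = [u8; 32];

enum TipSelector {
    UseLatestAnchoredTip,
    UseLatestUnconfirmedTip,
    SpecificTip(BlockId),
}

/// Hypothetical lookups standing in for the chainstate and sortition DB queries.
fn latest_anchored_tip() -> Option<BlockId> {
    Some([0x11; 32])
}
fn latest_unconfirmed_tip() -> Option<BlockId> {
    None // e.g. no unconfirmed microblock state has been processed yet
}

/// Mirror of the decision tree in load_stacks_chain_tip: fall back from the unconfirmed
/// tip to the confirmed tip, and treat a missing tip as a 404-style error.
fn resolve_tip(req: TipSelector) -> Result<BlockId, String> {
    match req {
        TipSelector::SpecificTip(tip) => Ok(tip),
        TipSelector::UseLatestAnchoredTip => {
            latest_anchored_tip().ok_or_else(|| "No such confirmed tip".to_string())
        }
        TipSelector::UseLatestUnconfirmedTip => latest_unconfirmed_tip()
            .or_else(latest_anchored_tip)
            .ok_or_else(|| "No such confirmed tip".to_string()),
    }
}

fn main() {
    assert_eq!(resolve_tip(TipSelector::UseLatestAnchoredTip).unwrap(), [0x11; 32]);
    assert!(resolve_tip(TipSelector::UseLatestUnconfirmedTip).is_ok());
    assert_eq!(resolve_tip(TipSelector::SpecificTip([0x22; 32])).unwrap(), [0x22; 32]);
}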
+ /// If the chain tip could not be found, then it returns Err(HttpNotFound) + /// If there was an error querying the DB, then it returns Err(HttpServerError) + pub fn load_stacks_chain_tip( + &mut self, + preamble: &HttpRequestPreamble, + contents: &HttpRequestContents, + ) -> Result { + self.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let tip_req = contents.tip_request(); + match tip_req { + TipRequest::UseLatestUnconfirmedTip => { + let unconfirmed_chain_tip_opt = match &mut chainstate.unconfirmed_state { + Some(unconfirmed_state) => { + match unconfirmed_state.get_unconfirmed_state_if_exists() { + Ok(res) => res, + Err(msg) => { + return Err(StacksHttpResponse::new_error( + preamble, + &HttpNotFound::new(format!("No unconfirmed tip: {}", &msg)), + )); + } + } + } + None => None, + }; -/// HTTP response preamble -#[derive(Debug, Clone, PartialEq)] -pub struct HttpResponsePreamble { - pub status_code: u16, - pub reason: String, - pub keep_alive: bool, - pub content_length: Option, // if not given, then content will be transfer-encoed: chunked - pub content_type: HttpContentType, // required header - pub request_id: u32, // X-Request-ID - pub headers: HashMap, + if let Some(unconfirmed_chain_tip) = unconfirmed_chain_tip_opt { + Ok(unconfirmed_chain_tip) + } else { + match chainstate.get_stacks_chain_tip(sortdb) { + Ok(Some(tip)) => Ok(StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_block_hash, + )), + Ok(None) => { + return Err(StacksHttpResponse::new_error( + preamble, + &HttpNotFound::new("No such confirmed tip".to_string()), + )); + } + Err(e) => { + return Err(StacksHttpResponse::new_error( + preamble, + &HttpServerError::new(format!( + "Failed to load chain tip: {:?}", + &e + )), + )); + } + } + } + } + TipRequest::SpecificTip(tip) => Ok(tip.clone()), + TipRequest::UseLatestAnchoredTip => match chainstate.get_stacks_chain_tip(sortdb) { + Ok(Some(tip)) => Ok(StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_block_hash, + )), + Ok(None) => { + return Err(StacksHttpResponse::new_error( + preamble, + &HttpNotFound::new( + "No stacks chain tip exists at this point in time.".to_string(), + ), + )); + } + Err(e) => { + return Err(StacksHttpResponse::new_error( + preamble, + &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)), + )); + } + }, + } + }) + } } -/// Maximum size an HTTP request or response preamble can be (within reason) -pub const HTTP_PREAMBLE_MAX_ENCODED_SIZE: u32 = 4096; -pub const HTTP_PREAMBLE_MAX_NUM_HEADERS: usize = 64; +pub const STACKS_PUBLIC_KEY_ENCODED_SIZE: u32 = 33; /// P2P message preamble -- included in all p2p network messages #[derive(Debug, Clone, PartialEq)] @@ -985,17 +987,6 @@ pub struct NatPunchData { pub nonce: u32, } -define_u8_enum!(MemPoolSyncDataID { - BloomFilter = 0x01, - TxTags = 0x02 -}); - -#[derive(Debug, Clone, PartialEq)] -pub enum MemPoolSyncData { - BloomFilter(BloomFilter), - TxTags([u8; 32], Vec), -} - /// Inform the remote peer of (a page of) the list of stacker DB contracts this node supports #[derive(Debug, Clone, PartialEq)] pub struct StackerDBHandshakeData { @@ -1086,774 +1077,6 @@ pub enum StacksMessageType { StackerDBPushChunk(StackerDBPushChunkData), } -/// Peer address variants -#[derive(Clone, PartialEq)] -pub enum PeerHost { - DNS(String, u16), - IP(PeerAddress, u16), -} - -impl fmt::Display for PeerHost { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - PeerHost::DNS(ref s, ref p) => write!(f, 
"{}:{}", s, p), - PeerHost::IP(ref a, ref p) => write!(f, "{}", a.to_socketaddr(*p)), - } - } -} - -impl fmt::Debug for PeerHost { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - PeerHost::DNS(ref s, ref p) => write!(f, "PeerHost::DNS({},{})", s, p), - PeerHost::IP(ref a, ref p) => write!(f, "PeerHost::IP({:?},{})", a, p), - } - } -} - -impl Hash for PeerHost { - fn hash(&self, state: &mut H) { - match *self { - PeerHost::DNS(ref name, ref port) => { - "DNS".hash(state); - name.hash(state); - port.hash(state); - } - PeerHost::IP(ref addrbytes, ref port) => { - "IP".hash(state); - addrbytes.hash(state); - port.hash(state); - } - } - } -} - -impl PeerHost { - pub fn hostname(&self) -> String { - match *self { - PeerHost::DNS(ref s, _) => s.clone(), - PeerHost::IP(ref a, ref p) => format!("{}", a.to_socketaddr(*p).ip()), - } - } - - pub fn port(&self) -> u16 { - match *self { - PeerHost::DNS(_, ref p) => *p, - PeerHost::IP(_, ref p) => *p, - } - } - - pub fn from_host_port(host: String, port: u16) -> PeerHost { - // try as IP, and fall back to DNS - match host.parse::() { - Ok(addr) => PeerHost::IP(PeerAddress::from_ip(&addr), port), - Err(_) => PeerHost::DNS(host, port), - } - } - - pub fn from_socketaddr(socketaddr: &SocketAddr) -> PeerHost { - PeerHost::IP(PeerAddress::from_socketaddr(socketaddr), socketaddr.port()) - } - - pub fn try_from_url(url_str: &UrlString) -> Option { - let url = match url_str.parse_to_block_url() { - Ok(url) => url, - Err(_e) => { - return None; - } - }; - - let port = match url.port_or_known_default() { - Some(port) => port, - None => { - return None; - } - }; - - match url.host() { - Some(url::Host::Domain(name)) => Some(PeerHost::DNS(name.to_string(), port)), - Some(url::Host::Ipv4(addr)) => Some(PeerHost::from_socketaddr(&SocketAddr::new( - IpAddr::V4(addr), - port, - ))), - Some(url::Host::Ipv6(addr)) => Some(PeerHost::from_socketaddr(&SocketAddr::new( - IpAddr::V6(addr), - port, - ))), - None => None, - } - } - - pub fn to_host_port(&self) -> (String, u16) { - match *self { - PeerHost::DNS(ref s, ref p) => (s.clone(), *p), - PeerHost::IP(ref i, ref p) => (format!("{}", i.to_socketaddr(0).ip()), *p), - } - } -} - -/// Affirmation map data reported -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCAffirmationData { - pub heaviest: AffirmationMap, - pub stacks_tip: AffirmationMap, - pub sortition_tip: AffirmationMap, - pub tentative_best: AffirmationMap, -} - -/// Information about the last PoX anchor block -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCLastPoxAnchorData { - pub anchor_block_hash: BlockHeaderHash, - pub anchor_block_txid: Txid, -} - -/// The data we return on GET /v2/info -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCPeerInfoData { - pub peer_version: u32, - pub pox_consensus: ConsensusHash, - pub burn_block_height: u64, - pub stable_pox_consensus: ConsensusHash, - pub stable_burn_block_height: u64, - pub server_version: String, - pub network_id: u32, - pub parent_network_id: u32, - pub stacks_tip_height: u64, - pub stacks_tip: BlockHeaderHash, - pub stacks_tip_consensus_hash: ConsensusHash, - pub genesis_chainstate_hash: Sha256Sum, - pub unanchored_tip: Option, - pub unanchored_seq: Option, - pub exit_at_block_height: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub node_public_key: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub node_public_key_hash: Option, - 
#[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub affirmations: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub last_pox_anchor: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub stackerdbs: Option>, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCPoxCurrentCycleInfo { - pub id: u64, - pub min_threshold_ustx: u64, - pub stacked_ustx: u64, - pub is_pox_active: bool, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCPoxNextCycleInfo { - pub id: u64, - pub min_threshold_ustx: u64, - pub min_increment_ustx: u64, - pub stacked_ustx: u64, - pub prepare_phase_start_block_height: u64, - pub blocks_until_prepare_phase: i64, - pub reward_phase_start_block_height: u64, - pub blocks_until_reward_phase: u64, - pub ustx_until_pox_rejection: u64, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCPoxContractVersion { - pub contract_id: String, - pub activation_burnchain_block_height: u64, - pub first_reward_cycle_id: u64, -} - -/// The data we return on GET /v2/pox -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCPoxInfoData { - pub contract_id: String, - pub pox_activation_threshold_ustx: u64, - pub first_burnchain_block_height: u64, - pub current_burnchain_block_height: u64, - pub prepare_phase_block_length: u64, - pub reward_phase_block_length: u64, - pub reward_slots: u64, - pub rejection_fraction: u64, - pub total_liquid_supply_ustx: u64, - pub current_cycle: RPCPoxCurrentCycleInfo, - pub next_cycle: RPCPoxNextCycleInfo, - - // below are included for backwards-compatibility - pub min_amount_ustx: u64, - pub prepare_cycle_length: u64, - pub reward_cycle_id: u64, - pub reward_cycle_length: u64, - pub rejection_votes_left_required: u64, - pub next_reward_cycle_in: u64, - - // Information specific to each PoX contract version - pub contract_versions: Vec, -} - -/// Headers response payload -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ExtendedStacksHeader { - pub consensus_hash: ConsensusHash, - #[serde( - serialize_with = "ExtendedStacksHeader_StacksBlockHeader_serialize", - deserialize_with = "ExtendedStacksHeader_StacksBlockHeader_deserialize" - )] - pub header: StacksBlockHeader, - pub parent_block_id: StacksBlockId, -} - -/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string -fn ExtendedStacksHeader_StacksBlockHeader_serialize( - header: &StacksBlockHeader, - s: S, -) -> Result { - let bytes = header.serialize_to_vec(); - let header_hex = to_hex(&bytes); - s.serialize_str(&header_hex.as_str()) -} - -/// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string -fn ExtendedStacksHeader_StacksBlockHeader_deserialize<'de, D: serde::Deserializer<'de>>( - d: D, -) -> Result { - let header_hex = String::deserialize(d)?; - let header_bytes = hex_bytes(&header_hex).map_err(de_Error::custom)?; - StacksBlockHeader::consensus_deserialize(&mut &header_bytes[..]).map_err(de_Error::custom) -} - -impl StacksMessageCodec for ExtendedStacksHeader { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - write_next(fd, &self.consensus_hash)?; - write_next(fd, &self.header)?; - write_next(fd, &self.parent_block_id)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let ch = read_next(fd)?; - let bh = read_next(fd)?; - let pbid = read_next(fd)?; - Ok(ExtendedStacksHeader { - consensus_hash: ch, - header: bh, - 
parent_block_id: pbid, - }) - } -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCFeeEstimate { - pub fee_rate: f64, - pub fee: u64, -} - -impl RPCFeeEstimate { - pub fn estimate_fees(scalar: u64, fee_rates: FeeRateEstimate) -> Vec { - let estimated_fees_f64 = fee_rates.clone() * (scalar as f64); - vec![ - RPCFeeEstimate { - fee: estimated_fees_f64.low as u64, - fee_rate: fee_rates.low, - }, - RPCFeeEstimate { - fee: estimated_fees_f64.middle as u64, - fee_rate: fee_rates.middle, - }, - RPCFeeEstimate { - fee: estimated_fees_f64.high as u64, - fee_rate: fee_rates.high, - }, - ] - } -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCFeeEstimateResponse { - pub estimated_cost: ExecutionCost, - pub estimated_cost_scalar: u64, - pub estimations: Vec, - pub cost_scalar_change_by_byte: f64, -} - -#[derive(Debug, Clone, PartialEq, Copy, Hash)] -#[repr(u8)] -pub enum HttpVersion { - Http10 = 0x10, - Http11 = 0x11, -} - -#[derive(Debug, Clone, PartialEq, Hash)] -pub struct HttpRequestMetadata { - pub version: HttpVersion, - pub peer: PeerHost, - pub keep_alive: bool, - pub canonical_stacks_tip_height: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct DataVarResponse { - pub data: String, - #[serde(rename = "proof")] - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub marf_proof: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ConstantValResponse { - pub data: String, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MapEntryResponse { - pub data: String, - #[serde(rename = "proof")] - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub marf_proof: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ContractSrcResponse { - pub source: String, - pub publish_height: u32, - #[serde(rename = "proof")] - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub marf_proof: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct GetIsTraitImplementedResponse { - pub is_implemented: bool, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct CallReadOnlyResponse { - pub okay: bool, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub result: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub cause: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct AccountEntryResponse { - pub balance: String, - pub locked: String, - pub unlock_height: u64, - pub nonce: u64, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub balance_proof: Option, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(default)] - pub nonce_proof: Option, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum UnconfirmedTransactionStatus { - Microblock { - block_hash: BlockHeaderHash, - seq: u16, - }, - Mempool, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct UnconfirmedTransactionResponse { - pub tx: String, - pub status: UnconfirmedTransactionStatus, -} - -#[derive(Serialize, Deserialize)] -pub struct PostTransactionRequestBody { - pub tx: String, - pub attachment: Option, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct GetAttachmentResponse { - pub attachment: Attachment, -} - -impl Serialize for GetAttachmentResponse { - fn serialize(&self, s: S) -> 
Result { - let hex_encoded = to_hex(&self.attachment.content[..]); - s.serialize_str(hex_encoded.as_str()) - } -} - -impl<'de> Deserialize<'de> for GetAttachmentResponse { - fn deserialize>(d: D) -> Result { - let payload = String::deserialize(d)?; - let hex_encoded = payload.parse::().map_err(de_Error::custom)?; - let bytes = hex_bytes(&hex_encoded).map_err(de_Error::custom)?; - let attachment = Attachment::new(bytes); - Ok(GetAttachmentResponse { attachment }) - } -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct GetAttachmentsInvResponse { - pub block_id: StacksBlockId, - pub pages: Vec, -} - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct AttachmentPage { - pub index: u32, - pub inventory: Vec, -} - -/// Request ID to use or expect from non-Stacks HTTP clients. -/// In particular, if a HTTP response does not contain the x-request-id header, then it's assumed -/// to be this value. This is needed to support fetching immutables like block and microblock data -/// from non-Stacks nodes (like Gaia hubs, CDNs, vanilla HTTP servers, and so on). -pub const HTTP_REQUEST_ID_RESERVED: u32 = 0; - -impl HttpRequestMetadata { - pub fn new( - host: String, - port: u16, - canonical_stacks_tip_height: Option, - ) -> HttpRequestMetadata { - HttpRequestMetadata { - version: HttpVersion::Http11, - peer: PeerHost::from_host_port(host, port), - keep_alive: true, - canonical_stacks_tip_height, - } - } - - pub fn from_host( - peer_host: PeerHost, - canonical_stacks_tip_height: Option, - ) -> HttpRequestMetadata { - HttpRequestMetadata { - version: HttpVersion::Http11, - peer: peer_host, - keep_alive: true, - canonical_stacks_tip_height, - } - } - - pub fn from_preamble(preamble: &HttpRequestPreamble) -> HttpRequestMetadata { - let mut canonical_stacks_tip_height = None; - for header in &preamble.headers { - if let Some(HttpReservedHeader::CanonicalStacksTipHeight(h)) = - HttpReservedHeader::try_from_str(&header.0, &header.1) - { - canonical_stacks_tip_height = Some(h); - break; - } - } - HttpRequestMetadata { - version: preamble.version, - peer: preamble.host.clone(), - keep_alive: preamble.keep_alive, - canonical_stacks_tip_height, - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct CallReadOnlyRequestBody { - pub sender: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub sponsor: Option, - pub arguments: Vec, -} - -#[derive(Serialize, Deserialize)] -pub struct FeeRateEstimateRequestBody { - #[serde(default)] - pub estimated_len: Option, - pub transaction_payload: String, -} - -/// Items in the NeighborsInfo -- combines NeighborKey and NeighborAddress -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCNeighbor { - pub network_id: u32, - pub peer_version: u32, - #[serde(rename = "ip")] - pub addrbytes: PeerAddress, - pub port: u16, - pub public_key_hash: Hash160, - pub authenticated: bool, - #[serde(skip_serializing_if = "Option::is_none")] - pub stackerdbs: Option>, -} - -impl RPCNeighbor { - pub fn from_neighbor_key_and_pubkh( - nk: NeighborKey, - pkh: Hash160, - auth: bool, - stackerdbs: Vec, - ) -> RPCNeighbor { - RPCNeighbor { - network_id: nk.network_id, - peer_version: nk.peer_version, - addrbytes: nk.addrbytes, - port: nk.port, - public_key_hash: pkh, - authenticated: auth, - stackerdbs: Some(stackerdbs), - } - } -} - -/// Struct given back from a call to `/v2/neighbors`. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct RPCNeighborsInfo { - pub bootstrap: Vec, - pub sample: Vec, - pub inbound: Vec, - pub outbound: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -pub enum TipRequest { - UseLatestAnchoredTip, - UseLatestUnconfirmedTip, - SpecificTip(StacksBlockId), -} - -/// All HTTP request paths we support, and the arguments they carry in their paths -#[derive(Debug, Clone, PartialEq)] -pub enum HttpRequestType { - GetInfo(HttpRequestMetadata), - GetPoxInfo(HttpRequestMetadata, TipRequest), - GetNeighbors(HttpRequestMetadata), - GetHeaders(HttpRequestMetadata, u64, TipRequest), - GetBlock(HttpRequestMetadata, StacksBlockId), - GetMicroblocksIndexed(HttpRequestMetadata, StacksBlockId), - GetMicroblocksConfirmed(HttpRequestMetadata, StacksBlockId), - GetMicroblocksUnconfirmed(HttpRequestMetadata, StacksBlockId, u16), - GetTransactionUnconfirmed(HttpRequestMetadata, Txid), - PostTransaction(HttpRequestMetadata, StacksTransaction, Option), - PostBlock(HttpRequestMetadata, ConsensusHash, StacksBlock), - PostMicroblock(HttpRequestMetadata, StacksMicroblock, TipRequest), - GetAccount(HttpRequestMetadata, PrincipalData, TipRequest, bool), - GetDataVar( - HttpRequestMetadata, - StacksAddress, - ContractName, - ClarityName, - TipRequest, - bool, - ), - GetConstantVal( - HttpRequestMetadata, - StacksAddress, - ContractName, - ClarityName, - TipRequest, - ), - GetMapEntry( - HttpRequestMetadata, - StacksAddress, - ContractName, - ClarityName, - Value, - TipRequest, - bool, - ), - FeeRateEstimate(HttpRequestMetadata, TransactionPayload, u64), - CallReadOnlyFunction( - HttpRequestMetadata, - StacksAddress, - ContractName, - PrincipalData, - Option, - ClarityName, - Vec, - TipRequest, - ), - GetTransferCost(HttpRequestMetadata), - GetContractSrc( - HttpRequestMetadata, - StacksAddress, - ContractName, - TipRequest, - bool, - ), - GetContractABI(HttpRequestMetadata, StacksAddress, ContractName, TipRequest), - OptionsPreflight(HttpRequestMetadata, String), - GetAttachment(HttpRequestMetadata, Hash160), - GetAttachmentsInv(HttpRequestMetadata, StacksBlockId, HashSet), - GetIsTraitImplemented( - HttpRequestMetadata, - StacksAddress, - ContractName, - TraitIdentifier, - TipRequest, - ), - MemPoolQuery(HttpRequestMetadata, MemPoolSyncData, Option), - /// StackerDB HTTP queries - GetStackerDBMetadata(HttpRequestMetadata, QualifiedContractIdentifier), - GetStackerDBChunk( - HttpRequestMetadata, - QualifiedContractIdentifier, - u32, - Option, - ), - PostStackerDBChunk( - HttpRequestMetadata, - QualifiedContractIdentifier, - StackerDBChunkData, - ), - /// catch-all for any errors we should surface from parsing - ClientError(HttpRequestMetadata, ClientError), -} - -/// The fields that Actually Matter to http responses -#[derive(Debug, Clone, PartialEq)] -pub struct HttpResponseMetadata { - pub client_version: HttpVersion, - pub client_keep_alive: bool, - pub request_id: u32, - pub content_length: Option, - pub canonical_stacks_tip_height: Option, -} - -impl HttpResponseMetadata { - pub fn make_request_id() -> u32 { - let mut rng = thread_rng(); - let mut request_id = HTTP_REQUEST_ID_RESERVED; - while request_id == HTTP_REQUEST_ID_RESERVED { - request_id = rng.next_u32(); - } - request_id - } - - pub fn new( - client_version: HttpVersion, - request_id: u32, - content_length: Option, - client_keep_alive: bool, - canonical_stacks_tip_height: Option, - ) -> HttpResponseMetadata { - HttpResponseMetadata { - client_version: client_version, - client_keep_alive: 
client_keep_alive, - request_id: request_id, - content_length: content_length, - canonical_stacks_tip_height: canonical_stacks_tip_height, - } - } - - pub fn from_preamble( - request_version: HttpVersion, - preamble: &HttpResponsePreamble, - ) -> HttpResponseMetadata { - let mut canonical_stacks_tip_height = None; - for header in &preamble.headers { - if let Some(HttpReservedHeader::CanonicalStacksTipHeight(h)) = - HttpReservedHeader::try_from_str(&header.0, &header.1) - { - canonical_stacks_tip_height = Some(h); - break; - } - } - HttpResponseMetadata { - client_version: request_version, - client_keep_alive: preamble.keep_alive, - request_id: preamble.request_id, - content_length: preamble.content_length.clone(), - canonical_stacks_tip_height: canonical_stacks_tip_height, - } - } - - pub fn empty_error() -> HttpResponseMetadata { - HttpResponseMetadata { - client_version: HttpVersion::Http11, - client_keep_alive: false, - request_id: HttpResponseMetadata::make_request_id(), - content_length: Some(0), - canonical_stacks_tip_height: None, - } - } - - fn from_http_request_type( - req: &HttpRequestType, - canonical_stacks_tip_height: Option, - ) -> HttpResponseMetadata { - let metadata = req.metadata(); - HttpResponseMetadata::new( - metadata.version, - HttpResponseMetadata::make_request_id(), - None, - metadata.keep_alive, - canonical_stacks_tip_height, - ) - } -} - -/// All data-plane message types a peer can reply with. -#[derive(Debug, Clone, PartialEq)] -pub enum HttpResponseType { - PeerInfo(HttpResponseMetadata, RPCPeerInfoData), - PoxInfo(HttpResponseMetadata, RPCPoxInfoData), - Neighbors(HttpResponseMetadata, RPCNeighborsInfo), - Headers(HttpResponseMetadata, Vec), - HeaderStream(HttpResponseMetadata), - Block(HttpResponseMetadata, StacksBlock), - BlockStream(HttpResponseMetadata), - Microblocks(HttpResponseMetadata, Vec), - MicroblockStream(HttpResponseMetadata), - TransactionID(HttpResponseMetadata, Txid), - StacksBlockAccepted(HttpResponseMetadata, StacksBlockId, bool), - MicroblockHash(HttpResponseMetadata, BlockHeaderHash), - TokenTransferCost(HttpResponseMetadata, u64), - GetDataVar(HttpResponseMetadata, DataVarResponse), - GetConstantVal(HttpResponseMetadata, ConstantValResponse), - GetMapEntry(HttpResponseMetadata, MapEntryResponse), - CallReadOnlyFunction(HttpResponseMetadata, CallReadOnlyResponse), - GetAccount(HttpResponseMetadata, AccountEntryResponse), - GetContractABI(HttpResponseMetadata, ContractInterface), - GetContractSrc(HttpResponseMetadata, ContractSrcResponse), - GetIsTraitImplemented(HttpResponseMetadata, GetIsTraitImplementedResponse), - UnconfirmedTransaction(HttpResponseMetadata, UnconfirmedTransactionResponse), - GetAttachment(HttpResponseMetadata, GetAttachmentResponse), - GetAttachmentsInv(HttpResponseMetadata, GetAttachmentsInvResponse), - MemPoolTxStream(HttpResponseMetadata), - MemPoolTxs(HttpResponseMetadata, Option, Vec), - OptionsPreflight(HttpResponseMetadata), - TransactionFeeEstimation(HttpResponseMetadata, RPCFeeEstimateResponse), - StackerDBMetadata(HttpResponseMetadata, Vec), - StackerDBChunk(HttpResponseMetadata, Vec), - StackerDBChunkAck(HttpResponseMetadata, StackerDBChunkAckData), - // peer-given error responses - BadRequest(HttpResponseMetadata, String), - BadRequestJSON(HttpResponseMetadata, serde_json::Value), - Unauthorized(HttpResponseMetadata, String), - PaymentRequired(HttpResponseMetadata, String), - Forbidden(HttpResponseMetadata, String), - NotFound(HttpResponseMetadata, String), - ServerError(HttpResponseMetadata, String), - 
ServiceUnavailable(HttpResponseMetadata, String), - Error(HttpResponseMetadata, u16, String), -} - -#[derive(Debug, Clone, PartialEq, Copy)] -pub enum UrlScheme { - Http, - Https, -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[repr(u8)] pub enum StacksMessageID { @@ -1895,20 +1118,6 @@ pub struct StacksMessage { pub payload: StacksMessageType, } -/// Message type for HTTP -#[derive(Debug, Clone, PartialEq)] -pub enum StacksHttpMessage { - Request(HttpRequestType), - Response(HttpResponseType), -} - -/// HTTP message preamble -#[derive(Debug, Clone, PartialEq)] -pub enum StacksHttpPreamble { - Request(HttpRequestPreamble), - Response(HttpResponsePreamble), -} - /// Network messages implement this to have multiple messages in flight. pub trait MessageSequence { fn request_id(&self) -> u32; @@ -1993,8 +1202,6 @@ pub const GETPOXINV_MAX_BITLEN: u64 = 8; // message. pub const BLOCKS_PUSHED_MAX: u32 = 32; -impl_byte_array_message_codec!(PeerAddress, 16); - /// neighbor identifier #[derive(Clone, Eq, PartialOrd, Ord)] pub struct NeighborKey { @@ -2325,7 +1532,7 @@ impl NetworkResult { pub trait Requestable: std::fmt::Display { fn get_url(&self) -> &UrlString; - fn make_request_type(&self, peer_host: PeerHost) -> HttpRequestType; + fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest; } #[cfg(test)] @@ -2394,7 +1601,6 @@ pub mod test { use crate::net::p2p::*; use crate::net::poll::*; use crate::net::relay::*; - use crate::net::rpc::RPCHandlerArgs; use crate::net::Error as net_error; use crate::util_lib::boot::boot_code_test_addr; use crate::util_lib::strings::*; @@ -3200,6 +2406,14 @@ pub mod test { let mempool = MemPoolDB::open_test(false, config.network_id, &chainstate_path).unwrap(); let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + // extract bound ports (which may be different from what's in the config file, if e.g. 
+ // they were 0 + let p2p_port = peer_network.bound_neighbor_key().port; + let http_port = peer_network.http.as_ref().unwrap().http_server_addr.port(); + + config.server_port = p2p_port; + config.http_port = http_port; + TestPeer { config: config, network: peer_network, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 9bcb0c730c..0de77c4ff2 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -29,6 +29,7 @@ use mio::net as mio_net; use rand::prelude::*; use rand::thread_rng; use stacks_common::types::chainstate::{PoxId, SortitionId}; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; @@ -51,15 +52,16 @@ use crate::net::chat::{ConversationP2P, NeighborStats}; use crate::net::connection::{ConnectionOptions, NetworkReplyHandle, ReplyHandleP2P}; use crate::net::db::{LocalPeer, PeerDB}; use crate::net::download::BlockDownloader; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::StacksHttpRequest; use crate::net::inv::*; use crate::net::neighbors::*; use crate::net::poll::{NetworkPollState, NetworkState}; use crate::net::prune::*; use crate::net::relay::{RelayerStats, *, *}; -use crate::net::rpc::RPCHandlerArgs; use crate::net::server::*; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBTx, StackerDBs}; -use crate::net::{Error as net_error, Neighbor, NeighborKey, PeerAddress, *}; +use crate::net::{Error as net_error, Neighbor, NeighborKey, RPCHandlerArgs, *}; use crate::util_lib::db::{DBConn, DBTx, Error as db_error}; /// inter-thread request to send a p2p message from another thread in this program. @@ -360,7 +362,11 @@ impl PeerNetwork { >, epochs: Vec, ) -> PeerNetwork { - let http = HttpPeer::new(connection_opts.clone(), 0); + let http = HttpPeer::new( + connection_opts.clone(), + 0, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0), + ); let pub_ip = connection_opts.public_ip_address.clone(); let pub_ip_learned = pub_ip.is_none(); local_peer.public_ip_address = pub_ip.clone(); @@ -523,14 +529,14 @@ impl PeerNetwork { pub fn bind(&mut self, my_addr: &SocketAddr, http_addr: &SocketAddr) -> Result<(), net_error> { let mut net = NetworkState::new(self.connection_opts.max_sockets)?; - let p2p_handle = net.bind(my_addr)?; - let http_handle = net.bind(http_addr)?; + let (p2p_handle, bound_p2p_addr) = net.bind(my_addr)?; + let (http_handle, bound_http_addr) = net.bind(http_addr)?; test_debug!( "{:?}: bound on p2p {:?}, http {:?}", &self.local_peer, - my_addr, - http_addr + bound_p2p_addr, + bound_http_addr ); self.network = Some(net); @@ -538,14 +544,14 @@ impl PeerNetwork { self.http_network_handle = http_handle; PeerNetwork::with_http(self, |_, ref mut http| { - http.set_server_handle(http_handle); + http.set_server_handle(http_handle, bound_http_addr); }); self.bind_nk = NeighborKey { network_id: self.local_peer.network_id, peer_version: self.peer_version, - addrbytes: PeerAddress::from_socketaddr(my_addr), - port: my_addr.port(), + addrbytes: PeerAddress::from_socketaddr(&bound_p2p_addr), + port: bound_p2p_addr.port(), }; Ok(()) @@ -655,6 +661,16 @@ impl PeerNetwork { &mut self.header_cache } + /// Get a ref to the AtlasDB + pub fn get_atlasdb(&self) -> &AtlasDB { + &self.atlasdb + } + + /// Get a mut ref to the AtlasDB + pub fn get_atlasdb_mut(&mut self) -> &mut AtlasDB { + &mut self.atlasdb + } + /// Count up the number of outbound 
StackerDB replicas we talk to, /// given the contract ID that controls it. pub fn count_outbound_stackerdb_replicas( @@ -1038,7 +1054,11 @@ impl PeerNetwork { return Err(net_error::NotConnected); } Some(ref mut network) => { - let sock = NetworkState::connect(&neighbor.addrbytes.to_socketaddr(neighbor.port))?; + let sock = NetworkState::connect( + &neighbor.addrbytes.to_socketaddr(neighbor.port), + self.connection_opts.socket_send_buffer_size, + self.connection_opts.socket_recv_buffer_size, + )?; let hint_event_id = network.next_event_id()?; let registered_event_id = network.register(self.p2p_network_handle, hint_event_id, &sock)?; @@ -2374,14 +2394,13 @@ impl PeerNetwork { &mut self, dns_client_opt: &mut Option<&mut DNSClient>, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ibd: bool, ) -> Option> { if ibd { return None; } - return match self.do_mempool_sync(dns_client_opt, mempool, chainstate) { + return match self.do_mempool_sync(dns_client_opt, mempool) { (true, txs_opt) => { // did we run to completion? if let Some(txs) = txs_opt { @@ -2719,7 +2738,6 @@ impl PeerNetwork { fn do_network_block_download( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client: &mut DNSClient, ibd: bool, @@ -2742,7 +2760,7 @@ impl PeerNetwork { mut microblocks, mut broken_http_peers, mut broken_p2p_peers, - ) = match self.download_blocks(sortdb, mempool, chainstate, dns_client, ibd) { + ) = match self.download_blocks(sortdb, chainstate, dns_client, ibd) { Ok(x) => x, Err(net_error::NotConnected) => { // there was simply nothing to do @@ -3546,26 +3564,19 @@ impl PeerNetwork { url: &UrlString, addr: &SocketAddr, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, page_id: Txid, ) -> Result<(bool, Option), net_error> { let sync_data = mempool.make_mempool_sync_data()?; - let request = HttpRequestType::MemPoolQuery( - HttpRequestMetadata::from_host( - PeerHost::from_socketaddr(addr), - Some(self.burnchain_tip.canonical_stacks_tip_height), - ), - sync_data, - Some(page_id), - ); - - let event_id = self.connect_or_send_http_request( - url.clone(), - addr.clone(), - request, - mempool, - chainstate, + let request = StacksHttpRequest::new_for_peer( + PeerHost::from_socketaddr(addr), + "POST".into(), + "/v2/mempool/query".into(), + HttpRequestContents::new() + .query_arg("page_id".into(), format!("{}", &page_id)) + .payload_stacks(&sync_data), )?; + + let event_id = self.connect_or_send_http_request(url.clone(), addr.clone(), request)?; return Ok((false, Some(event_id))); } @@ -3602,15 +3613,15 @@ impl PeerNetwork { ); return Ok((false, None, None)); } - Some(http_response) => match http_response { - HttpResponseType::MemPoolTxs(_, page_id_opt, txs) => { + Some(http_response) => match http_response.decode_mempool_txs_page() { + Ok((txs, page_id_opt)) => { debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); return Ok((true, page_id_opt, Some(txs))); } - _ => { + Err(e) => { warn!( - "{:?}: Mempool sync request received {:?}", - &network.local_peer, &http_response + "{:?}: Mempool sync request did not receive a txs page: {:?}", + &network.local_peer, &e ); return Ok((true, None, None)); } @@ -3628,7 +3639,6 @@ impl PeerNetwork { &mut self, dns_client_opt: &mut Option<&mut DNSClient>, mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> (bool, Option>) { if get_epoch_time_secs() <= self.mempool_sync_deadline { debug!( @@ -3716,13 +3726,7 @@ impl PeerNetwork { "{:?}: Mempool 
sync will query {} for mempool transactions at {}", &self.local_peer, url, page_id ); - match self.mempool_sync_send_query( - url, - addr, - mempool, - chainstate, - page_id.clone(), - ) { + match self.mempool_sync_send_query(url, addr, mempool, page_id.clone()) { Ok((false, Some(event_id))) => { // success! advance debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &self.local_peer, url, page_id, event_id); @@ -3810,7 +3814,6 @@ impl PeerNetwork { fn do_network_work( &mut self, sortdb: &SortitionDB, - mempool: &MemPoolDB, chainstate: &mut StacksChainState, dns_client_opt: &mut Option<&mut DNSClient>, download_backpressure: bool, @@ -4017,7 +4020,6 @@ impl PeerNetwork { Some(ref mut dns_client) => { let done = self.do_network_block_download( sortdb, - mempool, chainstate, *dns_client, ibd, @@ -4078,8 +4080,6 @@ impl PeerNetwork { fn do_attachment_downloads( &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, mut dns_client_opt: Option<&mut DNSClient>, network_result: &mut NetworkResult, ) { @@ -4104,7 +4104,7 @@ impl PeerNetwork { self, |network, attachments_downloader| { let mut dead_events = vec![]; - match attachments_downloader.run(dns_client, mempool, chainstate, network) { + match attachments_downloader.run(dns_client, network) { Ok((ref mut attachments, ref mut events_to_deregister)) => { network_result.attachments.append(attachments); dead_events.append(events_to_deregister); @@ -5412,7 +5412,6 @@ impl PeerNetwork { // an already-used network ID. let do_prune = self.do_network_work( sortdb, - mempool, chainstate, &mut dns_client_opt, download_backpressure, @@ -5440,14 +5439,12 @@ impl PeerNetwork { // In parallel, do a mempool sync. // Remember any txs we get, so we can feed them to the relayer thread. - if let Some(mut txs) = - self.do_network_mempool_sync(&mut dns_client_opt, mempool, chainstate, ibd) - { + if let Some(mut txs) = self.do_network_mempool_sync(&mut dns_client_opt, mempool, ibd) { network_result.synced_transactions.append(&mut txs); } // download attachments - self.do_attachment_downloads(mempool, chainstate, dns_client_opt, network_result); + self.do_attachment_downloads(dns_client_opt, network_result); // synchronize stacker DBs match self.run_stacker_db_sync() { @@ -5716,15 +5713,9 @@ impl PeerNetwork { PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { let http_stacks_msgs = PeerNetwork::with_http(network, |ref mut net, ref mut http| { - http.run( - network_state, - net, - sortdb, - chainstate, - mempool, - http_poll_state, - handler_args, - ) + let mut node_state = + StacksNodeState::new(net, sortdb, chainstate, mempool, handler_args); + http.run(network_state, &mut node_state, http_poll_state) }); network_result.consume_http_uploads(http_stacks_msgs); Ok(()) diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index 6b26265ec0..5941741bc1 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -22,10 +22,11 @@ use std::{io, net, time}; use mio::{net as mio_net, PollOpt, Ready, Token}; use rand::RngCore; +use stacks_common::types::net::PeerAddress; use stacks_common::util::{log, sleep_ms}; use {mio, rand}; -use crate::net::{Error as net_error, Neighbor, NeighborKey, PeerAddress}; +use crate::net::{Error as net_error, Neighbor, NeighborKey}; use crate::util_lib::db::{DBConn, Error as db_error}; const SERVER: Token = mio::Token(0); @@ -123,8 +124,8 @@ impl NetworkState { } /// Bind to the given socket address. 
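A usage sketch (not part of the patch) for the `bind()` change below: with the new return type, a caller can bind to port 0 and read back the OS-assigned address, as the updated `test_bind` further down does. The `use` paths are assumptions based on this module's imports.

use std::net::SocketAddr;

use crate::net::poll::NetworkState;

fn bind_ephemeral_port_sketch() {
    let mut ns = NetworkState::new(100).unwrap();
    let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
    // bind() now yields (poll-state handle, actual bound address); the port in `local_addr`
    // is system-assigned because we asked for port 0.
    let (server_event_id, local_addr) = ns.bind(&addr).unwrap();
    assert_ne!(local_addr.port(), 0);
    let _ = server_event_id;
}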
-    /// Returns the handle to the poll state, used to key network poll events.
-    pub fn bind(&mut self, addr: &SocketAddr) -> Result {
+    /// Returns the handle to the poll state and the bound address, used to key network poll events.
+    pub fn bind(&mut self, addr: &SocketAddr) -> Result<(usize, SocketAddr), net_error> {
         let server = NetworkState::bind_address(addr)?;
         let next_server_event = self.next_event_id()?;
 
@@ -140,8 +141,15 @@ impl NetworkState {
             net_error::BindError
         })?;
 
+        // N.B. the port for `addr` might be 0, in which case, `local_addr` may not be equal to
+        // `addr` since the port will be system-assigned. Use `local_addr`.
+        let local_addr = server.local_addr().map_err(|e| {
+            error!("Failed to get local address for server: {:?}", &e);
+            net_error::BindError
+        })?;
+
         let network_server = NetworkServerState {
-            addr: addr.clone(),
+            addr: local_addr.clone(),
             server_socket: server,
             server_event: mio::Token(next_server_event),
         };
@@ -154,7 +162,7 @@ impl NetworkState {
         self.servers.push(network_server);
         self.event_map.insert(next_server_event, 0); // server events always mapped to 0
 
-        Ok(next_server_event)
+        Ok((next_server_event, local_addr))
     }
 
     /// Register a socket for read/write notifications with this poller.
@@ -292,7 +300,11 @@ impl NetworkState {
     /// Connect to a remote peer, but don't register it with the poll handle.
     /// The underlying connect(2) is _asynchronous_, so the caller will need to register it with a
     /// poll handle and wait for it to be connected.
-    pub fn connect(addr: &SocketAddr) -> Result {
+    pub fn connect(
+        addr: &SocketAddr,
+        socket_send_buffer: u32,
+        socket_recv_buffer: u32,
+    ) -> Result {
         let stream = mio_net::TcpStream::connect(addr).map_err(|_e| {
             test_debug!("Failed to convert to mio stream: {:?}", &_e);
             net_error::ConnectionError
         })?;
@@ -302,14 +314,14 @@ impl NetworkState {
         // Don't go crazy on TIME_WAIT states; have them all die after 5 seconds
         stream
             .set_linger(Some(time::Duration::from_millis(5000)))
-            .map_err(|_e| {
-                test_debug!("Failed to set SO_LINGER: {:?}", &_e);
+            .map_err(|e| {
+                warn!("Failed to set SO_LINGER: {:?}", &e);
                 net_error::ConnectionError
             })?;
 
         // Disable Nagle algorithm
         stream.set_nodelay(true).map_err(|_e| {
-            test_debug!("Failed to set TCP_NODELAY: {:?}", &_e);
+            warn!("Failed to set TCP_NODELAY: {:?}", &_e);
             net_error::ConnectionError
         })?;
 
@@ -317,8 +329,8 @@ impl NetworkState {
         // for a while. Linux default is 7200 seconds, so make sure we keep it here.
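A call-site sketch (not part of the patch) for the new `connect()` signature: callers now supply the send and receive socket buffer sizes explicitly; the updated tests below pass fixed 4096-byte buffers, and the p2p layer forwards `ConnectionOptions::socket_send_buffer_size` / `socket_recv_buffer_size`.

// Assumes `addr` is the remote peer's SocketAddr; 4096-byte buffers match the updated tests below.
let sock = NetworkState::connect(&addr, 4096, 4096).unwrap();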
stream .set_keepalive(Some(time::Duration::from_millis(7200 * 1000))) - .map_err(|_e| { - test_debug!("Failed to set TCP_KEEPALIVE and/or SO_KEEPALIVE: {:?}", &_e); + .map_err(|e| { + warn!("Failed to set TCP_KEEPALIVE and/or SO_KEEPALIVE: {:?}", &e); net_error::ConnectionError })?; @@ -328,6 +340,26 @@ impl NetworkState { stream.set_send_buffer_size(32).unwrap(); stream.set_recv_buffer_size(32).unwrap(); } + } else { + stream + .set_send_buffer_size(socket_send_buffer as usize) + .map_err(|e| { + warn!( + "Failed to set socket write buffer size to {}: {:?}", + socket_send_buffer, &e + ); + net_error::ConnectionError + })?; + + stream + .set_recv_buffer_size(socket_recv_buffer as usize) + .map_err(|e| { + warn!( + "Failed to set socket read buffer size to {}: {:?}", + socket_send_buffer, &e + ); + net_error::ConnectionError + })?; } test_debug!("New socket connected to {:?}: {:?}", addr, &stream); @@ -458,62 +490,54 @@ mod test { fn test_bind() { let mut ns = NetworkState::new(100).unwrap(); let mut server_events = HashSet::new(); - for port in 49000..49010 { - let addr = format!("127.0.0.1:{}", &port) - .parse::() - .unwrap(); - let event_id = ns.bind(&addr).unwrap(); + for _ in 0..10 { + let addr = "127.0.0.1:0".parse::().unwrap(); + let (event_id, _local_addr) = ns.bind(&addr).unwrap(); assert!(!server_events.contains(&event_id)); server_events.insert(event_id); } } #[test] - #[ignore] fn test_register_deregister() { let mut ns = NetworkState::new(100).unwrap(); let mut server_events = vec![]; let mut event_ids = HashSet::new(); - for port in 49010..49020 { - let addr = format!("127.0.0.1:{}", &port) - .parse::() - .unwrap(); - let event_id = ns.bind(&addr).unwrap(); + let mut ports = vec![]; + for _ in 0..10 { + let addr = "127.0.0.1:0".parse::().unwrap(); + let (event_id, local_addr) = ns.bind(&addr).unwrap(); server_events.push(event_id); event_ids.insert(event_id); + + ports.push(local_addr.port()); } let mut client_events = vec![]; - for port in 49010..49020 { + for (i, port) in ports.iter().enumerate() { let addr = format!("127.0.0.1:{}", &port) .parse::() .unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); - let event_id = ns.register(server_events[port - 49010], 1, &sock).unwrap(); + let event_id = ns.register(server_events[i], 1, &sock).unwrap(); assert!(event_id != 0); assert!(!event_ids.contains(&event_id)); ns.deregister(event_id, &sock).unwrap(); - let event_id = ns - .register(server_events[port - 49010], 101, &sock) - .unwrap(); + let event_id = ns.register(server_events[i], 101, &sock).unwrap(); assert!(event_id != 0); assert!(!event_ids.contains(&event_id)); ns.deregister(event_id, &sock).unwrap(); let event_id = ns - .register( - server_events[port - 49010], - server_events[port - 49010], - &sock, - ) + .register(server_events[i], server_events[i], &sock) .unwrap(); assert!(event_id != 0); assert!(!event_ids.contains(&event_id)); ns.deregister(event_id, &sock).unwrap(); - let event_id = ns.register(server_events[port - 49010], 11, &sock).unwrap(); + let event_id = ns.register(server_events[i], 11, &sock).unwrap(); assert!(!event_ids.contains(&event_id)); event_ids.insert(event_id); @@ -521,46 +545,43 @@ mod test { } test_debug!("====="); - for port in 49010..49020 { + for (i, port) in ports.iter().enumerate() { let addr = format!("127.0.0.1:{}", &port) .parse::() .unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); // 
can't use non-server events assert_eq!( Err(net_error::RegisterError), - ns.register(client_events[port - 49010], port - 49010 + 1, &sock) + ns.register(client_events[i], i + 1, &sock) ); } } #[test] - #[ignore] fn test_register_too_many_peers() { let mut ns = NetworkState::new(10).unwrap(); let mut event_ids = HashSet::new(); - let addr = format!("127.0.0.1:{}", &49019) - .parse::() - .unwrap(); - let server_event_id = ns.bind(&addr).unwrap(); - - for port in 49020..49030 { + let addr = "127.0.0.1:0".parse::().unwrap(); + let (server_event_id, local_addr) = ns.bind(&addr).unwrap(); + let port = local_addr.port(); + for _ in 0..10 { let addr = format!("127.0.0.1:{}", &port) .parse::() .unwrap(); event_ids.insert(server_event_id); - let sock = NetworkState::connect(&addr).unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); // register 10 client events let event_id = ns.register(server_event_id, 11, &sock).unwrap(); assert!(!event_ids.contains(&event_id)); } - // the 21st socket should fail - let addr = "127.0.0.1:49031".parse::().unwrap(); - let sock = NetworkState::connect(&addr).unwrap(); + // the 11th socket should fail + let addr = format!("127.0.0.1:{}", port).parse::().unwrap(); + let sock = NetworkState::connect(&addr, 4096, 4096).unwrap(); let res = ns.register(server_event_id, 11, &sock); assert_eq!(Err(net_error::TooManyPeers), res); } diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 7447a87e38..b2b7ff6c32 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -20,6 +20,7 @@ use std::net::{Shutdown, SocketAddr}; use rand::prelude::*; use rand::thread_rng; +use stacks_common::types::net::PeerAddress; use stacks_common::util::{get_epoch_time_secs, log}; use crate::net::chat::NeighborStats; diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 50de37d3c3..eda4a543b9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -46,7 +46,7 @@ use crate::monitoring::update_stacks_tip_height; use crate::net::chat::*; use crate::net::connection::*; use crate::net::db::*; -use crate::net::http::*; +use crate::net::httpcore::*; use crate::net::p2p::*; use crate::net::poll::*; use crate::net::rpc::*; @@ -2472,12 +2472,14 @@ pub mod test { use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityConnection; use crate::core::*; + use crate::net::api::getinfo::RPCPeerInfoData; use crate::net::asn::*; use crate::net::chat::*; use crate::net::codec::*; use crate::net::download::test::run_get_blocks_and_microblocks; use crate::net::download::*; - use crate::net::http::*; + use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; + use crate::net::httpcore::StacksHttpMessage; use crate::net::inv::*; use crate::net::test::*; use crate::net::*; @@ -3119,10 +3121,12 @@ pub mod test { } } - fn http_rpc(peer_http: u16, request: HttpRequestType) -> Result { + fn http_rpc( + peer_http: u16, + request: StacksHttpRequest, + ) -> Result { use std::net::TcpStream; - let request_path = request.request_path(); let mut sock = TcpStream::connect( &format!("127.0.0.1:{}", peer_http) .parse::() @@ -3130,7 +3134,7 @@ pub mod test { ) .unwrap(); - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); match sock.write_all(&request_bytes) { Ok(_) => {} Err(e) => { @@ -3154,7 +3158,12 @@ pub mod test { } test_debug!("Client received {} bytes", resp.len()); - let response = 
StacksHttp::parse_response(&request_path, &resp).unwrap(); + let response = StacksHttp::parse_response( + &request.preamble().verb, + &request.preamble().path_and_query_str, + &resp, + ) + .unwrap(); match response { StacksHttpMessage::Response(x) => Ok(x), _ => { @@ -3311,15 +3320,16 @@ pub mod test { } fn http_get_info(http_port: u16) -> RPCPeerInfoData { - let mut request = HttpRequestMetadata::new("127.0.0.1".to_string(), http_port, None); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "GET".to_string(), + "/v2/info".to_string(), + ); request.keep_alive = false; - let getinfo = HttpRequestType::GetInfo(request); + let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); let response = http_rpc(http_port, getinfo).unwrap(); - if let HttpResponseType::PeerInfo(_, peer_info) = response { - peer_info - } else { - panic!("Did not get peer info, but got {:?}", &response); - } + let peer_info = response.decode_peer_info().unwrap(); + peer_info } fn http_post_block( @@ -3333,15 +3343,18 @@ pub mod test { block.block_hash(), http_port ); - let mut request = HttpRequestMetadata::new("127.0.0.1".to_string(), http_port, None); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/blocks".to_string(), + ); request.keep_alive = false; - let post_block = HttpRequestType::PostBlock(request, consensus_hash.clone(), block.clone()); + let post_block = + StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); + let response = http_rpc(http_port, post_block).unwrap(); - if let HttpResponseType::StacksBlockAccepted(_, _, accepted) = response { - accepted - } else { - panic!("Received {:?}, expected StacksBlockAccepted", &response); - } + let accepted = response.decode_stacks_block_accepted().unwrap(); + accepted.accepted } fn http_post_microblock( @@ -3357,17 +3370,24 @@ pub mod test { mblock.block_hash(), http_port ); - let mut request = HttpRequestMetadata::new("127.0.0.1".to_string(), http_port, None); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/microblocks".to_string(), + ); request.keep_alive = false; let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let post_microblock = - HttpRequestType::PostMicroblock(request, mblock.clone(), TipRequest::SpecificTip(tip)); + let post_microblock = StacksHttpRequest::new( + request, + HttpRequestContents::new() + .payload_stacks(mblock) + .for_specific_tip(tip), + ); + let response = http_rpc(http_port, post_microblock).unwrap(); - if let HttpResponseType::MicroblockHash(..) 
= response { - return true; - } else { - panic!("Received {:?}, expected MicroblockHash", &response); - } + let payload = response.get_http_payload_ok().unwrap(); + let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); + return true; } fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 699dac10c3..f66e26a71a 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -43,6 +43,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::chunked_encoding::*; use stacks_common::util::get_epoch_time_secs; @@ -50,7 +51,6 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{types, util}; -use super::{RPCPoxCurrentCycleInfo, RPCPoxNextCycleInfo}; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::{Burnchain, BurnchainView, *}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -68,65 +68,58 @@ use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::net::atlas::{AtlasDB, Attachment, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; use crate::net::connection::{ConnectionHttp, ConnectionOptions, ReplyHandleHttp}; use crate::net::db::PeerDB; -use crate::net::http::*; +use crate::net::http::{HttpRequestContents, HttpResponseContents}; +use crate::net::httpcore::{ + StacksHttp, StacksHttpMessage, StacksHttpRequest, StacksHttpResponse, HTTP_REQUEST_ID_RESERVED, +}; use crate::net::p2p::{PeerMap, PeerNetwork}; use crate::net::relay::Relayer; use crate::net::stackerdb::{StackerDBTx, StackerDBs}; -use crate::net::{ - AccountEntryResponse, AttachmentPage, BlocksData, BlocksDatum, CallReadOnlyResponse, - ClientError, ConstantValResponse, ContractSrcResponse, DataVarResponse, Error as net_error, - GetAttachmentResponse, GetAttachmentsInvResponse, GetIsTraitImplementedResponse, - HttpRequestMetadata, HttpRequestType, HttpResponseMetadata, HttpResponseType, MapEntryResponse, - MemPoolSyncData, MicroblocksData, NeighborAddress, NeighborsData, PeerAddress, PeerHost, - ProtocolFamily, RPCAffirmationData, RPCFeeEstimate, RPCFeeEstimateResponse, - RPCLastPoxAnchorData, RPCNeighbor, RPCNeighborsInfo, RPCPeerInfoData, RPCPoxContractVersion, - RPCPoxInfoData, StackerDBPushChunkData, StacksHttp, StacksHttpMessage, StacksMessageType, - StreamCursor, TipRequest, UnconfirmedTransactionResponse, UnconfirmedTransactionStatus, - UrlString, HTTP_REQUEST_ID_RESERVED, MAX_HEADERS, MAX_NEIGHBORS_DATA_LEN, -}; +use crate::net::{Error as net_error, StacksMessageType, StacksNodeState}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{DBConn, Error as db_error}; +use crate::util_lib::strings::UrlString; use crate::{monitoring, version_string}; pub const STREAM_CHUNK_SIZE: u64 = 4096; -#[derive(Default)] -pub struct RPCHandlerArgs<'a> { - pub exit_at_block_height: Option, - pub genesis_chainstate_hash: Sha256Sum, - pub event_observer: Option<&'a dyn MemPoolEventDispatcher>, - pub cost_estimator: Option<&'a dyn CostEstimator>, - pub fee_estimator: Option<&'a dyn FeeEstimator>, - pub cost_metric: Option<&'a dyn CostMetric>, -} - pub struct ConversationHttp { + /// send/receive buffering state-machine for interfacing with a 
non-blocking socket
     connection: ConnectionHttp,
+    /// poll ID for this struct's associated socket
     conn_id: usize,
+    /// time (in seconds) for how long an attempt to connect to a peer is allowed to take
     timeout: u64,
+    /// remote host's identifier (DNS or IP). Goes into the `Host:` header
     peer_host: PeerHost,
+    /// URL of the remote peer's data, if given
     outbound_url: Option,
+    /// remote host's IP address
     peer_addr: SocketAddr,
+    /// remote host's keep-alive setting
     keep_alive: bool,
-    total_request_count: u64, // number of messages taken from the inbox
-    total_reply_count: u64, // number of messages responsed to
-    last_request_timestamp: u64, // absolute timestamp of the last time we received at least 1 byte in a request
-    last_response_timestamp: u64, // absolute timestamp of the last time we sent at least 1 byte in a response
-    connection_time: u64, // when this converation was instantiated
-
-    canonical_stacks_tip_height: Option, // chain tip height of the peer's Stacks blockchain
-
-    // ongoing block streams
-    reply_streams: VecDeque<(
-        ReplyHandleHttp,
-        Option<(HttpChunkedTransferWriterState, StreamCursor)>,
-        bool,
-    )>,
-
-    // our outstanding request/response to the remote peer, if any
+    /// number of messages consumed
+    total_request_count: u64,
+    /// number of messages sent
+    total_reply_count: u64,
+    /// absolute timestamp of the last time we received at least 1 byte
+    last_request_timestamp: u64,
+    /// absolute timestamp of the last time we sent at least 1 byte
+    last_response_timestamp: u64,
+    /// absolute time when this conversation was instantiated
+    connection_time: u64,
+    /// stacks canonical chain tip that this peer reported
+    canonical_stacks_tip_height: Option,
+    /// Ongoing replies
+    reply_streams: VecDeque<(ReplyHandleHttp, HttpResponseContents, bool)>,
+    /// outstanding request
     pending_request: Option,
-    pending_response: Option,
-    pending_error_response: Option,
+    /// outstanding response
+    pending_response: Option,
+    /// whether or not there's an error response pending
+    pending_error_response: bool,
+    /// how much data to buffer (i.e. 
the socket's send buffer size) + socket_send_buffer_size: u32, } impl fmt::Display for ConversationHttp { @@ -153,433 +146,6 @@ impl fmt::Debug for ConversationHttp { } } -impl<'a> RPCHandlerArgs<'a> { - pub fn get_estimators_ref( - &self, - ) -> Option<(&dyn CostEstimator, &dyn FeeEstimator, &dyn CostMetric)> { - match (self.cost_estimator, self.fee_estimator, self.cost_metric) { - (Some(a), Some(b), Some(c)) => Some((a, b, c)), - _ => None, - } - } -} - -impl RPCPeerInfoData { - pub fn from_network( - network: &PeerNetwork, - chainstate: &StacksChainState, - exit_at_block_height: Option, - genesis_chainstate_hash: &Sha256Sum, - ) -> RPCPeerInfoData { - let server_version = version_string( - "stacks-node", - option_env!("STACKS_NODE_VERSION") - .or(option_env!("CARGO_PKG_VERSION")) - .unwrap_or("0.0.0.0"), - ); - let (unconfirmed_tip, unconfirmed_seq) = match chainstate.unconfirmed_state { - Some(ref unconfirmed) => { - if unconfirmed.num_mined_txs() > 0 { - ( - Some(unconfirmed.unconfirmed_chain_tip.clone()), - Some(unconfirmed.last_mblock_seq), - ) - } else { - (None, None) - } - } - None => (None, None), - }; - - let public_key = StacksPublicKey::from_private(&network.local_peer.private_key); - let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); - let public_key_hash = Hash160::from_node_public_key(&public_key); - let stackerdb_contract_ids = network.get_local_peer().stacker_dbs.clone(); - - RPCPeerInfoData { - peer_version: network.burnchain.peer_version, - pox_consensus: network.burnchain_tip.consensus_hash.clone(), - burn_block_height: network.chain_view.burn_block_height, - stable_pox_consensus: network.chain_view_stable_consensus_hash.clone(), - stable_burn_block_height: network.chain_view.burn_stable_block_height, - server_version, - network_id: network.local_peer.network_id, - parent_network_id: network.local_peer.parent_network_id, - stacks_tip_height: network.burnchain_tip.canonical_stacks_tip_height, - stacks_tip: network.burnchain_tip.canonical_stacks_tip_hash.clone(), - stacks_tip_consensus_hash: network - .burnchain_tip - .canonical_stacks_tip_consensus_hash - .clone(), - unanchored_tip: unconfirmed_tip, - unanchored_seq: unconfirmed_seq, - exit_at_block_height: exit_at_block_height, - genesis_chainstate_hash: genesis_chainstate_hash.clone(), - node_public_key: Some(public_key_buf), - node_public_key_hash: Some(public_key_hash), - affirmations: Some(RPCAffirmationData { - heaviest: network.heaviest_affirmation_map.clone(), - stacks_tip: network.stacks_tip_affirmation_map.clone(), - sortition_tip: network.sortition_tip_affirmation_map.clone(), - tentative_best: network.tentative_best_affirmation_map.clone(), - }), - last_pox_anchor: Some(RPCLastPoxAnchorData { - anchor_block_hash: network.last_anchor_block_hash.clone(), - anchor_block_txid: network.last_anchor_block_txid.clone(), - }), - stackerdbs: Some( - stackerdb_contract_ids - .into_iter() - .map(|cid| format!("{}", cid)) - .collect(), - ), - } - } -} - -impl RPCPoxInfoData { - pub fn from_db( - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - burnchain: &Burnchain, - ) -> Result { - let mainnet = chainstate.mainnet; - let chain_id = chainstate.chain_id; - let current_burn_height = - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?.block_height; - - let pox_contract_name = burnchain - .pox_constants - .active_pox_contract(current_burn_height); - - let contract_identifier = boot_code_id(pox_contract_name, mainnet); - let function = "get-pox-info"; - let 
cost_track = LimitedCostTracker::new_free(); - let sender = PrincipalData::Standard(StandardPrincipalData::transient()); - - debug!( - "Active PoX contract is '{}' (current_burn_height = {}, v1_unlock_height = {}", - &contract_identifier, current_burn_height, burnchain.pox_constants.v1_unlock_height - ); - - // Note: should always be 0 unless somehow configured to start later - let pox_1_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.first_block_height as u64) - .ok_or(net_error::ChainstateError( - "PoX-1 first reward cycle begins before first burn block height".to_string(), - ))?; - - let pox_2_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) - .ok_or(net_error::ChainstateError( - "PoX-2 first reward cycle begins before first burn block height".to_string(), - ))? - + 1; - - let pox_3_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) - .ok_or(net_error::ChainstateError( - "PoX-3 first reward cycle begins before first burn block height".to_string(), - ))? - + 1; - - let data = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity2, - sender, - None, - cost_track, - |env| env.execute_contract(&contract_identifier, function, &vec![], true), - ) - }) - .map_err(|_| net_error::NotFoundError)?; - - let res = match data { - Some(Ok(res)) => res.expect_result_ok().expect_tuple(), - _ => return Err(net_error::DBError(db_error::NotFoundError)), - }; - - let first_burnchain_block_height = res - .get("first-burnchain-block-height") - .expect(&format!("FATAL: no 'first-burnchain-block-height'")) - .to_owned() - .expect_u128() as u64; - - let min_stacking_increment_ustx = res - .get("min-amount-ustx") - .expect(&format!("FATAL: no 'min-amount-ustx'")) - .to_owned() - .expect_u128() as u64; - - let prepare_cycle_length = res - .get("prepare-cycle-length") - .expect(&format!("FATAL: no 'prepare-cycle-length'")) - .to_owned() - .expect_u128() as u64; - - let rejection_fraction = res - .get("rejection-fraction") - .expect(&format!("FATAL: no 'rejection-fraction'")) - .to_owned() - .expect_u128() as u64; - - let reward_cycle_id = res - .get("reward-cycle-id") - .expect(&format!("FATAL: no 'reward-cycle-id'")) - .to_owned() - .expect_u128() as u64; - - let reward_cycle_length = res - .get("reward-cycle-length") - .expect(&format!("FATAL: no 'reward-cycle-length'")) - .to_owned() - .expect_u128() as u64; - - let current_rejection_votes = res - .get("current-rejection-votes") - .expect(&format!("FATAL: no 'current-rejection-votes'")) - .to_owned() - .expect_u128() as u64; - - let total_liquid_supply_ustx = res - .get("total-liquid-supply-ustx") - .expect(&format!("FATAL: no 'total-liquid-supply-ustx'")) - .to_owned() - .expect_u128() as u64; - - let total_required = (total_liquid_supply_ustx as u128 / 100) - .checked_mul(rejection_fraction as u128) - .ok_or_else(|| net_error::DBError(db_error::Overflow))? 
- as u64; - - let rejection_votes_left_required = total_required.saturating_sub(current_rejection_votes); - - let burnchain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - - let pox_consts = &burnchain.pox_constants; - - if prepare_cycle_length != pox_consts.prepare_length as u64 { - error!( - "PoX Constants in config mismatched with PoX contract constants: {} != {}", - prepare_cycle_length, pox_consts.prepare_length - ); - return Err(net_error::DBError(db_error::Corruption)); - } - - if reward_cycle_length != pox_consts.reward_cycle_length as u64 { - error!( - "PoX Constants in config mismatched with PoX contract constants: {} != {}", - reward_cycle_length, pox_consts.reward_cycle_length - ); - return Err(net_error::DBError(db_error::Corruption)); - } - - let effective_height = burnchain_tip.block_height - first_burnchain_block_height; - let next_reward_cycle_in = reward_cycle_length - (effective_height % reward_cycle_length); - - let next_rewards_start = burnchain_tip.block_height + next_reward_cycle_in; - let next_prepare_phase_start = next_rewards_start - prepare_cycle_length; - - let next_prepare_phase_in = i64::try_from(next_prepare_phase_start) - .map_err(|_| net_error::ChainstateError("Burn block height overflowed i64".into()))? - - i64::try_from(burnchain_tip.block_height).map_err(|_| { - net_error::ChainstateError("Burn block height overflowed i64".into()) - })?; - - let cur_block_pox_contract = pox_consts.active_pox_contract(burnchain_tip.block_height); - let cur_cycle_pox_contract = - pox_consts.active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id)); - let next_cycle_pox_contract = pox_consts - .active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id + 1)); - - let cur_cycle_stacked_ustx = chainstate.get_total_ustx_stacked( - &sortdb, - tip, - reward_cycle_id as u128, - cur_cycle_pox_contract, - )?; - let next_cycle_stacked_ustx = - // next_cycle_pox_contract might not be instantiated yet - match chainstate.get_total_ustx_stacked( - &sortdb, - tip, - reward_cycle_id as u128 + 1, - next_cycle_pox_contract, - ) { - Ok(ustx) => ustx, - Err(chain_error::ClarityError(_)) => { - // contract not instantiated yet - 0 - } - Err(e) => { - return Err(e.into()); - } - }; - - let reward_slots = pox_consts.reward_slots() as u64; - - let cur_cycle_threshold = StacksChainState::get_threshold_from_participation( - total_liquid_supply_ustx as u128, - cur_cycle_stacked_ustx, - reward_slots as u128, - ) as u64; - - let next_threshold = StacksChainState::get_threshold_from_participation( - total_liquid_supply_ustx as u128, - next_cycle_stacked_ustx, - reward_slots as u128, - ) as u64; - - let pox_activation_threshold_ustx = (total_liquid_supply_ustx as u128) - .checked_mul(pox_consts.pox_participation_threshold_pct as u128) - .map(|x| x / 100) - .ok_or_else(|| net_error::DBError(db_error::Overflow))? 
- as u64; - - let cur_cycle_pox_active = sortdb.is_pox_active(burnchain, &burnchain_tip)?; - - Ok(RPCPoxInfoData { - contract_id: boot_code_id(cur_block_pox_contract, chainstate.mainnet).to_string(), - pox_activation_threshold_ustx, - first_burnchain_block_height, - current_burnchain_block_height: burnchain_tip.block_height, - prepare_phase_block_length: prepare_cycle_length, - reward_phase_block_length: reward_cycle_length - prepare_cycle_length, - reward_slots, - rejection_fraction, - total_liquid_supply_ustx, - current_cycle: RPCPoxCurrentCycleInfo { - id: reward_cycle_id, - min_threshold_ustx: cur_cycle_threshold, - stacked_ustx: cur_cycle_stacked_ustx as u64, - is_pox_active: cur_cycle_pox_active, - }, - next_cycle: RPCPoxNextCycleInfo { - id: reward_cycle_id + 1, - min_threshold_ustx: next_threshold, - min_increment_ustx: min_stacking_increment_ustx, - stacked_ustx: next_cycle_stacked_ustx as u64, - prepare_phase_start_block_height: next_prepare_phase_start, - blocks_until_prepare_phase: next_prepare_phase_in, - reward_phase_start_block_height: next_rewards_start, - blocks_until_reward_phase: next_reward_cycle_in, - ustx_until_pox_rejection: rejection_votes_left_required, - }, - min_amount_ustx: next_threshold, - prepare_cycle_length, - reward_cycle_id, - reward_cycle_length, - rejection_votes_left_required, - next_reward_cycle_in, - contract_versions: vec![ - RPCPoxContractVersion { - contract_id: boot_code_id(POX_1_NAME, chainstate.mainnet).to_string(), - activation_burnchain_block_height: burnchain.first_block_height, - first_reward_cycle_id: pox_1_first_cycle, - }, - RPCPoxContractVersion { - contract_id: boot_code_id(POX_2_NAME, chainstate.mainnet).to_string(), - activation_burnchain_block_height: burnchain.pox_constants.v1_unlock_height - as u64, - first_reward_cycle_id: pox_2_first_cycle, - }, - RPCPoxContractVersion { - contract_id: boot_code_id(POX_3_NAME, chainstate.mainnet).to_string(), - activation_burnchain_block_height: burnchain - .pox_constants - .pox_3_activation_height - as u64, - first_reward_cycle_id: pox_3_first_cycle, - }, - ], - }) - } -} - -impl RPCNeighborsInfo { - /// Load neighbor address information from the peer network - pub fn from_p2p( - network_id: u32, - network_epoch: u8, - max_neighbor_age: u64, - peers: &PeerMap, - chain_view: &BurnchainView, - peerdb: &PeerDB, - ) -> Result { - let bootstrap_nodes = - PeerDB::get_bootstrap_peers(peerdb.conn(), network_id).map_err(net_error::DBError)?; - let bootstrap = bootstrap_nodes - .into_iter() - .map(|n| { - let stackerdb_contract_ids = peerdb.get_peer_stacker_dbs(&n).unwrap_or(vec![]); - RPCNeighbor::from_neighbor_key_and_pubkh( - n.addr.clone(), - Hash160::from_node_public_key(&n.public_key), - true, - stackerdb_contract_ids, - ) - }) - .collect(); - - let neighbor_sample = PeerDB::get_fresh_random_neighbors( - peerdb.conn(), - network_id, - network_epoch, - max_neighbor_age, - MAX_NEIGHBORS_DATA_LEN, - chain_view.burn_block_height, - false, - ) - .map_err(net_error::DBError)?; - - let sample: Vec = neighbor_sample - .into_iter() - .map(|n| { - let stackerdb_contract_ids = peerdb.get_peer_stacker_dbs(&n).unwrap_or(vec![]); - RPCNeighbor::from_neighbor_key_and_pubkh( - n.addr.clone(), - Hash160::from_node_public_key(&n.public_key), - true, - stackerdb_contract_ids, - ) - }) - .collect(); - - let mut inbound = vec![]; - let mut outbound = vec![]; - for (_, convo) in peers.iter() { - let nk = convo.to_neighbor_key(); - let naddr = convo.to_neighbor_address(); - if convo.is_outbound() { - 
outbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( - nk, - naddr.public_key_hash, - convo.is_authenticated(), - convo.get_stackerdb_contract_ids().to_vec(), - )); - } else { - inbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( - nk, - naddr.public_key_hash, - convo.is_authenticated(), - convo.get_stackerdb_contract_ids().to_vec(), - )); - } - } - - Ok(RPCNeighborsInfo { - bootstrap, - sample, - inbound, - outbound, - }) - } -} - impl ConversationHttp { pub fn new( peer_addr: SocketAddr, @@ -587,9 +153,9 @@ impl ConversationHttp { peer_host: PeerHost, conn_opts: &ConnectionOptions, conn_id: usize, + socket_send_buffer_size: u32, ) -> ConversationHttp { - let mut stacks_http = StacksHttp::new(peer_addr.clone()); - stacks_http.maximum_call_argument_size = conn_opts.maximum_call_argument_size; + let stacks_http = StacksHttp::new(peer_addr.clone(), conn_opts); ConversationHttp { connection: ConnectionHttp::new(stacks_http, conn_opts, None), conn_id: conn_id, @@ -601,12 +167,13 @@ impl ConversationHttp { canonical_stacks_tip_height: None, pending_request: None, pending_response: None, - pending_error_response: None, + pending_error_response: false, keep_alive: true, total_request_count: 0, total_reply_count: 0, last_request_timestamp: 0, last_response_timestamp: 0, + socket_send_buffer_size, connection_time: get_epoch_time_secs(), } } @@ -633,7 +200,7 @@ impl ConversationHttp { /// Start a HTTP request from this peer, and expect a response. /// Returns the request handle; does not set the handle into this connection. - fn start_request(&mut self, req: HttpRequestType) -> Result { + fn start_request(&mut self, req: StacksHttpRequest) -> Result { test_debug!( "{:?},id={}: Start HTTP request {:?}", &self.peer_host, @@ -653,7 +220,7 @@ impl ConversationHttp { /// Start a HTTP request from this peer, and expect a response. /// Non-blocking. /// Only one request in-flight is allowed. - pub fn send_request(&mut self, req: HttpRequestType) -> Result<(), net_error> { + pub fn send_request(&mut self, req: StacksHttpRequest) -> Result<(), net_error> { if self.is_request_inflight() { test_debug!( "{:?},id={}: Request in progress still", @@ -662,7 +229,7 @@ impl ConversationHttp { ); return Err(net_error::InProgress); } - if self.pending_error_response.is_some() { + if self.pending_error_response { test_debug!( "{:?},id={}: Error response is inflight", &self.peer_host, @@ -679,12 +246,8 @@ impl ConversationHttp { } /// Send a HTTP error response. - /// Discontinues and disables sending a non-error response - pub fn reply_error( - &mut self, - fd: &mut W, - res: HttpResponseType, - ) -> Result<(), net_error> { + /// Discontinues and disables sending a non-error response. + pub fn reply_error(&mut self, res: StacksHttpResponse) -> Result<(), net_error> { if self.is_request_inflight() || self.pending_response.is_some() { test_debug!( "{:?},id={}: Request or response is already in progress", @@ -693,6861 +256,398 @@ impl ConversationHttp { ); return Err(net_error::InProgress); } - if self.pending_error_response.is_some() { + if self.pending_error_response { // error already in-flight return Ok(()); } - res.send(&mut self.connection.protocol, fd)?; + let (preamble, body_contents) = res.try_into_contents()?; - let reply = self.connection.make_relay_handle(self.conn_id)?; + // make the relay handle. There may not have been a valid request in the first place, so + // we'll use a relay handle (not a reply handle) to push out the error. 
+ let mut reply = self.connection.make_relay_handle(self.conn_id)?; - self.pending_error_response = Some(res); - self.reply_streams.push_back((reply, None, false)); + // queue up the HTTP headers, and then stream back the body. + preamble.consensus_serialize(&mut reply)?; + self.reply_streams.push_back((reply, body_contents, false)); + self.pending_error_response = true; Ok(()) } - /// Handle a GET peer info. - /// The response will be synchronously written to the given fd (so use a fd that can buffer!) - fn handle_getinfo( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - network: &PeerNetwork, - chainstate: &StacksChainState, - handler_args: &RPCHandlerArgs, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let pi = RPCPeerInfoData::from_network( - network, - chainstate, - handler_args.exit_at_block_height.clone(), - &handler_args.genesis_chainstate_hash, - ); - let response = HttpResponseType::PeerInfo(response_metadata, pi); - response.send(http, fd) - } + /// Handle an external HTTP request. + /// Returns a StacksMessageType option -- it's Some(...) if we need to forward a message to the + /// peer network (like a transaction or a block or microblock) + pub fn handle_request( + &mut self, + req: StacksHttpRequest, + node: &mut StacksNodeState, + ) -> Result, net_error> { + // NOTE: This may set node.relay_message + let keep_alive = req.preamble().keep_alive; + let (mut response_preamble, response_body) = + self.connection.protocol.try_handle_request(req, node)?; - /// Handle a GET pox info. - /// The response will be synchronously written to the given fd (so use a fd that can buffer!) - fn handle_getpoxinfo( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - burnchain: &Burnchain, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); + let mut reply = self.connection.make_relay_handle(self.conn_id)?; + let relay_msg_opt = node.take_relay_message(); - match RPCPoxInfoData::from_db(sortdb, chainstate, tip, burnchain) { - Ok(pi) => { - let response = HttpResponseType::PoxInfo(response_metadata, pi); - response.send(http, fd) - } - Err(net_error::NotFoundError) => { - debug!("Chain tip not found during get PoX info: {:?}", req); - let response = HttpResponseType::NotFound( - response_metadata, - "Failed to find chain tip".to_string(), - ); - response.send(http, fd) - } - Err(e) => { - warn!("Failed to get PoX info {:?}: {:?}", req, &e); - let response = HttpResponseType::ServerError( - response_metadata, - "Failed to query peer info".to_string(), - ); - response.send(http, fd) - } - } - } + // make sure content-length is properly set, based on how we're about to stream data back + response_preamble.content_length = response_body.content_length(); - fn handle_getattachmentsinv( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - atlasdb: &AtlasDB, - index_block_hash: &StacksBlockId, - pages_indexes: &HashSet, - _options: &ConnectionOptions, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - // We are receiving a list of page indexes with a chain tip hash. 
- // The amount of pages_indexes is capped by MAX_ATTACHMENT_INV_PAGES_PER_REQUEST (8) - // Pages sizes are controlled by the constant ATTACHMENTS_INV_PAGE_SIZE (8), which - // means that a `GET v2/attachments/inv` request can be requesting for a 64 bit vector - // at once. - // Since clients can be asking for non-consecutive pages indexes (1, 5_000, 10_000, ...), - // we will be handling each page index separately. - // We could also add the notion of "budget" so that a client could only get a limited number - // of pages when they are spanning over many blocks. - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - if pages_indexes.len() > MAX_ATTACHMENT_INV_PAGES_PER_REQUEST { - let msg = format!( - "Number of attachment inv pages is limited by {} per request", - MAX_ATTACHMENT_INV_PAGES_PER_REQUEST - ); - warn!("{}", msg); - let response = HttpResponseType::BadRequest(response_metadata, msg); - response.send(http, fd)?; - return Ok(()); - } - if pages_indexes.len() == 0 { - let msg = format!("Page indexes missing"); - warn!("{}", msg); - let response = HttpResponseType::NotFound(response_metadata, msg.clone()); - response.send(http, fd)?; - return Ok(()); - } + // buffer up response headers into the reply handle + response_preamble.consensus_serialize(&mut reply)?; + self.reply_streams + .push_back((reply, response_body, keep_alive)); + Ok(relay_msg_opt) + } - let mut pages_indexes = pages_indexes.iter().map(|i| *i).collect::>(); - pages_indexes.sort(); + /// Make progress on outbound requests. + fn send_outbound_responses(&mut self) -> Result<(), net_error> { + // send out streamed responses in the order they were requested + let mut drained_handle = false; + let mut drained_stream = false; + let mut broken = false; + let mut do_keep_alive = true; - let mut pages = vec![]; + test_debug!( + "{:?}: {} HTTP replies pending", + &self, + self.reply_streams.len() + ); + let _self_str = format!("{}", &self); - for page_index in pages_indexes.iter() { - match atlasdb.get_attachments_available_at_page_index(*page_index, &index_block_hash) { - Ok(inventory) => { - pages.push(AttachmentPage { - inventory, - index: *page_index, - }); + if let Some((ref mut reply, ref mut http_response, ref keep_alive)) = + self.reply_streams.front_mut() + { + do_keep_alive = *keep_alive; + + while !drained_stream { + // write out the last-generated data into the write-end of the reply handle's pipe + if let Some(pipe_fd) = reply.inner_pipe_out() { + let num_written = http_response.pipe_out(pipe_fd)?; + if num_written == 0 { + // no more chunks + drained_stream = true; + } + test_debug!("{}: Wrote {} bytes", &_self_str, num_written); + if (pipe_fd.pending() as u32) >= self.socket_send_buffer_size { + // we've written more data than can be dumped into the socket buffer, so + // we're good to go for now -- we'll get an edge trigger next time the data + // drains from this socket. + break; + } + } else { + test_debug!("{}: No inner pipe", &_self_str); + drained_stream = true; + } + } + + if !drained_stream { + // Consume data from the read-end of the reply-handle's pipe and try to drain it into + // the socket. Note that this merely fills the socket buffer; the read-end may still + // have pending data after this call (which will need to be drained into the + // socket by a subsequent call to `try_flush()` -- i.e. on the next pass of the + // event loop). 
+ // + // The `false` parameter means that the handle should be able to continue to receive + // more data from the write-end (i.e. the request handler's streamer instance) even if + // all data gets drained to the socket buffer on flush. + match reply.try_flush_ex(false) { + Ok(res) => { + test_debug!("{}: Streamed reply is drained?: {}", &_self_str, res); + drained_handle = res; + } + Err(e) => { + // dead + warn!("{}: Broken HTTP connection: {:?}", &_self_str, &e); + broken = true; + } } - Err(e) => { - let msg = format!("Unable to read Atlas DB - {}", e); - warn!("{}", msg); - let response = HttpResponseType::NotFound(response_metadata, msg); - return response.send(http, fd); + } else { + // If we're actually done sending data, then try to flush the reply handle without + // expecting more data to be written to the write-end of this reply handle's pipe. + // Then, once all bufferred data gets drained to the socket, we can drop this request. + match reply.try_flush() { + Ok(res) => { + test_debug!("{}: Streamed reply is drained?: {}", &_self_str, res); + drained_handle = res; + } + Err(e) => { + // dead + warn!("{}: Broken HTTP connection: {:?}", &_self_str, &e); + broken = true; + } } } } - let content = GetAttachmentsInvResponse { - block_id: index_block_hash.clone(), - pages, - }; - let response = HttpResponseType::GetAttachmentsInv(response_metadata, content); - response.send(http, fd) - } + test_debug!( + "broken = {}, drained_handle = {}, drained_stream = {}", + broken, + drained_handle, + drained_stream + ); + if broken || (drained_handle && drained_stream) { + // done with this stream + test_debug!( + "{:?}: done with stream (broken={}, drained_handle={}, drained_stream={})", + &self, + broken, + drained_handle, + drained_stream + ); + self.total_reply_count += 1; + self.reply_streams.pop_front(); - fn handle_getattachment( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - atlasdb: &mut AtlasDB, - content_hash: Hash160, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - match atlasdb.find_attachment(&content_hash) { - Ok(Some(attachment)) => { - let content = GetAttachmentResponse { attachment }; - let response = HttpResponseType::GetAttachment(response_metadata, content); - response.send(http, fd) - } - _ => { - let msg = format!("Unable to find attachment"); - warn!("{}", msg); - let response = HttpResponseType::NotFound(response_metadata, msg); - response.send(http, fd) + if !do_keep_alive { + // encountered "Connection: close" + self.keep_alive = false; } } + Ok(()) } - /// Handle a GET neighbors - /// The response will be synchronously written to the given fd (so use a fd that can buffer!) 
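The new send_outbound_responses above drains the front of reply_streams in two phases: it pipes body chunks into the reply handle until either the body is exhausted or more than one socket send buffer's worth of data is pending, then flushes toward the socket, and only pops the stream once both the body and the handle are drained (or the connection breaks). The sketch below models that bookkeeping with toy in-memory types; every name in it is hypothetical and it stands in for, rather than reproduces, the real pipe and connection plumbing.

use std::collections::VecDeque;

/// Toy stand-in for a streamed HTTP response body: yields chunks until exhausted.
struct Body {
    chunks: VecDeque<Vec<u8>>,
}

/// Toy stand-in for a reply handle: a pipe buffer in front of a (pretend) socket.
struct Handle {
    pipe: Vec<u8>,
    socket: Vec<u8>,
}

impl Handle {
    /// Move up to `budget` buffered bytes toward the socket; true once the pipe is empty.
    fn try_flush(&mut self, budget: usize) -> bool {
        let n = self.pipe.len().min(budget);
        let drained: Vec<u8> = self.pipe.drain(..n).collect();
        self.socket.extend_from_slice(&drained);
        self.pipe.is_empty()
    }
}

/// One pass over the frontmost queued reply, mirroring the two-phase drain above.
fn drain_front(
    streams: &mut VecDeque<(Handle, Body, bool)>,
    send_buffer_size: usize,
    keep_alive: &mut bool,
) {
    let mut drained_stream = false;
    let mut drained_handle = false;
    let mut do_keep_alive = true;

    if let Some((handle, body, ka)) = streams.front_mut() {
        do_keep_alive = *ka;

        // Phase 1: copy body chunks into the pipe until the body is exhausted, or until
        // more than one socket send buffer's worth of data is already pending.
        while !drained_stream {
            match body.chunks.pop_front() {
                Some(chunk) => {
                    handle.pipe.extend_from_slice(&chunk);
                    if handle.pipe.len() >= send_buffer_size {
                        break; // enough buffered for now; wait for the next writable event
                    }
                }
                None => drained_stream = true,
            }
        }

        // Phase 2: push buffered bytes toward the socket.
        drained_handle = handle.try_flush(send_buffer_size);
    }

    if drained_handle && drained_stream {
        // Done with this response: drop it, and honor a requested "Connection: close".
        streams.pop_front();
        if !do_keep_alive {
            *keep_alive = false;
        }
    }
}

fn main() {
    let body = Body {
        chunks: vec![b"hello ".to_vec(), b"world".to_vec()].into(),
    };
    let handle = Handle { pipe: vec![], socket: vec![] };
    let mut streams = VecDeque::from([(handle, body, true)]);
    let mut keep_alive = true;
    while !streams.is_empty() {
        drain_front(&mut streams, 4096, &mut keep_alive);
    }
    println!("replies drained; keep_alive = {keep_alive}");
}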
- fn handle_getneighbors( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - network: &PeerNetwork, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let epoch = network.get_current_epoch(); - - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let neighbor_data = RPCNeighborsInfo::from_p2p( - network.local_peer.network_id, - epoch.network_epoch, - network.connection_opts.max_neighbor_age, - &network.peers, - &network.chain_view, - &network.peerdb, - )?; - let response = HttpResponseType::Neighbors(response_metadata, neighbor_data); - response.send(http, fd) - } - - /// Handle a not-found - fn handle_notfound( - http: &mut StacksHttp, - fd: &mut W, - response_metadata: HttpResponseMetadata, - msg: String, - ) -> Result, net_error> { - let response = HttpResponseType::NotFound(response_metadata, msg); - return response.send(http, fd).and_then(|_| Ok(None)); - } - - /// Handle a server error - fn handle_server_error( - http: &mut StacksHttp, - fd: &mut W, - response_metadata: HttpResponseMetadata, - msg: String, - ) -> Result, net_error> { - // oops - warn!("{}", &msg); - let response = HttpResponseType::ServerError(response_metadata, msg); - return response.send(http, fd).and_then(|_| Ok(None)); - } - - /// Handle a GET headers. Start streaming the reply. - /// The response's preamble (but not the headers list) will be synchronously written to the fd - /// (so use a fd that can buffer!) - /// Return a StreamCursor struct for the reward cycle we're sending, so we can continue to - /// make progress sending it - fn handle_getheaders( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - tip: &StacksBlockId, - quantity: u64, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - if quantity > (MAX_HEADERS as u64) { - // bad request - let response = HttpResponseType::BadRequestJSON( - response_metadata, - serde_json::Value::String(format!( - "Invalid request: requested more than {} headers", - MAX_HEADERS - )), - ); - response.send(http, fd).and_then(|_| Ok(None)) - } else { - let stream = match StreamCursor::new_headers(chainstate, tip, quantity as u32) { - Ok(stream) => stream, - Err(chain_error::NoSuchBlockError) => { - return ConversationHttp::handle_notfound( - http, - fd, - response_metadata, - format!("No such block {:?}", &tip), - ); - } - Err(e) => { - // nope -- error trying to check - warn!("Failed to load block header {:?}: {:?}", req, &e); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to query block header {}", tip.to_hex()), - ); - return response.send(http, fd).and_then(|_| Ok(None)); + /// Try to move pending bytes into and out of the reply handle. + /// If we finish doing so, then extract the StacksHttpResponse + /// If we are not done yet, then return Ok(reply-handle) if we can try again, or net_error if + /// we cannot. 
+ fn try_send_recv_response( + req: ReplyHandleHttp, + ) -> Result> { + match req.try_send_recv() { + Ok(message) => match message { + StacksHttpMessage::Request(_) => { + warn!("Received response: not a HTTP response"); + return Err(Err(net_error::InvalidMessage)); } - }; - let response = HttpResponseType::HeaderStream(response_metadata); - response.send(http, fd).and_then(|_| Ok(Some(stream))) + StacksHttpMessage::Response(http_response) => Ok(http_response), + StacksHttpMessage::Error(_, http_response) => Ok(http_response), + }, + Err(res) => Err(res), } } - /// Handle a GET block. Start streaming the reply. - /// The response's preamble (but not the block data) will be synchronously written to the fd - /// (so use a fd that can buffer!) - /// Return a StreamCursor struct for the block that we're sending, so we can continue to - /// make progress sending it. - fn handle_getblock( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - index_block_hash: &StacksBlockId, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - monitoring::increment_stx_blocks_served_counter(); - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); + /// Make progress on our request/response + fn recv_inbound_response(&mut self) -> Result<(), net_error> { + // make progress on our pending request (if it exists). + let in_progress = self.pending_request.is_some(); + let is_pending = self.pending_response.is_none(); - // do we have this block? - match StacksChainState::has_block_indexed(&chainstate.blocks_path, index_block_hash) { - Ok(false) => { - return ConversationHttp::handle_notfound( - http, - fd, - response_metadata, - format!("No such block {}", index_block_hash.to_hex()), - ); - } - Err(e) => { - // nope -- error trying to check - warn!("Failed to serve block {:?}: {:?}", req, &e); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to query block {}", index_block_hash.to_hex()), - ); - response.send(http, fd).and_then(|_| Ok(None)) - } - Ok(true) => { - // yup! start streaming it back - let stream = StreamCursor::new_block(index_block_hash.clone()); - let response = HttpResponseType::BlockStream(response_metadata); - response.send(http, fd).and_then(|_| Ok(Some(stream))) - } - } - } + let pending_request = self.pending_request.take(); + let response = match pending_request { + None => Ok(self.pending_response.take()), + Some(req) => match Self::try_send_recv_response(req) { + Ok(response) => Ok(Some(response)), + Err(res) => match res { + Ok(handle) => { + // try again + self.pending_request = Some(handle); + Ok(self.pending_response.take()) + } + Err(e) => Err(e), + }, + }, + }?; - /// Handle a GET confirmed microblock stream, by _anchor block hash_. Start streaming the reply. - /// The response's preamble (but not the block data) will be synchronously written to the fd - /// (so use a fd that can buffer!) - /// Return a StreamCursor struct for the block that we're sending, so we can continue to - /// make progress sending it. 
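try_send_recv_response above relies on a nested-Result convention: Ok carries a finished response, while the outer Err carries either Ok(handle), meaning "not done, poll again later with this handle", or Err(e), meaning the request failed. A compact illustration of how a caller re-stores the handle for the next pass, using invented PendingHandle and Response types rather than the real ReplyHandleHttp:

/// Hypothetical stand-ins for the real request handle and response types.
struct PendingHandle {
    polls_remaining: u32,
}
#[derive(Debug)]
struct Response(&'static str);
#[derive(Debug)]
struct NetError(&'static str);

/// Ok(response) when finished; Err(Ok(handle)) to retry later; Err(Err(e)) on failure.
fn try_send_recv(mut handle: PendingHandle) -> Result<Response, Result<PendingHandle, NetError>> {
    if handle.polls_remaining == 0 {
        Ok(Response("200 OK"))
    } else {
        handle.polls_remaining -= 1;
        Err(Ok(handle)) // not ready yet; give the handle back to the caller
    }
}

struct Conversation {
    pending_request: Option<PendingHandle>,
    pending_response: Option<Response>,
}

impl Conversation {
    /// Mirrors the shape of recv_inbound_response: take the pending handle, poll it,
    /// and either record the response, re-store the handle, or surface the error.
    fn poll(&mut self) -> Result<(), NetError> {
        if let Some(handle) = self.pending_request.take() {
            match try_send_recv(handle) {
                Ok(resp) => self.pending_response = Some(resp),
                Err(Ok(handle)) => self.pending_request = Some(handle), // try again next pass
                Err(Err(e)) => return Err(e),
            }
        }
        Ok(())
    }
}

fn main() -> Result<(), NetError> {
    let mut convo = Conversation {
        pending_request: Some(PendingHandle { polls_remaining: 2 }),
        pending_response: None,
    };
    while convo.pending_response.is_none() {
        convo.poll()?;
    }
    println!("got {:?}", convo.pending_response.take().unwrap());
    Ok(())
}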
- fn handle_getmicroblocks_confirmed( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - index_anchor_block_hash: &StacksBlockId, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - monitoring::increment_stx_confirmed_micro_blocks_served_counter(); - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); + self.pending_response = response; - match chainstate.has_processed_microblocks(index_anchor_block_hash) { - Ok(true) => {} - Ok(false) => { - return ConversationHttp::handle_notfound( - http, - fd, - response_metadata, - format!( - "No such confirmed microblock stream for anchor block {}", - &index_anchor_block_hash - ), - ); - } - Err(e) => { - return ConversationHttp::handle_server_error( - http, - fd, - response_metadata, - format!( - "Failed to query confirmed microblock stream {:?}: {:?}", - req, &e - ), - ); - } + if in_progress && self.pending_request.is_none() { + test_debug!( + "{:?},id={}: HTTP request finished", + &self.peer_host, + self.conn_id + ); } - match chainstate.get_confirmed_microblock_index_hash(index_anchor_block_hash) { - Err(e) => { - return ConversationHttp::handle_server_error( - http, - fd, - response_metadata, - format!( - "Failed to serve confirmed microblock stream {:?}: {:?}", - req, &e - ), - ); - } - Ok(None) => { - return ConversationHttp::handle_notfound( - http, - fd, - response_metadata, - format!( - "No such confirmed microblock stream for anchor block {}", - &index_anchor_block_hash - ), - ); - } - Ok(Some(tail_index_microblock_hash)) => { - let (response, stream_opt) = match StreamCursor::new_microblock_confirmed( - chainstate, - tail_index_microblock_hash.clone(), - ) { - Ok(stream) => ( - HttpResponseType::MicroblockStream(response_metadata), - Some(stream), - ), - Err(chain_error::NoSuchBlockError) => ( - HttpResponseType::NotFound( - response_metadata, - format!( - "No such confirmed microblock stream ending with {}", - tail_index_microblock_hash.to_hex() - ), - ), - None, - ), - Err(_e) => { - debug!( - "Failed to load confirmed microblock stream {}: {:?}", - &tail_index_microblock_hash, &_e - ); - ( - HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to query confirmed microblock stream {}", - tail_index_microblock_hash.to_hex() - ), - ), - None, - ) - } - }; - response.send(http, fd).and_then(|_| Ok(stream_opt)) - } + if is_pending && self.pending_response.is_some() { + test_debug!( + "{:?},id={}: HTTP response finished", + &self.peer_host, + self.conn_id + ); } - } - /// Handle a GET confirmed microblock stream, by last _index microblock hash_ in the stream. Start streaming the reply. - /// The response's preamble (but not the block data) will be synchronously written to the fd - /// (so use a fd that can buffer!) - /// Return a StreamCursor struct for the block that we're sending, so we can continue to - /// make progress sending it. - fn handle_getmicroblocks_indexed( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - tail_index_microblock_hash: &StacksBlockId, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - monitoring::increment_stx_micro_blocks_served_counter(); - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - // do we have this processed microblock stream? 
- match StacksChainState::has_processed_microblocks_indexed( - chainstate.db(), - tail_index_microblock_hash, - ) { - Ok(false) => { - // nope - return ConversationHttp::handle_notfound( - http, - fd, - response_metadata, - format!( - "No such confirmed microblock stream ending with {}", - &tail_index_microblock_hash - ), - ); - } - Err(e) => { - // nope - return ConversationHttp::handle_server_error( - http, - fd, - response_metadata, - format!( - "Failed to serve confirmed microblock stream {:?}: {:?}", - req, &e - ), - ); - } - Ok(true) => { - // yup! start streaming it back - let (response, stream_opt) = match StreamCursor::new_microblock_confirmed( - chainstate, - tail_index_microblock_hash.clone(), - ) { - Ok(stream) => ( - HttpResponseType::MicroblockStream(response_metadata), - Some(stream), - ), - Err(chain_error::NoSuchBlockError) => ( - HttpResponseType::NotFound( - response_metadata, - format!( - "No such confirmed microblock stream ending with {}", - tail_index_microblock_hash.to_hex() - ), - ), - None, - ), - Err(_e) => { - debug!( - "Failed to load confirmed indexed microblock stream {}: {:?}", - &tail_index_microblock_hash, &_e - ); - ( - HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to query confirmed microblock stream {}", - tail_index_microblock_hash.to_hex() - ), - ), - None, - ) - } - }; - response.send(http, fd).and_then(|_| Ok(stream_opt)) - } - } + Ok(()) } - /// Handle a GET token transfer cost. Reply the entire response. - /// TODO: accurately estimate the cost/length fee for token transfers, based on mempool - /// pressure. - fn handle_token_transfer_cost( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); + /// Try to get our response + pub fn try_get_response(&mut self) -> Option { + self.pending_response.take() + } - // todo -- need to actually estimate the cost / length for token transfers - // right now, it just uses the minimum. - let fee = MINIMUM_TX_FEE_RATE_PER_BYTE; - let response = HttpResponseType::TokenTransferCost(response_metadata, fee); - response.send(http, fd).map(|_| ()) + /// Make progress on in-flight messages. + pub fn try_flush(&mut self) -> Result<(), net_error> { + self.send_outbound_responses()?; + self.recv_inbound_response()?; + Ok(()) } - /// Handle a GET on an existing account, given the current chain tip. Optionally supplies a - /// MARF proof for each account detail loaded from the chain tip. 
- fn handle_get_account_entry( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - account: &PrincipalData, - with_proof: bool, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_account_balance(&account); - let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; - let v1_unlock_height = clarity_db.get_v1_unlock_height(); - let v2_unlock_height = clarity_db.get_v2_unlock_height(); - let (balance, balance_proof) = if with_proof { - clarity_db - .get_with_proof::(&key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) - } else { - clarity_db - .get::(&key) - .map(|a| (a, None)) - .unwrap_or_else(|| (STXBalance::zero(), None)) - }; + /// Is the connection idle? + pub fn is_idle(&self) -> bool { + self.pending_response.is_none() + && self.connection.inbox_len() == 0 + && self.connection.outbox_len() == 0 + && self.reply_streams.len() == 0 + } - let key = ClarityDatabase::make_key_for_account_nonce(&account); - let (nonce, nonce_proof) = if with_proof { - clarity_db - .get_with_proof(&key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (0, Some("".into()))) - } else { - clarity_db - .get(&key) - .map(|a| (a, None)) - .unwrap_or_else(|| (0, None)) - }; + /// Is the conversation out of pending data? + /// Don't consider it drained if we haven't received anything yet + pub fn is_drained(&self) -> bool { + ((self.total_request_count > 0 && self.total_reply_count > 0) + || self.pending_error_response) + && self.is_idle() + } - let unlocked = balance.get_available_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - v2_unlock_height, - ); - let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - v2_unlock_height, - ); + /// Should the connection be kept alive even if drained? + pub fn is_keep_alive(&self) -> bool { + self.keep_alive + } - let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); - let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); + /// When was the last time we got an inbound request? + pub fn get_last_request_time(&self) -> u64 { + self.last_request_timestamp + } - AccountEntryResponse { - balance, - locked, - unlock_height, - nonce, - balance_proof, - nonce_proof, - } - }) - }) { - Ok(Some(data)) => HttpResponseType::GetAccount(response_metadata, data), - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) - } - }; + /// When was the last time we sent data as part of an outbound response? + pub fn get_last_response_time(&self) -> u64 { + self.last_response_timestamp + } - response.send(http, fd).map(|_| ()) + /// When was this converation conencted? + pub fn get_connection_time(&self) -> u64 { + self.connection_time } - /// Handle a GET on a smart contract's data var, given the current chain tip. Optionally - /// supplies a MARF proof for the value. 
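The small predicates added above (is_idle, is_drained, is_keep_alive and the timestamp getters) presumably exist so that whatever owns the conversation can decide when to close it. The following sketch shows one plausible pruning rule built on those accessors; the trait, the timeout, and the rule itself are assumptions for illustration, not the repository's actual policy.

/// Hypothetical view of a conversation, mirroring the accessors added above.
trait HttpConversation {
    fn is_drained(&self) -> bool;
    fn is_keep_alive(&self) -> bool;
    fn get_last_request_time(&self) -> u64;
}

/// Close a conversation once it has nothing left to do and either asked for
/// "Connection: close" or has been quiet for longer than `idle_timeout` seconds.
fn should_close<C: HttpConversation>(convo: &C, now: u64, idle_timeout: u64) -> bool {
    if !convo.is_drained() {
        return false;
    }
    !convo.is_keep_alive() || now.saturating_sub(convo.get_last_request_time()) > idle_timeout
}

struct FakeConvo {
    drained: bool,
    keep_alive: bool,
    last_request: u64,
}

impl HttpConversation for FakeConvo {
    fn is_drained(&self) -> bool { self.drained }
    fn is_keep_alive(&self) -> bool { self.keep_alive }
    fn get_last_request_time(&self) -> u64 { self.last_request }
}

fn main() {
    let convo = FakeConvo { drained: true, keep_alive: true, last_request: 100 };
    // Keep-alive conversation, but idle for 400 seconds against a 300-second timeout.
    assert!(should_close(&convo, 500, 300));
    // Same conversation checked only 50 seconds after its last request: keep it.
    assert!(!should_close(&convo, 150, 300));
    println!("pruning checks passed");
}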
- fn handle_get_data_var( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - var_name: &ClarityName, - with_proof: bool, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); - - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_trip( - &contract_identifier, - StoreType::Variable, - var_name, - ); - - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_with_proof(&key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - clarity_db.get(&key).map(|a| (a, None))? - }; - - let data = format!("0x{}", value_hex); - Some(DataVarResponse { data, marf_proof }) - }) - }) { - Ok(Some(Some(data))) => HttpResponseType::GetDataVar(response_metadata, data), - Ok(Some(None)) => { - HttpResponseType::NotFound(response_metadata, "Data var not found".into()) - } - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) - } - }; - - response.send(http, fd).map(|_| ()) - } - - fn handle_get_constant_val( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - constant_name: &ClarityName, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); - - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let contract = clarity_db.get_contract(&contract_identifier).ok()?; - - let cst = contract - .contract_context - .lookup_variable(constant_name)? - .serialize_to_hex(); - - let data = format!("0x{cst}"); - Some(ConstantValResponse { data }) - }) - }) { - Ok(Some(Some(data))) => HttpResponseType::GetConstantVal(response_metadata, data), - Ok(Some(None)) => { - HttpResponseType::NotFound(response_metadata, "Constant not found".into()) - } - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) - } - }; - - response.send(http, fd).map(|_| ()) - } - - /// Handle a GET on a smart contract's data map, given the current chain tip. Optionally - /// supplies a MARF proof for the value. 
- fn handle_get_map_entry( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - map_name: &ClarityName, - key: &Value, - with_proof: bool, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); + /// Make progress on in-flight requests and replies. + /// Returns the list of messages we'll need to forward to the peer network + pub fn chat( + &mut self, + node: &mut StacksNodeState, + ) -> Result, net_error> { + // if we have an in-flight error, then don't take any more requests. + if self.pending_error_response { + return Ok(vec![]); + } - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_data_map_entry( - &contract_identifier, - map_name, - key, - ); - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_with_proof(&key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none().serialize_to_hex(), Some("".into())) - }) - } else { - clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none().serialize_to_hex(), None) - }) - }; + // handle in-bound HTTP request(s) + let num_inbound = self.connection.inbox_len(); + let mut ret = vec![]; + test_debug!("{:?}: {} HTTP requests pending", &self, num_inbound); - let data = format!("0x{}", value_hex); - MapEntryResponse { data, marf_proof } - }) - }) { - Ok(Some(data)) => HttpResponseType::GetMapEntry(response_metadata, data), - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) + for _i in 0..num_inbound { + let msg = match self.connection.next_inbox_message() { + None => { + continue; } + Some(m) => m, }; - response.send(http, fd).map(|_| ()) - } - - /// Handle a POST to run a read-only function call with the given parameters on the given chain - /// tip. Returns the result of the function call. Returns a CallReadOnlyResponse on success. 
- fn handle_readonly_function_call( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - function: &ClarityName, - sender: &PrincipalData, - sponsor: Option<&PrincipalData>, - args: &[Value], - options: &ConnectionOptions, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); - - let args: Vec<_> = args - .iter() - .map(|x| SymbolicExpression::atom_value(x.clone())) - .collect(); - let mainnet = chainstate.mainnet; - let chain_id = chainstate.chain_id; - let mut cost_limit = options.read_only_call_limit.clone(); - cost_limit.write_length = 0; - cost_limit.write_count = 0; - - let data_opt_res = - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - let cost_track = clarity_tx - .with_clarity_db_readonly(|clarity_db| { - LimitedCostTracker::new_mid_block( - mainnet, chain_id, cost_limit, clarity_db, epoch, - ) - }) - .map_err(|_| { - ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) - })?; - - let clarity_version = clarity_tx - .with_analysis_db_readonly(|analysis_db| { - analysis_db.get_clarity_version(&contract_identifier) - }) - .map_err(|_| { - ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( - "{}", - &contract_identifier - ))) + match msg { + StacksHttpMessage::Request(req) => { + // new request that we can handle + self.total_request_count += 1; + self.last_request_timestamp = get_epoch_time_secs(); + let start_time = Instant::now(); + let path = req.request_path().to_string(); + let msg_opt = monitoring::instrument_http_request_handler(req, |req| { + self.handle_request(req, node) })?; - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - clarity_version, - sender.clone(), - sponsor.cloned(), - cost_track, - |env| { - // we want to execute any function as long as no actual writes are made as - // opposed to be limited to purely calling `define-read-only` functions, - // so use `read_only = false`. 
This broadens the number of functions that - // can be called, and also circumvents limitations on `define-read-only` - // functions that can not use `contrac-call?`, even when calling other - // read-only functions - env.execute_contract(&contract_identifier, function.as_str(), &args, false) - }, - ) - }); - - let response = match data_opt_res { - Ok(Some(Ok(data))) => HttpResponseType::CallReadOnlyFunction( - response_metadata, - CallReadOnlyResponse { - okay: true, - result: Some(format!("0x{}", data.serialize_to_hex())), - cause: None, - }, - ), - Ok(Some(Err(e))) => match e { - Unchecked(CheckErrors::CostBalanceExceeded(actual_cost, _)) - if actual_cost.write_count > 0 => - { - HttpResponseType::CallReadOnlyFunction( - response_metadata, - CallReadOnlyResponse { - okay: false, - result: None, - cause: Some("NotReadOnly".to_string()), - }, - ) - } - _ => HttpResponseType::CallReadOnlyFunction( - response_metadata, - CallReadOnlyResponse { - okay: false, - result: None, - cause: Some(e.to_string()), - }, - ), - }, - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) - } - }; - response.send(http, fd).map(|_| ()) - } - - /// Handle a GET to fetch a contract's source code, given the chain tip. Optionally returns a - /// MARF proof as well. - fn handle_get_contract_src( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - with_proof: bool, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); - - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let source = db.get_contract_src(&contract_identifier)?; - let contract_commit_key = make_contract_hash_key(&contract_identifier); - let (contract_commit, proof) = if with_proof { - db.get_with_proof::(&contract_commit_key) - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .expect("BUG: obtained source, but couldn't get contract commit") - } else { - db.get::(&contract_commit_key) - .map(|a| (a, None)) - .expect("BUG: obtained source, but couldn't get contract commit") - }; - - let publish_height = contract_commit.block_height; - Some(ContractSrcResponse { - source, - publish_height, - marf_proof: proof, - }) - }) - }) { - Ok(Some(Some(data))) => HttpResponseType::GetContractSrc(response_metadata, data), - Ok(Some(None)) => HttpResponseType::NotFound( - response_metadata, - "No contract source data found".into(), - ), - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) - } - }; - - response.send(http, fd).map(|_| ()) - } - - /// Handle a GET to fetch whether or not a contract implements a certain trait - fn handle_get_is_trait_implemented( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - trait_id: &TraitIdentifier, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, 
Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); + debug!("Processed HTTPRequest"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let analysis = db.load_contract_analysis(&contract_identifier)?; - if analysis.implemented_traits.contains(trait_id) { - Some(GetIsTraitImplementedResponse { - is_implemented: true, - }) - } else { - let trait_defining_contract = - db.load_contract_analysis(&trait_id.contract_identifier)?; - let trait_definition = - trait_defining_contract.get_defined_trait(&trait_id.name)?; - let is_implemented = analysis - .check_trait_compliance( - &db.get_clarity_epoch_version(), - trait_id, - trait_definition, - ) - .is_ok(); - Some(GetIsTraitImplementedResponse { is_implemented }) + if let Some(msg) = msg_opt { + ret.push(msg); } - }) - }) { - Ok(Some(Some(data))) => { - HttpResponseType::GetIsTraitImplemented(response_metadata, data) - } - Ok(Some(None)) => HttpResponseType::NotFound( - response_metadata, - "No contract analysis found or trait definition not found".into(), - ), - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) } - }; - - response.send(http, fd).map(|_| ()) - } - - /// Handle a GET to fetch a contract's analysis data, given the chain tip. Note that this isn't - /// something that's anchored to the blockchain, and can be different across different versions - /// of Stacks -- callers must trust the Stacks node to return correct analysis data. - /// Callers who don't trust the Stacks node should just fetch the contract source - /// code and analyze it offline. 
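The removed handle_readonly_function_call above enforces "read-only" indirectly: it runs the call with a cost limit whose write_length and write_count are zeroed, and then treats a CostBalanceExceeded error whose actual cost has a non-zero write count as the NotReadOnly case. A toy rendering of just that classification step, with invented Cost and CallError types in place of Clarity's real cost tracker and error enum:

/// Invented stand-ins for the cost structure and error type used by the removed handler.
struct Cost {
    write_count: u64,
}

enum CallError {
    CostBalanceExceeded(Cost),
    Runtime(&'static str),
}

/// Classify a failed call the way the removed handler did: the call ran with a cost
/// limit whose write budget was forced to zero, so exceeding that budget with a
/// non-zero write count means the function tried to write, i.e. it was not read-only.
/// Every other failure is surfaced as an ordinary execution error.
fn classify_failure(err: &CallError) -> &'static str {
    match err {
        CallError::CostBalanceExceeded(cost) if cost.write_count > 0 => "NotReadOnly",
        CallError::CostBalanceExceeded(_) => "ExecutionError",
        CallError::Runtime(_) => "ExecutionError",
    }
}

fn main() {
    let wrote = CallError::CostBalanceExceeded(Cost { write_count: 3 });
    let ran_too_long = CallError::CostBalanceExceeded(Cost { write_count: 0 });
    let failed = CallError::Runtime("division by zero");
    assert_eq!(classify_failure(&wrote), "NotReadOnly");
    assert_eq!(classify_failure(&ran_too_long), "ExecutionError");
    assert_eq!(classify_failure(&failed), "ExecutionError");
    println!("read-only classification checks passed");
}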
- fn handle_get_contract_abi( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - tip: &StacksBlockId, - contract_addr: &StacksAddress, - contract_name: &ContractName, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let contract_identifier = - QualifiedContractIdentifier::new(contract_addr.clone().into(), contract_name.clone()); + StacksHttpMessage::Error(path, resp) => { + // new request, but resulted in an error when parsing it + self.total_request_count += 1; + self.last_request_timestamp = get_epoch_time_secs(); + let start_time = Instant::now(); + self.reply_error(resp)?; - let response = - match chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - clarity_tx.with_analysis_db_readonly(|db| { - let contract = db.load_contract(&contract_identifier, &epoch)?; - contract.contract_interface - }) - }) { - Ok(Some(Some(data))) => HttpResponseType::GetContractABI(response_metadata, data), - Ok(Some(None)) => HttpResponseType::NotFound( - response_metadata, - "No contract interface data found".into(), - ), - Ok(None) | Err(_) => { - HttpResponseType::NotFound(response_metadata, "Chain tip not found".into()) + debug!("Processed HTTPRequest Error"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); } - }; - - response.send(http, fd).map(|_| ()) - } - - /// Handle a GET unconfirmed microblock stream. Start streaming the reply. - /// The response's preamble (but not the block data) will be synchronously written to the fd - /// (so use a fd that can buffer!) - /// Return a StreamCursor struct for the block that we're sending, so we can continue to - /// make progress sending it. - fn handle_getmicroblocks_unconfirmed( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - index_anchor_block_hash: &StacksBlockId, - min_seq: u16, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - // do we have this unconfirmed microblock stream? - match chainstate.has_any_staging_microblock_indexed(index_anchor_block_hash, min_seq) { - Ok(false) => { - // nope - let response = HttpResponseType::NotFound( - response_metadata, - format!( - "No such unconfirmed microblock stream for {} at or after {}", - index_anchor_block_hash.to_hex(), - min_seq - ), - ); - response.send(http, fd).and_then(|_| Ok(None)) - } - Err(e) => { - // nope - warn!( - "Failed to serve confirmed microblock stream {:?}: {:?}", - req, &e - ); - let response = HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to query unconfirmed microblock stream for {} at or after {}", - index_anchor_block_hash.to_hex(), - min_seq - ), - ); - response.send(http, fd).and_then(|_| Ok(None)) - } - Ok(true) => { - // yup! 
start streaming it back - let (response, stream_opt) = match StreamCursor::new_microblock_unconfirmed( - chainstate, - index_anchor_block_hash.clone(), - min_seq, - ) { - Ok(stream) => ( - HttpResponseType::MicroblockStream(response_metadata), - Some(stream), - ), - Err(chain_error::NoSuchBlockError) => ( - HttpResponseType::NotFound( - response_metadata, - format!( - "No such unconfirmed microblock stream starting with {}", - index_anchor_block_hash.to_hex() - ), - ), - None, - ), - Err(_e) => { - debug!( - "Failed to load unconfirmed microblock stream {}: {:?}", - &index_anchor_block_hash, &_e - ); - ( - HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to query unconfirmed microblock stream {}", - index_anchor_block_hash.to_hex() - ), - ), - None, - ) - } - }; - response.send(http, fd).and_then(|_| Ok(stream_opt)) - } - } - } - - /// Handle a GET unconfirmed transaction. - /// The response will be synchronously written to the fd. - fn handle_gettransaction_unconfirmed( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - chainstate: &StacksChainState, - mempool: &MemPoolDB, - txid: &Txid, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - // present in the unconfirmed state? - if let Some(ref unconfirmed) = chainstate.unconfirmed_state.as_ref() { - if let Some((transaction, mblock_hash, seq)) = - unconfirmed.get_unconfirmed_transaction(txid) - { - let response = HttpResponseType::UnconfirmedTransaction( - response_metadata, - UnconfirmedTransactionResponse { - status: UnconfirmedTransactionStatus::Microblock { - block_hash: mblock_hash, - seq: seq, - }, - tx: to_hex(&transaction.serialize_to_vec()), - }, - ); - return response.send(http, fd).map(|_| ()); - } - } - - // present in the mempool? - if let Some(txinfo) = MemPoolDB::get_tx(mempool.conn(), txid)? { - let response = HttpResponseType::UnconfirmedTransaction( - response_metadata, - UnconfirmedTransactionResponse { - status: UnconfirmedTransactionStatus::Mempool, - tx: to_hex(&txinfo.tx.serialize_to_vec()), - }, - ); - return response.send(http, fd).map(|_| ()); - } - - // not found - let response = HttpResponseType::NotFound( - response_metadata, - format!("No such unconfirmed transaction {}", txid), - ); - return response.send(http, fd).map(|_| ()); - } - - /// Load up the canonical Stacks chain tip. Note that this is subject to both burn chain block - /// Stacks block availability -- different nodes with different partial replicas of the Stacks chain state - /// will return different values here. - /// - /// # Warn - /// - There is a potential race condition. If this function is loading the latest unconfirmed - /// tip, that tip may get invalidated by the time it is used in `maybe_read_only_clarity_tx`, - /// which is used to load clarity state at a particular tip (which would lead to a 404 error). - /// If this race condition occurs frequently, we can modify `maybe_read_only_clarity_tx` to - /// re-load the unconfirmed chain tip. Refer to issue #2997. - /// - /// # Inputs - /// - `tip_req` is given by the HTTP request as the optional query parameter for the chain tip - /// hash. It will be UseLatestAnchoredTip if there was no parameter given. If it is set to - /// `latest`, the parameter will be set to UseLatestUnconfirmedTip. 
- fn handle_load_stacks_chain_tip( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - tip_req: &TipRequest, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - match tip_req { - TipRequest::UseLatestUnconfirmedTip => { - let unconfirmed_chain_tip_opt = match &mut chainstate.unconfirmed_state { - Some(unconfirmed_state) => { - match unconfirmed_state.get_unconfirmed_state_if_exists() { - Ok(res) => res, - Err(msg) => { - let response_metadata = - HttpResponseMetadata::from_http_request_type( - req, - Some(canonical_stacks_tip_height), - ); - let response = HttpResponseType::NotFound(response_metadata, msg); - return response.send(http, fd).and_then(|_| Ok(None)); - } - } - } - None => None, - }; - - if let Some(unconfirmed_chain_tip) = unconfirmed_chain_tip_opt { - Ok(Some(unconfirmed_chain_tip)) - } else { - match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( - &tip.consensus_hash, - &tip.anchored_block_hash, - ))), + StacksHttpMessage::Response(resp) => { + // Is there someone else waiting for this message? If so, pass it along. + // (this _should_ be our pending_request handle) + match self + .connection + .fulfill_request(StacksHttpMessage::Response(resp)) + { None => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - req, - Some(canonical_stacks_tip_height), - ); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::NotFound( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(http, fd).and_then(|_| Ok(None)) + test_debug!("{:?}: Fulfilled pending HTTP request", &self); + } + Some(_msg) => { + // unsolicited; discard + test_debug!("{:?}: Dropping unsolicited HTTP response", &self); } } } } - TipRequest::SpecificTip(tip) => Ok(Some(*tip).clone()), - TipRequest::UseLatestAnchoredTip => match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( - &tip.consensus_hash, - &tip.anchored_block_hash, - ))), - None => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - req, - Some(canonical_stacks_tip_height), - ); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(http, fd).and_then(|_| Ok(None)) - } - }, } - } - fn handle_load_stacks_chain_tip_hashes( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - tip: StacksBlockId, - chainstate: &StacksChainState, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - match chainstate.get_block_header_hashes(&tip)? 
{ - Some((ch, bl)) => { - return Ok(Some((ch, bl))); - } - None => {} - } - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(http, fd).and_then(|_| Ok(None)) + Ok(ret) } - fn handle_post_fee_rate_estimate( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - handler_args: &RPCHandlerArgs, - sortdb: &SortitionDB, - tx: &TransactionPayload, - estimated_len: u64, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - let stacks_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height)? - .ok_or_else(|| { - warn!( - "Failed to get fee rate estimate because could not load Stacks epoch for canonical burn height = {}", - tip.block_height - ); - net_error::ChainstateError("Could not load Stacks epoch for canonical burn height".into()) - })?; - if let Some((cost_estimator, fee_estimator, metric)) = handler_args.get_estimators_ref() { - let estimated_cost = match cost_estimator.estimate_cost(tx, &stacks_epoch.epoch_id) { - Ok(x) => x, - Err(e) => { - debug!( - "Estimator RPC endpoint failed to estimate tx: {}", - tx.name() - ); - return HttpResponseType::BadRequestJSON(response_metadata, e.into_json()) - .send(http, fd); - } - }; + /// Remove all timed-out messages, and ding the remote peer as unhealthy + pub fn clear_timeouts(&mut self) -> () { + self.connection.drain_timeouts(); + } - let scalar_cost = - metric.from_cost_and_len(&estimated_cost, &stacks_epoch.block_limit, estimated_len); - let fee_rates = match fee_estimator.get_rate_estimates() { - Ok(x) => x, + /// Load data into our HTTP connection + pub fn recv(&mut self, r: &mut R) -> Result { + let mut total_recv = 0; + loop { + let nrecv = match self.connection.recv_data(r) { + Ok(nr) => nr, Err(e) => { - debug!( - "Estimator RPC endpoint failed to estimate fees for tx: {}", - tx.name() - ); - return HttpResponseType::BadRequestJSON(response_metadata, e.into_json()) - .send(http, fd); + debug!("{:?}: failed to recv: {:?}", self, &e); + return Err(e); } }; - let mut estimations = RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); - - let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; - - for estimate in estimations.iter_mut() { - if estimate.fee < minimum_fee { - estimate.fee = minimum_fee; - } + total_recv += nrecv; + if nrecv > 0 { + self.last_request_timestamp = get_epoch_time_secs(); + } else { + break; } - - let response = HttpResponseType::TransactionFeeEstimation( - response_metadata, - RPCFeeEstimateResponse { - estimated_cost, - estimations, - estimated_cost_scalar: scalar_cost, - cost_scalar_change_by_byte: metric.change_per_byte(), - }, - ); - response.send(http, fd) - } else { - debug!("Fee and cost estimation not configured on this stacks node"); - let response = HttpResponseType::BadRequestJSON( - response_metadata, - json!({ - "error": "Fee and Cost Estimation not configured on this Stacks node", - "reason": "CostEstimationDisabled", - }), - ); - response.send(http, fd) } + monitoring::update_inbound_rpc_bandwidth(total_recv as i64); + Ok(total_recv) } - /// Handle a transaction. 
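The new chat above drains the inbound queue and branches on the message kind: a parsed request is handled and may produce a message to relay to the peer network, a parse error gets an error reply, and a response is matched against our own pending request (or dropped if unsolicited). The dispatcher below shows that shape with a made-up Msg enum and counters; it is not the real StacksHttpMessage or ConversationHttp API.

/// Hypothetical inbound message kinds, standing in for StacksHttpMessage.
enum Msg {
    Request(String),
    Error(String),
    Response(String),
}

#[derive(Default)]
struct Convo {
    inbox: Vec<Msg>,
    total_request_count: u64,
    pending_response: Option<String>,
    error_replies: Vec<String>,
}

impl Convo {
    /// Drain the inbox, handling each message; return anything that must be relayed onward.
    fn chat(&mut self) -> Vec<String> {
        let mut relay = vec![];
        for msg in self.inbox.drain(..) {
            match msg {
                Msg::Request(path) => {
                    self.total_request_count += 1;
                    // A handler would run here; pretend every request produces a relay message.
                    relay.push(format!("relay result of {path}"));
                }
                Msg::Error(reason) => {
                    self.total_request_count += 1;
                    self.error_replies.push(reason);
                }
                Msg::Response(body) => {
                    // Fulfill our pending outbound request, if any; otherwise drop it.
                    if self.pending_response.is_none() {
                        self.pending_response = Some(body);
                    }
                }
            }
        }
        relay
    }
}

fn main() {
    let mut convo = Convo::default();
    convo.inbox = vec![
        Msg::Request("/v2/info".into()),
        Msg::Error("bad request line".into()),
        Msg::Response("200 OK".into()),
    ];
    let relayed = convo.chat();
    println!(
        "handled {} request(s), queued {} error repl(ies), relayed {} message(s), pending response: {:?}",
        convo.total_request_count,
        convo.error_replies.len(),
        relayed.len(),
        convo.pending_response
    );
}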
Directly submit it to the mempool so the client can see any - /// rejection reasons up-front (different from how the peer network handles it). Indicate - /// whether or not the transaction was accepted (and thus needs to be forwarded) in the return - /// value. - fn handle_post_transaction( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - mempool: &mut MemPoolDB, - tx: StacksTransaction, - atlasdb: &mut AtlasDB, - attachment: Option, - event_observer: Option<&dyn MemPoolEventDispatcher>, - canonical_stacks_tip_height: u64, - ast_rules: ASTRules, - ) -> Result { - let txid = tx.txid(); - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let (response, accepted) = if mempool.has_tx(&txid) { - debug!("Mempool already has POSTed transaction {}", &txid); - ( - HttpResponseType::TransactionID(response_metadata, txid), - false, - ) - } else { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - let stacks_epoch = sortdb - .index_conn() - .get_stacks_epoch(tip.block_height as u32) - .ok_or_else(|| { - warn!( - "Failed to store transaction because could not load Stacks epoch for canonical burn height = {}", - tip.block_height - ); - net_error::ChainstateError("Could not load Stacks epoch for canonical burn height".into()) - })?; - - if Relayer::do_static_problematic_checks() - && !Relayer::static_check_problematic_relayed_tx( - chainstate.mainnet, - stacks_epoch.epoch_id, - &tx, - ast_rules, - ) - .is_ok() - { - debug!( - "Transaction {} is problematic in rules {:?}; will not store or relay", - &tx.txid(), - ast_rules - ); - ( - HttpResponseType::TransactionID(response_metadata, txid), - false, - ) - } else { - match mempool.submit( - chainstate, - sortdb, - &consensus_hash, - &block_hash, - &tx, - event_observer, - &stacks_epoch.block_limit, - &stacks_epoch.epoch_id, - ) { - Ok(_) => { - debug!("Mempool accepted POSTed transaction {}", &txid); - ( - HttpResponseType::TransactionID(response_metadata, txid), - true, - ) - } - Err(e) => { - debug!("Mempool rejected POSTed transaction {}: {:?}", &txid, &e); - ( - HttpResponseType::BadRequestJSON(response_metadata, e.into_json(&txid)), - false, - ) - } - } - } - }; - - if let Some(ref attachment) = attachment { - if let TransactionPayload::ContractCall(ref contract_call) = tx.payload { - if atlasdb - .should_keep_attachment(&contract_call.to_clarity_contract_id(), &attachment) - { - atlasdb - .insert_uninstantiated_attachment(attachment) - .map_err(|e| net_error::DBError(e))?; - } - } - } + /// Write data out of our HTTP connection. Write as much as we can + pub fn send(&mut self, w: &mut W) -> Result { + let mut total_sz = 0; + loop { + test_debug!("{:?}: Try to send bytes (total {})", self, total_sz); - response.send(http, fd).and_then(|_| Ok(accepted)) - } + // fill the reply handles in self.connection with data + self.try_flush()?; - /// Handle a block. Directly submit a Stacks block to this node's chain state. - /// Indicate whether or not the block was accepted (i.e. 
it was new, and valid) - fn handle_post_block( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - consensus_hash: &ConsensusHash, - block: &StacksBlock, - canonical_stacks_tip_height: u64, - ) -> Result { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - // is this a consensus hash we recognize? - let (response, accepted) = - match SortitionDB::get_sortition_id_by_consensus(&sortdb.conn(), consensus_hash) { - Ok(Some(_)) => { - // we recognize this consensus hash - let ic = sortdb.index_conn(); - match Relayer::process_new_anchored_block( - &ic, - chainstate, - consensus_hash, - block, - 0, - ) { - Ok(true) => { - debug!( - "Accepted Stacks block {}/{}", - consensus_hash, - &block.block_hash() - ); - ( - HttpResponseType::StacksBlockAccepted( - response_metadata, - StacksBlockHeader::make_index_block_hash( - consensus_hash, - &block.block_hash(), - ), - true, - ), - true, - ) - } - Ok(false) => { - debug!( - "Did not accept Stacks block {}/{}", - consensus_hash, - &block.block_hash() - ); - ( - HttpResponseType::StacksBlockAccepted( - response_metadata, - StacksBlockHeader::make_index_block_hash( - consensus_hash, - &block.block_hash(), - ), - false, - ), - false, - ) - } - Err(e) => { - error!( - "Failed to process anchored block {}/{}: {:?}", - consensus_hash, - &block.block_hash(), - &e - ); - ( - HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to process anchored block {}/{}: {:?}", - consensus_hash, - &block.block_hash(), - &e - ), - ), - false, - ) - } - } - } - Ok(None) => { - debug!( - "Unrecognized consensus hash {} for block {}", - consensus_hash, - &block.block_hash() - ); - ( - HttpResponseType::NotFound( - response_metadata, - format!("No such consensus hash '{}'", consensus_hash), - ), - false, - ) - } + // dump reply handle state into `w` + let sz = match self.connection.send_data(w) { + Ok(sz) => sz, Err(e) => { - error!( - "Failed to query sortition ID by consensus '{}'", - consensus_hash - ); - ( - HttpResponseType::ServerError( - response_metadata, - format!( - "Failed to query sortition ID for consensus hash '{}': {:?}", - consensus_hash, &e - ), - ), - false, - ) + info!("{:?}: failed to send on HTTP conversation: {:?}", self, &e); + return Err(e); } }; - response.send(http, fd).and_then(|_| Ok(accepted)) - } - - /// Handle a microblock. Directly submit it to the microblock store so the client can see any - /// rejection reasons up-front (different from how the peer network handles it). Indicate - /// whether or not the microblock was accepted (and thus needs to be forwarded) in the return - /// value. 
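Context for the removal above: handle_post_block first checked whether the POSTed consensus hash was even known (an unknown hash returned 404 without touching the chainstate), then folded the three outcomes of Relayer::process_new_anchored_block into an HTTP response plus a flag telling the caller whether to announce the block to the peer network. A rough, self-contained sketch of that outcome mapping; the types below are illustrative stand-ins, not the real chainstate or relayer API:

    /// Stand-in for the three responses the removed handler produced.
    /// (`Result<bool, String>` mirrors `Relayer::process_new_anchored_block`
    /// returning Ok(true), Ok(false), or Err(..).)
    #[derive(Debug, PartialEq)]
    enum PostBlockResponse {
        Accepted,             // block was new and attached
        NotAccepted,          // block already known, or not attachable yet
        ServerError(String),  // processing failed outright
    }

    /// Map the processing outcome to (response, relay?) the way the old
    /// handler did: only a freshly accepted block is relayed to peers.
    fn classify_post_block(outcome: Result<bool, String>) -> (PostBlockResponse, bool) {
        match outcome {
            Ok(true) => (PostBlockResponse::Accepted, true),
            Ok(false) => (PostBlockResponse::NotAccepted, false),
            Err(e) => (PostBlockResponse::ServerError(e), false),
        }
    }

    fn main() {
        assert_eq!(classify_post_block(Ok(true)), (PostBlockResponse::Accepted, true));
        assert_eq!(classify_post_block(Ok(false)), (PostBlockResponse::NotAccepted, false));
        let (resp, relay) = classify_post_block(Err("db error".into()));
        assert!(matches!(resp, PostBlockResponse::ServerError(_)));
        assert!(!relay);
    }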
- fn handle_post_microblock( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - microblock: &StacksMicroblock, - canonical_stacks_tip_height: u64, - ) -> Result { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - // make sure we can accept this - let ch_sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) { - Ok(Some(sn)) => sn, - Ok(None) => { - let resp = HttpResponseType::NotFound( - response_metadata, - "No such consensus hash".to_string(), - ); - return resp.send(http, fd).and_then(|_| Ok(false)); - } - Err(e) => { - let resp = HttpResponseType::BadRequestJSON( - response_metadata, - chain_error::DBError(e).into_json(), - ); - return resp.send(http, fd).and_then(|_| Ok(false)); - } - }; - - let sort_handle = sortdb.index_handle(&ch_sn.sortition_id); - let parent_block_snapshot = - Relayer::get_parent_stacks_block_snapshot(&sort_handle, consensus_hash, block_hash)?; - let ast_rules = - SortitionDB::get_ast_rules(&sort_handle, parent_block_snapshot.block_height)?; - let epoch_id = - SortitionDB::get_stacks_epoch(&sort_handle, parent_block_snapshot.block_height)? - .expect("FATAL: no epoch defined") - .epoch_id; - - let (response, accepted) = if !Relayer::static_check_problematic_relayed_microblock( - chainstate.mainnet, - epoch_id, - microblock, - ast_rules, - ) { - info!("Microblock {} from {}/{} is problematic; will not store or relay it, nor its descendants", µblock.block_hash(), consensus_hash, &block_hash); - ( - // NOTE: txid is ignored in chainstate error .into_json() - HttpResponseType::BadRequestJSON( - response_metadata, - chain_error::ProblematicTransaction(Txid([0u8; 32])).into_json(), - ), - false, - ) - } else { - match chainstate.preprocess_streamed_microblock(consensus_hash, block_hash, microblock) - { - Ok(accepted) => { - if accepted { - debug!( - "Accepted uploaded microblock {}/{}-{}", - &consensus_hash, - &block_hash, - µblock.block_hash() - ); - } else { - debug!( - "Did not accept microblock {}/{}-{}", - &consensus_hash, - &block_hash, - µblock.block_hash() - ); - } - - ( - HttpResponseType::MicroblockHash( - response_metadata, - microblock.block_hash(), - ), - accepted, - ) - } - Err(e) => ( - HttpResponseType::BadRequestJSON(response_metadata, e.into_json()), - false, - ), - } - }; - - response.send(http, fd).and_then(|_| Ok(accepted)) - } - - /// Handle a request for mempool transactions in bulk - fn handle_mempool_query( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - query: MemPoolSyncData, - max_txs: u64, - canonical_stacks_tip_height: u64, - page_id: Option, - ) -> Result { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - let response = HttpResponseType::MemPoolTxStream(response_metadata); - let height = chainstate - .get_stacks_chain_tip(sortdb)? 
- .map(|blk| blk.height) - .unwrap_or(0); - - debug!( - "Begin mempool query"; - "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), - "block_height" => height, - "max_txs" => max_txs - ); - - let stream = StreamCursor::new_tx_stream(query, max_txs, height, page_id); - response.send(http, fd).and_then(|_| Ok(stream)) - } - - /// Handle a request for the vector of stacker DB slot metadata - fn handle_get_stackerdb_metadata( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - stackerdbs: &StackerDBs, - stackerdb_contract_id: &QualifiedContractIdentifier, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - let response = if let Ok(slots) = stackerdbs.get_db_slot_metadata(stackerdb_contract_id) { - HttpResponseType::StackerDBMetadata(response_metadata, slots) - } else { - HttpResponseType::NotFound(response_metadata, "No such StackerDB contract".into()) - }; + test_debug!("{:?}: Sent {} bytes (total {})", self, sz, total_sz); - return response.send(http, fd).map(|_| ()); - } - - /// Handle a request for a stacker DB chunk, optionally with a given version - fn handle_get_stackerdb_chunk( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - stackerdbs: &StackerDBs, - stackerdb_contract_id: &QualifiedContractIdentifier, - slot_id: u32, - slot_version: Option, - canonical_stacks_tip_height: u64, - ) -> Result<(), net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - let chunk_res = if let Some(version) = slot_version.as_ref() { - stackerdbs - .get_chunk(stackerdb_contract_id, slot_id, *version) - .map(|chunk_data| chunk_data.map(|chunk_data| chunk_data.data)) - } else { - stackerdbs.get_latest_chunk(stackerdb_contract_id, slot_id) - }; - - let response = match chunk_res { - Ok(Some(chunk)) => HttpResponseType::StackerDBChunk(response_metadata, chunk), - Ok(None) | Err(net_error::NoSuchStackerDB(..)) => { - // not found - HttpResponseType::NotFound(response_metadata, "No such StackerDB chunk".into()) - } - Err(e) => { - // some other error - error!("Failed to load StackerDB chunk"; - "smart_contract_id" => stackerdb_contract_id.to_string(), - "slot_id" => slot_id, - "slot_version" => slot_version, - "error" => format!("{:?}", &e) - ); - HttpResponseType::ServerError( - response_metadata, - format!("Failed to load StackerDB chunk"), - ) - } - }; - - return response.send(http, fd).map(|_| ()); - } - - /// Handle a post for a new StackerDB chunk. - /// If we accept it, then forward it to the relayer as well - /// so an event can be generated for it. - fn handle_post_stackerdb_chunk( - http: &mut StacksHttp, - fd: &mut W, - req: &HttpRequestType, - tx: &mut StackerDBTx, - stackerdb_contract_id: &QualifiedContractIdentifier, - stackerdb_chunk: &StackerDBChunkData, - canonical_stacks_tip_height: u64, - ) -> Result, net_error> { - let response_metadata = - HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); - - if let Err(_e) = tx.get_stackerdb_id(stackerdb_contract_id) { - // shouldn't be necessary (this is checked against the peer network's configured DBs), - // but you never know. 
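The removed handle_get_stackerdb_chunk resolves a chunk in two steps: an explicit slot_version means an exact lookup via get_chunk, no version means get_latest_chunk, and both a missing chunk and a missing StackerDB collapse to a 404. A toy model of that lookup precedence, assuming a simple in-memory map instead of the real StackerDBs store:

    use std::collections::HashMap;

    /// Stand-in chunk store keyed by (slot_id, slot_version). The real
    /// handler asks StackerDBs::get_chunk / get_latest_chunk; this only
    /// models the lookup precedence and the not-found mapping.
    struct SlotStore {
        chunks: HashMap<(u32, u32), Vec<u8>>,
    }

    #[derive(Debug, PartialEq)]
    enum ChunkResponse {
        Chunk(Vec<u8>), // 200 with the chunk body
        NotFound,       // 404: missing chunk, or no such StackerDB at all
    }

    impl SlotStore {
        /// A requested version means an exact lookup; no version means the
        /// highest version stored for the slot wins.
        fn get(&self, slot_id: u32, slot_version: Option<u32>) -> ChunkResponse {
            let found = match slot_version {
                Some(v) => self.chunks.get(&(slot_id, v)).cloned(),
                None => {
                    let latest = self
                        .chunks
                        .keys()
                        .filter(|(sid, _)| *sid == slot_id)
                        .map(|(_, ver)| *ver)
                        .max();
                    latest.and_then(|v| self.chunks.get(&(slot_id, v)).cloned())
                }
            };
            match found {
                Some(data) => ChunkResponse::Chunk(data),
                None => ChunkResponse::NotFound,
            }
        }
    }

    fn main() {
        let mut chunks = HashMap::new();
        chunks.insert((0, 1), b"v1".to_vec());
        chunks.insert((0, 2), b"v2".to_vec());
        let store = SlotStore { chunks };
        assert_eq!(store.get(0, Some(1)), ChunkResponse::Chunk(b"v1".to_vec()));
        assert_eq!(store.get(0, None), ChunkResponse::Chunk(b"v2".to_vec()));
        assert_eq!(store.get(7, None), ChunkResponse::NotFound);
    }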
- let resp = HttpResponseType::NotFound(response_metadata, "No such StackerDB".into()); - return resp.send(http, fd).and_then(|_| Ok(None)); - } - if let Err(_e) = tx.try_replace_chunk( - stackerdb_contract_id, - &stackerdb_chunk.get_slot_metadata(), - &stackerdb_chunk.data, - ) { - let slot_metadata_opt = - match tx.get_slot_metadata(stackerdb_contract_id, stackerdb_chunk.slot_id) { - Ok(slot_opt) => slot_opt, - Err(e) => { - // some other error - error!("Failed to load replaced StackerDB chunk metadata"; - "smart_contract_id" => stackerdb_contract_id.to_string(), - "error" => format!("{:?}", &e) - ); - let resp = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load StackerDB chunk"), - ); - return resp.send(http, fd).and_then(|_| Ok(None)); - } - }; - - let (reason, slot_metadata_opt) = if let Some(slot_metadata) = slot_metadata_opt { - ( - format!("Data for this slot and version already exist"), - Some(slot_metadata), - ) + total_sz += sz; + if sz > 0 { + self.last_response_timestamp = get_epoch_time_secs(); } else { - ( - format!( - "{:?}", - net_error::NoSuchSlot( - stackerdb_contract_id.clone(), - stackerdb_chunk.slot_id - ) - ), - None, - ) - }; - - let ack = StackerDBChunkAckData { - accepted: false, - reason: Some(reason), - metadata: slot_metadata_opt, - }; - let resp = HttpResponseType::StackerDBChunkAck(response_metadata, ack); - return resp.send(http, fd).and_then(|_| Ok(None)); + break; + } } - - let slot_metadata = if let Some(md) = - tx.get_slot_metadata(stackerdb_contract_id, stackerdb_chunk.slot_id)? - { - md - } else { - // shouldn't be reachable - let resp = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load slot metadata after storing chunk"), - ); - return resp.send(http, fd).and_then(|_| Ok(None)); - }; - - // success! - let ack = StackerDBChunkAckData { - accepted: true, - reason: None, - metadata: Some(slot_metadata), - }; - - let resp = HttpResponseType::StackerDBChunkAck(response_metadata, ack); - return resp.send(http, fd).and_then(|_| { - Ok(Some(StacksMessageType::StackerDBPushChunk( - StackerDBPushChunkData { - contract_id: stackerdb_contract_id.clone(), - rc_consensus_hash: ConsensusHash([0u8; 20]), // unused, - chunk_data: stackerdb_chunk.clone(), - }, - ))) - }); + monitoring::update_inbound_rpc_bandwidth(total_sz as i64); + Ok(total_sz) } - /// Handle an external HTTP request. - /// Some requests, such as those for blocks, will create new reply streams. This method adds - /// those new streams into the `reply_streams` set. - /// Returns a StacksMessageType option -- it's Some(...) 
if we need to forward a message to the - /// peer network (like a transaction or a block or microblock) - pub fn handle_request( - &mut self, - req: HttpRequestType, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, - handler_opts: &RPCHandlerArgs, - ) -> Result, net_error> { - let mut reply = self.connection.make_relay_handle(self.conn_id)?; - let keep_alive = req.metadata().keep_alive; - let mut ret = None; - - let stream_opt = match req { - HttpRequestType::GetInfo(ref _md) => { - ConversationHttp::handle_getinfo( - &mut self.connection.protocol, - &mut reply, - &req, - network, - chainstate, - handler_opts, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetPoxInfo(ref _md, ref tip_req) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_getpoxinfo( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - &network.burnchain, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetNeighbors(ref _md) => { - ConversationHttp::handle_getneighbors( - &mut self.connection.protocol, - &mut reply, - &req, - network, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetHeaders(ref _md, ref quantity, ref tip_req) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_getheaders( - &mut self.connection.protocol, - &mut reply, - &req, - &tip, - *quantity, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? - } else { - None - } - } - HttpRequestType::GetBlock(ref _md, ref index_block_hash) => { - ConversationHttp::handle_getblock( - &mut self.connection.protocol, - &mut reply, - &req, - index_block_hash, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? - } - HttpRequestType::GetMicroblocksIndexed(ref _md, ref index_head_hash) => { - ConversationHttp::handle_getmicroblocks_indexed( - &mut self.connection.protocol, - &mut reply, - &req, - index_head_hash, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? - } - HttpRequestType::GetMicroblocksConfirmed(ref _md, ref anchor_index_block_hash) => { - ConversationHttp::handle_getmicroblocks_confirmed( - &mut self.connection.protocol, - &mut reply, - &req, - anchor_index_block_hash, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? 
- } - HttpRequestType::GetMicroblocksUnconfirmed( - ref _md, - ref index_anchor_block_hash, - ref min_seq, - ) => ConversationHttp::handle_getmicroblocks_unconfirmed( - &mut self.connection.protocol, - &mut reply, - &req, - index_anchor_block_hash, - *min_seq, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )?, - HttpRequestType::GetTransactionUnconfirmed(ref _md, ref txid) => { - ConversationHttp::handle_gettransaction_unconfirmed( - &mut self.connection.protocol, - &mut reply, - &req, - chainstate, - mempool, - txid, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetAccount(ref _md, ref principal, ref tip_req, ref with_proof) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_account_entry( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - principal, - *with_proof, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetDataVar( - ref _md, - ref contract_addr, - ref contract_name, - ref var_name, - ref tip_req, - ref with_proof, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_data_var( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - var_name, - *with_proof, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetConstantVal( - ref _md, - ref contract_addr, - ref contract_name, - ref const_name, - ref tip_req, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_constant_val( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - const_name, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetMapEntry( - ref _md, - ref contract_addr, - ref contract_name, - ref map_name, - ref key, - ref tip_req, - ref with_proof, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? 
{ - ConversationHttp::handle_get_map_entry( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - map_name, - key, - *with_proof, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetTransferCost(ref _md) => { - ConversationHttp::handle_token_transfer_cost( - &mut self.connection.protocol, - &mut reply, - &req, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetContractABI( - ref _md, - ref contract_addr, - ref contract_name, - ref tip_req, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_contract_abi( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::FeeRateEstimate(ref _md, ref tx, estimated_len) => { - ConversationHttp::handle_post_fee_rate_estimate( - &mut self.connection.protocol, - &mut reply, - &req, - handler_opts, - sortdb, - tx, - estimated_len, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::CallReadOnlyFunction( - ref _md, - ref ctrct_addr, - ref ctrct_name, - ref as_sender, - ref as_sponsor, - ref func_name, - ref args, - ref tip_req, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_readonly_function_call( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - ctrct_addr, - ctrct_name, - func_name, - as_sender, - as_sponsor.as_ref(), - args, - &self.connection.options, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetContractSrc( - ref _md, - ref contract_addr, - ref contract_name, - ref tip_req, - ref with_proof, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_contract_src( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - *with_proof, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::PostTransaction(ref _md, ref tx, ref attachment) => { - match chainstate.get_stacks_chain_tip(sortdb)? 
{ - Some(tip) => { - let accepted = ConversationHttp::handle_post_transaction( - &mut self.connection.protocol, - &mut reply, - &req, - chainstate, - sortdb, - tip.consensus_hash, - tip.anchored_block_hash, - mempool, - tx.clone(), - &mut network.atlasdb, - attachment.clone(), - handler_opts.event_observer.as_deref(), - network.burnchain_tip.canonical_stacks_tip_height, - network.ast_rules, - )?; - if accepted { - // forward to peer network - ret = Some(StacksMessageType::Transaction(tx.clone())); - } - } - None => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - &req, - Some(network.burnchain_tip.canonical_stacks_tip_height), - ); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(&mut self.connection.protocol, &mut reply)?; - } - } - None - } - HttpRequestType::GetAttachment(ref _md, ref content_hash) => { - ConversationHttp::handle_getattachment( - &mut self.connection.protocol, - &mut reply, - &req, - &mut network.atlasdb, - content_hash.clone(), - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetAttachmentsInv( - ref _md, - ref index_block_hash, - ref pages_indexes, - ) => { - ConversationHttp::handle_getattachmentsinv( - &mut self.connection.protocol, - &mut reply, - &req, - &mut network.atlasdb, - &index_block_hash, - pages_indexes, - &self.connection.options, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::PostBlock(ref _md, ref consensus_hash, ref block) => { - let accepted = ConversationHttp::handle_post_block( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - consensus_hash, - block, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - if accepted { - // inform the peer network so it can announce its presence - ret = Some(StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash.clone(), block.clone())], - })); - } - None - } - HttpRequestType::PostMicroblock(ref _md, ref mblock, ref tip_req) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - if let Some((consensus_hash, block_hash)) = - ConversationHttp::handle_load_stacks_chain_tip_hashes( - &mut self.connection.protocol, - &mut reply, - &req, - tip, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? - { - let accepted = ConversationHttp::handle_post_microblock( - &mut self.connection.protocol, - &mut reply, - &req, - &consensus_hash, - &block_hash, - sortdb, - chainstate, - mblock, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - if accepted { - // forward to peer network - let tip = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ); - ret = Some(StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: tip, - microblocks: vec![(*mblock).clone()], - })); - } - } - } - None - } - HttpRequestType::MemPoolQuery(ref _md, ref query, ref page_id_opt) => { - Some(ConversationHttp::handle_mempool_query( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - query.clone(), - network.connection_opts.mempool_max_tx_query, - network.burnchain_tip.canonical_stacks_tip_height, - page_id_opt.clone(), - )?) 
- } - HttpRequestType::OptionsPreflight(ref _md, ref _path) => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - &req, - Some(network.burnchain_tip.canonical_stacks_tip_height), - ); - let response = HttpResponseType::OptionsPreflight(response_metadata); - response - .send(&mut self.connection.protocol, &mut reply) - .map(|_| ())?; - None - } - HttpRequestType::GetIsTraitImplemented( - ref _md, - ref contract_addr, - ref contract_name, - ref trait_id, - ref tip_req, - ) => { - if let Some(tip) = ConversationHttp::handle_load_stacks_chain_tip( - &mut self.connection.protocol, - &mut reply, - &req, - tip_req, - sortdb, - chainstate, - network.burnchain_tip.canonical_stacks_tip_height, - )? { - ConversationHttp::handle_get_is_trait_implemented( - &mut self.connection.protocol, - &mut reply, - &req, - sortdb, - chainstate, - &tip, - contract_addr, - contract_name, - trait_id, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - } - None - } - HttpRequestType::GetStackerDBMetadata(ref _md, ref stackerdb_contract_id) => { - ConversationHttp::handle_get_stackerdb_metadata( - &mut self.connection.protocol, - &mut reply, - &req, - network.get_stackerdbs(), - stackerdb_contract_id, - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::GetStackerDBChunk( - ref _md, - ref stackerdb_contract_id, - ref slot_id, - ref slot_version_opt, - ) => { - ConversationHttp::handle_get_stackerdb_chunk( - &mut self.connection.protocol, - &mut reply, - &req, - network.get_stackerdbs(), - stackerdb_contract_id, - *slot_id, - slot_version_opt.clone(), - network.burnchain_tip.canonical_stacks_tip_height, - )?; - None - } - HttpRequestType::PostStackerDBChunk( - ref _md, - ref stackerdb_contract_id, - ref chunk_data, - ) => { - let tip_height = network.burnchain_tip.canonical_stacks_tip_height; - if let Ok(mut tx) = network.stackerdbs_tx_begin(stackerdb_contract_id) { - ret = ConversationHttp::handle_post_stackerdb_chunk( - &mut self.connection.protocol, - &mut reply, - &req, - &mut tx, - stackerdb_contract_id, - chunk_data, - tip_height, - )?; - tx.commit()?; - } else { - let response_metadata = - HttpResponseMetadata::from_http_request_type(&req, Some(tip_height)); - let resp = - HttpResponseType::NotFound(response_metadata, "No such StackerDB".into()); - resp.send(&mut self.connection.protocol, &mut reply) - .map(|_| ())?; - }; - None - } - HttpRequestType::ClientError(ref _md, ref err) => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - &req, - Some(network.burnchain_tip.canonical_stacks_tip_height), - ); - let response = match err { - ClientError::Message(s) => HttpResponseType::BadRequestJSON( - response_metadata, - serde_json::Value::String(s.to_string()), - ), - ClientError::NotFound(path) => { - HttpResponseType::NotFound(response_metadata, path.clone()) - } - }; - - response - .send(&mut self.connection.protocol, &mut reply) - .map(|_| ())?; - None - } - }; - - match stream_opt { - None => { - self.reply_streams.push_back((reply, None, keep_alive)); - } - Some(stream) => { - self.reply_streams.push_back(( - reply, - Some(( - HttpChunkedTransferWriterState::new(STREAM_CHUNK_SIZE as usize), - stream, - )), - keep_alive, - )); - } - } - Ok(ret) - } - - /// Make progress on outbound requests. 
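Most of the hunk above is the old handle_request dispatcher: one match arm per HttpRequestType variant, where each arm writes its reply into a relay handle, may hand back a message for the peer network (ret), and may hand back a stream that gets queued in reply_streams for chunked transfer. A stripped-down sketch of that shape, with toy request and outcome types standing in for the real ones:

    /// Toy request type standing in for HttpRequestType.
    enum Request {
        GetInfo,
        PostTransaction(String), // payload is just a string here
        GetBlock(u64),
    }

    /// What a single arm of the old dispatcher could produce.
    struct Dispatched {
        relay: Option<String>,   // message to forward to the peer network
        stream: Option<Vec<u8>>, // body to stream back in chunks, if any
    }

    fn handle_request(req: Request) -> Dispatched {
        match req {
            // Fully buffered reply, nothing to relay.
            Request::GetInfo => Dispatched { relay: None, stream: None },
            // Accepted transactions get echoed to the peer network.
            Request::PostTransaction(tx) => Dispatched { relay: Some(tx), stream: None },
            // Large objects are streamed rather than buffered.
            Request::GetBlock(height) => Dispatched {
                relay: None,
                stream: Some(format!("block #{height}").into_bytes()),
            },
        }
    }

    fn main() {
        let mut reply_streams: Vec<Vec<u8>> = Vec::new();
        let mut relayed: Vec<String> = Vec::new();

        let requests = [
            Request::GetInfo,
            Request::PostTransaction("tx-bytes".into()),
            Request::GetBlock(7),
        ];
        for req in requests {
            let out = handle_request(req);
            if let Some(msg) = out.relay {
                relayed.push(msg);
            }
            if let Some(body) = out.stream {
                // the real code pairs this with chunked-transfer writer state
                reply_streams.push(body);
            }
        }
        assert_eq!(relayed, vec!["tx-bytes".to_string()]);
        assert_eq!(reply_streams.len(), 1);
    }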
- fn send_outbound_responses( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { - // send out streamed responses in the order they were requested - let mut drained_handle = false; - let mut drained_stream = false; - let mut broken = false; - let mut do_keep_alive = true; - - test_debug!( - "{:?}: {} HTTP replies pending", - &self, - self.reply_streams.len() - ); - let _self_str = format!("{}", &self); - - match self.reply_streams.front_mut() { - Some((ref mut reply, ref mut stream_opt, ref keep_alive)) => { - do_keep_alive = *keep_alive; - - // if we're streaming, make some progress on the stream - match stream_opt { - Some((ref mut http_chunk_state, ref mut stream)) => { - let mut encoder = - HttpChunkedTransferWriter::from_writer_state(reply, http_chunk_state); - match stream.stream_to(mempool, chainstate, &mut encoder, STREAM_CHUNK_SIZE) - { - Ok(nw) => { - test_debug!("{}: Streamed {} bytes", &_self_str, nw); - if nw == 0 { - // EOF -- finish chunk and stop sending. - if !encoder.corked() { - encoder.flush().map_err(|e| { - test_debug!( - "{}: Write error on encoder flush: {:?}", - &_self_str, - &e - ); - net_error::WriteError(e) - })?; - - encoder.cork(); - - test_debug!("{}: Stream indicates EOF", &_self_str); - } - - // try moving some data to the connection only once we're done - // streaming - match reply.try_flush() { - Ok(res) => { - test_debug!( - "{}: Streamed reply is drained?: {}", - &_self_str, - res - ); - drained_handle = res; - } - Err(e) => { - // dead - warn!( - "{}: Broken HTTP connection: {:?}", - &_self_str, &e - ); - broken = true; - } - } - drained_stream = true; - } - } - Err(e) => { - // broken -- terminate the stream. - // For example, if we're streaming an unconfirmed block or - // microblock, the data can get moved to the chunk store out from - // under the stream. - warn!( - "{}: Failed to send to HTTP connection: {:?}", - &_self_str, &e - ); - broken = true; - } - } - } - None => { - // not streamed; all data is buffered - drained_stream = true; - - // try moving some data to the connection - match reply.try_flush() { - Ok(res) => { - test_debug!("{}: Reply is drained", &_self_str); - drained_handle = res; - } - Err(e) => { - // dead - warn!("{}: Broken HTTP connection: {:?}", &_self_str, &e); - broken = true; - } - } - } - } - } - None => {} - } - - if broken || (drained_handle && drained_stream) { - // done with this stream - test_debug!( - "{:?}: done with stream (broken={}, drained_handle={}, drained_stream={})", - &self, - broken, - drained_handle, - drained_stream - ); - self.total_reply_count += 1; - self.reply_streams.pop_front(); - - if !do_keep_alive { - // encountered "Connection: close" - self.keep_alive = false; - } - } - Ok(()) - } - - pub fn try_send_recv_response( - req: ReplyHandleHttp, - ) -> Result> { - match req.try_send_recv() { - Ok(message) => match message { - StacksHttpMessage::Request(_) => { - warn!("Received response: not a HTTP response"); - return Err(Err(net_error::InvalidMessage)); - } - StacksHttpMessage::Response(http_response) => Ok(http_response), - }, - Err(res) => Err(res), - } - } - - /// Make progress on our request/response - fn recv_inbound_response(&mut self) -> Result<(), net_error> { - // make progress on our pending request (if it exists). 
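The removed send_outbound_responses only ever works on the reply at the front of reply_streams: it advances the chunked stream if there is one, flushes the buffered handle, and retires the entry only when the socket broke or both the stream and the handle are drained, dropping keep-alive if that reply carried Connection: close. A minimal sketch of just that retirement bookkeeping, with a toy reply type in place of the real handles:

    use std::collections::VecDeque;

    /// Toy model of one queued reply: buffered data drained, streamed body
    /// drained, socket broken, and whether the request asked to keep the
    /// connection alive.
    struct PendingReply {
        drained_handle: bool,
        drained_stream: bool,
        broken: bool,
        keep_alive: bool,
    }

    struct Conversation {
        reply_streams: VecDeque<PendingReply>,
        keep_alive: bool,
        total_reply_count: u64,
    }

    impl Conversation {
        /// Mirrors the bookkeeping at the end of the removed
        /// send_outbound_responses: the front reply is retired only once it
        /// is broken or fully drained, and a reply without keep-alive drops
        /// keep-alive for the whole conversation.
        fn finish_front_if_done(&mut self) {
            let done = match self.reply_streams.front() {
                Some(front) => front.broken || (front.drained_handle && front.drained_stream),
                None => return,
            };
            if done {
                let front = self.reply_streams.pop_front().expect("front exists");
                self.total_reply_count += 1;
                if !front.keep_alive {
                    self.keep_alive = false;
                }
            }
        }
    }

    fn main() {
        let mut convo = Conversation {
            reply_streams: VecDeque::from([PendingReply {
                drained_handle: true,
                drained_stream: true,
                broken: false,
                keep_alive: false,
            }]),
            keep_alive: true,
            total_reply_count: 0,
        };
        convo.finish_front_if_done();
        assert_eq!(convo.total_reply_count, 1);
        assert!(!convo.keep_alive);
    }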
- let inprogress = self.pending_request.is_some(); - let is_pending = self.pending_response.is_none(); - - let pending_request = self.pending_request.take(); - let response = match pending_request { - None => Ok(self.pending_response.take()), - Some(req) => match ConversationHttp::try_send_recv_response(req) { - Ok(response) => Ok(Some(response)), - Err(res) => match res { - Ok(handle) => { - // try again - self.pending_request = Some(handle); - Ok(self.pending_response.take()) - } - Err(e) => Err(e), - }, - }, - }?; - - self.pending_response = response; - - if inprogress && self.pending_request.is_none() { - test_debug!( - "{:?},id={}: HTTP request finished", - &self.peer_host, - self.conn_id - ); - } - - if is_pending && self.pending_response.is_some() { - test_debug!( - "{:?},id={}: HTTP response finished", - &self.peer_host, - self.conn_id - ); - } - - Ok(()) - } - - /// Try to get our response - pub fn try_get_response(&mut self) -> Option { - self.pending_response.take() - } - - /// Make progress on in-flight messages. - pub fn try_flush( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result<(), net_error> { - self.send_outbound_responses(mempool, chainstate)?; - self.recv_inbound_response()?; - Ok(()) - } - - /// Is the connection idle? - pub fn is_idle(&self) -> bool { - self.pending_response.is_none() - && self.connection.inbox_len() == 0 - && self.connection.outbox_len() == 0 - && self.reply_streams.len() == 0 - } - - /// Is the conversation out of pending data? - /// Don't consider it drained if we haven't received anything yet - pub fn is_drained(&self) -> bool { - ((self.total_request_count > 0 && self.total_reply_count > 0) - || self.pending_error_response.is_some()) - && self.is_idle() - } - - /// Should the connection be kept alive even if drained? - pub fn is_keep_alive(&self) -> bool { - self.keep_alive - } - - /// When was the last time we got an inbound request? - pub fn get_last_request_time(&self) -> u64 { - self.last_request_timestamp - } - - /// When was the last time we sent data as part of an outbound response? - pub fn get_last_response_time(&self) -> u64 { - self.last_response_timestamp - } - - /// When was this converation conencted? - pub fn get_connection_time(&self) -> u64 { - self.connection_time - } - - /// Make progress on in-flight requests and replies. - /// Returns the list of transactions we'll need to forward to the peer network - pub fn chat( - &mut self, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, - handler_args: &RPCHandlerArgs, - ) -> Result, net_error> { - // if we have an in-flight error, then don't take any more requests. 
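recv_inbound_response (removed above) drives a small retry state machine: try_send_recv_response either yields the finished HTTP response, hands the reply handle back wrapped in Err(Ok(..)) so the caller can retry on the next pass, or surfaces a hard error. A self-contained sketch of that shape; Handle and the string payloads are stand-ins for the real reply-handle and response types:

    /// Toy stand-in: a "handle" that may or may not have a response ready.
    struct Handle {
        ready: Option<String>,
    }

    /// Same shape as the removed try_send_recv_response: Ok(response) when
    /// the reply arrived, Err(Ok(handle)) for "not yet, keep the handle and
    /// retry", Err(Err(e)) for a hard failure.
    fn try_send_recv(mut handle: Handle) -> Result<String, Result<Handle, String>> {
        match handle.ready.take() {
            Some(resp) => Ok(resp),
            None => Err(Ok(handle)),
        }
    }

    struct Convo {
        pending_request: Option<Handle>,
        pending_response: Option<String>,
    }

    impl Convo {
        /// Mirrors recv_inbound_response: poll the pending request once;
        /// either it resolves into pending_response, or the handle goes back.
        fn recv_inbound_response(&mut self) -> Result<(), String> {
            if let Some(handle) = self.pending_request.take() {
                match try_send_recv(handle) {
                    Ok(resp) => self.pending_response = Some(resp),
                    Err(Ok(handle)) => self.pending_request = Some(handle),
                    Err(Err(e)) => return Err(e),
                }
            }
            Ok(())
        }
    }

    fn main() {
        let mut convo = Convo {
            pending_request: Some(Handle { ready: None }),
            pending_response: None,
        };
        convo.recv_inbound_response().unwrap();
        assert!(convo.pending_request.is_some()); // not ready yet: handle kept

        convo.pending_request = Some(Handle { ready: Some("200 OK".into()) });
        convo.recv_inbound_response().unwrap();
        assert_eq!(convo.pending_response.as_deref(), Some("200 OK"));
    }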
- if self.pending_error_response.is_some() { - return Ok(vec![]); - } - - // handle in-bound HTTP request(s) - let num_inbound = self.connection.inbox_len(); - let mut ret = vec![]; - test_debug!("{:?}: {} HTTP requests pending", &self, num_inbound); - - for _i in 0..num_inbound { - let msg = match self.connection.next_inbox_message() { - None => { - continue; - } - Some(m) => m, - }; - - match msg { - StacksHttpMessage::Request(req) => { - // new request - self.total_request_count += 1; - self.last_request_timestamp = get_epoch_time_secs(); - if req.metadata().canonical_stacks_tip_height.is_some() { - test_debug!( - "Request metadata: canonical stacks tip height is {:?}", - &req.metadata().canonical_stacks_tip_height - ); - self.canonical_stacks_tip_height = - req.metadata().canonical_stacks_tip_height; - } - let start_time = Instant::now(); - let path = req.get_path(); - let msg_opt = monitoring::instrument_http_request_handler(req, |req| { - self.handle_request(req, network, sortdb, chainstate, mempool, handler_args) - })?; - - debug!("Processed HTTPRequest"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); - - if let Some(msg) = msg_opt { - ret.push(msg); - } - } - StacksHttpMessage::Response(resp) => { - // Is there someone else waiting for this message? If so, pass it along. - // (this _should_ be our pending_request handle) - if resp.metadata().canonical_stacks_tip_height.is_some() { - test_debug!( - "Response metadata: canonical stacks tip height is {:?}", - &resp.metadata().canonical_stacks_tip_height - ); - self.canonical_stacks_tip_height = - resp.metadata().canonical_stacks_tip_height; - } - match self - .connection - .fulfill_request(StacksHttpMessage::Response(resp)) - { - None => { - test_debug!("{:?}: Fulfilled pending HTTP request", &self); - } - Some(_msg) => { - // unsolicited; discard - test_debug!("{:?}: Dropping unsolicited HTTP response", &self); - } - } - } - } - } - - Ok(ret) - } - - /// Remove all timed-out messages, and ding the remote peer as unhealthy - pub fn clear_timeouts(&mut self) -> () { - self.connection.drain_timeouts(); - } - - /// Load data into our HTTP connection - pub fn recv(&mut self, r: &mut R) -> Result { - let mut total_recv = 0; - loop { - let nrecv = match self.connection.recv_data(r) { - Ok(nr) => nr, - Err(e) => { - debug!("{:?}: failed to recv: {:?}", self, &e); - return Err(e); - } - }; - - total_recv += nrecv; - if nrecv > 0 { - self.last_request_timestamp = get_epoch_time_secs(); - } else { - break; - } - } - monitoring::update_inbound_rpc_bandwidth(total_recv as i64); - Ok(total_recv) - } - - /// Write data out of our HTTP connection. 
Write as much as we can - pub fn send( - &mut self, - w: &mut W, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Result { - let mut total_sz = 0; - loop { - // prime the Write - self.try_flush(mempool, chainstate)?; - - let sz = match self.connection.send_data(w) { - Ok(sz) => sz, - Err(e) => { - info!("{:?}: failed to send on HTTP conversation: {:?}", self, &e); - return Err(e); - } - }; - - total_sz += sz; - if sz > 0 { - self.last_response_timestamp = get_epoch_time_secs(); - } else { - break; - } - } - monitoring::update_inbound_rpc_bandwidth(total_sz as i64); - Ok(total_sz) - } - - /// Make a new getinfo request to this endpoint - pub fn new_getinfo(&self, stacks_height: Option) -> HttpRequestType { - HttpRequestType::GetInfo(HttpRequestMetadata::from_host( - self.peer_host.clone(), - stacks_height, - )) - } - - /// Make a new getinfo request to this endpoint - pub fn new_getpoxinfo(&self, tip_req: TipRequest) -> HttpRequestType { - HttpRequestType::GetPoxInfo( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - tip_req, - ) - } - - /// Make a new getneighbors request to this endpoint - pub fn new_getneighbors(&self) -> HttpRequestType { - HttpRequestType::GetNeighbors(HttpRequestMetadata::from_host(self.peer_host.clone(), None)) - } - - /// Make a new getheaders request to this endpoint - pub fn new_getheaders(&self, quantity: u64, tip_req: TipRequest) -> HttpRequestType { - HttpRequestType::GetHeaders( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - quantity, - tip_req, - ) - } - - /// Make a new getblock request to this endpoint - pub fn new_getblock(&self, index_block_hash: StacksBlockId) -> HttpRequestType { - HttpRequestType::GetBlock( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - index_block_hash, - ) - } - - /// Make a new get-microblocks request to this endpoint - pub fn new_getmicroblocks_indexed( - &self, - index_microblock_hash: StacksBlockId, - ) -> HttpRequestType { - HttpRequestType::GetMicroblocksIndexed( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - index_microblock_hash, - ) - } - - /// Make a new get-microblocks-confirmed request to this endpoint - pub fn new_getmicroblocks_confirmed( - &self, - index_anchor_block_hash: StacksBlockId, - ) -> HttpRequestType { - HttpRequestType::GetMicroblocksConfirmed( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - index_anchor_block_hash, - ) - } - - /// Make a new get-microblocks request for unconfirmed microblocks - pub fn new_getmicroblocks_unconfirmed( - &self, - anchored_index_block_hash: StacksBlockId, - min_seq: u16, - ) -> HttpRequestType { - HttpRequestType::GetMicroblocksUnconfirmed( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - anchored_index_block_hash, - min_seq, - ) - } - - /// Make a new get-unconfirmed-tx request - pub fn new_gettransaction_unconfirmed(&self, txid: Txid) -> HttpRequestType { - HttpRequestType::GetTransactionUnconfirmed( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - txid, - ) - } - - /// Make a new post-transaction request - pub fn new_post_transaction(&self, tx: StacksTransaction) -> HttpRequestType { - HttpRequestType::PostTransaction( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - tx, - None, - ) - } - - /// Make a new post-block request - pub fn new_post_block(&self, ch: ConsensusHash, block: StacksBlock) -> HttpRequestType { - HttpRequestType::PostBlock( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - ch, - 
block, - ) - } - - /// Make a new post-microblock request - pub fn new_post_microblock( - &self, - mblock: StacksMicroblock, - tip_req: TipRequest, - ) -> HttpRequestType { - HttpRequestType::PostMicroblock( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - mblock, - tip_req, - ) - } - - /// Make a new request for an account - pub fn new_getaccount( - &self, - principal: PrincipalData, - tip_req: TipRequest, - with_proof: bool, - ) -> HttpRequestType { - HttpRequestType::GetAccount( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - principal, - tip_req, - with_proof, - ) - } - - /// Make a new request for a data var - pub fn new_getdatavar( - &self, - contract_addr: StacksAddress, - contract_name: ContractName, - var_name: ClarityName, - tip_req: TipRequest, - with_proof: bool, - ) -> HttpRequestType { - HttpRequestType::GetDataVar( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_addr, - contract_name, - var_name, - tip_req, - with_proof, - ) - } - - pub fn new_getconstantval( - &self, - contract_add: StacksAddress, - contract_name: ContractName, - constant_name: ClarityName, - tip_req: TipRequest, - ) -> HttpRequestType { - HttpRequestType::GetConstantVal( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_add, - contract_name, - constant_name, - tip_req, - ) - } - - /// Make a new request for a data map - pub fn new_getmapentry( - &self, - contract_addr: StacksAddress, - contract_name: ContractName, - map_name: ClarityName, - key: Value, - tip_req: TipRequest, - with_proof: bool, - ) -> HttpRequestType { - HttpRequestType::GetMapEntry( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_addr, - contract_name, - map_name, - key, - tip_req, - with_proof, - ) - } - - /// Make a new request to get a contract's source - pub fn new_getcontractsrc( - &self, - contract_addr: StacksAddress, - contract_name: ContractName, - tip_req: TipRequest, - with_proof: bool, - ) -> HttpRequestType { - HttpRequestType::GetContractSrc( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_addr, - contract_name, - tip_req, - with_proof, - ) - } - - /// Make a new request to get a contract's ABI - pub fn new_getcontractabi( - &self, - contract_addr: StacksAddress, - contract_name: ContractName, - tip_req: TipRequest, - ) -> HttpRequestType { - HttpRequestType::GetContractABI( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_addr, - contract_name, - tip_req, - ) - } - - /// Make a new request to run a read-only function - pub fn new_callreadonlyfunction( - &self, - contract_addr: StacksAddress, - contract_name: ContractName, - sender: PrincipalData, - sponsor: Option, - function_name: ClarityName, - function_args: Vec, - tip_req: TipRequest, - ) -> HttpRequestType { - HttpRequestType::CallReadOnlyFunction( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - contract_addr, - contract_name, - sender, - sponsor, - function_name, - function_args, - tip_req, - ) - } - - /// Make a new request for attachment inventory page - pub fn new_getattachmentsinv( - &self, - index_block_hash: StacksBlockId, - pages_indexes: HashSet, - ) -> HttpRequestType { - HttpRequestType::GetAttachmentsInv( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - index_block_hash, - pages_indexes, - ) - } - - /// Make a new request for mempool contents - pub fn new_mempool_query( - &self, - query: MemPoolSyncData, - page_id_opt: Option, - ) -> HttpRequestType { - 
HttpRequestType::MemPoolQuery( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - query, - page_id_opt, - ) - } - - /// Make a request for a stackerDB's metadata - pub fn new_get_stackerdb_metadata( - &self, - stackerdb_contract_id: QualifiedContractIdentifier, - ) -> HttpRequestType { - HttpRequestType::GetStackerDBMetadata( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - stackerdb_contract_id, - ) - } - - /// Make a request for a stackerDB's chunk - pub fn new_get_stackerdb_chunk( - &self, - stackerdb_contract_id: QualifiedContractIdentifier, - slot_id: u32, - slot_version: Option, - ) -> HttpRequestType { - HttpRequestType::GetStackerDBChunk( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - stackerdb_contract_id, - slot_id, - slot_version, - ) - } - - /// Make a new post for a stackerDB's chunk - pub fn new_post_stackerdb_chunk( - &self, - stackerdb_contract_id: QualifiedContractIdentifier, - slot_id: u32, - slot_version: u32, - sig: MessageSignature, - data: Vec, - ) -> HttpRequestType { - HttpRequestType::PostStackerDBChunk( - HttpRequestMetadata::from_host(self.peer_host.clone(), None), - stackerdb_contract_id, - StackerDBChunkData { - slot_id, - slot_version, - sig, - data, - }, - ) - } -} - -#[cfg(test)] -mod test { - use std::cell::RefCell; - use std::convert::TryInto; - use std::iter::FromIterator; - - use clarity::vm::types::*; - use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; - use stacks_common::address::*; - use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash}; - use stacks_common::util::get_epoch_time_secs; - use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; - use stacks_common::util::pipe::*; - - use super::*; - use crate::burnchains::bitcoin::indexer::BitcoinIndexer; - use crate::burnchains::{Burnchain, BurnchainView, *}; - use crate::chainstate::burn::ConsensusHash; - use crate::chainstate::stacks::db::blocks::test::*; - use crate::chainstate::stacks::db::StacksChainState; - use crate::chainstate::stacks::miner::*; - use crate::chainstate::stacks::test::*; - use crate::chainstate::stacks::{ - Error as chain_error, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, *, - }; - use crate::core::mempool::{BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS}; - use crate::net::codec::*; - use crate::net::http::*; - use crate::net::stream::*; - use crate::net::test::*; - use crate::net::*; - - const TEST_CONTRACT: &'static str = " - (define-constant cst 123) - (define-data-var bar int 0) - (define-map unit-map { account: principal } { units: int }) - (define-public (get-bar) (ok (var-get bar))) - (define-public (set-bar (x int) (y int)) - (begin (var-set bar (/ x y)) (ok (var-get bar)))) - (define-public (add-unit) - (begin - (map-set unit-map { account: tx-sender } { units: 1 } ) - (var-set bar 1) - (ok 1))) - (begin - (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) - - ;; stacker DB - (define-read-only (stackerdb-get-signer-slots) - (ok (list - { - signer: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R, - num-slots: u3 - } - { - signer: 'STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW, - num-slots: u3 - }))) - - (define-read-only (stackerdb-get-config) - (ok { - chunk-size: u4096, - write-freq: u0, - max-writes: u4096, - max-neighbors: u32, - hint-replicas: (list ) - })) - "; - - const TEST_CONTRACT_UNCONFIRMED: &'static str = "(define-read-only (ro-test) (ok 1))"; - - fn convo_send_recv( - sender: &mut ConversationHttp, - sender_mempool: &MemPoolDB, - 
sender_chainstate: &mut StacksChainState, - receiver: &mut ConversationHttp, - receiver_mempool: &MemPoolDB, - receiver_chainstate: &mut StacksChainState, - ) -> () { - let (mut pipe_read, mut pipe_write) = Pipe::new(); - pipe_read.set_nonblocking(true); - - loop { - let res = true; - - sender.try_flush(sender_mempool, sender_chainstate).unwrap(); - receiver - .try_flush(sender_mempool, receiver_chainstate) - .unwrap(); - - pipe_write.try_flush().unwrap(); - - let all_relays_flushed = - receiver.num_pending_outbound() == 0 && sender.num_pending_outbound() == 0; - - let nw = sender - .send(&mut pipe_write, sender_mempool, sender_chainstate) - .unwrap(); - let nr = receiver.recv(&mut pipe_read).unwrap(); - - test_debug!( - "res = {}, all_relays_flushed = {} ({},{}), nr = {}, nw = {}", - res, - all_relays_flushed, - receiver.num_pending_outbound(), - sender.num_pending_outbound(), - nr, - nw - ); - if res && all_relays_flushed && nr == 0 && nw == 0 { - test_debug!("Breaking send_recv"); - break; - } - } - } - - /// General testing function to test RPC calls. - /// This function sets up two peers, a client and a server. - /// It takes in a function of type F that generates the request to be sent to the server - /// It takes in another function of type C that verifies that the result from - /// the server is as expected. - /// The parameter `include_microblocks` determines whether a microblock stream is mined or not. - fn test_rpc( - test_name: &str, - peer_1_p2p: u16, - peer_1_http: u16, - peer_2_p2p: u16, - peer_2_http: u16, - include_microblocks: bool, - make_request: F, - check_result: C, - ) -> () - where - F: FnOnce( - &mut TestPeer, - &mut ConversationHttp, - &mut TestPeer, - &mut ConversationHttp, - ) -> HttpRequestType, - C: FnOnce( - &HttpRequestType, - &HttpResponseType, - &mut TestPeer, - &mut TestPeer, - &ConversationHttp, - &ConversationHttp, - ) -> bool, - { - // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - // STVN97YYA10MY5F6KQJHKNYJNM24C4A1AT39WRW - let privk2 = StacksPrivateKey::from_hex( - "94c319327cc5cd04da7147d32d836eb2e4c44f4db39aa5ede7314a761183d0c701", - ) - .unwrap(); - let microblock_privkey = StacksPrivateKey::new(); - let microblock_pubkeyhash = - Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - - let addr1 = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&privk1)], - ) - .unwrap(); - let addr2 = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&privk2)], - ) - .unwrap(); - - let mut peer_1_config = TestPeerConfig::new(test_name, peer_1_p2p, peer_1_http); - let mut peer_2_config = TestPeerConfig::new(test_name, peer_2_p2p, peer_2_http); - - // stacker DBs get initialized thru reconfiguration when the above block gets processed - peer_1_config.add_stacker_db( - QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), - StackerDBConfig::noop(), - ); - peer_2_config.add_stacker_db( - QualifiedContractIdentifier::new(addr1.clone().into(), "hello-world".into()), - StackerDBConfig::noop(), - ); - - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); - let peer_2_indexer = BitcoinIndexer::new_unit_test(&peer_2_config.burnchain.working_dir); - - 
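The removed convo_send_recv is the test harness's I/O pump: it keeps flushing both conversations and shuttling the sender's bytes through an in-memory pipe into the receiver until both sides report no pending relays and a full pass moves zero bytes. A toy version of that fixed-point loop, using plain byte buffers in place of ConversationHttp and Pipe (the names below are illustrative, not the real test API):

    /// Toy endpoint: bytes queued to send, bytes received so far.
    struct Endpoint {
        outbox: Vec<u8>,
        inbox: Vec<u8>,
    }

    impl Endpoint {
        /// Move everything in the outbox to the peer; report how much moved.
        fn send_to(&mut self, peer: &mut Endpoint) -> usize {
            let n = self.outbox.len();
            peer.inbox.extend_from_slice(&self.outbox);
            self.outbox.clear();
            n
        }
        fn pending(&self) -> usize {
            self.outbox.len()
        }
    }

    /// Pump both directions until a full pass moves nothing and neither side
    /// has anything queued -- the same fixed point the removed helper waits
    /// for before breaking out of its loop.
    fn pump(a: &mut Endpoint, b: &mut Endpoint) {
        loop {
            let moved = a.send_to(b) + b.send_to(a);
            if moved == 0 && a.pending() == 0 && b.pending() == 0 {
                break;
            }
        }
    }

    fn main() {
        let mut client = Endpoint { outbox: b"GET /v2/info".to_vec(), inbox: Vec::new() };
        let mut server = Endpoint { outbox: Vec::new(), inbox: Vec::new() };
        pump(&mut client, &mut server);
        assert_eq!(server.inbox, b"GET /v2/info".to_vec());
    }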
peer_1_config.initial_balances = vec![ - (addr1.to_account_principal(), 1000000000), - (addr2.to_account_principal(), 1000000000), - ]; - - peer_2_config.initial_balances = vec![ - (addr1.to_account_principal(), 1000000000), - (addr2.to_account_principal(), 1000000000), - ]; - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - // mine one block with a contract in it - // first the coinbase - // make a coinbase for this miner - let mut tx_coinbase = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), - ); - tx_coinbase.chain_id = 0x80000000; - tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; - tx_coinbase.auth.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); - tx_signer.sign_origin(&privk1).unwrap(); - let tx_coinbase_signed = tx_signer.get_tx().unwrap(); - - // next the contract - let contract = TEST_CONTRACT.clone(); - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::new_smart_contract( - &format!("hello-world"), - &contract.to_string(), - None, - ) - .unwrap(), - ); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(1); - tx_contract.set_tx_fee(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - tx_signer.sign_origin(&privk1).unwrap(); - let tx_contract_signed = tx_signer.get_tx().unwrap(); - - // update account and state in a microblock that will be unconfirmed - let mut tx_cc = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::new_contract_call(addr1.clone(), "hello-world", "add-unit", vec![]) - .unwrap(), - ); - - tx_cc.chain_id = 0x80000000; - tx_cc.auth.set_origin_nonce(2); - tx_cc.set_tx_fee(123); - - let mut tx_signer = StacksTransactionSigner::new(&tx_cc); - tx_signer.sign_origin(&privk1).unwrap(); - let tx_cc_signed = tx_signer.get_tx().unwrap(); - let tx_cc_len = { - let mut bytes = vec![]; - tx_cc_signed.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - // make an unconfirmed contract - let unconfirmed_contract = TEST_CONTRACT_UNCONFIRMED.clone(); - let mut tx_unconfirmed_contract = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::new_smart_contract( - &format!("hello-world-unconfirmed"), - &unconfirmed_contract.to_string(), - None, - ) - .unwrap(), - ); - - tx_unconfirmed_contract.chain_id = 0x80000000; - tx_unconfirmed_contract.auth.set_origin_nonce(3); - tx_unconfirmed_contract.set_tx_fee(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx_unconfirmed_contract); - tx_signer.sign_origin(&privk1).unwrap(); - let tx_unconfirmed_contract_signed = tx_signer.get_tx().unwrap(); - let tx_unconfirmed_contract_len = { - let mut bytes = vec![]; - tx_unconfirmed_contract_signed - .consensus_serialize(&mut bytes) - .unwrap(); - bytes.len() as u64 - }; - - let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let mut anchor_cost = ExecutionCost::zero(); - let mut anchor_size = 0; - - // make a block and a microblock. 
- // Put the coinbase and smart-contract in the anchored block. - // Put the contract-call in the microblock - let (burn_ops, stacks_block, microblocks) = peer_1.make_tenure( - |ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, _| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &parent_tip, - vrf_proof, - tip.total_burn, - microblock_pubkeyhash, - ) - .unwrap(); - let (anchored_block, anchored_block_size, anchored_block_cost) = - StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![tx_coinbase_signed.clone(), tx_contract_signed.clone()], - ) - .unwrap(); - - anchor_size = anchored_block_size; - anchor_cost = anchored_block_cost; - - (anchored_block, vec![]) - }, - ); - - let (_, _, consensus_hash) = peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, &vec![]); - peer_2.process_stacks_epoch_at_tip(&stacks_block, &vec![]); - - // begin microblock section - if include_microblocks { - // build 1-block microblock stream with the contract-call and the unconfirmed contract - let microblock = { - let sortdb = peer_1.sortdb.take().unwrap(); - Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); - let mblock = { - let sort_iconn = sortdb.index_conn(); - let mut microblock_builder = StacksMicroblockBuilder::new( - stacks_block.block_hash(), - consensus_hash.clone(), - peer_1.chainstate(), - &sort_iconn, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - let microblock = microblock_builder - .mine_next_microblock_from_txs( - vec![ - (tx_cc_signed, tx_cc_len), - (tx_unconfirmed_contract_signed, tx_unconfirmed_contract_len), - ], - µblock_privkey, - ) - .unwrap(); - microblock - }; - peer_1.sortdb = Some(sortdb); - mblock - }; - - // store microblock stream - peer_1 - .chainstate() - .preprocess_streamed_microblock( - &consensus_hash, - &stacks_block.block_hash(), - µblock, - ) - .unwrap(); - peer_2 - .chainstate() - .preprocess_streamed_microblock( - &consensus_hash, - &stacks_block.block_hash(), - µblock, - ) - .unwrap(); - - // process microblock stream to generate unconfirmed state - let canonical_tip = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - let sortdb1 = peer_1.sortdb.take().unwrap(); - let sortdb2 = peer_2.sortdb.take().unwrap(); - peer_1 - .chainstate() - .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) - .unwrap(); - peer_2 - .chainstate() - .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) - .unwrap(); - peer_1.sortdb = Some(sortdb1); - peer_2.sortdb = Some(sortdb2); - } - // end microblock section - - // stuff some transactions into peer_2's mempool - // (relates to mempool query tests) - let mut mempool = peer_2.mempool.take().unwrap(); - let mut mempool_tx = mempool.tx_begin().unwrap(); - for i in 0..10 { - let pk = 
StacksPrivateKey::new(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&StacksPrivateKey::new())], - ) - .unwrap(); - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &consensus_hash, - &stacks_block.block_hash(), - txid.clone(), - tx_bytes, - tx_fee, - stacks_block.header.total_work.work, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - } - mempool_tx.commit().unwrap(); - peer_2.mempool.replace(mempool); - - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - let _ = peer_1 - .network - .refresh_burnchain_view( - &peer_1_indexer, - &peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - false, - ) - .unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); - - let peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); - let _ = peer_2 - .network - .refresh_burnchain_view( - &peer_2_indexer, - &peer_2_sortdb, - &mut peer_2_stacks_node.chainstate, - false, - ) - .unwrap(); - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); - - let view_1 = peer_1.get_burnchain_view().unwrap(); - let view_2 = peer_2.get_burnchain_view().unwrap(); - - let mut convo_1 = ConversationHttp::new( - format!("127.0.0.1:{}", peer_1_http) - .parse::() - .unwrap(), - Some(UrlString::try_from(format!("http://peer1.com")).unwrap()), - peer_1.to_peer_host(), - &peer_1.config.connection_opts, - 0, - ); - - let mut convo_2 = ConversationHttp::new( - format!("127.0.0.1:{}", peer_2_http) - .parse::() - .unwrap(), - Some(UrlString::try_from(format!("http://peer2.com")).unwrap()), - peer_2.to_peer_host(), - &peer_2.config.connection_opts, - 1, - ); - - let req = make_request(&mut peer_1, &mut convo_1, &mut peer_2, &mut convo_2); - - convo_1.send_request(req.clone()).unwrap(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let peer_2_mempool = peer_2.mempool.take().unwrap(); - - test_debug!("convo1 sends to convo2"); - convo_send_recv( - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - ); - - // hack around the borrow-checker - let mut peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - - Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) - .unwrap(); - - convo_1 - .chat( - &mut peer_1.network, - &mut peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - &mut peer_1_mempool, - 
&RPCHandlerArgs::default(), - ) - .unwrap(); - - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); - peer_1.mempool = Some(peer_1_mempool); - peer_2.mempool = Some(peer_2_mempool); - - test_debug!("convo2 sends to convo1"); - - // hack around the borrow-checker - let mut peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); - let mut peer_2_mempool = peer_2.mempool.take().unwrap(); - - let _ = peer_2 - .network - .refresh_burnchain_view( - &peer_2_indexer, - &peer_2_sortdb, - &mut peer_2_stacks_node.chainstate, - false, - ) - .unwrap(); - - Relayer::setup_unconfirmed_state(&mut peer_2_stacks_node.chainstate, &peer_2_sortdb) - .unwrap(); - - convo_2 - .chat( - &mut peer_2.network, - &mut peer_2_sortdb, - &mut peer_2_stacks_node.chainstate, - &mut peer_2_mempool, - &RPCHandlerArgs::default(), - ) - .unwrap(); - - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - - convo_send_recv( - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - ); - - test_debug!("flush convo1"); - - // hack around the borrow-checker - convo_send_recv( - &mut convo_1, - &peer_1_mempool, - peer_1.chainstate(), - &mut convo_2, - &peer_2_mempool, - peer_2.chainstate(), - ); - - peer_2.mempool = Some(peer_2_mempool); - - let mut peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - - let _ = peer_1 - .network - .refresh_burnchain_view( - &peer_1_indexer, - &peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - false, - ) - .unwrap(); - - Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) - .unwrap(); - - convo_1 - .chat( - &mut peer_1.network, - &mut peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - &mut peer_1_mempool, - &RPCHandlerArgs::default(), - ) - .unwrap(); - - convo_1 - .try_flush(&peer_1_mempool, &mut peer_1_stacks_node.chainstate) - .unwrap(); - - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); - peer_1.mempool = Some(peer_1_mempool); - - // should have gotten a reply - let resp_opt = convo_1.try_get_response(); - assert!(resp_opt.is_some()); - - let resp = resp_opt.unwrap(); - assert!(check_result( - &req, - &resp, - &mut peer_1, - &mut peer_2, - &convo_1, - &convo_2 - )); - } - - /// This test tests two things: - /// (1) the get info RPC call - /// (2) whether the ConversationHttp object gets correctly updated with a peer's canonical - /// stacks tip height, which is sent in HTTP headers as part of the request/response - #[test] - #[ignore] - fn test_rpc_getinfo() { - let peer_server_info = RefCell::new(None); - let client_stacks_height = 17; - test_rpc( - function_name!(), - 40000, - 40001, - 50000, - 50001, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let peer_info = RPCPeerInfoData::from_network( - &peer_server.network, - &peer_server.stacks_node.as_ref().unwrap().chainstate, - None, - &Sha256Sum::zero(), - ); - - *peer_server_info.borrow_mut() = Some(peer_info); - - convo_client.new_getinfo(Some(client_stacks_height)) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - assert_eq!(convo_client.canonical_stacks_tip_height, 
Some(1)); - assert_eq!( - convo_server.canonical_stacks_tip_height, - Some(client_stacks_height) - ); - match http_response { - HttpResponseType::PeerInfo(response_md, peer_data) => { - assert_eq!(Some((*peer_data).clone()), *peer_server_info.borrow()); - assert!(peer_data.node_public_key.is_some()); - assert!(peer_data.node_public_key_hash.is_some()); - assert_eq!( - peer_data.node_public_key_hash, - Some(Hash160::from_node_public_key( - &peer_data - .node_public_key - .clone() - .unwrap() - .to_public_key() - .unwrap() - )) - ); - assert!(peer_data.stackerdbs.is_some()); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_getpoxinfo() { - // Test v2/pox (aka GetPoxInfo) endpoint. - // In this test, `tip_req` is set to UseLatestAnchoredTip. - // Thus, the query for pox info will be against the canonical Stacks tip, which we expect to succeed. - let pox_server_info = RefCell::new(None); - test_rpc( - function_name!(), - 40002, - 40003, - 50002, - 50003, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let mut sortdb = peer_server.sortdb.as_mut().unwrap(); - let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; - let stacks_block_id = { - let tip = chainstate.get_stacks_chain_tip(sortdb).unwrap().unwrap(); - StacksBlockHeader::make_index_block_hash( - &tip.consensus_hash, - &tip.anchored_block_hash, - ) - }; - let pox_info = RPCPoxInfoData::from_db( - &mut sortdb, - chainstate, - &stacks_block_id, - &peer_client.config.burnchain, - ) - .unwrap(); - *pox_server_info.borrow_mut() = Some(pox_info); - convo_client.new_getpoxinfo(TipRequest::UseLatestAnchoredTip) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - convo_client, - convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::PoxInfo(response_md, pox_data) => { - assert_eq!(Some((*pox_data).clone()), *pox_server_info.borrow()); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_getpoxinfo_use_latest_tip() { - // Test v2/pox (aka GetPoxInfo) endpoint. - // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for pox - // info against the unconfirmed state will succeed. 
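Note: the PoX-info tests differ only in which tip the query is resolved against. Several of the tests that follow pin their query to the unconfirmed state by reading the unconfirmed chain tip that `Relayer::setup_unconfirmed_state` produced and passing it as `TipRequest::SpecificTip`. A sketch of that pattern under the same assumptions as the removed code (the helper name is illustrative):

    // Read the unconfirmed chain tip and pin a query to it, as several tests do.
    fn unconfirmed_tip_request(chainstate: &StacksChainState) -> TipRequest {
        let tip = chainstate
            .unconfirmed_state
            .as_ref()
            .expect("unconfirmed state must be set up before querying it")
            .unconfirmed_chain_tip
            .clone();
        TipRequest::SpecificTip(tip)
    }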
- let pox_server_info = RefCell::new(None); - test_rpc( - function_name!(), - 40004, - 40005, - 50004, - 50005, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let mut sortdb = peer_server.sortdb.as_mut().unwrap(); - let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; - let stacks_block_id = chainstate - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - let pox_info = RPCPoxInfoData::from_db( - &mut sortdb, - chainstate, - &stacks_block_id, - &peer_client.config.burnchain, - ) - .unwrap(); - *pox_server_info.borrow_mut() = Some(pox_info); - convo_client.new_getpoxinfo(TipRequest::UseLatestUnconfirmedTip) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::PoxInfo(response_md, pox_data) => { - assert_eq!(Some((*pox_data).clone()), *pox_server_info.borrow()); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_getneighbors() { - test_rpc( - function_name!(), - 40010, - 40011, - 50010, - 50011, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { convo_client.new_getneighbors() }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::Neighbors(response_md, neighbor_info) => { - assert_eq!(neighbor_info.sample.len(), 1); - assert_eq!(neighbor_info.sample[0].port, peer_client.config.server_port); // we see ourselves as the neighbor - assert_eq!(neighbor_info.bootstrap.len(), 1); - assert_eq!( - neighbor_info.bootstrap[0].port, - peer_client.config.server_port - ); // we see ourselves as the bootstrap - for n in neighbor_info.sample.iter() { - assert!(n.stackerdbs.is_some()); - } - for n in neighbor_info.bootstrap.iter() { - assert!(n.stackerdbs.is_some()); - } - for n in neighbor_info.inbound.iter() { - assert!(n.stackerdbs.is_some()); - } - for n in neighbor_info.outbound.iter() { - assert!(n.stackerdbs.is_some()); - } - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_getheaders() { - let server_blocks_cell = RefCell::new(None); - - test_rpc( - function_name!(), - 40012, - 40013, - 50012, - 50013, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // have "server" peer store a few continuous block to staging - let mut blocks: Vec = vec![]; - let mut index_block_hashes = vec![]; - for i in 0..25 { - let mut peer_server_block = make_codec_test_block(25); - - peer_server_block.header.total_work.work = (i + 1) as u64; - peer_server_block.header.total_work.burn = (i + 1) as u64; - peer_server_block.header.parent_block = blocks - .last() - .map(|blk| blk.block_hash()) - .unwrap_or(BlockHeaderHash([0u8; 32])); - - let peer_server_consensus_hash = ConsensusHash([(i + 1) as u8; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block.block_hash(), - ); - - test_debug!("Store peer server index block {:?}", &index_block_hash); - store_staging_block( - peer_server.chainstate(), - &peer_server_consensus_hash, 
- &peer_server_block, - &ConsensusHash([i as u8; 20]), - 456, - 123, - ); - set_block_processed( - peer_server.chainstate(), - &peer_server_consensus_hash, - &peer_server_block.block_hash(), - true, - ); - - index_block_hashes.push(index_block_hash); - blocks.push(peer_server_block); - } - - let rev_blocks: Vec<_> = blocks.into_iter().rev().collect(); - let rev_ibhs: Vec<_> = index_block_hashes.into_iter().rev().collect(); - - let tip = rev_ibhs[0].clone(); - *server_blocks_cell.borrow_mut() = Some((rev_blocks, rev_ibhs)); - - // now ask for it - convo_client.new_getheaders(25, TipRequest::SpecificTip(tip)) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::Headers(response_md, headers) => { - assert_eq!(headers.len(), 25); - let expected = server_blocks_cell.borrow().clone().unwrap(); - for (i, h) in headers.iter().enumerate() { - assert_eq!(h.header, expected.0[i].header); - assert_eq!(h.consensus_hash, ConsensusHash([(25 - i) as u8; 20])); - if i + 1 < headers.len() { - assert_eq!(h.parent_block_id, expected.1[i + 1]); - } - } - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_unconfirmed_getblock() { - let server_block_cell = RefCell::new(None); - - test_rpc( - function_name!(), - 40020, - 40021, - 50020, - 50021, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // have "server" peer store a block to staging - let peer_server_block = make_codec_test_block(25); - let peer_server_consensus_hash = ConsensusHash([0x02; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block.block_hash(), - ); - - test_debug!("Store peer server index block {:?}", &index_block_hash); - store_staging_block( - peer_server.chainstate(), - &peer_server_consensus_hash, - &peer_server_block, - &ConsensusHash([0x03; 20]), - 456, - 123, - ); - - *server_block_cell.borrow_mut() = Some(peer_server_block); - - // now ask for it - convo_client.new_getblock(index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::Block(response_md, block_info) => { - assert_eq!( - block_info.block_hash(), - (*server_block_cell.borrow()).as_ref().unwrap().block_hash() - ); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_confirmed_getblock() { - let server_block_cell = RefCell::new(None); - - test_rpc( - function_name!(), - 40030, - 40031, - 50030, - 50031, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // have "server" peer store a block to staging - let peer_server_block = make_codec_test_block(25); - let peer_server_consensus_hash = ConsensusHash([0x02; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block.block_hash(), - ); - - test_debug!("Store peer server index block {:?}", &index_block_hash); - store_staging_block( - peer_server.chainstate(), - &peer_server_consensus_hash, - &peer_server_block, - &ConsensusHash([0x03; 20]), - 456, - 123, - ); - 
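Note: the two getblock tests bracketing this hunk differ only in whether the staged block is marked processed before the request; both fetch the block by its index block hash, derived from the consensus hash and block hash. A sketch of that key derivation using the call from the removed code (the return type is assumed to be `StacksBlockId`, and the helper name is illustrative):

    // Derive the index block hash that new_getblock is given, as in the tests above.
    fn block_query_key(consensus_hash: &ConsensusHash, block: &StacksBlock) -> StacksBlockId {
        StacksBlockHeader::make_index_block_hash(consensus_hash, &block.block_hash())
    }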
set_block_processed( - peer_server.chainstate(), - &peer_server_consensus_hash, - &peer_server_block.block_hash(), - true, - ); - - *server_block_cell.borrow_mut() = Some(peer_server_block); - - // now ask for it - convo_client.new_getblock(index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::Block(response_md, block_info) => { - assert_eq!( - block_info.block_hash(), - (*server_block_cell.borrow()).as_ref().unwrap().block_hash() - ); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_indexed_microblocks() { - let server_microblocks_cell = RefCell::new(vec![]); - - test_rpc( - function_name!(), - 40040, - 40041, - 50040, - 50041, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let parent_block = make_codec_test_block(25); - let parent_consensus_hash = ConsensusHash([0x02; 20]); - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_block.block_hash(), - ); - - let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); - mblocks.truncate(15); - - let mut child_block = make_codec_test_block(25); - let child_consensus_hash = ConsensusHash([0x03; 20]); - - child_block.header.parent_block = parent_block.block_hash(); - child_block.header.parent_microblock = - mblocks.last().as_ref().unwrap().block_hash(); - child_block.header.parent_microblock_sequence = - mblocks.last().as_ref().unwrap().header.sequence; - - store_staging_block( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block, - &ConsensusHash([0x01; 20]), - 456, - 123, - ); - set_block_processed( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block.block_hash(), - true, - ); - - store_staging_block( - peer_server.chainstate(), - &child_consensus_hash, - &child_block, - &parent_consensus_hash, - 456, - 123, - ); - set_block_processed( - peer_server.chainstate(), - &child_consensus_hash, - &child_block.block_hash(), - true, - ); - - let index_microblock_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &mblocks.last().as_ref().unwrap().block_hash(), - ); - - for mblock in mblocks.iter() { - store_staging_microblock( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block.block_hash(), - &mblock, - ); - } - - set_microblocks_processed( - peer_server.chainstate(), - &child_consensus_hash, - &child_block.block_hash(), - &mblocks.last().as_ref().unwrap().block_hash(), - ); - - *server_microblocks_cell.borrow_mut() = mblocks; - - convo_client.new_getmicroblocks_indexed(index_microblock_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match (*http_response).clone() { - HttpResponseType::Microblocks(_, mut microblocks) => { - microblocks.reverse(); - assert_eq!(microblocks.len(), (*server_microblocks_cell.borrow()).len()); - assert_eq!(microblocks, *server_microblocks_cell.borrow()); - true - } - _ => { - error!("Invalid response: {:?}", http_response); - false - } - } - }, - ); - } - - 
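Note: the microblock tests confirm a stream by having the child anchored block name the stream's tail in its header (hash and sequence of the last microblock), and `set_microblocks_processed` is then given that same tail hash. A sketch of that linkage, using the field names and methods from the removed code (the helper itself is illustrative):

    // Link a child anchored block to the tail of a parent's microblock stream.
    fn link_child_to_stream(
        child: &mut StacksBlock,
        parent: &StacksBlock,
        mblocks: &[StacksMicroblock],
    ) {
        let tail = mblocks.last().expect("non-empty microblock stream");
        child.header.parent_block = parent.block_hash();
        child.header.parent_microblock = tail.block_hash();
        child.header.parent_microblock_sequence = tail.header.sequence;
    }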
#[test] - #[ignore] - fn test_rpc_get_confirmed_microblocks() { - let server_microblocks_cell = RefCell::new(vec![]); - - test_rpc( - function_name!(), - 40042, - 40043, - 50042, - 50043, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let parent_block = make_codec_test_block(25); - let parent_consensus_hash = ConsensusHash([0x02; 20]); - - let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); - mblocks.truncate(15); - - let mut child_block = make_codec_test_block(25); - let child_consensus_hash = ConsensusHash([0x03; 20]); - - child_block.header.parent_block = parent_block.block_hash(); - child_block.header.parent_microblock = - mblocks.last().as_ref().unwrap().block_hash(); - child_block.header.parent_microblock_sequence = - mblocks.last().as_ref().unwrap().header.sequence; - - let child_index_block_hash = StacksBlockHeader::make_index_block_hash( - &child_consensus_hash, - &child_block.block_hash(), - ); - - store_staging_block( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block, - &ConsensusHash([0x01; 20]), - 456, - 123, - ); - set_block_processed( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block.block_hash(), - true, - ); - - store_staging_block( - peer_server.chainstate(), - &child_consensus_hash, - &child_block, - &parent_consensus_hash, - 456, - 123, - ); - set_block_processed( - peer_server.chainstate(), - &child_consensus_hash, - &child_block.block_hash(), - true, - ); - - for mblock in mblocks.iter() { - store_staging_microblock( - peer_server.chainstate(), - &parent_consensus_hash, - &parent_block.block_hash(), - &mblock, - ); - } - - set_microblocks_processed( - peer_server.chainstate(), - &child_consensus_hash, - &child_block.block_hash(), - &mblocks.last().as_ref().unwrap().block_hash(), - ); - - *server_microblocks_cell.borrow_mut() = mblocks; - - convo_client.new_getmicroblocks_confirmed(child_index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match (*http_response).clone() { - HttpResponseType::Microblocks(_, mut microblocks) => { - microblocks.reverse(); - assert_eq!(microblocks.len(), (*server_microblocks_cell.borrow()).len()); - assert_eq!(microblocks, *server_microblocks_cell.borrow()); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_unconfirmed_microblocks() { - let server_microblocks_cell = RefCell::new(vec![]); - - test_rpc( - function_name!(), - 40050, - 40051, - 50050, - 50051, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let consensus_hash = ConsensusHash([0x02; 20]); - let anchored_block_hash = BlockHeaderHash([0x03; 32]); - let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); - - let mut mblocks = make_sample_microblock_stream(&privk, &anchored_block_hash); - mblocks.truncate(15); - - for mblock in mblocks.iter() { - store_staging_microblock( - peer_server.chainstate(), - &consensus_hash, - &anchored_block_hash, - 
&mblock, - ); - } - - *server_microblocks_cell.borrow_mut() = mblocks; - - // start at seq 5 - convo_client.new_getmicroblocks_unconfirmed(index_block_hash, 5) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::Microblocks(response_md, microblocks) => { - assert_eq!(microblocks.len(), 10); - assert_eq!( - *microblocks, - (*server_microblocks_cell.borrow())[5..].to_vec() - ); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_unconfirmed_transaction() { - let last_txid = RefCell::new(Txid([0u8; 32])); - let last_mblock = RefCell::new(BlockHeaderHash([0u8; 32])); - - test_rpc( - function_name!(), - 40052, - 40053, - 50052, - 50053, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let sortdb = peer_server.sortdb.take().unwrap(); - Relayer::setup_unconfirmed_state(peer_server.chainstate(), &sortdb).unwrap(); - peer_server.sortdb = Some(sortdb); - - assert!(peer_server.chainstate().unconfirmed_state.is_some()); - let (txid, mblock_hash) = match peer_server.chainstate().unconfirmed_state { - Some(ref unconfirmed) => { - assert!(unconfirmed.mined_txs.len() > 0); - let mut txid = Txid([0u8; 32]); - let mut mblock_hash = BlockHeaderHash([0u8; 32]); - for (next_txid, (_, mbh, ..)) in unconfirmed.mined_txs.iter() { - txid = next_txid.clone(); - mblock_hash = mbh.clone(); - break; - } - (txid, mblock_hash) - } - None => { - panic!("No unconfirmed state"); - } - }; - - *last_txid.borrow_mut() = txid.clone(); - *last_mblock.borrow_mut() = mblock_hash.clone(); - - convo_client.new_gettransaction_unconfirmed(txid) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::UnconfirmedTransaction(response_md, unconfirmed_resp) => { - assert_eq!( - unconfirmed_resp.status, - UnconfirmedTransactionStatus::Microblock { - block_hash: (*last_mblock.borrow()).clone(), - seq: 0 - } - ); - let tx = StacksTransaction::consensus_deserialize( - &mut &hex_bytes(&unconfirmed_resp.tx).unwrap()[..], - ) - .unwrap(); - assert_eq!(tx.txid(), *last_txid.borrow()); - true - } - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_missing_getblock() { - test_rpc( - function_name!(), - 40060, - 40061, - 50060, - 50061, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let peer_server_block_hash = BlockHeaderHash([0x04; 32]); - let peer_server_consensus_hash = ConsensusHash([0x02; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block_hash, - ); - - // now ask for it - convo_client.new_getblock(index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(response_md, msg) => true, - _ => { - error!("Invalid response: {:?}", 
&http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_missing_index_getmicroblocks() { - test_rpc( - function_name!(), - 40070, - 40071, - 50070, - 50071, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let peer_server_block_hash = BlockHeaderHash([0x04; 32]); - let peer_server_consensus_hash = ConsensusHash([0x02; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block_hash, - ); - - // now ask for it - convo_client.new_getmicroblocks_indexed(index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(response_md, msg) => true, - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_missing_confirmed_getmicroblocks() { - test_rpc( - function_name!(), - 40072, - 40073, - 50072, - 50073, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let peer_server_block_hash = BlockHeaderHash([0x04; 32]); - let peer_server_consensus_hash = ConsensusHash([0x02; 20]); - let index_block_hash = StacksBlockHeader::make_index_block_hash( - &peer_server_consensus_hash, - &peer_server_block_hash, - ); - - // now ask for it - convo_client.new_getmicroblocks_confirmed(index_block_hash) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(response_md, msg) => true, - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_missing_unconfirmed_microblocks() { - let server_microblocks_cell = RefCell::new(vec![]); - - test_rpc( - function_name!(), - 40080, - 40081, - 50080, - 50081, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let consensus_hash = ConsensusHash([0x02; 20]); - let anchored_block_hash = BlockHeaderHash([0x03; 32]); - let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); - - let mut mblocks = make_sample_microblock_stream(&privk, &anchored_block_hash); - mblocks.truncate(15); - - for mblock in mblocks.iter() { - store_staging_microblock( - peer_server.chainstate(), - &consensus_hash, - &anchored_block_hash, - &mblock, - ); - } - - *server_microblocks_cell.borrow_mut() = mblocks; - - // start at seq 16 (which doesn't exist) - convo_client.new_getmicroblocks_unconfirmed(index_block_hash, 16) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(response_md, msg) => true, - _ => { - error!("Invalid response: {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_src() { - // Test v2/contracts/source (aka GetContractSrc) endpoint. 
- // In this test, we don't set any tip parameters, and allow the endpoint to execute against - // the canonical Stacks tip. - // The contract source we are querying for exists in the anchored state, so we expect the - // query to succeed. - test_rpc( - function_name!(), - 40090, - 40091, - 50090, - 50091, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getcontractsrc( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetContractSrc(response_md, data) => { - assert_eq!(data.source, TEST_CONTRACT); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_src_unconfirmed_with_canonical_tip() { - // Test v2/contracts/source (aka GetContractSrc) endpoint. - // In this test, we don't set any tip parameters, and allow the endpoint to execute against - // the canonical Stacks tip. - // The contract source we are querying for only exists in the unconfirmed state, so we - // expect the query to fail. - test_rpc( - function_name!(), - 40100, - 40101, - 50100, - 50101, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getcontractsrc( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(_, error_str) => { - assert_eq!(error_str, "No contract source data found"); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_src_with_unconfirmed_tip() { - // Test v2/contracts/source (aka GetContractSrc) endpoint. - // In this test, we set `tip_req` to be the unconfirmed chain tip. - // The contract source we are querying for exists in the unconfirmed state, so we expect - // the query to succeed. 
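Note: the contract-source tests that expect a miss all check for the same 404 shape, an `HttpResponseType::NotFound` whose message is the string asserted above. A minimal sketch of that check, assuming the same response enum (the helper name is illustrative):

    // True only for the 404 produced when no contract source is found.
    fn is_missing_contract_src(resp: &HttpResponseType) -> bool {
        match resp {
            HttpResponseType::NotFound(_, msg) => msg == "No contract source data found",
            _ => false,
        }
    }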
- test_rpc( - function_name!(), - 40102, - 40103, - 50102, - 50103, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_getcontractsrc( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::SpecificTip(unconfirmed_tip), - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetContractSrc(response_md, data) => { - assert_eq!(data.source, TEST_CONTRACT_UNCONFIRMED); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_src_use_latest_tip() { - // Test v2/contracts/source (aka GetContractSrc) endpoint. - // In this test, we set `tip_req` to UseLatestUnconfirmedTip. - // The contract source we are querying for exists in the unconfirmed state, so we expect - // the query to succeed. - test_rpc( - function_name!(), - 40104, - 40105, - 50104, - 50105, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getcontractsrc( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetContractSrc(response_md, data) => { - assert_eq!(data.source, TEST_CONTRACT_UNCONFIRMED); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_account() { - test_rpc( - function_name!(), - 40110, - 40111, - 50110, - 50111, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getaccount( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetAccount(response_md, data) => { - assert_eq!(data.nonce, 2); - let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); - assert_eq!(balance, 1000000000); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - /// In this test, the query parameter `tip_req` is set to UseLatestUnconfirmedTip, and so we expect the - /// tip used for the query to be the latest microblock. - /// We check that the account state matches the state in the most recent microblock. 
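Note: the account endpoints return balances as 0x-prefixed hex strings, which the checks above and below decode with `from_str_radix`; the unconfirmed view differs from the anchored one by the 123-uSTX contract-call fee and the two extra nonces spent in the microblock. A std-only sketch of the decoding (the helper name is illustrative):

    // Decode a 0x-prefixed hex balance string, as the account checks do.
    fn parse_hex_balance(balance: &str) -> u128 {
        u128::from_str_radix(balance.trim_start_matches("0x"), 16)
            .expect("balance should be a 0x-prefixed hex string")
    }

    // e.g. 0x3b9ac985 (possibly zero-padded on the wire) decodes to
    // 1_000_000_000 - 123, the unconfirmed balance asserted in these tests.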
- #[test] - #[ignore] - fn test_rpc_get_account_use_latest_tip() { - test_rpc( - function_name!(), - 40112, - 40113, - 50112, - 50113, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getaccount( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetAccount(response_md, data) => { - assert_eq!(data.nonce, 4); - let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); - assert_eq!(balance, 999999877); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - /// In this test, the query parameter `tip_req` is set to UseLatestUnconfirmedTip, but we did not generate - /// microblocks in the rpc test. Thus, we expect the tip used for the query to be the previous - /// anchor block (which is the latest tip). - /// We check that the account state matches the state in the previous anchor block. - #[test] - #[ignore] - fn test_rpc_get_account_use_latest_tip_no_microblocks() { - test_rpc( - function_name!(), - 40114, - 40115, - 50114, - 50115, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getaccount( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetAccount(response_md, data) => { - assert_eq!(data.nonce, 2); - let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); - assert_eq!(balance, 1000000000); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_account_unconfirmed() { - test_rpc( - function_name!(), - 40120, - 40121, - 50120, - 50121, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_getaccount( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - TipRequest::SpecificTip(unconfirmed_tip), - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetAccount(response_md, data) => { - assert_eq!(data.nonce, 4); - let balance = u128::from_str_radix(&data.balance[2..], 16).unwrap(); - assert_eq!(balance, 1000000000 - 123); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_data_var() { - test_rpc( - function_name!(), - 40122, - 40123, - 50122, - 50123, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - 
convo_client.new_getdatavar( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "bar".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetDataVar(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::Int(0) - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_data_var_unconfirmed() { - test_rpc( - function_name!(), - 40124, - 40125, - 50124, - 50125, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_getdatavar( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "bar".try_into().unwrap(), - TipRequest::SpecificTip(unconfirmed_tip), - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetDataVar(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::Int(1) - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_data_var_nonexistant() { - test_rpc( - function_name!(), - 40125, - 40126, - 50125, - 50126, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getdatavar( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "bar-nonexistant".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(_, msg) => { - assert_eq!(msg, "Data var not found"); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_constant_val() { - test_rpc( - function_name!(), - 40122, - 40123, - 50122, - 50123, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getconstantval( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "cst".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetConstantVal(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::Int(123) - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, 
- ); - } - - #[test] - #[ignore] - fn test_rpc_get_constant_val_unconfirmed() { - test_rpc( - function_name!(), - 40124, - 40125, - 50124, - 50125, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_getconstantval( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "cst".try_into().unwrap(), - TipRequest::SpecificTip(unconfirmed_tip), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetConstantVal(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::Int(123) - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_constant_val_nonexistant() { - test_rpc( - function_name!(), - 40125, - 40126, - 50125, - 50126, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getconstantval( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "cst-nonexistant".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(_, msg) => { - assert_eq!(msg, "Constant not found"); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_map_entry() { - // Test v2/map_entry (aka GetMapEntry) endpoint. - // In this test, we don't set any tip parameters, and we expect that querying for map data - // against the canonical Stacks tip will succeed. - test_rpc( - function_name!(), - 40130, - 40131, - 50130, - 50131, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let principal = - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(); - convo_client.new_getmapentry( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "unit-map".try_into().unwrap(), - Value::Tuple( - TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) - .unwrap(), - ), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetMapEntry(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::some(Value::Tuple( - TupleData::from_data(vec![("units".into(), Value::Int(123))]) - .unwrap() - )) - .unwrap() - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_map_entry_unconfirmed() { - // Test v2/map_entry (aka GetMapEntry) endpoint. 
- // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying for map data - // against the unconfirmed state will succeed. - test_rpc( - function_name!(), - 40140, - 40141, - 50140, - 50141, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - let principal = - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(); - convo_client.new_getmapentry( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "unit-map".try_into().unwrap(), - Value::Tuple( - TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) - .unwrap(), - ), - TipRequest::SpecificTip(unconfirmed_tip), - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetMapEntry(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::some(Value::Tuple( - TupleData::from_data(vec![("units".into(), Value::Int(1))]) - .unwrap() - )) - .unwrap() - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_map_entry_use_latest_tip() { - test_rpc( - function_name!(), - 40142, - 40143, - 50142, - 50143, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let principal = - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(); - convo_client.new_getmapentry( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world".try_into().unwrap(), - "unit-map".try_into().unwrap(), - Value::Tuple( - TupleData::from_data(vec![("account".into(), Value::Principal(principal))]) - .unwrap(), - ), - TipRequest::UseLatestAnchoredTip, - false, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetMapEntry(response_md, data) => { - assert_eq!( - Value::try_deserialize_hex_untyped(&data.data).unwrap(), - Value::some(Value::Tuple( - TupleData::from_data(vec![("units".into(), Value::Int(1))]) - .unwrap() - )) - .unwrap() - ); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_abi() { - // Test /v2/contracts/interface (aka GetContractABI) endpoint. - // In this test, we don't set any tip parameters, and we expect that querying - // against the canonical Stacks tip will succeed. 
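Note: the map-entry checks above compare the hex-encoded response against a Clarity optional tuple built the same way in the anchored (`units` = 123) and unconfirmed (`units` = 1) cases. A sketch of constructing that expected value with the `Value`/`TupleData` calls from the removed code (the helper name is illustrative):

    // Build the expected (some (tuple (units <n>))) value for the map-entry checks.
    fn expected_units(units: i128) -> Value {
        Value::some(Value::Tuple(
            TupleData::from_data(vec![("units".into(), Value::Int(units))])
                .expect("well-formed tuple"),
        ))
        .expect("small tuple fits Clarity value limits")
    }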
- test_rpc( - function_name!(), - 40150, - 40151, - 50150, - 50151, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getcontractabi( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::NotFound(..) => { - // not confirmed yet - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_abi_unconfirmed() { - // Test /v2/contracts/interface (aka GetContractABI) endpoint. - // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying - // against the unconfirmed state will succeed. - test_rpc( - function_name!(), - 40152, - 40153, - 50152, - 50153, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_getcontractabi( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::SpecificTip(unconfirmed_tip), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetContractABI(response_md, data) => true, - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_contract_abi_use_latest_tip() { - test_rpc( - function_name!(), - 40154, - 40155, - 50154, - 50155, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_getcontractabi( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::GetContractABI(response_md, data) => true, - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_call_read_only() { - // Test /v2/contracts/call-read (aka CallReadOnlyFunction) endpoint. - // In this test, we don't set any tip parameters, and we expect that querying - // against the canonical Stacks tip will succeed. 
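Note: the read-only call tests below branch on the `okay` flag of the response payload: failures carry a `cause` string (e.g. one containing "NoSuchContract") and no result, while successes carry a hex-encoded Clarity value that is decoded with `try_deserialize_hex_untyped` and compared to `(ok 1)`. A sketch of that interpretation, assuming the response fields used in those checks (the helper name is illustrative):

    // Turn a call-read response payload into a decoded Clarity value or an error cause.
    fn decode_read_only_result(
        okay: bool,
        result: Option<String>,
        cause: Option<String>,
    ) -> Result<Value, String> {
        if okay {
            let hex = result.expect("okay responses carry a result");
            Ok(Value::try_deserialize_hex_untyped(&hex).expect("hex-encoded Clarity value"))
        } else {
            Err(cause.unwrap_or_else(|| "unknown cause".to_string()))
        }
    }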
- test_rpc( - function_name!(), - 40170, - 40171, - 50170, - 50171, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_callreadonlyfunction( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - None, - "ro-test".try_into().unwrap(), - vec![], - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::CallReadOnlyFunction(response_md, data) => { - assert!(data.cause.is_some()); - assert!(data.cause.clone().unwrap().find("NoSuchContract").is_some()); - assert!(!data.okay); - assert!(data.result.is_none()); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_call_read_only_use_latest_tip() { - // Test /v2/contracts/call-read (aka CallReadOnlyFunction) endpoint. - // In this test, we set `tip_req` to UseLatestUnconfirmedTip, and we expect that querying - // against the unconfirmed state will succeed. - test_rpc( - function_name!(), - 40172, - 40173, - 50172, - 50173, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_callreadonlyfunction( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - None, - "ro-test".try_into().unwrap(), - vec![], - TipRequest::UseLatestAnchoredTip, - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::CallReadOnlyFunction(response_md, data) => { - assert!(data.okay); - assert_eq!( - Value::try_deserialize_hex_untyped(&data.result.clone().unwrap()) - .unwrap(), - Value::okay(Value::Int(1)).unwrap() - ); - assert!(data.cause.is_none()); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_call_read_only_unconfirmed() { - test_rpc( - function_name!(), - 40180, - 40181, - 50180, - 50181, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let unconfirmed_tip = peer_client - .chainstate() - .unconfirmed_state - .as_ref() - .unwrap() - .unconfirmed_chain_tip - .clone(); - convo_client.new_callreadonlyfunction( - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap(), - "hello-world-unconfirmed".try_into().unwrap(), - StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R") - .unwrap() - .to_account_principal(), - None, - "ro-test".try_into().unwrap(), - vec![], - TipRequest::SpecificTip(unconfirmed_tip), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - match http_response { - HttpResponseType::CallReadOnlyFunction(response_md, data) => { - assert!(data.okay); 
- assert_eq!( - Value::try_deserialize_hex_untyped(&data.result.clone().unwrap()) - .unwrap(), - Value::okay(Value::Int(1)).unwrap() - ); - assert!(data.cause.is_none()); - true - } - _ => { - error!("Invalid response; {:?}", &http_response); - false - } - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_getattachmentsinv_limit_reached() { - test_rpc( - function_name!(), - 40190, - 40191, - 50190, - 50191, - true, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - let pages_indexes = HashSet::from_iter(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]); - convo_client.new_getattachmentsinv(StacksBlockId([0x00; 32]), pages_indexes) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::BadRequest(_, msg) => { - assert_eq!( - msg, - "Number of attachment inv pages is limited by 8 per request" - ); - true - } - _ => false, - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_mempool_query_txtags() { - test_rpc( - function_name!(), - 40813, - 40814, - 50813, - 50814, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_mempool_query( - MemPoolSyncData::TxTags([0u8; 32], vec![]), - Some(Txid([0u8; 32])), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::MemPoolTxs(_, _, txs) => { - // got everything - assert_eq!(txs.len(), 10); - true - } - _ => false, - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_mempool_query_bloom() { - test_rpc( - function_name!(), - 40815, - 40816, - 50815, - 50816, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // empty bloom filter - convo_client.new_mempool_query( - MemPoolSyncData::BloomFilter(BloomFilter::new( - BLOOM_COUNTER_ERROR_RATE, - MAX_BLOOM_COUNTER_TXS, - BloomNodeHasher::new(&[0u8; 32]), - )), - Some(Txid([0u8; 32])), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::MemPoolTxs(_, _, txs) => { - // got everything - assert_eq!(txs.len(), 10); - true - } - _ => false, - } - }, - ); - } - - #[test] - #[ignore] - fn test_rpc_get_stackerdb_metadata() { - test_rpc( - function_name!(), - 40817, - 40818, - 50815, - 50816, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - convo_client.new_get_stackerdb_metadata( - QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::StackerDBMetadata(_, metadata) => { - // config was updated - assert_eq!(metadata.len(), 6); - for (i, slot) in metadata.iter().enumerate() { - assert_eq!(slot.slot_id, i as u32); - assert_eq!(slot.slot_version, 0); - 
assert_eq!(slot.data_hash, Sha512Trunc256Sum([0u8; 32])); - assert_eq!(slot.signature, MessageSignature::empty()); - } - true - } - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_get_stackerdb_versioned_chunk() { - test_rpc( - function_name!(), - 40819, - 40820, - 50817, - 50818, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - debug!("Set up peer stackerDB"); - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - let tx = peer_server - .network - .stackerdbs - .tx_begin(StackerDBConfig::noop()) - .unwrap(); - tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) - .unwrap(); - tx.commit().unwrap(); - - // now go ask for it - convo_client.new_get_stackerdb_chunk(contract_id, 0, Some(1)) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::StackerDBChunk(_, chunk_data) => { - assert_eq!(chunk_data, "hello world".as_bytes()); - true - } - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_get_stackerdb_latest_chunk() { - test_rpc( - function_name!(), - 40821, - 40822, - 50819, - 50820, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - debug!("Set up peer stackerDB"); - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - let tx = peer_server - .network - .stackerdbs - .tx_begin(StackerDBConfig::noop()) - .unwrap(); - tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) - .unwrap(); - tx.commit().unwrap(); - - // now go ask for it - convo_client.new_get_stackerdb_chunk(contract_id, 0, None) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::StackerDBChunk(_, chunk_data) => { - assert_eq!(chunk_data, "hello world".as_bytes()); - true - } - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_get_stackerdb_nonexistant_chunk() { - test_rpc( - function_name!(), - 40821, - 40822, - 50819, - 50820, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - debug!("Set up peer stackerDB"); - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); 
- let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - let tx = peer_server - .network - .stackerdbs - .tx_begin(StackerDBConfig::noop()) - .unwrap(); - tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) - .unwrap(); - tx.commit().unwrap(); - - // now go ask for it - convo_client.new_get_stackerdb_chunk(contract_id, 0, Some(2)) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::NotFound(..) => true, - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_get_stackerdb_nonexistant_db() { - test_rpc( - function_name!(), - 40823, - 40824, - 50821, - 50822, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - debug!("Set up peer stackerDB"); - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - let tx = peer_server - .network - .stackerdbs - .tx_begin(StackerDBConfig::noop()) - .unwrap(); - tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) - .unwrap(); - tx.commit().unwrap(); - - // now go ask for it, but from the wrong contract - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.nope", - ) - .unwrap(); - convo_client.new_get_stackerdb_chunk(contract_id, 0, None) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::NotFound(..) 
=> true, - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_post_stackerdb_chunk() { - test_rpc( - function_name!(), - 40823, - 40824, - 50821, - 50822, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - convo_client.new_post_stackerdb_chunk( - contract_id, - slot_metadata.slot_id, - slot_metadata.slot_version, - slot_metadata.signature, - data.to_vec(), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::StackerDBChunkAck(_, ack) => { - assert!(ack.accepted); - assert!(ack.metadata.is_some()); - - let md = ack.metadata.clone().unwrap(); - - assert_eq!(md.slot_id, 0); - assert_eq!(md.slot_version, 1); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - assert_eq!(md.data_hash, data_hash); - - // server actually has it - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - assert_eq!( - peer_server - .network - .stackerdbs - .get_latest_chunk(&contract_id, 0) - .unwrap() - .unwrap(), - "hello world".as_bytes() - ); - true - } - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_post_stale_stackerdb_chunk() { - test_rpc( - function_name!(), - 40825, - 40826, - 50823, - 50824, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - let tx = peer_server - .network - .stackerdbs - .tx_begin(StackerDBConfig::noop()) - .unwrap(); - tx.try_replace_chunk(&contract_id, &slot_metadata, "hello world".as_bytes()) - .unwrap(); - tx.commit().unwrap(); - - // conflicting data - let conflict_data = "conflict".as_bytes(); - let conflict_data_hash = Sha512Trunc256Sum::from_data(conflict_data); - let mut conflict_slot_metadata = - SlotMetadata::new_unsigned(0, 1, conflict_data_hash); - conflict_slot_metadata.sign(&privk1).unwrap(); - - convo_client.new_post_stackerdb_chunk( - contract_id, - conflict_slot_metadata.slot_id, - conflict_slot_metadata.slot_version, - conflict_slot_metadata.signature, - data.to_vec(), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { 
- HttpResponseType::StackerDBChunkAck(_, ack) => { - assert!(!ack.accepted); - assert!(ack.reason.is_some()); - assert!(ack.metadata.is_some()); - - let md = ack.metadata.clone().unwrap(); - - assert_eq!(md.slot_id, 0); - assert_eq!(md.slot_version, 1); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - assert_eq!(md.data_hash, data_hash); - - // server actually has it - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - assert_eq!( - peer_server - .network - .stackerdbs - .get_latest_chunk(&contract_id, 0) - .unwrap() - .unwrap(), - "hello world".as_bytes() - ); - true - } - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_post_nonexistant_stackerdb_chunk() { - test_rpc( - function_name!(), - 40827, - 40828, - 50825, - 50826, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - let mut slot_metadata = SlotMetadata::new_unsigned(0, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - // ... but for the wrong DB - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.nope", - ) - .unwrap(); - convo_client.new_post_stackerdb_chunk( - contract_id, - slot_metadata.slot_id, - slot_metadata.slot_version, - slot_metadata.signature, - data.to_vec(), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - let req_md = http_request.metadata().clone(); - println!("{:?}", http_response); - match http_response { - HttpResponseType::NotFound(..) => true, - _ => false, - } - }, - ) - } - - #[test] - #[ignore] - fn test_rpc_post_overflow_stackerdb_chunk() { - test_rpc( - function_name!(), - 40829, - 40830, - 50827, - 50828, - false, - |ref mut peer_client, - ref mut convo_client, - ref mut peer_server, - ref mut convo_server| { - // insert a value in slot 0 - let contract_id = QualifiedContractIdentifier::parse( - "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world", - ) - .unwrap(); - let privk1 = StacksPrivateKey::from_hex( - "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", - ) - .unwrap(); - - let data = "hello world".as_bytes(); - let data_hash = Sha512Trunc256Sum::from_data(data); - - // invalid slot! 
- let mut slot_metadata = SlotMetadata::new_unsigned(100000, 1, data_hash); - slot_metadata.sign(&privk1).unwrap(); - - convo_client.new_post_stackerdb_chunk( - contract_id, - slot_metadata.slot_id, - slot_metadata.slot_version, - slot_metadata.signature, - data.to_vec(), - ) - }, - |ref http_request, - ref http_response, - ref mut peer_client, - ref mut peer_server, - ref convo_client, - ref convo_server| { - match http_response { - HttpResponseType::StackerDBChunkAck(_, ack) => { - assert!(!ack.accepted); - assert!(ack.reason.is_some()); - assert!(ack.metadata.is_none()); - true - } - _ => false, - } - }, - ) - } - - #[test] - fn test_getinfo_compat() { - let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null}"#; - let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; - let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release 
build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - - // they all parse - for json_obj in &[ - &old_getinfo_json, - &getinfo_no_pubkey_json, - &getinfo_no_pubkey_hash_json, - &getinfo_full_json, - ] { - let _v: RPCPeerInfoData = serde_json::from_str(json_obj).unwrap(); - } + pub fn get_peer_host(&self) -> PeerHost { + self.peer_host.clone() } } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 9e96940163..e93819e34e 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -19,6 +19,7 @@ use std::io::{Error as io_error, ErrorKind, Read, Write}; use std::sync::mpsc::{sync_channel, Receiver, RecvError, SendError, SyncSender, TryRecvError}; use mio::net as mio_net; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::get_epoch_time_secs; use crate::burnchains::{Burnchain, BurnchainView}; @@ -29,6 +30,7 @@ use crate::net::atlas::AtlasDB; use crate::net::connection::*; use crate::net::db::*; use crate::net::http::*; +use crate::net::httpcore::*; use crate::net::p2p::{PeerMap, PeerNetwork}; use crate::net::poll::*; use crate::net::rpc::*; @@ -36,43 +38,52 @@ use crate::net::{Error as net_error, *}; #[derive(Debug)] pub struct HttpPeer { - // ongoing http conversations (either they reached out to us, or we to them) + /// ongoing http conversations (either they reached out to us, or we to them) pub peers: HashMap, pub sockets: HashMap, - // outbound connections that are pending connection + /// outbound connections that are pending connection pub connecting: HashMap< usize, ( mio_net::TcpStream, Option, - Option, + Option, u64, ), >, - // server network handle + /// server network handle pub http_server_handle: usize, - // connection options + /// server socket address + pub http_server_addr: SocketAddr, + + /// connection options pub connection_opts: ConnectionOptions, } impl HttpPeer { - pub fn new(conn_opts: ConnectionOptions, server_handle: usize) -> HttpPeer { + pub fn new( + conn_opts: ConnectionOptions, + server_handle: usize, + server_addr: SocketAddr, + ) -> HttpPeer { HttpPeer { peers: HashMap::new(), sockets: HashMap::new(), connecting: HashMap::new(), http_server_handle: server_handle, + http_server_addr: server_addr, connection_opts: conn_opts, } } - pub fn set_server_handle(&mut self, h: usize) -> () { + pub fn set_server_handle(&mut self, h: usize, addr: SocketAddr) -> () { self.http_server_handle = h; + self.http_server_addr = addr; } /// Is there a HTTP conversation open to this data_url that is not in progress? 
@@ -116,7 +127,7 @@ impl HttpPeer { network: &PeerNetwork, data_url: UrlString, addr: SocketAddr, - request: Option, + request: Option, ) -> Result { if let Some(event_id) = self.find_free_conversation(&data_url) { let http_nk = NeighborKey { @@ -128,7 +139,11 @@ impl HttpPeer { return Err(net_error::AlreadyConnected(event_id, http_nk)); } - let sock = NetworkState::connect(&addr)?; + let sock = NetworkState::connect( + &addr, + network.connection_opts.socket_send_buffer_size, + network.connection_opts.socket_recv_buffer_size, + )?; let hint_event_id = network_state.next_event_id()?; let next_event_id = network_state.register(self.http_server_handle, hint_event_id, &sock)?; @@ -196,13 +211,15 @@ impl HttpPeer { fn register_http( &mut self, network_state: &mut NetworkState, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, + node_state: &mut StacksNodeState, event_id: usize, mut socket: mio_net::TcpStream, outbound_url: Option, - initial_request: Option, + initial_request: Option, ) -> Result<(), net_error> { + let send_buffer_size = node_state + .with_node_state(|network, _, _, _, _| network.connection_opts.socket_send_buffer_size); + let client_addr = match socket.peer_addr() { Ok(addr) => addr, Err(e) => { @@ -233,6 +250,7 @@ impl HttpPeer { peer_host, &self.connection_opts, event_id, + send_buffer_size, ); debug!( @@ -251,12 +269,9 @@ impl HttpPeer { } // prime the socket - match HttpPeer::saturate_http_socket(&mut socket, &mut new_convo, mempool, chainstate) { - Ok(_) => {} - Err(e) => { - let _ = network_state.deregister(event_id, &socket); - return Err(e); - } + if let Err(e) = HttpPeer::saturate_http_socket(&mut socket, &mut new_convo) { + let _ = network_state.deregister(event_id, &socket); + return Err(e); } } @@ -326,12 +341,10 @@ impl HttpPeer { pub fn saturate_http_socket( client_sock: &mut mio::net::TcpStream, convo: &mut ConversationHttp, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, ) -> Result<(), net_error> { // saturate the socket loop { - let send_res = convo.send(client_sock, mempool, chainstate); + let send_res = convo.send(client_sock); match send_res { Err(e) => { debug!("Failed to send data to socket {:?}: {:?}", &client_sock, &e); @@ -353,8 +366,7 @@ impl HttpPeer { fn process_new_sockets( &mut self, network_state: &mut NetworkState, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, + node_state: &mut StacksNodeState, poll_state: &mut NetworkPollState, ) -> Vec { let mut registered = vec![]; @@ -386,15 +398,9 @@ impl HttpPeer { continue; } - if let Err(_e) = self.register_http( - network_state, - mempool, - chainstate, - event_id, - client_sock, - None, - None, - ) { + if let Err(_e) = + self.register_http(network_state, node_state, event_id, client_sock, None, None) + { // NOTE: register_http will deregister the socket for us continue; } @@ -408,14 +414,10 @@ impl HttpPeer { /// Returns whether or not the convo is still alive, as well as any message(s) that need to be /// forwarded to the peer network. fn process_http_conversation( - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, + node_state: &mut StacksNodeState, event_id: usize, client_sock: &mut mio_net::TcpStream, convo: &mut ConversationHttp, - handler_args: &RPCHandlerArgs, ) -> Result<(bool, Vec), net_error> { // get incoming bytes and update the state of this conversation. let mut convo_dead = false; @@ -435,28 +437,20 @@ impl HttpPeer { // got sent bad data. 
If this was an inbound conversation, send it a HTTP // 400 and close the socket. debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error( - client_sock, - HttpResponseType::BadRequest( - HttpResponseMetadata::empty_error(), - "".to_string(), + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode" + .to_string(), ), - ) { + )) { Ok(_) => { - match HttpPeer::saturate_http_socket( - client_sock, - convo, - mempool, - chainstate, - ) { - Ok(_) => {} - Err(e) => { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - convo_dead = true; - } + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { + debug!( + "Failed to flush HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; } } Err(e) => { @@ -483,7 +477,7 @@ impl HttpPeer { // react to inbound messages -- do we need to send something out, or fulfill requests // to other threads? Try to chat even if the recv() failed, since we'll want to at // least drain the conversation inbox. - let msgs = match convo.chat(network, sortdb, chainstate, mempool, handler_args) { + let msgs = match convo.chat(node_state) { Ok(msgs) => msgs, Err(e) => { debug!( @@ -498,15 +492,12 @@ impl HttpPeer { if !convo_dead { // (continue) sending out data in this conversation, if the conversation is still // ongoing - match HttpPeer::saturate_http_socket(client_sock, convo, mempool, chainstate) { - Ok(_) => {} - Err(e) => { - debug!( - "Failed to send HTTP data to event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { + debug!( + "Failed to send HTTP data to event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } @@ -522,8 +513,7 @@ impl HttpPeer { fn process_connecting_sockets( &mut self, network_state: &mut NetworkState, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, + node_state: &mut StacksNodeState, poll_state: &mut NetworkPollState, ) -> () { for event_id in poll_state.ready.iter() { @@ -535,8 +525,7 @@ impl HttpPeer { if let Err(_e) = self.register_http( network_state, - mempool, - chainstate, + node_state, *event_id, socket, data_url.clone(), @@ -558,11 +547,7 @@ impl HttpPeer { fn process_ready_sockets( &mut self, poll_state: &mut NetworkPollState, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, - handler_args: &RPCHandlerArgs, + node_state: &mut StacksNodeState, ) -> (Vec, Vec) { let mut to_remove = vec![]; let mut msgs = vec![]; @@ -586,14 +571,10 @@ impl HttpPeer { // activity on a http socket test_debug!("Process HTTP data from {:?}", convo); match HttpPeer::process_http_conversation( - network, - sortdb, - chainstate, - mempool, + node_state, *event_id, client_sock, convo, - handler_args, ) { Ok((alive, mut new_msgs)) => { if !alive { @@ -620,21 +601,14 @@ impl HttpPeer { /// Flush outgoing replies, but don't block. /// Drop broken handles. /// Return the list of conversation event IDs to close (i.e. 
they're broken, or the request is done) - fn flush_conversations( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - ) -> Vec { + fn flush_conversations(&mut self) -> Vec { let mut close = vec![]; // flush each outgoing conversation for (event_id, ref mut convo) in self.peers.iter_mut() { - match convo.try_flush(mempool, chainstate) { - Ok(_) => {} - Err(_e) => { - info!("Broken HTTP connection {:?}: {:?}", convo, &_e); - close.push(*event_id); - } + if let Err(e) = convo.try_flush() { + info!("Broken HTTP connection {:?}: {:?}", convo, &e); + close.push(*event_id); } if convo.is_drained() && !convo.is_keep_alive() { // did some work, but nothing more to do and we're not keep-alive @@ -655,35 +629,24 @@ impl HttpPeer { pub fn run( &mut self, network_state: &mut NetworkState, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, + node_state: &mut StacksNodeState, mut poll_state: NetworkPollState, - handler_args: &RPCHandlerArgs, ) -> Vec { // set up new inbound conversations - self.process_new_sockets(network_state, mempool, chainstate, &mut poll_state); + self.process_new_sockets(network_state, node_state, &mut poll_state); // set up connected sockets - self.process_connecting_sockets(network_state, mempool, chainstate, &mut poll_state); + self.process_connecting_sockets(network_state, node_state, &mut poll_state); // run existing conversations, clear out broken ones, and get back messages forwarded to us - let (stacks_msgs, error_events) = self.process_ready_sockets( - &mut poll_state, - network, - sortdb, - chainstate, - mempool, - handler_args, - ); + let (stacks_msgs, error_events) = self.process_ready_sockets(&mut poll_state, node_state); for error_event in error_events { debug!("Failed HTTP connection on event {}", error_event); self.deregister_http(network_state, error_event); } // move conversations along - let close_events = self.flush_conversations(mempool, chainstate); + let close_events = self.flush_conversations(); for close_event in close_events { debug!("Close HTTP connection on event {}", close_event); self.deregister_http(network_state, close_event); @@ -725,6 +688,7 @@ mod test { use crate::chainstate::stacks::{Error as chain_error, StacksBlockHeader, *, *}; use crate::net::codec::*; use crate::net::http::*; + use crate::net::httpcore::*; use crate::net::rpc::*; use crate::net::test::*; use crate::net::*; @@ -854,20 +818,23 @@ mod test { 1, 0, |client_id, _| { - let mut request = HttpRequestType::GetInfo(HttpRequestMetadata::from_host( + let mut request = StacksHttpRequest::new_for_peer( PeerHost::from_host_port("127.0.0.1".to_string(), 51001), - None, - )); - request.metadata_mut().keep_alive = false; + "GET".to_string(), + "/v2/info".to_string(), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { // should be a PeerInfo let http_response_bytes = http_response_bytes_res.unwrap(); let response = - StacksHttp::parse_response("/v2/info", &http_response_bytes).unwrap(); + StacksHttp::parse_response("GET", "/v2/info", &http_response_bytes).unwrap(); true }, ); @@ -884,20 +851,23 @@ mod test { 10, 0, |client_id, _| { - let mut request = HttpRequestType::GetInfo(HttpRequestMetadata::from_host( + let mut request = StacksHttpRequest::new_for_peer( 
PeerHost::from_host_port("127.0.0.1".to_string(), 51011), - None, - )); - request.metadata_mut().keep_alive = false; + "GET".to_string(), + "/v2/info".to_string(), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { // should be a PeerInfo let http_response_bytes = http_response_bytes_res.unwrap(); let response = - StacksHttp::parse_response("/v2/info", &http_response_bytes).unwrap(); + StacksHttp::parse_response("GET", "/v2/info", &http_response_bytes).unwrap(); true }, ); @@ -930,16 +900,16 @@ mod test { 123, ); - let mut request = HttpRequestType::GetBlock( - HttpRequestMetadata::from_host( - PeerHost::from_host_port("127.0.0.1".to_string(), 51021), - None, - ), - index_block_hash, - ); - request.metadata_mut().keep_alive = false; + let mut request = StacksHttpRequest::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), 51021), + "GET".to_string(), + format!("/v2/blocks/{}", &index_block_hash), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { @@ -955,10 +925,14 @@ mod test { let request_path = format!("/v2/blocks/{}", &index_block_hash); let response = - StacksHttp::parse_response(&request_path, &http_response_bytes).unwrap(); + StacksHttp::parse_response("GET", &request_path, &http_response_bytes).unwrap(); match response { - StacksHttpMessage::Response(HttpResponseType::Block(md, block_data)) => { - block_data == peer_server_block + StacksHttpMessage::Response(stacks_http_response) => { + if let Ok(block) = StacksHttpResponse::decode_block(stacks_http_response) { + block == peer_server_block + } else { + false + } } _ => false, } @@ -994,16 +968,16 @@ mod test { 123, ); - let mut request = HttpRequestType::GetBlock( - HttpRequestMetadata::from_host( - PeerHost::from_host_port("127.0.0.1".to_string(), 51031), - None, - ), - index_block_hash, - ); - request.metadata_mut().keep_alive = false; + let mut request = StacksHttpRequest::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), 51031), + "GET".to_string(), + format!("/v2/blocks/{}", &index_block_hash), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { @@ -1019,10 +993,14 @@ mod test { let request_path = format!("/v2/blocks/{}", &index_block_hash); let response = - StacksHttp::parse_response(&request_path, &http_response_bytes).unwrap(); + StacksHttp::parse_response("GET", &request_path, &http_response_bytes).unwrap(); match response { - StacksHttpMessage::Response(HttpResponseType::Block(md, block_data)) => { - block_data == peer_server_block + StacksHttpMessage::Response(stacks_http_response) => { + if let Ok(block) = StacksHttpResponse::decode_block(stacks_http_response) { + block == peer_server_block + } else { + false + } } _ => false, } @@ -1048,31 +1026,37 @@ mod test { 10, 0, |client_id, _| { - let mut request = HttpRequestType::GetInfo(HttpRequestMetadata::from_host( + let mut request = StacksHttpRequest::new_for_peer( 
PeerHost::from_host_port("127.0.0.1".to_string(), 51041), - None, - )); - request.metadata_mut().keep_alive = false; + "GET".to_string(), + "/v2/info".to_string(), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { match http_response_bytes_res { Ok(http_response_bytes) => { // should be a PeerInfo - let response = - match StacksHttp::parse_response("/v2/info", &http_response_bytes) { - Ok(res) => res, - Err(e) => { - eprintln!( - "Failed to parse /v2/info response from:\n{:?}\n{:?}", - &http_response_bytes, &e - ); - assert!(false); - unreachable!(); - } - }; + let response = match StacksHttp::parse_response( + "GET", + "/v2/info", + &http_response_bytes, + ) { + Ok(res) => res, + Err(e) => { + eprintln!( + "Failed to parse /v2/info response from:\n{:?}\n{:?}", + &http_response_bytes, &e + ); + assert!(false); + unreachable!(); + } + }; *have_success.borrow_mut() = true; true } @@ -1104,13 +1088,16 @@ mod test { 1, 30, |client_id, _| { - let mut request = HttpRequestType::GetInfo(HttpRequestMetadata::from_host( + let mut request = StacksHttpRequest::new_for_peer( PeerHost::from_host_port("127.0.0.1".to_string(), 51051), - None, - )); - request.metadata_mut().keep_alive = false; + "GET".to_string(), + "/v2/info".to_string(), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { @@ -1176,17 +1163,16 @@ mod test { let signed_contract_tx = signer.get_tx().unwrap(); - let mut request = HttpRequestType::PostTransaction( - HttpRequestMetadata::from_host( - PeerHost::from_host_port("127.0.0.1".to_string(), 51061), - None, - ), - signed_contract_tx, - None, - ); - request.metadata_mut().keep_alive = false; + let mut request = StacksHttpRequest::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), 51061), + "POST".to_string(), + "/v2/transactions".to_string(), + HttpRequestContents::new().payload_stacks(&signed_contract_tx), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| { @@ -1278,13 +1264,16 @@ mod test { sleep_ms(15_000); // send a different request - let mut request = HttpRequestType::GetInfo(HttpRequestMetadata::from_host( + let mut request = StacksHttpRequest::new_for_peer( PeerHost::from_host_port("127.0.0.1".to_string(), 51083), - None, - )); - request.metadata_mut().keep_alive = false; + "GET".to_string(), + "/v2/info".to_string(), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, res| true, @@ -1328,16 +1317,16 @@ mod test { 123, ); - let mut request = HttpRequestType::GetBlock( - HttpRequestMetadata::from_host( - PeerHost::from_host_port("127.0.0.1".to_string(), 51071), - None, - ), - index_block_hash, - ); - request.metadata_mut().keep_alive = false; + let mut request = StacksHttpRequest::new_for_peer( + 
PeerHost::from_host_port("127.0.0.1".to_string(), 51071), + "GET".to_string(), + format!("/v2/blocks/{}", index_block_hash), + HttpRequestContents::new(), + ) + .unwrap(); + request.preamble_mut().keep_alive = false; - let request_bytes = StacksHttp::serialize_request(&request).unwrap(); + let request_bytes = request.try_serialize().unwrap(); request_bytes }, |client_id, http_response_bytes_res| true, diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index eba0b05867..376f57b141 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -47,6 +47,7 @@ use clarity::vm::types::{ }; use clarity::vm::ClarityName; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Hash160; @@ -57,7 +58,7 @@ use crate::clarity_vm::clarity::{ClarityReadOnlyConnection, Error as clarity_err use crate::net::stackerdb::{ StackerDBConfig, StackerDBs, STACKERDB_INV_MAX, STACKERDB_MAX_CHUNK_SIZE, }; -use crate::net::{Error as net_error, NeighborAddress, PeerAddress}; +use crate::net::{Error as net_error, NeighborAddress}; const MAX_HINT_REPLICAS: u32 = 128; diff --git a/stackslib/src/net/stream.rs b/stackslib/src/net/stream.rs deleted file mode 100644 index b9f683cfcc..0000000000 --- a/stackslib/src/net/stream.rs +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::io; -use std::io::{Read, Write}; - -use rand::{thread_rng, Rng}; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; - -use crate::burnchains::Txid; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, -}; -use crate::core::mempool::MemPoolDB; -use crate::net::MemPoolSyncData; - -/// Interface for streaming data -pub trait Streamer { - /// Return the offset into the stream at which this Streamer points. This value is equivalent - /// to returning the number of bytes streamed out so far. - fn offset(&self) -> u64; - /// Update the stream's offset pointer by `nw` bytes, so the implementation can keep track of - /// how much data has been sent so far. 
- fn add_bytes(&mut self, nw: u64); -} - -/// Opaque structure for streaming block, microblock, and header data from disk -#[derive(Debug, PartialEq, Clone)] -pub enum StreamCursor { - Block(BlockStreamData), - Microblocks(MicroblockStreamData), - Headers(HeaderStreamData), - MempoolTxs(TxStreamData), -} - -#[derive(Debug, PartialEq, Clone)] -pub struct BlockStreamData { - /// index block hash of the block to download - pub index_block_hash: StacksBlockId, - /// offset into whatever is being read (the blob, or the file in the chunk store) - pub offset: u64, - /// total number of bytes read. - pub total_bytes: u64, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct MicroblockStreamData { - /// index block hash of the block to download - pub index_block_hash: StacksBlockId, - /// microblock blob row id - pub rowid: Option, - /// offset into whatever is being read (the blob, or the file in the chunk store) - pub offset: u64, - /// total number of bytes read. - pub total_bytes: u64, - - /// length prefix - pub num_items_buf: [u8; 4], - pub num_items_ptr: usize, - - /// microblock pointer - pub microblock_hash: BlockHeaderHash, - pub parent_index_block_hash: StacksBlockId, - - /// unconfirmed state - pub seq: u16, - pub unconfirmed: bool, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct HeaderStreamData { - /// index block hash of the block to download - pub index_block_hash: StacksBlockId, - /// offset into whatever is being read (the blob, or the file in the chunk store) - pub offset: u64, - /// total number of bytes read. - pub total_bytes: u64, - /// number of headers requested - pub num_headers: u32, - - /// header buffer data - pub header_bytes: Option>, - pub end_of_stream: bool, - pub corked: bool, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct TxStreamData { - /// Mempool sync data requested - pub tx_query: MemPoolSyncData, - /// last txid loaded - pub last_randomized_txid: Txid, - /// serialized transaction buffer that's being sent - pub tx_buf: Vec, - pub tx_buf_ptr: usize, - /// number of transactions visited in the DB so far - pub num_txs: u64, - /// maximum we can visit in the query - pub max_txs: u64, - /// height of the chain at time of query - pub height: u64, - /// Are we done sending transactions, and are now in the process of sending the trailing page - /// ID? - pub corked: bool, -} - -impl MicroblockStreamData { - /// Stream the number of microblocks, as a SIP-003-encoded 4-byte big-endian integer. - /// Returns the number of bytes written to `fd` on success - /// Returns chainstate errors otherwise. 
- fn stream_count(&mut self, fd: &mut W, count: u64) -> Result { - let mut num_written = 0; - while self.num_items_ptr < self.num_items_buf.len() && num_written < count { - // stream length prefix - test_debug!( - "Length prefix: try to send {:?} (ptr={})", - &self.num_items_buf[self.num_items_ptr..], - self.num_items_ptr - ); - let num_sent = match fd.write(&self.num_items_buf[self.num_items_ptr..]) { - Ok(0) => { - // done (disconnected) - test_debug!("Length prefix: wrote 0 bytes",); - return Ok(num_written); - } - Ok(n) => { - self.num_items_ptr += n; - n as u64 - } - Err(e) => { - if e.kind() == io::ErrorKind::Interrupted { - // EINTR; try again - continue; - } else if e.kind() == io::ErrorKind::WouldBlock - || (cfg!(windows) && e.kind() == io::ErrorKind::TimedOut) - { - // blocked - return Ok(num_written); - } else { - return Err(ChainstateError::WriteError(e)); - } - } - }; - num_written += num_sent; - test_debug!( - "Length prefix: sent {} bytes ({} total)", - num_sent, - num_written - ); - } - Ok(num_written) - } -} - -impl StreamCursor { - /// Create a new stream cursor for a Stacks block - pub fn new_block(index_block_hash: StacksBlockId) -> StreamCursor { - StreamCursor::Block(BlockStreamData { - index_block_hash: index_block_hash, - offset: 0, - total_bytes: 0, - }) - } - - /// Create a new stream cursor for a Stacks microblock stream that has been confirmed. - /// Returns an error if the identified microblock stream does not exist. - pub fn new_microblock_confirmed( - chainstate: &StacksChainState, - tail_index_microblock_hash: StacksBlockId, - ) -> Result { - // look up parent - let mblock_info = StacksChainState::load_staging_microblock_info_indexed( - &chainstate.db(), - &tail_index_microblock_hash, - )? - .ok_or(ChainstateError::NoSuchBlockError)?; - - let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( - &mblock_info.consensus_hash, - &mblock_info.anchored_block_hash, - ); - - // need to send out the consensus_serialize()'ed array length before sending microblocks. - // this is exactly what seq tells us, though. - let num_items_buf = ((mblock_info.sequence as u32) + 1).to_be_bytes(); - - Ok(StreamCursor::Microblocks(MicroblockStreamData { - index_block_hash: StacksBlockId([0u8; 32]), - rowid: None, - offset: 0, - total_bytes: 0, - microblock_hash: mblock_info.microblock_hash, - parent_index_block_hash: parent_index_block_hash, - seq: mblock_info.sequence, - unconfirmed: false, - num_items_buf: num_items_buf, - num_items_ptr: 0, - })) - } - - /// Create a new stream cursor for a Stacks microblock stream that is unconfirmed. - /// Returns an error if the parent Stacks block does not exist, or if the sequence number is - /// too far ahead of the unconfirmed stream's tail. - pub fn new_microblock_unconfirmed( - chainstate: &StacksChainState, - anchored_index_block_hash: StacksBlockId, - seq: u16, - ) -> Result { - let mblock_info = StacksChainState::load_next_descendant_microblock( - &chainstate.db(), - &anchored_index_block_hash, - seq, - )? 
- .ok_or(ChainstateError::NoSuchBlockError)?; - - Ok(StreamCursor::Microblocks(MicroblockStreamData { - index_block_hash: anchored_index_block_hash.clone(), - rowid: None, - offset: 0, - total_bytes: 0, - microblock_hash: mblock_info.block_hash(), - parent_index_block_hash: anchored_index_block_hash, - seq: seq, - unconfirmed: true, - num_items_buf: [0u8; 4], - num_items_ptr: 4, // stops us from trying to send a length prefix - })) - } - - pub fn new_headers( - chainstate: &StacksChainState, - tip: &StacksBlockId, - num_headers_requested: u32, - ) -> Result { - let header_info = StacksChainState::load_staging_block_info(chainstate.db(), tip)? - .ok_or(ChainstateError::NoSuchBlockError)?; - - let num_headers = if header_info.height < (num_headers_requested as u64) { - header_info.height as u32 - } else { - num_headers_requested - }; - - test_debug!("Request for {} headers from {}", num_headers, tip); - - Ok(StreamCursor::Headers(HeaderStreamData { - index_block_hash: tip.clone(), - offset: 0, - total_bytes: 0, - num_headers: num_headers, - header_bytes: None, - end_of_stream: false, - corked: false, - })) - } - - /// Create a new stream cursor for mempool transactions - pub fn new_tx_stream( - tx_query: MemPoolSyncData, - max_txs: u64, - height: u64, - page_id_opt: Option, - ) -> StreamCursor { - let last_randomized_txid = page_id_opt.unwrap_or_else(|| { - let random_bytes = thread_rng().gen::<[u8; 32]>(); - Txid(random_bytes) - }); - - StreamCursor::MempoolTxs(TxStreamData { - tx_query, - last_randomized_txid: last_randomized_txid, - tx_buf: vec![], - tx_buf_ptr: 0, - num_txs: 0, - max_txs: max_txs, - height: height, - corked: false, - }) - } - - /// Write a single byte to the given `fd`. - /// Non-blocking -- masks EINTR by returning 0. - fn stream_one_byte(fd: &mut W, b: u8) -> Result { - loop { - match fd.write(&[b]) { - Ok(0) => { - // done (disconnected) - return Ok(0); - } - Ok(n) => { - return Ok(n as u64); - } - Err(e) => { - if e.kind() == io::ErrorKind::Interrupted { - // EINTR; try again - continue; - } else if e.kind() == io::ErrorKind::WouldBlock - || (cfg!(windows) && e.kind() == io::ErrorKind::TimedOut) - { - // blocked - return Ok(0); - } else { - return Err(ChainstateError::WriteError(e)); - } - } - } - } - } - - /// Get the offset into the stream at which the cursor points - pub fn get_offset(&self) -> u64 { - match self { - StreamCursor::Block(ref stream) => stream.offset(), - StreamCursor::Microblocks(ref stream) => stream.offset(), - StreamCursor::Headers(ref stream) => stream.offset(), - // no-op for mempool txs - StreamCursor::MempoolTxs(..) => 0, - } - } - - /// Update the cursor's offset by nw - pub fn add_more_bytes(&mut self, nw: u64) { - match self { - StreamCursor::Block(ref mut stream) => stream.add_bytes(nw), - StreamCursor::Microblocks(ref mut stream) => stream.add_bytes(nw), - StreamCursor::Headers(ref mut stream) => stream.add_bytes(nw), - // no-op fo mempool txs - StreamCursor::MempoolTxs(..) => (), - } - } - - /// Stream chainstate data into the given `fd`. - /// Depending on what StreamCursor variant we are, the data may come from the chainstate or - /// mempool. - /// Returns the number of bytes streamed on success. - /// Return an error on I/O errors, or if this cursor does not represent chainstate data. 
- pub fn stream_to( - &mut self, - mempool: &MemPoolDB, - chainstate: &mut StacksChainState, - fd: &mut W, - count: u64, - ) -> Result { - match self { - StreamCursor::Microblocks(ref mut stream) => { - let mut num_written = 0; - if !stream.unconfirmed { - // Confirmed microblocks are represented as a consensus-encoded vector of - // microblocks, in reverse sequence order. - // Write 4-byte length prefix first - num_written += stream.stream_count(fd, count)?; - StacksChainState::stream_microblocks_confirmed(&chainstate, fd, stream, count) - .and_then(|bytes_sent| Ok(bytes_sent + num_written)) - } else { - StacksChainState::stream_microblocks_unconfirmed(&chainstate, fd, stream, count) - .and_then(|bytes_sent| Ok(bytes_sent + num_written)) - } - } - StreamCursor::MempoolTxs(ref mut tx_stream) => mempool.stream_txs(fd, tx_stream, count), - StreamCursor::Headers(ref mut stream) => { - // headers are a JSON array. Start by writing '[', then write each header, and - // then write ']' - let mut num_written = 0; - if stream.total_bytes == 0 { - test_debug!("Opening header stream"); - let byte_written = StreamCursor::stream_one_byte(fd, '[' as u8)?; - num_written += byte_written; - stream.total_bytes += byte_written; - } - if stream.total_bytes > 0 { - let mut sent = chainstate.stream_headers(fd, stream, count)?; - - if stream.end_of_stream && !stream.corked { - // end of stream; cork it - test_debug!("Corking header stream"); - let byte_written = StreamCursor::stream_one_byte(fd, ']' as u8)?; - if byte_written > 0 { - sent += byte_written; - stream.total_bytes += byte_written; - stream.corked = true; - } - } - num_written += sent; - } - Ok(num_written) - } - StreamCursor::Block(ref mut stream) => chainstate.stream_block(fd, stream, count), - } - } -} - -impl Streamer for StreamCursor { - fn offset(&self) -> u64 { - self.get_offset() - } - fn add_bytes(&mut self, nw: u64) { - self.add_more_bytes(nw) - } -} - -impl Streamer for HeaderStreamData { - fn offset(&self) -> u64 { - self.offset - } - fn add_bytes(&mut self, nw: u64) { - self.offset += nw; - self.total_bytes += nw; - } -} - -impl Streamer for BlockStreamData { - fn offset(&self) -> u64 { - self.offset - } - fn add_bytes(&mut self, nw: u64) { - self.offset += nw; - self.total_bytes += nw; - } -} - -impl Streamer for MicroblockStreamData { - fn offset(&self) -> u64 { - self.offset - } - fn add_bytes(&mut self, nw: u64) { - self.offset += nw; - self.total_bytes += nw; - } -} diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs new file mode 100644 index 0000000000..8cd42f45b7 --- /dev/null +++ b/stackslib/src/net/tests/httpcore.rs @@ -0,0 +1,1052 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::io::Write; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::str; + +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::chunked_encoding::{ + HttpChunkedTransferWriter, HttpChunkedTransferWriterState, +}; +use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::db::blocks::test::make_sample_microblock_stream; +use crate::chainstate::stacks::test::make_codec_test_block; +use crate::chainstate::stacks::{ + StacksTransaction, TokenTransferMemo, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionVersion, +}; +use crate::net::api::getneighbors::{RPCNeighbor, RPCNeighborsInfo}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{ + http_error_from_code_and_text, http_reason, HttpContentType, HttpErrorResponse, + HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePreamble, + HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpMessage, + StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::rpc::ConversationHttp; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_parse_stacks_http_preamble_request_err() { + let tests = vec![ + ( + "GET /foo HTTP/1.1\r\n", + "Not enough bytes to form a HTTP request or response", + ), + ( + "GET /foo HTTP/1.1\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nFoo: Bar\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost:", + "Not enough bytes to form a HTTP request or response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: foo:80\r\nHost: bar:80\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: localhost:6270\r\nfoo: \u{2764}\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "Get /foo HTTP/1.1\r\nHost: localhost:666666\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ( + "GET /foo HTTP/1.1\r\nHost: localhost:8080\r\nConnection: foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response", + ), + ]; + + for (data, errstr) in tests.iter() { + let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); + test_debug!("Expect '{}'", errstr); + assert!(sres.is_err(), "{:?}", &sres); + assert!( + sres.as_ref() + .unwrap_err() + .to_string() + .find(errstr) + .is_some(), + "{:?}", + &sres + ); + } +} + +#[test] +fn test_parse_stacks_http_preamble_response_err() { + let tests = vec![ + ("HTTP/1.1 200", + "Not enough bytes to form a HTTP request or response"), + ("HTTP/1.1 200 OK\r\nfoo: \u{2764}\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nfoo: bar\r\nfoo: bar\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: 
text/plain\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nConnection: foo\r\n\r\n", + "Failed to decode HTTP request or HTTP response"), + ]; + + for (data, errstr) in tests.iter() { + let sres = StacksHttpPreamble::consensus_deserialize(&mut data.as_bytes()); + test_debug!("Expect '{}', got: {:?}", errstr, &sres); + assert!(sres.is_err(), "{:?}", &sres); + assert!( + sres.as_ref() + .unwrap_err() + .to_string() + .find(errstr) + .is_some(), + "{:?}", + &sres + ); + } +} + +fn make_test_transaction() -> StacksTransaction { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let addr = auth.origin().address_testnet(); + let recv_addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + + let mut tx_stx_transfer = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::TokenTransfer( + recv_addr.clone().into(), + 123, + TokenTransferMemo([0u8; 34]), + ), + ); + tx_stx_transfer.chain_id = 0x80000000; + tx_stx_transfer.post_condition_mode = TransactionPostConditionMode::Allow; + tx_stx_transfer.set_tx_fee(0); + tx_stx_transfer +} + +#[test] +fn test_http_request_type_codec() { + let convo = ConversationHttp::new( + "127.0.0.1:12345".parse().unwrap(), + None, + PeerHost::DNS("localhost".to_string(), 12345), + &ConnectionOptions::default(), + 100, + 32, + ); + let tx = make_test_transaction(); + let tx_body = tx.serialize_to_vec(); + + let fixtures = vec![ + ( + StacksHttpRequest::new_getneighbors(convo.get_peer_host()), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/neighbors".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getinfo(convo.get_peer_host(), Some(1234)), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/info".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getinfo(convo.get_peer_host(), None), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/info".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getpoxinfo(convo.get_peer_host(), TipRequest::UseLatestUnconfirmedTip), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/pox?tip=latest".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getpoxinfo(convo.get_peer_host(), TipRequest::UseLatestAnchoredTip), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/pox".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getheaders(convo.get_peer_host(), 2100, TipRequest::SpecificTip(StacksBlockId([0x80; 32]))), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + "/v2/headers/2100?tip=8080808080808080808080808080808080808080808080808080808080808080".to_string(), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getblock(convo.get_peer_host(), StacksBlockId([2u8; 32])), + HttpRequestPreamble::new( + HttpVersion::Http11, + 
"GET".to_string(), + format!("/v2/blocks/{}", StacksBlockId([2u8; 32]).to_hex()), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_getmicroblocks_indexed(convo.get_peer_host(), StacksBlockId([3u8; 32])), + HttpRequestPreamble::new( + HttpVersion::Http11, + "GET".to_string(), + format!("/v2/microblocks/{}", StacksBlockId([3u8; 32]).to_hex()), + "localhost".to_string(), + 12345, + true, + ), + vec![] + ), + ( + StacksHttpRequest::new_post_transaction(convo.get_peer_host(), tx.clone()), + HttpRequestPreamble::new( + HttpVersion::Http11, + "POST".to_string(), + "/v2/transactions".to_string(), + "localhost".to_string(), + 12345, + true, + ) + .with_content_type(HttpContentType::Bytes) + .with_content_length(tx.serialize_to_vec().len() as u32), + tx_body + ) + ]; + + for (mut test, mut expected_http_preamble, expected_http_body) in fixtures.into_iter() { + if test.preamble().get_request_id().is_none() { + test.preamble_mut().set_request_id(123); + } + expected_http_preamble.set_request_id(test.preamble().get_request_id().unwrap_or(0)); + if let Some(h) = test.preamble().get_canonical_stacks_tip_height() { + expected_http_preamble.set_canonical_stacks_tip_height(Some(h)); + } + + let mut expected_bytes = vec![]; + expected_http_preamble + .consensus_serialize(&mut expected_bytes) + .unwrap(); + + test_debug!( + "Expected preamble:\n{}", + str::from_utf8(&expected_bytes).unwrap() + ); + + if expected_http_body.len() > 0 { + expected_http_preamble.set_content_type(HttpContentType::Bytes); + expected_http_preamble.set_content_length(expected_http_body.len() as u32) + } + + if expected_http_preamble.content_type.is_none() + || expected_http_preamble.content_type != Some(HttpContentType::Bytes) + { + test_debug!( + "Expected http body:\n{}", + str::from_utf8(&expected_http_body).unwrap() + ); + } else { + test_debug!("Expected http body (hex):\n{}", to_hex(&expected_http_body)); + } + + expected_bytes.append(&mut expected_http_body.clone()); + + let mut bytes = vec![]; + let mut http = StacksHttp::new( + "127.0.0.1:12345".parse().unwrap(), + &ConnectionOptions::default(), + ); + http.write_message(&mut bytes, &StacksHttpMessage::Request(test.clone())) + .unwrap(); + + assert_eq!(bytes, expected_bytes); + } +} + +#[test] +fn test_http_request_type_codec_err() { + let bad_content_lengths = vec![ + "GET /v2/neighbors HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/info HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/pox HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/headers/2100 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/blocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "GET /v2/microblocks/1111111111111111111111111111111111111111111111111111111111111111 HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 0\r\n\r\n", + ]; + for bad_content_length in bad_content_lengths { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + let (preamble, offset) = http.read_preamble(bad_content_length.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, 
&bad_content_length.as_bytes()[offset..]); + + if let Ok(http_error) = e { + debug!("Got HTTP error: {:?}", &http_error); + + let error_str = format!("{:?}", &http_error); + assert!(error_str.find("-length body").is_some()); + assert!(error_str.find("status_code: 400").is_some()); + } else { + panic!("Expected error"); + } + } + + let bad_content_types = vec![ + "POST /v2/transactions HTTP/1.1\r\nUser-Agent: stacks/2.0\r\nHost: bad:123\r\nContent-Length: 1\r\n\r\nb", + ]; + for bad_content_type in bad_content_types { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + let (preamble, offset) = http.read_preamble(bad_content_type.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, &bad_content_type.as_bytes()[offset..]); + + if let Ok(http_error) = e { + debug!("Got HTTP error: {:?}", &http_error); + + let error_str = format!("{:?}", &http_error); + assert!(error_str.find("Missing Content-Type").is_some()); + assert!(error_str.find("status_code: 400").is_some()); + } else { + panic!("Expected error"); + } + } +} + +#[test] +fn test_http_response_type_codec() { + let test_neighbors_info = RPCNeighborsInfo { + bootstrap: vec![], + sample: vec![ + RPCNeighbor { + network_id: 1, + peer_version: 2, + addrbytes: PeerAddress([ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, + 0x0d, 0x0e, 0x0f, + ]), + port: 12345, + public_key_hash: Hash160::from_bytes( + &hex_bytes("1111111111111111111111111111111111111111").unwrap(), + ) + .unwrap(), + authenticated: true, + stackerdbs: Some(vec![]), + }, + RPCNeighbor { + network_id: 3, + peer_version: 4, + addrbytes: PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, + 0x02, 0x03, 0x04, + ]), + port: 23456, + public_key_hash: Hash160::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + authenticated: false, + stackerdbs: Some(vec![]), + }, + ], + inbound: vec![], + outbound: vec![], + }; + + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let test_block_info = make_codec_test_block(5); + let test_microblock_info = make_sample_microblock_stream(&privk, &test_block_info.block_hash()); + + let mut test_block_info_bytes = vec![]; + test_block_info + .consensus_serialize(&mut test_block_info_bytes) + .unwrap(); + + let mut test_microblock_info_bytes = vec![]; + test_microblock_info + .consensus_serialize(&mut test_microblock_info_bytes) + .unwrap(); + + let tests = vec![ + // length is known + ( + StacksHttpResponse::new_getneighbors(test_neighbors_info.clone(), true), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_getblock(test_block_info.clone(), true), + "GET".to_string(), + format!("/v2/blocks/{}", test_block_info.block_hash().to_hex()), + ), + ( + StacksHttpResponse::new_getmicroblocks_indexed(test_microblock_info.clone(), true), + "GET".to_string(), + format!( + "/v2/microblocks/{}", + test_microblock_info[0].block_hash().to_hex() + ), + ), + ( + StacksHttpResponse::new_posttransaction(Txid([0x01; 32]), true), + "POST".to_string(), + "/v2/transactions".to_string(), + ), + // length is unknown + ( + StacksHttpResponse::new_getneighbors(test_neighbors_info.clone(), false), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_getblock(test_block_info.clone(), false), + "GET".to_string(), + format!("/v2/blocks/{}", 
test_block_info.block_hash().to_hex()), + ), + ( + StacksHttpResponse::new_getmicroblocks_indexed(test_microblock_info.clone(), false), + "GET".to_string(), + format!( + "/v2/microblocks/{}", + test_microblock_info[0].block_hash().to_hex() + ), + ), + ( + StacksHttpResponse::new_posttransaction(Txid([0x01; 32]), false), + "POST".to_string(), + "/v2/transactions".to_string(), + ), + // errors without error messages + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(400, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(401, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(402, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(403, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(404, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(500, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(503, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(502, "".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + // errors with specific messages + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(400, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(401, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(402, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(403, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(404, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(500, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(503, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ( + StacksHttpResponse::new_empty_error(&*http_error_from_code_and_text(502, "foo".into())), + "GET".to_string(), + "/v2/neighbors".to_string(), + ), + ]; + let expected_http_preambles = vec![ + // length is known + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some(serde_json::to_string(&test_neighbors_info).unwrap().len() as u32), + HttpContentType::JSON, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some(test_block_info.serialize_to_vec().len() as u32), + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + Some(test_microblock_info_bytes.len() as u32), + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + 
Some((Txid([0x01; 32]).to_hex().len() + 2) as u32), + HttpContentType::JSON, + true, + ), + // length is unknown + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::JSON, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::Bytes, + true, + ), + HttpResponsePreamble::new( + HttpVersion::Http11, + 200, + "OK".to_string(), + None, + HttpContentType::JSON, + true, + ), + // errors + HttpResponsePreamble::error_text(400, http_reason(400), ""), + HttpResponsePreamble::error_text(401, http_reason(401), ""), + HttpResponsePreamble::error_text(402, http_reason(402), ""), + HttpResponsePreamble::error_text(403, http_reason(403), ""), + HttpResponsePreamble::error_text(404, http_reason(404), ""), + HttpResponsePreamble::error_text(500, http_reason(500), ""), + HttpResponsePreamble::error_text(503, http_reason(503), ""), + // generic error + HttpResponsePreamble::error_text(502, http_reason(502), ""), + // errors with messages + HttpResponsePreamble::error_text(400, http_reason(400), "foo"), + HttpResponsePreamble::error_text(401, http_reason(401), "foo"), + HttpResponsePreamble::error_text(402, http_reason(402), "foo"), + HttpResponsePreamble::error_text(403, http_reason(403), "foo"), + HttpResponsePreamble::error_text(404, http_reason(404), "foo"), + HttpResponsePreamble::error_text(500, http_reason(500), "foo"), + HttpResponsePreamble::error_text(503, http_reason(503), "foo"), + // generic error + HttpResponsePreamble::error_text(502, http_reason(502), "foo"), + ]; + + let expected_http_bodies = vec![ + // with content-length + serde_json::to_string(&test_neighbors_info) + .unwrap() + .as_bytes() + .to_vec(), + test_block_info.serialize_to_vec(), + test_microblock_info_bytes.clone(), + Txid([0x1; 32]).to_hex().as_bytes().to_vec(), + // with transfer-encoding: chunked + serde_json::to_string(&test_neighbors_info) + .unwrap() + .as_bytes() + .to_vec(), + test_block_info.serialize_to_vec(), + test_microblock_info_bytes.clone(), + Txid([0x1; 32]).to_hex().as_bytes().to_vec(), + // errors + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + // errors with messages + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + "foo".as_bytes().to_vec(), + ]; + + for ((test, request_verb, request_path), (expected_http_preamble, _expected_http_body)) in + tests.iter().zip( + expected_http_preambles + .iter() + .zip(expected_http_bodies.iter()), + ) + { + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + let mut bytes = vec![]; + test_debug!("write body:\n{:?}\n", test); + + http.write_message(&mut bytes, &StacksHttpMessage::Response((*test).clone())) + .unwrap(); + + http.set_response_handler(request_verb, request_path); + let (mut preamble, offset) = match http.read_preamble(&bytes) { + Ok((p, o)) => (p, o), + Err(e) => { + test_debug!("first 4096 bytes:\n{:?}\n", &bytes[0..].to_vec()); + test_debug!("error: {:?}", &e); + assert!(false); + unreachable!(); + } + }; + + test_debug!( + "{} {}: read preamble of {} bytes\n{:?}\n", + request_verb, + request_path, + offset, + preamble + ); + + let (mut message, _total_len) = if 
expected_http_preamble.is_chunked() { + let (msg_opt, len) = http + .stream_payload(&preamble, &mut &bytes[offset..]) + .unwrap(); + (msg_opt.unwrap().0, len) + } else { + http.read_payload(&preamble, &bytes[offset..]).unwrap() + }; + + test_debug!("got message\n{:?}\n", &message); + + // check everything in the parsed preamble except for the extra headers + match preamble { + StacksHttpPreamble::Response(ref mut req) => { + assert_eq!(req.headers.len(), 5); + assert!(req.headers.get("access-control-allow-headers").is_some()); + assert!(req.headers.get("access-control-allow-methods").is_some()); + assert!(req.headers.get("access-control-allow-origin").is_some()); + assert!(req.headers.get("server").is_some()); + assert!(req.headers.get("date").is_some()); + req.headers.clear(); + } + StacksHttpPreamble::Request(_) => { + panic!("parsed a request"); + } + } + + assert_eq!( + preamble, + StacksHttpPreamble::Response((*expected_http_preamble).clone()) + ); + + // note that message's headers contain cors headers and the like, which we don't synthesize + // here + match message { + StacksHttpMessage::Response(ref mut response) => response.clear_headers(), + _ => { + panic!("Not an HTTP response"); + } + } + assert_eq!(message, StacksHttpMessage::Response((*test).clone())); + assert_eq!(http.num_pending(), 0); + } +} + +#[test] +fn test_http_response_type_codec_err() { + let request_paths = vec![ + ( + "GET", + "/v2/blocks/1111111111111111111111111111111111111111111111111111111111111111", + ), + ("POST", "/v2/transactions"), + ("GET", "/v2/neighbors"), + ("GET", "/v2/neighbors"), + ("GET", "/v2/neighbors"), + ]; + let bad_request_payloads = vec![ + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 2\r\n\r\nab", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 4\r\n\r\n\"ab\"", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\n{", + "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nContent-length: 1\r\n\r\na", + "HTTP/1.1 400 Bad Request\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/octet-stream\r\nContent-length: 2\r\n\r\n{}", + ]; + let expected_bad_request_payload_errors = vec![ + "Invalid content-type", + "bad length 2 for hex string", + "Not enough bytes", + "Failed to parse", + "expected text/plain", + ]; + for (test, (expected_error, (request_verb, request_path))) in bad_request_payloads.iter().zip( + expected_bad_request_payload_errors + .iter() + .zip(request_paths), + ) { + test_debug!( + "Expect failure:\n{}\nExpected error: '{}'", + test, + expected_error + ); + + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + http.set_response_handler(request_verb, request_path); + + let (preamble, offset) = http.read_preamble(test.as_bytes()).unwrap(); + let e = http.read_payload(&preamble, &test.as_bytes()[offset..]); + let errstr = format!("{:?}", &e); + assert!(e.is_err()); + assert!( + e.unwrap_err().to_string().find(expected_error).is_some(), + "{}", + errstr + ); + } +} + +#[test] +fn test_http_duplicate_concurrent_streamed_response_fails() { + // do not permit multiple in-flight chunk-encoded HTTP responses with the same request ID. 
+ let valid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n37\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; + let invalid_neighbors_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n10\r\nxxxxxxxxxxxxxxxx\r\n0\r\n\r\n"; + let invalid_chunked_response = "HTTP/1.1 200 OK\r\nServer: stacks/v2.0\r\nX-Request-Id: 123\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n38\r\n{\"bootstrap\":[],\"sample\":[],\"inbound\":[],\"outbound\":[]}\r\n0\r\n\r\n"; + + let mut http = StacksHttp::new( + "127.0.0.1:20443".parse().unwrap(), + &ConnectionOptions::default(), + ); + + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(valid_neighbors_response.as_bytes()) + .unwrap(); + assert_eq!(http.num_pending(), 1); + + // can't do this twice + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // finish reading the body + let msg = http + .stream_payload( + &preamble, + &mut &valid_neighbors_response.as_bytes()[offset..], + ) + .unwrap(); + match msg { + (Some((StacksHttpMessage::Response(response), _)), _) => assert_eq!( + response.decode_rpc_neighbors().unwrap(), + RPCNeighborsInfo { + bootstrap: vec![], + sample: vec![], + inbound: vec![], + outbound: vec![] + } + ), + _ => { + error!("Got {:?}", &msg); + assert!(false); + } + } + assert_eq!(http.num_pending(), 0); + + // can read the preamble again, but only once + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(invalid_neighbors_response.as_bytes()) + .unwrap(); + assert_eq!(http.num_pending(), 1); + + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // reading a corrupt body unlocks the ability to read the preamble again + let res = http.stream_payload( + &preamble, + &mut &invalid_neighbors_response.as_bytes()[offset..], + ); + assert!(res.unwrap_err().to_string().find("JSON").is_some()); + assert_eq!(http.num_pending(), 0); + + // can read the preamble again, but only once + http.set_response_handler("GET", "/v2/neighbors"); + let (preamble, offset) = http + .read_preamble(invalid_chunked_response.as_bytes()) + .unwrap(); + + http.set_response_handler("GET", "/v2/neighbors"); + let res = http.read_preamble(valid_neighbors_response.as_bytes()); + + assert!(res.is_err()); + assert!(res.unwrap_err().to_string().find("in progress").is_some()); + + // reading a corrupt chunk stream unlocks the ability to read the preamble again + let res = http.stream_payload( + &preamble, + &mut &invalid_chunked_response.as_bytes()[offset..], + ); + assert!(res + .unwrap_err() + .to_string() + .find("Invalid chunk trailer") + .is_some()); + assert_eq!(http.num_pending(), 0); +} + +#[test] +fn test_http_parse_proof_tip_query() { + let query_txt = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .tip_request(); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + 
StacksBlockId::from_hex( + "7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392" + ) + .unwrap() + ), + _ => panic!(), + } + + // last parseable tip is taken + let query_txt_dup = "tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_dup)) + .tip_request(); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1" + ) + .unwrap() + ), + _ => panic!(), + } + + // last parseable tip is taken + let query_txt_dup = "tip=bad&tip=7070f213d719143d6045e08fd80f85014a161f8bbd3a42d1251576740826a392&tip=03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_dup)) + .tip_request(); + match tip_req { + TipRequest::SpecificTip(tip) => assert_eq!( + tip, + StacksBlockId::from_hex( + "03e26bd68a8722f8b3861e2058edcafde094ad059e152754986c3573306698f1" + ) + .unwrap() + ), + _ => panic!(), + } + + // tip can be skipped + let query_txt_bad = "tip=bad"; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_bad)) + .tip_request(); + assert_eq!(tip_req, TipRequest::UseLatestAnchoredTip); + + // tip can be skipped + let query_txt_none = "tip="; + let tip_req = HttpRequestContents::new() + .query_string(Some(query_txt_none)) + .tip_request(); + assert_eq!(tip_req, TipRequest::UseLatestAnchoredTip); +} + +#[test] +fn test_http_parse_proof_request_query() { + let query_txt = ""; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=0"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); + + let query_txt = "proof=0&proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); + + let query_txt = "proof=1&proof=0"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=oops"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(!proof_req); + + let query_txt = "proof=oops&proof=1"; + let proof_req = HttpRequestContents::new() + .query_string(Some(query_txt)) + .get_with_proof(); + assert!(proof_req); +} diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a5cf528989..71aff78ed0 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -14,5 +14,5 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+pub mod httpcore; pub mod neighbors; -pub mod stream; diff --git a/stackslib/src/net/tests/stream.rs b/stackslib/src/net/tests/stream.rs deleted file mode 100644 index 31649fbf91..0000000000 --- a/stackslib/src/net/tests/stream.rs +++ /dev/null @@ -1,699 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, StacksPrivateKey}; - -use crate::chainstate::stacks::db::blocks::test::*; -use crate::chainstate::stacks::db::test::instantiate_chainstate; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, -}; -use crate::core::MemPoolDB; -use crate::net::{ExtendedStacksHeader, StreamCursor}; -use crate::util_lib::db::DBConn; - -fn stream_one_header_to_vec( - blocks_conn: &DBConn, - blocks_path: &str, - stream: &mut StreamCursor, - count: u64, -) -> Result, chainstate_error> { - if let StreamCursor::Headers(ref mut stream) = stream { - let mut bytes = vec![]; - StacksChainState::stream_one_header(blocks_conn, blocks_path, &mut bytes, stream, count) - .map(|nr| { - assert_eq!(bytes.len(), nr as usize); - - // truncate trailing ',' if it exists - let len = bytes.len(); - if len > 0 { - if bytes[len - 1] == ',' as u8 { - let _ = bytes.pop(); - } - } - bytes - }) - } else { - panic!("not a header stream"); - } -} - -fn stream_one_staging_microblock_to_vec( - blocks_conn: &DBConn, - stream: &mut StreamCursor, - count: u64, -) -> Result, chainstate_error> { - if let StreamCursor::Microblocks(ref mut stream) = stream { - let mut bytes = vec![]; - StacksChainState::stream_one_microblock(blocks_conn, &mut bytes, stream, count).map(|nr| { - assert_eq!(bytes.len(), nr as usize); - bytes - }) - } else { - panic!("not a microblock stream"); - } -} - -fn stream_chunk_to_vec( - blocks_path: &str, - stream: &mut StreamCursor, - count: u64, -) -> Result, chainstate_error> { - if let StreamCursor::Block(ref mut stream) = stream { - let mut bytes = vec![]; - StacksChainState::stream_data_from_chunk_store(blocks_path, &mut bytes, stream, count).map( - |nr| { - assert_eq!(bytes.len(), nr as usize); - bytes - }, - ) - } else { - panic!("not a block stream"); - } -} - -fn stream_headers_to_vec( - chainstate: &mut StacksChainState, - stream: &mut StreamCursor, - count: u64, -) -> Result, chainstate_error> { - let mempool = MemPoolDB::open_test( - chainstate.mainnet, - chainstate.chain_id, - &chainstate.root_path, - ) - .unwrap(); - let mut bytes = vec![]; - stream - .stream_to(&mempool, chainstate, &mut bytes, count) - .map(|nr| { - assert_eq!(bytes.len(), nr as usize); - bytes - }) -} - -fn stream_unconfirmed_microblocks_to_vec( - chainstate: &mut StacksChainState, - stream: &mut 
StreamCursor, - count: u64, -) -> Result, chainstate_error> { - let mempool = MemPoolDB::open_test( - chainstate.mainnet, - chainstate.chain_id, - &chainstate.root_path, - ) - .unwrap(); - let mut bytes = vec![]; - stream - .stream_to(&mempool, chainstate, &mut bytes, count) - .map(|nr| { - assert_eq!(bytes.len(), nr as usize); - bytes - }) -} - -fn stream_confirmed_microblocks_to_vec( - chainstate: &mut StacksChainState, - stream: &mut StreamCursor, - count: u64, -) -> Result, chainstate_error> { - let mempool = MemPoolDB::open_test( - chainstate.mainnet, - chainstate.chain_id, - &chainstate.root_path, - ) - .unwrap(); - let mut bytes = vec![]; - stream - .stream_to(&mempool, chainstate, &mut bytes, count) - .map(|nr| { - assert_eq!(bytes.len(), nr as usize); - bytes - }) -} - -#[test] -fn stacks_db_stream_blocks() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let block = make_16k_block(&privk); - - let consensus_hash = ConsensusHash([2u8; 20]); - let parent_consensus_hash = ConsensusHash([1u8; 20]); - let index_block_header = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); - - // can't stream a non-existant block - let mut stream = StreamCursor::new_block(index_block_header.clone()); - assert!(stream_chunk_to_vec(&chainstate.blocks_path, &mut stream, 123).is_err()); - - // stream unmodified - let stream_2 = StreamCursor::new_block(index_block_header.clone()); - assert_eq!(stream, stream_2); - - // store block to staging - store_staging_block( - &mut chainstate, - &consensus_hash, - &block, - &parent_consensus_hash, - 1, - 2, - ); - - // stream it back - let mut all_block_bytes = vec![]; - loop { - let mut next_bytes = stream_chunk_to_vec(&chainstate.blocks_path, &mut stream, 16).unwrap(); - if next_bytes.len() == 0 { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - all_block_bytes.len() - ); - all_block_bytes.append(&mut next_bytes); - } - - // should decode back into the block - let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); - assert_eq!(staging_block, block); - - // accept it - set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); - - // can still stream it - let mut stream = StreamCursor::new_block(index_block_header.clone()); - - // stream from chunk store - let mut all_block_bytes = vec![]; - loop { - let mut next_bytes = stream_chunk_to_vec(&chainstate.blocks_path, &mut stream, 16).unwrap(); - if next_bytes.len() == 0 { - break; - } - test_debug!( - "Got {} more bytes from chunkstore; add to {} total", - next_bytes.len(), - all_block_bytes.len() - ); - all_block_bytes.append(&mut next_bytes); - } - - // should decode back into the block - let staging_block = StacksBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); - assert_eq!(staging_block, block); -} - -#[test] -fn stacks_db_stream_headers() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let mut blocks: Vec = vec![]; - let mut blocks_index_hashes: Vec = vec![]; - - // make a linear stream - for i in 0..32 { - let mut block = make_empty_coinbase_block(&privk); - - if i == 0 { - block.header.total_work.work = 1; - 
block.header.total_work.burn = 1; - } - if i > 0 { - block.header.parent_block = blocks.get(i - 1).unwrap().block_hash(); - block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1; - block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 1; - } - - let consensus_hash = ConsensusHash([((i + 1) as u8); 20]); - let parent_consensus_hash = ConsensusHash([(i as u8); 20]); - - store_staging_block( - &mut chainstate, - &consensus_hash, - &block, - &parent_consensus_hash, - i as u64, - i as u64, - ); - - blocks_index_hashes.push(StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash(), - )); - blocks.push(block); - } - - let mut blocks_fork = blocks[0..16].to_vec(); - let mut blocks_fork_index_hashes = blocks_index_hashes[0..16].to_vec(); - - // make a stream that branches off - for i in 16..32 { - let mut block = make_empty_coinbase_block(&privk); - - if i == 16 { - block.header.parent_block = blocks.get(i - 1).unwrap().block_hash(); - block.header.total_work.work = blocks.get(i - 1).unwrap().header.total_work.work + 1; - block.header.total_work.burn = blocks.get(i - 1).unwrap().header.total_work.burn + 2; - } else { - block.header.parent_block = blocks_fork.get(i - 1).unwrap().block_hash(); - block.header.total_work.work = - blocks_fork.get(i - 1).unwrap().header.total_work.work + 1; - block.header.total_work.burn = - blocks_fork.get(i - 1).unwrap().header.total_work.burn + 2; - } - - let consensus_hash = ConsensusHash([((i + 1) as u8) | 0x80; 20]); - let parent_consensus_hash = if i == 16 { - ConsensusHash([(i as u8); 20]) - } else { - ConsensusHash([(i as u8) | 0x80; 20]) - }; - - store_staging_block( - &mut chainstate, - &consensus_hash, - &block, - &parent_consensus_hash, - i as u64, - i as u64, - ); - - blocks_fork_index_hashes.push(StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash(), - )); - blocks_fork.push(block); - } - - // can't stream a non-existant header - assert!(StreamCursor::new_headers(&chainstate, &StacksBlockId([0x11; 32]), 1).is_err()); - - // stream back individual headers - for i in 0..blocks.len() { - let mut stream = - StreamCursor::new_headers(&chainstate, &blocks_index_hashes[i], 1).unwrap(); - let mut next_header_bytes = vec![]; - loop { - // torture test - let mut next_bytes = stream_one_header_to_vec( - &chainstate.db(), - &chainstate.blocks_path, - &mut stream, - 25, - ) - .unwrap(); - if next_bytes.len() == 0 { - break; - } - next_header_bytes.append(&mut next_bytes); - } - test_debug!("Got {} total bytes", next_header_bytes.len()); - let header: ExtendedStacksHeader = - serde_json::from_reader(&mut &next_header_bytes[..]).unwrap(); - - assert_eq!(header.consensus_hash, ConsensusHash([(i + 1) as u8; 20])); - assert_eq!(header.header, blocks[i].header); - - if i > 0 { - assert_eq!(header.parent_block_id, blocks_index_hashes[i - 1]); - } - } - - // stream back a run of headers - let block_expected_headers: Vec = - blocks.iter().rev().map(|blk| blk.header.clone()).collect(); - - let block_expected_index_hashes: Vec = blocks_index_hashes - .iter() - .rev() - .map(|idx| idx.clone()) - .collect(); - - let block_fork_expected_headers: Vec = blocks_fork - .iter() - .rev() - .map(|blk| blk.header.clone()) - .collect(); - - let block_fork_expected_index_hashes: Vec = blocks_fork_index_hashes - .iter() - .rev() - .map(|idx| idx.clone()) - .collect(); - - // get them all -- ask for more than there is - let mut stream = - StreamCursor::new_headers(&chainstate, 
blocks_index_hashes.last().unwrap(), 4096).unwrap(); - let header_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 1024 * 1024).unwrap(); - - eprintln!( - "headers: {}", - String::from_utf8(header_bytes.clone()).unwrap() - ); - let headers: Vec = - serde_json::from_reader(&mut &header_bytes[..]).unwrap(); - - assert_eq!(headers.len(), block_expected_headers.len()); - for ((i, h), eh) in headers - .iter() - .enumerate() - .zip(block_expected_headers.iter()) - { - assert_eq!(h.header, *eh); - assert_eq!(h.consensus_hash, ConsensusHash([(32 - i) as u8; 20])); - if i + 1 < block_expected_index_hashes.len() { - assert_eq!(h.parent_block_id, block_expected_index_hashes[i + 1]); - } - } - - let mut stream = - StreamCursor::new_headers(&chainstate, blocks_fork_index_hashes.last().unwrap(), 4096) - .unwrap(); - let header_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 1024 * 1024).unwrap(); - let fork_headers: Vec = - serde_json::from_reader(&mut &header_bytes[..]).unwrap(); - - assert_eq!(fork_headers.len(), block_fork_expected_headers.len()); - for ((i, h), eh) in fork_headers - .iter() - .enumerate() - .zip(block_fork_expected_headers.iter()) - { - let consensus_hash = if i >= 16 { - ConsensusHash([((32 - i) as u8); 20]) - } else { - ConsensusHash([((32 - i) as u8) | 0x80; 20]) - }; - - assert_eq!(h.header, *eh); - assert_eq!(h.consensus_hash, consensus_hash); - if i + 1 < block_fork_expected_index_hashes.len() { - assert_eq!(h.parent_block_id, block_fork_expected_index_hashes[i + 1]); - } - } - - assert_eq!(fork_headers[16..32], headers[16..32]); - - // ask for only a few - let mut stream = - StreamCursor::new_headers(&chainstate, blocks_index_hashes.last().unwrap(), 10).unwrap(); - let mut header_bytes = vec![]; - loop { - // torture test - let mut next_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 17).unwrap(); - if next_bytes.len() == 0 { - break; - } - header_bytes.append(&mut next_bytes); - } - - eprintln!( - "header bytes: {}", - String::from_utf8(header_bytes.clone()).unwrap() - ); - - let headers: Vec = - serde_json::from_reader(&mut &header_bytes[..]).unwrap(); - - assert_eq!(headers.len(), 10); - for (i, hdr) in headers.iter().enumerate() { - assert_eq!(hdr.header, block_expected_headers[i]); - assert_eq!(hdr.parent_block_id, block_expected_index_hashes[i + 1]); - } - - // ask for only a few - let mut stream = - StreamCursor::new_headers(&chainstate, blocks_fork_index_hashes.last().unwrap(), 10) - .unwrap(); - let mut header_bytes = vec![]; - loop { - // torture test - let mut next_bytes = stream_headers_to_vec(&mut chainstate, &mut stream, 17).unwrap(); - if next_bytes.len() == 0 { - break; - } - header_bytes.append(&mut next_bytes); - } - let headers: Vec = - serde_json::from_reader(&mut &header_bytes[..]).unwrap(); - - assert_eq!(headers.len(), 10); - for (i, hdr) in headers.iter().enumerate() { - assert_eq!(hdr.header, block_fork_expected_headers[i]); - assert_eq!(hdr.parent_block_id, block_fork_expected_index_hashes[i + 1]); - } -} - -#[test] -fn stacks_db_stream_staging_microblocks() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let block = make_empty_coinbase_block(&privk); - let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); - mblocks.truncate(15); - - let consensus_hash = ConsensusHash([2u8; 20]); - let parent_consensus_hash = 
ConsensusHash([1u8; 20]); - let index_block_header = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); - - // can't stream a non-existant microblock - if let Err(chainstate_error::NoSuchBlockError) = - StreamCursor::new_microblock_confirmed(&chainstate, index_block_header.clone()) - { - } else { - panic!("Opened nonexistant microblock"); - } - - if let Err(chainstate_error::NoSuchBlockError) = - StreamCursor::new_microblock_unconfirmed(&chainstate, index_block_header.clone(), 0) - { - } else { - panic!("Opened nonexistant microblock"); - } - - // store microblocks to staging and stream them back - for (i, mblock) in mblocks.iter().enumerate() { - store_staging_microblock( - &mut chainstate, - &consensus_hash, - &block.block_hash(), - mblock, - ); - - // read back all the data we have so far, block-by-block - let mut staging_mblocks = vec![]; - for j in 0..(i + 1) { - let mut next_mblock_bytes = vec![]; - let mut stream = StreamCursor::new_microblock_unconfirmed( - &chainstate, - index_block_header.clone(), - j as u16, - ) - .unwrap(); - loop { - let mut next_bytes = - stream_one_staging_microblock_to_vec(&chainstate.db(), &mut stream, 4096) - .unwrap(); - if next_bytes.len() == 0 { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - next_mblock_bytes.len() - ); - next_mblock_bytes.append(&mut next_bytes); - } - test_debug!("Got {} total bytes", next_mblock_bytes.len()); - - // should deserialize to a microblock - let staging_mblock = - StacksMicroblock::consensus_deserialize(&mut &next_mblock_bytes[..]).unwrap(); - staging_mblocks.push(staging_mblock); - } - - assert_eq!(staging_mblocks.len(), mblocks[0..(i + 1)].len()); - for j in 0..(i + 1) { - test_debug!("check {}", j); - assert_eq!(staging_mblocks[j], mblocks[j]) - } - - // can also read partial stream in one shot, from any seq - for k in 0..(i + 1) { - test_debug!("start at seq {}", k); - let mut staging_mblock_bytes = vec![]; - let mut stream = StreamCursor::new_microblock_unconfirmed( - &chainstate, - index_block_header.clone(), - k as u16, - ) - .unwrap(); - loop { - let mut next_bytes = - stream_unconfirmed_microblocks_to_vec(&mut chainstate, &mut stream, 4096) - .unwrap(); - if next_bytes.len() == 0 { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - staging_mblock_bytes.len() - ); - staging_mblock_bytes.append(&mut next_bytes); - } - - test_debug!("Got {} total bytes", staging_mblock_bytes.len()); - - // decode stream - let staging_mblocks = decode_microblock_stream(&staging_mblock_bytes); - - assert_eq!(staging_mblocks.len(), mblocks[k..(i + 1)].len()); - for j in 0..staging_mblocks.len() { - test_debug!("check {}", j); - assert_eq!(staging_mblocks[j], mblocks[k + j]) - } - } - } -} - -#[test] -fn stacks_db_stream_confirmed_microblocks() { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let privk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let block = make_empty_coinbase_block(&privk); - let mut mblocks = make_sample_microblock_stream(&privk, &block.block_hash()); - mblocks.truncate(5); - - let mut child_block = make_empty_coinbase_block(&privk); - child_block.header.parent_block = block.block_hash(); - child_block.header.parent_microblock = mblocks.last().as_ref().unwrap().block_hash(); - child_block.header.parent_microblock_sequence = - 
mblocks.last().as_ref().unwrap().header.sequence; - - let consensus_hash = ConsensusHash([2u8; 20]); - let parent_consensus_hash = ConsensusHash([1u8; 20]); - let child_consensus_hash = ConsensusHash([3u8; 20]); - - let index_block_header = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()); - - // store microblocks to staging - for (i, mblock) in mblocks.iter().enumerate() { - store_staging_microblock( - &mut chainstate, - &consensus_hash, - &block.block_hash(), - mblock, - ); - } - - // store block to staging - store_staging_block( - &mut chainstate, - &consensus_hash, - &block, - &parent_consensus_hash, - 1, - 2, - ); - - // store child block to staging - store_staging_block( - &mut chainstate, - &child_consensus_hash, - &child_block, - &consensus_hash, - 1, - 2, - ); - - // accept it - set_block_processed(&mut chainstate, &consensus_hash, &block.block_hash(), true); - set_block_processed( - &mut chainstate, - &child_consensus_hash, - &child_block.block_hash(), - true, - ); - - for i in 0..mblocks.len() { - // set different parts of this stream as confirmed - set_microblocks_processed( - &mut chainstate, - &child_consensus_hash, - &child_block.block_hash(), - &mblocks[i].block_hash(), - ); - - // verify that we can stream everything - let microblock_index_header = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &mblocks[i].block_hash()); - let mut stream = - StreamCursor::new_microblock_confirmed(&chainstate, microblock_index_header.clone()) - .unwrap(); - - let mut confirmed_mblock_bytes = vec![]; - loop { - let mut next_bytes = - stream_confirmed_microblocks_to_vec(&mut chainstate, &mut stream, 16).unwrap(); - if next_bytes.len() == 0 { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - confirmed_mblock_bytes.len() - ); - confirmed_mblock_bytes.append(&mut next_bytes); - } - - // decode stream (should be length-prefixed) - let mut confirmed_mblocks = - Vec::::consensus_deserialize(&mut &confirmed_mblock_bytes[..]) - .unwrap(); - - confirmed_mblocks.reverse(); - - assert_eq!(confirmed_mblocks.len(), mblocks[0..(i + 1)].len()); - for j in 0..(i + 1) { - test_debug!("check {}", j); - assert_eq!(confirmed_mblocks[j], mblocks[j]) - } - } -} diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 30ebfb6984..b3c1a1a9d4 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -25,7 +25,8 @@ use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct}; use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborKey, PeerAddress}; +use stacks::net::{Neighbor, NeighborKey}; +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 70aca0eb7f..517f080cb6 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -187,15 +187,17 @@ use stacks::net::db::{LocalPeer, PeerDB}; use stacks::net::dns::{DNSClient, DNSResolver}; use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; -use stacks::net::rpc::RPCHandlerArgs; use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; -use 
stacks::net::{Error as NetError, NetworkResult, PeerAddress, PeerNetworkComms, ServiceFlags}; +use stacks::net::{ + Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, +}; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, VRFSeed, }; +use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum}; use stacks_common::util::secp256k1::Secp256k1PrivateKey; diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 0e9f5aa117..b63b4ddbc1 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -1,6 +1,5 @@ use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; -use std::default::Default; use std::net::SocketAddr; use std::thread::JoinHandle; use std::{env, thread, time}; @@ -37,11 +36,11 @@ use stacks::cost_estimates::UnitEstimator; use stacks::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; use stacks::net::db::PeerDB; use stacks::net::p2p::PeerNetwork; -use stacks::net::rpc::RPCHandlerArgs; use stacks::net::stackerdb::StackerDBs; -use stacks::net::{Error as NetError, PeerAddress}; +use stacks::net::{Error as NetError, RPCHandlerArgs}; use stacks::util_lib::strings::UrlString; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, TrieHash, VRFSeed}; +use stacks_common::types::net::PeerAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha256Sum; use stacks_common::util::secp256k1::Secp256k1PrivateKey; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index ef20119a85..c2057d6430 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -25,10 +25,10 @@ use stacks::core::{ StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; -use stacks::net::{ - AccountEntryResponse, CallReadOnlyRequestBody, ContractSrcResponse, - GetIsTraitImplementedResponse, -}; +use stacks::net::api::callreadonly::CallReadOnlyRequestBody; +use stacks::net::api::getaccount::AccountEntryResponse; +use stacks::net::api::getcontractsrc::ContractSrcResponse; +use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 645476f2cd..e9664e46dc 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -39,11 +39,17 @@ use stacks::core::{ BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; -use stacks::net::atlas::{AtlasConfig, AtlasDB, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST}; -use stacks::net::{ - AccountEntryResponse, ContractSrcResponse, GetAttachmentResponse, GetAttachmentsInvResponse, - PostTransactionRequestBody, RPCFeeEstimateResponse, RPCPeerInfoData, RPCPoxInfoData, - StacksBlockAcceptedData, UnconfirmedTransactionResponse, +use 
stacks::net::api::getaccount::AccountEntryResponse; +use stacks::net::api::getcontractsrc::ContractSrcResponse; +use stacks::net::api::getinfo::RPCPeerInfoData; +use stacks::net::api::getpoxinfo::RPCPoxInfoData; +use stacks::net::api::gettransaction_unconfirmed::UnconfirmedTransactionResponse; +use stacks::net::api::postblock::StacksBlockAcceptedData; +use stacks::net::api::postfeerate::RPCFeeEstimateResponse; +use stacks::net::api::posttransaction::PostTransactionRequestBody; +use stacks::net::atlas::{ + AtlasConfig, AtlasDB, GetAttachmentResponse, GetAttachmentsInvResponse, + MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, }; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 6c9bc48886..f064c5ed84 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -199,8 +199,8 @@ fn test_stackerdb_dkg() { .init(); // Generate Signer Data - let num_signers: u32 = 16; - let num_keys: u32 = 40; + let num_signers: u32 = 100; + let num_keys: u32 = 4000; let publisher_private_key = StacksPrivateKey::new(); let signer_stacks_private_keys = (0..num_signers) .map(|_| StacksPrivateKey::new())