diff --git a/src/daemon.rs b/src/daemon.rs
index f04045d0..4c398f32 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -137,6 +137,34 @@ pub struct MempoolAcceptResult {
     reject_reason: Option<String>,
 }

+#[derive(Serialize, Deserialize, Debug)]
+struct MempoolFeesSubmitPackage {
+    base: f64,
+    #[serde(rename = "effective-feerate")]
+    effective_feerate: Option<f64>,
+    #[serde(rename = "effective-includes")]
+    effective_includes: Option<Vec<String>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SubmitPackageResult {
+    package_msg: String,
+    #[serde(rename = "tx-results")]
+    tx_results: HashMap<String, TxResult>,
+    #[serde(rename = "replaced-transactions")]
+    replaced_transactions: Option<Vec<String>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct TxResult {
+    txid: String,
+    #[serde(rename = "other-wtxid")]
+    other_wtxid: Option<String>,
+    vsize: Option<u32>,
+    fees: Option<MempoolFeesSubmitPackage>,
+    error: Option<String>,
+}
+
 pub trait CookieGetter: Send + Sync {
     fn get(&self) -> Result<Vec<u8>>;
 }
@@ -621,6 +649,25 @@ impl Daemon {
             .chain_err(|| "invalid testmempoolaccept reply")
     }

+    pub fn submit_package(
+        &self,
+        txhex: Vec<String>,
+        maxfeerate: Option<f64>,
+        maxburnamount: Option<f64>,
+    ) -> Result<SubmitPackageResult> {
+        let params = match (maxfeerate, maxburnamount) {
+            (Some(rate), Some(burn)) => {
+                json!([txhex, format!("{:.8}", rate), format!("{:.8}", burn)])
+            }
+            (Some(rate), None) => json!([txhex, format!("{:.8}", rate)]),
+            (None, Some(burn)) => json!([txhex, null, format!("{:.8}", burn)]),
+            (None, None) => json!([txhex]),
+        };
+        let result = self.request("submitpackage", params)?;
+        serde_json::from_value::<SubmitPackageResult>(result)
+            .chain_err(|| "invalid submitpackage reply")
+    }
+
     // Get estimated feerates for the provided confirmation targets using a batch RPC request
     // Missing estimates are logged but do not cause a failure, whatever is available is returned
     #[allow(clippy::float_cmp)]
diff --git a/src/new_index/db.rs b/src/new_index/db.rs
index 4fe9020c..98de2d30 100644
--- a/src/new_index/db.rs
+++ b/src/new_index/db.rs
@@ -73,6 +73,67 @@ impl<'a> Iterator for ReverseScanIterator<'a> {
     }
 }

+pub struct ReverseScanGroupIterator<'a> {
+    iters: Vec<ReverseScanIterator<'a>>,
+    next_rows: Vec<Option<DBRow>>,
+    value_offset: usize,
+    done: bool,
+}
+
+impl<'a> ReverseScanGroupIterator<'a> {
+    pub fn new(
+        mut iters: Vec<ReverseScanIterator<'a>>,
+        value_offset: usize,
+    ) -> ReverseScanGroupIterator<'a> {
+        let mut next_rows: Vec<Option<DBRow>> = Vec::with_capacity(iters.len());
+        for iter in &mut iters {
+            let next = iter.next();
+            next_rows.push(next);
+        }
+        let done = next_rows.iter().all(|row| row.is_none());
+        ReverseScanGroupIterator {
+            iters,
+            next_rows,
+            value_offset,
+            done,
+        }
+    }
+}
+
+impl<'a> Iterator for ReverseScanGroupIterator<'a> {
+    type Item = DBRow;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done {
+            return None;
+        }
+
+        let best_index = self
+            .next_rows
+            .iter()
+            .enumerate()
+            .max_by(|(a_index, a_opt), (b_index, b_opt)| match (a_opt, b_opt) {
+                (None, None) => a_index.cmp(b_index),
+
+                (Some(_), None) => std::cmp::Ordering::Greater,
+
+                (None, Some(_)) => std::cmp::Ordering::Less,
+
+                (Some(a), Some(b)) => a.key[self.value_offset..].cmp(&(b.key[self.value_offset..])),
+            })
+            .map(|(index, _)| index)
+            .unwrap_or(0);
+
+        let best = self.next_rows[best_index].take();
+        self.next_rows[best_index] = self.iters.get_mut(best_index)?.next();
+        if self.next_rows.iter().all(|row| row.is_none()) {
+            self.done = true;
+        }
+
+        best
+    }
+}
+
 #[derive(Debug)]
 pub struct DB {
     db: rocksdb::DB,
@@ -140,6 +201,28 @@ impl DB {
         }
     }

+    pub fn iter_scan_group_reverse(
+        &self,
+        prefixes: impl Iterator<Item = (Vec<u8>, Vec<u8>)>,
+        value_offset: usize,
+    ) -> ReverseScanGroupIterator {
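+        // One reverse iterator per (prefix, upper bound) pair; the group iterator
+        // then merges them, repeatedly yielding the row whose key suffix (from
+        // `value_offset` onward) sorts highest, i.e. a k-way reverse merge.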
+        let iters = prefixes
+            .map(|(prefix, prefix_max)| {
+                let mut iter = self.db.raw_iterator();
+                iter.seek_for_prev(prefix_max);
+                ReverseScanIterator {
+                    prefix: prefix.to_vec(),
+                    iter,
+                    done: false,
+                }
+            })
+            .collect();
+        ReverseScanGroupIterator::new(iters, value_offset)
+    }
+
     pub fn write(&self, mut rows: Vec<DBRow>, flush: DBFlush) {
         debug!(
             "writing {} rows to {:?}, flush={:?}",
diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs
index d72e27e7..32d27c1f 100644
--- a/src/new_index/mempool.rs
+++ b/src/new_index/mempool.rs
@@ -177,6 +177,49 @@ impl Mempool {
             .collect()
     }

+    pub fn history_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&Txid>,
+        limit: usize,
+    ) -> Vec<Transaction> {
+        let _timer = self
+            .latency
+            .with_label_values(&["history_group"])
+            .start_timer();
+        scripthashes
+            .iter()
+            .filter_map(|scripthash| self.history.get(&scripthash[..]))
+            .flat_map(|entries| entries.iter())
+            .map(|e| e.get_txid())
+            .unique()
+            // TODO seek directly to last seen tx without reading earlier rows
+            .skip_while(|txid| {
+                // skip until we reach the last_seen_txid
+                last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+            })
+            .skip(match last_seen_txid {
+                Some(_) => 1, // skip the last_seen_txid itself
+                None => 0,
+            })
+            .take(limit)
+            .map(|txid| self.txstore.get(&txid).expect("missing mempool tx"))
+            .cloned()
+            .collect()
+    }
+
+    pub fn history_txids_iter_group<'a>(
+        &'a self,
+        scripthashes: &'a [[u8; 32]],
+    ) -> impl Iterator<Item = Txid> + 'a {
+        scripthashes
+            .iter()
+            .filter_map(move |scripthash| self.history.get(&scripthash[..]))
+            .flat_map(|entries| entries.iter())
+            .map(|entry| entry.get_txid())
+            .unique()
+    }
+
     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<Txid> {
         let _timer = self
             .latency
diff --git a/src/new_index/query.rs b/src/new_index/query.rs
index 3e314fd1..545f8b76 100644
--- a/src/new_index/query.rs
+++ b/src/new_index/query.rs
@@ -6,7 +6,7 @@ use std::time::{Duration, Instant};

 use crate::chain::{Network, OutPoint, Transaction, TxOut, Txid};
 use crate::config::Config;
-use crate::daemon::{Daemon, MempoolAcceptResult};
+use crate::daemon::{Daemon, MempoolAcceptResult, SubmitPackageResult};
 use crate::errors::*;
 use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo};
 use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus};
@@ -95,6 +95,15 @@ impl Query {
         self.daemon.test_mempool_accept(txhex, maxfeerate)
     }

+    pub fn submit_package(
+        &self,
+        txhex: Vec<String>,
+        maxfeerate: Option<f64>,
+        maxburnamount: Option<f64>,
+    ) -> Result<SubmitPackageResult> {
+        self.daemon.submit_package(txhex, maxfeerate, maxburnamount)
+    }
+
     pub fn utxo(&self, scripthash: &[u8]) -> Result<Vec<Utxo>> {
         let mut utxos = self.chain.utxo(
             scripthash,
diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs
index 089451bf..b2323cee 100644
--- a/src/new_index/schema.rs
+++ b/src/new_index/schema.rs
@@ -37,6 +37,8 @@ use crate::new_index::fetch::{start_fetcher, BlockEntry, FetchFrom};
 #[cfg(feature = "liquid")]
 use crate::elements::{asset, peg};

+use super::db::ReverseScanGroupIterator;
+
 const MIN_HISTORY_ITEMS_TO_CACHE: usize = 100;

 pub struct Store {
@@ -505,34 +507,47 @@ impl ChainQuery {
             &TxHistoryRow::prefix_height(code, hash, start_height as u32),
         )
     }
-    fn history_iter_scan_reverse(&self, code: u8, hash: &[u8]) -> ReverseScanIterator {
+    fn history_iter_scan_reverse(
+        &self,
+        code: u8,
+        hash: &[u8],
+        start_height: Option<u32>,
+    ) -> ReverseScanIterator {
         self.store.history_db.iter_scan_reverse(
             &TxHistoryRow::filter(code, hash),
-            &TxHistoryRow::prefix_end(code, hash),
+            &start_height.map_or(TxHistoryRow::prefix_end(code, hash), |start_height| {
+                TxHistoryRow::prefix_height_end(code, hash, start_height)
+            }),
         )
     }
-
-    pub fn summary(
+    fn history_iter_scan_group_reverse(
         &self,
-        scripthash: &[u8],
-        last_seen_txid: Option<&Txid>,
-        limit: usize,
-    ) -> Vec<TxHistorySummary> {
-        // scripthash lookup
-        self._summary(b'H', scripthash, last_seen_txid, limit)
+        code: u8,
+        hashes: &[[u8; 32]],
+        start_height: Option<u32>,
+    ) -> ReverseScanGroupIterator {
+        self.store.history_db.iter_scan_group_reverse(
+            hashes.iter().map(|hash| {
+                let prefix = TxHistoryRow::filter(code, &hash[..]);
+                let prefix_max = start_height
+                    .map_or(TxHistoryRow::prefix_end(code, &hash[..]), |start_height| {
+                        TxHistoryRow::prefix_height_end(code, &hash[..], start_height)
+                    });
+                (prefix, prefix_max)
+            }),
+            33,
+        )
     }

-    fn _summary(
+    fn collate_summaries(
         &self,
-        code: u8,
-        hash: &[u8],
+        iter: impl Iterator<Item = TxHistoryRow>,
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<TxHistorySummary> {
-        let _timer_scan = self.start_timer("address_summary");
-        let rows = self
-            .history_iter_scan_reverse(code, hash)
-            .map(TxHistoryRow::from_row)
+        // collate utxo funding/spending events by transaction
+
+        let rows = iter
             .map(|row| (row.get_txid(), row.key.txinfo, row.key.tx_position))
             .skip_while(|(txid, _, _)| {
                 // skip until we reach the last_seen_txid
@@ -546,8 +561,6 @@ impl ChainQuery {
                 self.tx_confirming_block(&txid)
                     .map(|b| (txid, info, b.height, b.time, tx_position))
             });
-
-        // collate utxo funding/spending events by transaction
         let mut map: HashMap<Txid, TxHistorySummary> = HashMap::new();
         for (txid, info, height, time, tx_position) in rows {
             if !map.contains_key(&txid) && map.len() == limit {
@@ -606,7 +619,6 @@ impl ChainQuery {
                 _ => {}
             }
         }
-
         let mut tx_summaries = map.into_values().collect::<Vec<TxHistorySummary>>();
         tx_summaries.sort_by(|a, b| {
             if a.height == b.height {
@@ -622,54 +634,97 @@ impl ChainQuery {
         tx_summaries
     }

-    pub fn history(
+    pub fn summary(
         &self,
         scripthash: &[u8],
         last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        // scripthash lookup
+        self._summary(b'H', scripthash, last_seen_txid, start_height, limit)
+    }
+
+    fn _summary(
+        &self,
+        code: u8,
+        hash: &[u8],
+        last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        let _timer_scan = self.start_timer("address_summary");
+        let rows = self
+            .history_iter_scan_reverse(code, hash, start_height)
+            .map(TxHistoryRow::from_row);
+
+        self.collate_summaries(rows, last_seen_txid, limit)
+    }
+
+    pub fn summary_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        // scripthash lookup
+        let _timer_scan = self.start_timer("address_group_summary");
+        let rows = self
+            .history_iter_scan_group_reverse(b'H', scripthashes, start_height)
+            .map(TxHistoryRow::from_row);
+
+        self.collate_summaries(rows, last_seen_txid, limit)
+    }
+
+    pub fn history<'a>(
+        &'a self,
+        scripthash: &[u8],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
         // scripthash lookup
-        self._history(b'H', scripthash, last_seen_txid, limit)
+        self._history(b'H', scripthash, last_seen_txid, start_height, limit)
     }
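+
+    // NB: `history`, `_history` and the grouped variants return a rayon
+    // ParallelIterator over `Result` items rather than a collected Vec;
+    // callers collect it themselves, e.g. `.collect::<Result<Vec<_>, _>>()`
+    // as rest.rs does.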

     pub fn history_txids_iter<'a>(&'a self, scripthash: &[u8]) -> impl Iterator<Item = Txid> + 'a {
-        self.history_iter_scan_reverse(b'H', scripthash)
+        self.history_iter_scan_reverse(b'H', scripthash, None)
             .map(|row| TxHistoryRow::from_row(row).get_txid())
             .unique()
     }

-    fn _history(
-        &self,
+    fn _history<'a>(
+        &'a self,
         code: u8,
         hash: &[u8],
-        last_seen_txid: Option<&Txid>,
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
         let _timer_scan = self.start_timer("history");
-        let txs_conf = self
-            .history_iter_scan_reverse(code, hash)
-            .map(|row| TxHistoryRow::from_row(row).get_txid())
-            // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
-            .unique()
-            // TODO seek directly to last seen tx without reading earlier rows
-            .skip_while(|txid| {
-                // skip until we reach the last_seen_txid
-                last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
-            })
-            .skip(match last_seen_txid {
-                Some(_) => 1, // skip the last_seen_txid itself
-                None => 0,
-            })
-            .filter_map(|txid| self.tx_confirming_block(&txid).map(|b| (txid, b)))
-            .take(limit)
-            .collect::<Vec<_>>();
-        self.lookup_txns(&txs_conf)
-            .expect("failed looking up txs in history index")
-            .into_iter()
-            .zip(txs_conf)
-            .map(|(tx, (_, blockid))| (tx, blockid))
-            .collect()
+        self.lookup_txns(
+            self.history_iter_scan_reverse(code, hash, start_height)
+                .map(|row| TxHistoryRow::from_row(row).get_txid())
+                // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
+                .unique()
+                // TODO seek directly to last seen tx without reading earlier rows
+                .skip_while(move |txid| {
+                    // skip until we reach the last_seen_txid
+                    last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+                })
+                .skip(match last_seen_txid {
+                    Some(_) => 1, // skip the last_seen_txid itself
+                    None => 0,
+                })
+                .filter_map(move |txid| self.tx_confirming_block(&txid).map(|b| (txid, b))),
+            limit,
+        )
     }

     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, BlockId)> {
@@ -687,6 +738,60 @@ impl ChainQuery {
             .collect()
     }

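+    // Grouped variants of the scripthash lookups: a single reverse scan merged
+    // across several scripthashes (via ReverseScanGroupIterator), deduplicated
+    // by txid and paginated with the same last_seen_txid/start_height rules.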
+    pub fn history_group<'a>(
+        &'a self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        // scripthash lookup
+        self._history_group(b'H', scripthashes, last_seen_txid, start_height, limit)
+    }
+
+    pub fn history_txids_iter_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        start_height: Option<u32>,
+    ) -> impl Iterator<Item = Txid> + '_ {
+        self.history_iter_scan_group_reverse(b'H', scripthashes, start_height)
+            .map(|row| TxHistoryRow::from_row(row).get_txid())
+            .unique()
+    }
+
+    fn _history_group<'a>(
+        &'a self,
+        code: u8,
+        hashes: &[[u8; 32]],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        debug!("limit {} | last_seen {:?}", limit, last_seen_txid);
+        let _timer_scan = self.start_timer("history_group");
+
+        self.lookup_txns(
+            self.history_iter_scan_group_reverse(code, hashes, start_height)
+                .map(|row| TxHistoryRow::from_row(row).get_txid())
+                // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
+                .unique()
+                .skip_while(move |txid| {
+                    // we already seeked to the last txid at this height
+                    // now skip just past the last_seen_txid itself
+                    last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+                })
+                .skip(match last_seen_txid {
+                    Some(_) => 1, // skip the last_seen_txid itself
+                    None => 0,
+                })
+                .filter_map(move |txid| self.tx_confirming_block(&txid).map(|b| (txid, b))),
+            limit,
+        )
+    }
+
     // TODO: avoid duplication with stats/stats_delta?
     pub fn utxo(&self, scripthash: &[u8], limit: usize, flush: DBFlush) -> Result<Vec<Utxo>> {
         let _timer = self.start_timer("utxo");
@@ -983,15 +1085,24 @@ impl ChainQuery {

     // TODO: can we pass txids as a "generic iterable"?
     // TODO: should also use a custom ThreadPoolBuilder?
-    pub fn lookup_txns(&self, txids: &[(Txid, BlockId)]) -> Result<Vec<Transaction>> {
-        let _timer = self.start_timer("lookup_txns");
+    pub fn lookup_txns<'a, I>(
+        &'a self,
+        txids: I,
+        take: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a
+    where
+        I: Iterator<Item = (Txid, BlockId)> + Send + rayon::iter::ParallelBridge + 'a,
+    {
         txids
-            .par_iter()
-            .map(|(txid, blockid)| {
-                self.lookup_txn(txid, Some(&blockid.hash))
-                    .chain_err(|| "missing tx")
+            .take(take)
+            .par_bridge()
+            .map(move |(txid, blockid)| -> Result<_> {
+                Ok((
+                    self.lookup_txn(&txid, Some(&blockid.hash))
+                        .chain_err(|| "missing tx")?,
+                    blockid,
+                ))
             })
-            .collect::<Result<Vec<Transaction>>>()
     }

     pub fn lookup_txn(&self, txid: &Txid, blockhash: Option<&BlockHash>) -> Option<Transaction> {
@@ -1102,13 +1213,19 @@ impl ChainQuery {
     }

     #[cfg(feature = "liquid")]
-    pub fn asset_history(
-        &self,
-        asset_id: &AssetId,
-        last_seen_txid: Option<&Txid>,
+    pub fn asset_history<'a>(
+        &'a self,
+        asset_id: &'a AssetId,
+        last_seen_txid: Option<&'a Txid>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
-        self._history(b'I', &asset_id.into_inner()[..], last_seen_txid, limit)
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        self._history(
+            b'I',
+            &asset_id.into_inner()[..],
+            last_seen_txid,
+            None,
+            limit,
+        )
     }

     #[cfg(feature = "liquid")]
@@ -1661,6 +1778,12 @@ impl TxHistoryRow {
         bincode_util::serialize_big(&(code, full_hash(hash), height)).unwrap()
     }

+    // prefix representing the end of a given block (used for reverse scans)
+    fn prefix_height_end(code: u8, hash: &[u8], height: u32) -> Bytes {
+        // u16::MAX for the tx_position ensures we get all transactions at this height
+        bincode_util::serialize_big(&(code, full_hash(hash), height, u16::MAX)).unwrap()
+    }
+
     pub fn into_row(self) -> DBRow {
         DBRow {
             key: bincode_util::serialize_big(&self.key).unwrap(),
diff --git a/src/rest.rs b/src/rest.rs
index 82dd3e68..8cdec5d1 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -19,6 +19,7 @@ use hex::{self, FromHexError};
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Response, Server, StatusCode};
 use prometheus::{HistogramOpts, HistogramVec};
+use rayon::iter::ParallelIterator;
 use tokio::sync::oneshot;

 use hyperlocal::UnixServerExt;
@@ -42,6 +43,8 @@ use std::thread;
 use url::form_urlencoded;

 const ADDRESS_SEARCH_LIMIT: usize = 10;
+// Limit to 300 addresses
+const MULTI_ADDRESS_LIMIT: usize = 300;

 #[cfg(feature = "liquid")]
 const ASSETS_PER_PAGE: usize = 25;
@@ -539,6 +542,33 @@ fn ttl_by_depth(height: Option<usize>, query: &Query) -> u32 {
     })
 }

+enum TxidLocation {
+    Mempool,
+    Chain(u32), // contains height
+    None,
+}
+
+#[inline]
+fn find_txid(
+    txid: &Txid,
+    mempool: &crate::new_index::Mempool,
+    chain: &crate::new_index::ChainQuery,
+) -> TxidLocation {
+    if mempool.lookup_txn(txid).is_some() {
+        TxidLocation::Mempool
+    } else if let Some(block) = chain.tx_confirming_block(txid) {
+        TxidLocation::Chain(block.height as u32)
+    } else {
+        TxidLocation::None
+    }
+}
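+
+// How the handlers below use `find_txid`: an `after_txid`/`last_seen_txid`
+// cursor found in the mempool means paging resumes there (mempool first, then
+// chain); one found on-chain yields the confirmation height that bounds the
+// confirmed-history scan (`start_height`); an unknown txid is rejected with
+// 422 Unprocessable Entity.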

 /// Prepare transactions to be serialized in a JSON response
 ///
 /// Any transactions with missing prevouts will be filtered out of the response, rather than returned with incorrect data.
@@ -887,34 +911,124 @@ fn handle_request(

             let mut txs = vec![];

-            if let Some(given_txid) = &after_txid {
-                let is_mempool = query
-                    .mempool()
-                    .history_txids_iter(&script_hash[..])
-                    .any(|txid| given_txid == &txid);
-                let is_confirmed = if is_mempool {
-                    false
+            let after_txid_location = if let Some(txid) = &after_txid {
+                find_txid(txid, &query.mempool(), query.chain())
+            } else {
+                TxidLocation::Mempool
+            };
+
+            let confirmed_block_height = match after_txid_location {
+                TxidLocation::Mempool => {
+                    txs.extend(
+                        query
+                            .mempool()
+                            .history(&script_hash[..], after_txid.as_ref(), max_txs)
+                            .into_iter()
+                            .map(|tx| (tx, None)),
+                    );
+                    None
+                }
+                TxidLocation::None => {
+                    return Err(HttpError(
+                        StatusCode::UNPROCESSABLE_ENTITY,
+                        String::from("after_txid not found"),
+                    ));
+                }
+                TxidLocation::Chain(height) => Some(height),
+            };
+
+            if txs.len() < max_txs {
+                let after_txid_ref = if !txs.is_empty() {
+                    // If there are any txs, we know mempool found the
+                    // after_txid IF it exists... so always return None.
+                    None
                 } else {
+                    after_txid.as_ref()
+                };
+                txs.extend(
                     query
                         .chain()
-                        .history_txids_iter(&script_hash[..])
-                        .any(|txid| given_txid == &txid)
-                };
-                if !is_mempool && !is_confirmed {
+                        .history(
+                            &script_hash[..],
+                            after_txid_ref,
+                            confirmed_block_height,
+                            max_txs - txs.len(),
+                        )
+                        .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                        .collect::<Result<Vec<_>, _>>()?,
+                );
+            }
+
+            json_response(prepare_txs(txs, query, config), TTL_SHORT)
+        }
+
+        (&Method::POST, Some(script_types @ &"addresses"), Some(&"txs"), None, None, None)
+        | (&Method::POST, Some(script_types @ &"scripthashes"), Some(&"txs"), None, None, None) => {
+            let script_type = match *script_types {
+                "addresses" => "address",
+                "scripthashes" => "scripthash",
+                _ => "",
+            };
+
+            if multi_address_too_long(&body) {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("body too long"),
+                ));
+            }
+
+            let script_hashes: Vec<String> =
+                serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+            if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("body too long"),
+                ));
+            }
+
+            let script_hashes: Vec<[u8; 32]> = script_hashes
+                .iter()
+                .filter_map(|script_str| {
+                    to_scripthash(script_type, script_str, config.network_type).ok()
+                })
+                .collect();
+
+            let max_txs = query_params
+                .get("max_txs")
+                .and_then(|s| s.parse::<usize>().ok())
+                .unwrap_or(config.rest_default_max_mempool_txs);
+            let after_txid = query_params
+                .get("after_txid")
+                .and_then(|s| s.parse::<Txid>().ok());
+
+            let mut txs = vec![];
+
+            let after_txid_location = if let Some(txid) = &after_txid {
+                find_txid(txid, &query.mempool(), query.chain())
+            } else {
+                TxidLocation::Mempool
+            };
+
+            let confirmed_block_height = match after_txid_location {
+                TxidLocation::Mempool => {
+                    txs.extend(
+                        query
+                            .mempool()
+                            .history_group(&script_hashes, after_txid.as_ref(), max_txs)
+                            .into_iter()
+                            .map(|tx| (tx, None)),
+                    );
+                    None
+                }
+                TxidLocation::None => {
                     return Err(HttpError(
                         StatusCode::UNPROCESSABLE_ENTITY,
                         String::from("after_txid not found"),
                     ));
                 }
-            }
-
-            txs.extend(
-                query
-                    .mempool()
-                    .history(&script_hash[..], after_txid.as_ref(), max_txs)
-                    .into_iter()
-                    .map(|tx| (tx, None)),
-            );
+                TxidLocation::Chain(height) => Some(height),
+            };

             if txs.len() < max_txs {
                 let after_txid_ref = if !txs.is_empty() {
@@ -927,9 +1041,14 @@ fn handle_request(
                 txs.extend(
                     query
                         .chain()
-                        .history(&script_hash[..], after_txid_ref, max_txs - txs.len())
-                        .into_iter()
-                        .map(|(tx, blockid)| (tx, Some(blockid))),
+                        .history_group(
+                            &script_hashes,
+                            after_txid_ref,
+                            confirmed_block_height,
+                            max_txs - txs.len(),
+                        )
+                        .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                        .collect::<Result<Vec<_>, _>>()?,
                 );
             }

@@ -961,10 +1080,9 @@ fn handle_request(

             let txs = query
                 .chain()
-                .history(&script_hash[..], last_seen_txid.as_ref(), max_txs)
-                .into_iter()
-                .map(|(tx, blockid)| (tx, Some(blockid)))
-                .collect();
+                .history(&script_hash[..], last_seen_txid.as_ref(), None, max_txs)
+                .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                .collect::<Result<Vec<_>, _>>()?;

             json_response(prepare_txs(txs, query, config), TTL_SHORT)
         }
@@ -994,9 +1112,110 @@ fn handle_request(
                     .unwrap_or(config.rest_default_max_address_summary_txs),
             );

-            let summary = query
-                .chain()
-                .summary(&script_hash[..], last_seen_txid.as_ref(), max_txs);
+            let last_seen_txid_location = if let Some(txid) = &last_seen_txid {
+                find_txid(txid, &query.mempool(), query.chain())
+            } else {
+                TxidLocation::Mempool
+            };
+
+            let confirmed_block_height = match last_seen_txid_location {
+                TxidLocation::Mempool => None,
+                TxidLocation::None => {
+                    return Err(HttpError(
+                        StatusCode::UNPROCESSABLE_ENTITY,
+                        String::from("after_txid not found"),
+                    ));
+                }
+                TxidLocation::Chain(height) => Some(height),
+            };
+
+            let summary = query.chain().summary(
+                &script_hash[..],
+                last_seen_txid.as_ref(),
+                confirmed_block_height,
+                max_txs,
+            );
+
+            json_response(summary, TTL_SHORT)
+        }
+        (
+            &Method::POST,
+            Some(script_types @ &"addresses"),
+            Some(&"txs"),
+            Some(&"summary"),
+            last_seen_txid,
+            None,
+        )
+        | (
+            &Method::POST,
+            Some(script_types @ &"scripthashes"),
+            Some(&"txs"),
+            Some(&"summary"),
+            last_seen_txid,
+            None,
+        ) => {
+            let script_type = match *script_types {
+                "addresses" => "address",
+                "scripthashes" => "scripthash",
+                _ => "",
+            };
+
+            if multi_address_too_long(&body) {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("body too long"),
+                ));
+            }
+
+            let script_hashes: Vec<String> =
+                serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+            if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("body too long"),
+                ));
+            }
+
+            let script_hashes: Vec<[u8; 32]> = script_hashes
+                .iter()
+                .filter_map(|script_str| {
+                    to_scripthash(script_type, script_str, config.network_type).ok()
+                })
+                .collect();
+
+            let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok());
+            let max_txs = cmp::min(
+                config.rest_default_max_address_summary_txs,
+                query_params
+                    .get("max_txs")
+                    .and_then(|s| s.parse::<usize>().ok())
+                    .unwrap_or(config.rest_default_max_address_summary_txs),
+            );
+
+            let last_seen_txid_location = if let Some(txid) = &last_seen_txid {
+                find_txid(txid, &query.mempool(), query.chain())
+            } else {
+                TxidLocation::Mempool
+            };
+
+            let confirmed_block_height = match last_seen_txid_location {
+                TxidLocation::Mempool => None,
+                TxidLocation::None => {
+                    return Err(HttpError(
+                        StatusCode::UNPROCESSABLE_ENTITY,
+                        String::from("after_txid not found"),
+                    ));
+                }
+                TxidLocation::Chain(height) => Some(height),
+            };
+
+            let summary = query.chain().summary_group(
+                &script_hashes,
+                last_seen_txid.as_ref(),
+                confirmed_block_height,
+                max_txs,
+            );

             json_response(summary, TTL_SHORT)
         }
@@ -1255,6 +1474,59 @@ fn handle_request(
             json_response(result, TTL_SHORT)
         }
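+        // POST /txs/package: forwards the raw transactions to bitcoind's
+        // `submitpackage` RPC; the cap of 25 matches Bitcoin Core's package
+        // transaction limit.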
+        (&Method::POST, Some(&"txs"), Some(&"package"), None, None, None) => {
+            let txhexes: Vec<String> =
+                serde_json::from_str(String::from_utf8(body.to_vec())?.as_str())?;
+
+            if txhexes.len() > 25 {
+                Result::Err(HttpError::from(
+                    "Exceeded maximum of 25 transactions".to_string(),
+                ))?
+            }
+
+            let maxfeerate = query_params
+                .get("maxfeerate")
+                .map(|s| {
+                    s.parse::<f64>()
+                        .map_err(|_| HttpError::from("Invalid maxfeerate".to_string()))
+                })
+                .transpose()?;
+
+            let maxburnamount = query_params
+                .get("maxburnamount")
+                .map(|s| {
+                    s.parse::<f64>()
+                        .map_err(|_| HttpError::from("Invalid maxburnamount".to_string()))
+                })
+                .transpose()?;
+
+            // pre-checks
+            txhexes.iter().enumerate().try_for_each(|(index, txhex)| {
+                // each transaction must be of reasonable size (more than 60 bytes, within 400kWU standardness limit)
+                if !(120..800_000).contains(&txhex.len()) {
+                    Result::Err(HttpError::from(format!(
+                        "Invalid transaction size for item {}",
+                        index
+                    )))
+                } else {
+                    // must be a valid hex string
+                    Vec::<u8>::from_hex(txhex)
+                        .map_err(|_| {
+                            HttpError::from(format!("Invalid transaction hex for item {}", index))
+                        })
+                        .map(|_| ())
+                }
+            })?;
+
+            let result = query
+                .submit_package(txhexes, maxfeerate, maxburnamount)
+                .map_err(|err| HttpError::from(err.description().to_string()))?;
+
+            json_response(result, TTL_SHORT)
+        }
         (&Method::GET, Some(&"txs"), Some(&"outspends"), None, None, None) => {
             let txid_strings: Vec<&str> = query_params
                 .get("txids")
@@ -1495,8 +1764,8 @@ fn handle_request(
             query
                 .chain()
                 .asset_history(&asset_id, None, config.rest_default_chain_txs_per_page)
-                .into_iter()
-                .map(|(tx, blockid)| (tx, Some(blockid))),
+                .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                .collect::<Result<Vec<_>, _>>()?,
         );

         json_response(prepare_txs(txs, query, config), TTL_SHORT)
@@ -1521,9 +1790,8 @@ fn handle_request(
                 last_seen_txid.as_ref(),
                 config.rest_default_chain_txs_per_page,
             )
-            .into_iter()
-            .map(|(tx, blockid)| (tx, Some(blockid)))
-            .collect();
+            .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+            .collect::<Result<Vec<_>, _>>()?;

         json_response(prepare_txs(txs, query, config), TTL_SHORT)
     }
@@ -1704,6 +1972,15 @@ fn parse_scripthash(scripthash: &str) -> Result<FullHash, HttpError> {
     }
 }

+#[inline]
+fn multi_address_too_long(body: &hyper::body::Bytes) -> bool {
+    // ("",) (3) (quotes and comma between each entry)
+    // (\n    ) (5) (allows for pretty-printed JSON with a 4-space indent)
+    // The opening [] and whatnot don't need to be accounted for; we give more than enough leeway.
+    // p2tr and p2wsh addresses are 62 chars long, scripthashes are 64.
+    body.len() > (8 + 64) * MULTI_ADDRESS_LIMIT
+}
+
 #[derive(Debug)]
 struct HttpError(StatusCode, String);

diff --git a/start b/start
index 7e5cd80f..ab9d68fb 100755
--- a/start
+++ b/start
@@ -19,6 +19,10 @@ case "$(uname -s)" in
     FreeBSD)
         OS=FreeBSD
         NPROC=$(sysctl -n hw.ncpu)
+        export CC=/usr/local/bin/clang17
+        export CXX=/usr/local/bin/clang++17
+        export CPP=/usr/local/bin/clang-cpp17
+        export RUSTFLAGS="-C linker=clang17"
         ;;
     Darwin)
         OS=Darwin