diff --git a/src/new_index/db.rs b/src/new_index/db.rs
index 4fe9020c..98de2d30 100644
--- a/src/new_index/db.rs
+++ b/src/new_index/db.rs
@@ -73,6 +73,67 @@ impl<'a> Iterator for ReverseScanIterator<'a> {
     }
 }
 
+pub struct ReverseScanGroupIterator<'a> {
+    iters: Vec<ReverseScanIterator<'a>>,
+    next_rows: Vec<Option<DBRow>>,
+    value_offset: usize,
+    done: bool,
+}
+
+impl<'a> ReverseScanGroupIterator<'a> {
+    pub fn new(
+        mut iters: Vec<ReverseScanIterator<'a>>,
+        value_offset: usize,
+    ) -> ReverseScanGroupIterator<'a> {
+        let mut next_rows: Vec<Option<DBRow>> = Vec::with_capacity(iters.len());
+        for iter in &mut iters {
+            let next = iter.next();
+            next_rows.push(next);
+        }
+        let done = next_rows.iter().all(|row| row.is_none());
+        ReverseScanGroupIterator {
+            iters,
+            next_rows,
+            value_offset,
+            done,
+        }
+    }
+}
+
+impl<'a> Iterator for ReverseScanGroupIterator<'a> {
+    type Item = DBRow;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done {
+            return None;
+        }
+
+        let best_index = self
+            .next_rows
+            .iter()
+            .enumerate()
+            .max_by(|(a_index, a_opt), (b_index, b_opt)| match (a_opt, b_opt) {
+                (None, None) => a_index.cmp(b_index),
+
+                (Some(_), None) => std::cmp::Ordering::Greater,
+
+                (None, Some(_)) => std::cmp::Ordering::Less,
+
+                (Some(a), Some(b)) => a.key[self.value_offset..].cmp(&(b.key[self.value_offset..])),
+            })
+            .map(|(index, _)| index)
+            .unwrap_or(0);
+
+        let best = self.next_rows[best_index].take();
+        self.next_rows[best_index] = self.iters.get_mut(best_index)?.next();
+        if self.next_rows.iter().all(|row| row.is_none()) {
+            self.done = true;
+        }
+
+        best
+    }
+}
+
 #[derive(Debug)]
 pub struct DB {
     db: rocksdb::DB,
@@ -140,6 +201,25 @@ impl DB {
         }
     }
 
+    pub fn iter_scan_group_reverse(
+        &self,
+        prefixes: impl Iterator<Item = (Vec<u8>, Vec<u8>)>,
+        value_offset: usize,
+    ) -> ReverseScanGroupIterator {
+        let iters = prefixes
+            .map(|(prefix, prefix_max)| {
+                let mut iter = self.db.raw_iterator();
+                iter.seek_for_prev(prefix_max);
+                ReverseScanIterator {
+                    prefix: prefix.to_vec(),
+                    iter,
+                    done: false,
+                }
+            })
+            .collect();
+        ReverseScanGroupIterator::new(iters, value_offset)
+    }
+
     pub fn write(&self, mut rows: Vec<DBRow>, flush: DBFlush) {
         debug!(
             "writing {} rows to {:?}, flush={:?}",
diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs
index d72e27e7..32d27c1f 100644
--- a/src/new_index/mempool.rs
+++ b/src/new_index/mempool.rs
@@ -177,6 +177,49 @@ impl Mempool {
             .collect()
     }
 
+    pub fn history_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&Txid>,
+        limit: usize,
+    ) -> Vec<Transaction> {
+        let _timer = self
+            .latency
+            .with_label_values(&["history_group"])
+            .start_timer();
+        scripthashes
+            .iter()
+            .filter_map(|scripthash| self.history.get(&scripthash[..]))
+            .flat_map(|entries| entries.iter())
+            .map(|e| e.get_txid())
+            .unique()
+            // TODO seek directly to last seen tx without reading earlier rows
+            .skip_while(|txid| {
+                // skip until we reach the last_seen_txid
+                last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+            })
+            .skip(match last_seen_txid {
+                Some(_) => 1, // skip the last_seen_txid itself
+                None => 0,
+            })
+            .take(limit)
+            .map(|txid| self.txstore.get(&txid).expect("missing mempool tx"))
+            .cloned()
+            .collect()
+    }
+
+    pub fn history_txids_iter_group<'a>(
+        &'a self,
+        scripthashes: &'a [[u8; 32]],
+    ) -> impl Iterator<Item = Txid> + 'a {
+        scripthashes
+            .iter()
+            .filter_map(move |scripthash| self.history.get(&scripthash[..]))
+            .flat_map(|entries| entries.iter())
+            .map(|entry| entry.get_txid())
+            .unique()
+    }
+
     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<Txid> {
         let _timer = self
             .latency
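`ReverseScanGroupIterator` k-way-merges several per-prefix reverse scans into one stream that is globally newest-first: each underlying iterator already yields rows in descending key order, so it repeatedly emits whichever head row has the greatest key suffix from `value_offset` onward (offset 33 skips the code byte and the 32-byte hash, leaving the big-endian height and tx_position). A minimal, self-contained sketch of the same merge over plain integers; the names and types here are illustrative, not code from this diff:

```rust
// Sketch of the k-way reverse merge used by ReverseScanGroupIterator: each
// source is descending, and we repeatedly take the largest head among them.
fn merge_descending(mut sources: Vec<Vec<u32>>) -> Vec<u32> {
    // Model each reverse iterator as a Vec popped from the end (descending).
    sources.iter_mut().for_each(|s| s.sort()); // ascending, so pop() descends
    let mut heads: Vec<Option<u32>> = sources.iter_mut().map(|s| s.pop()).collect();
    let mut out = Vec::new();
    while let Some(best) = heads
        .iter()
        .enumerate()
        .filter_map(|(i, h)| h.map(|v| (i, v))) // drop exhausted sources
        .max_by_key(|&(_, v)| v)                // pick the greatest head
        .map(|(i, _)| i)
    {
        out.push(heads[best].take().unwrap());
        heads[best] = sources[best].pop(); // advance only the consumed source
    }
    out
}

fn main() {
    let merged = merge_descending(vec![vec![9, 3, 1], vec![8, 7, 2]]);
    assert_eq!(merged, vec![9, 8, 7, 3, 2, 1]);
    println!("{:?}", merged);
}
```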
diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs
index 089451bf..b2323cee 100644
--- a/src/new_index/schema.rs
+++ b/src/new_index/schema.rs
@@ -37,6 +37,8 @@ use crate::new_index::fetch::{start_fetcher, BlockEntry, FetchFrom};
 #[cfg(feature = "liquid")]
 use crate::elements::{asset, peg};
 
+use super::db::ReverseScanGroupIterator;
+
 const MIN_HISTORY_ITEMS_TO_CACHE: usize = 100;
 
 pub struct Store {
@@ -505,34 +507,47 @@ impl ChainQuery {
             &TxHistoryRow::prefix_height(code, hash, start_height as u32),
         )
     }
-    fn history_iter_scan_reverse(&self, code: u8, hash: &[u8]) -> ReverseScanIterator {
+    fn history_iter_scan_reverse(
+        &self,
+        code: u8,
+        hash: &[u8],
+        start_height: Option<u32>,
+    ) -> ReverseScanIterator {
         self.store.history_db.iter_scan_reverse(
             &TxHistoryRow::filter(code, hash),
-            &TxHistoryRow::prefix_end(code, hash),
+            &start_height.map_or(TxHistoryRow::prefix_end(code, hash), |start_height| {
+                TxHistoryRow::prefix_height_end(code, hash, start_height)
+            }),
         )
     }
-
-    pub fn summary(
+    fn history_iter_scan_group_reverse(
         &self,
-        scripthash: &[u8],
-        last_seen_txid: Option<&Txid>,
-        limit: usize,
-    ) -> Vec<TxHistorySummary> {
-        // scripthash lookup
-        self._summary(b'H', scripthash, last_seen_txid, limit)
+        code: u8,
+        hashes: &[[u8; 32]],
+        start_height: Option<u32>,
+    ) -> ReverseScanGroupIterator {
+        self.store.history_db.iter_scan_group_reverse(
+            hashes.iter().map(|hash| {
+                let prefix = TxHistoryRow::filter(code, &hash[..]);
+                let prefix_max = start_height
+                    .map_or(TxHistoryRow::prefix_end(code, &hash[..]), |start_height| {
+                        TxHistoryRow::prefix_height_end(code, &hash[..], start_height)
+                    });
+                (prefix, prefix_max)
+            }),
+            33,
+        )
     }
 
-    fn _summary(
+    fn collate_summaries(
         &self,
-        code: u8,
-        hash: &[u8],
+        iter: impl Iterator<Item = TxHistoryRow>,
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<TxHistorySummary> {
-        let _timer_scan = self.start_timer("address_summary");
-        let rows = self
-            .history_iter_scan_reverse(code, hash)
-            .map(TxHistoryRow::from_row)
+        // collate utxo funding/spending events by transaction
+
+        let rows = iter
             .map(|row| (row.get_txid(), row.key.txinfo, row.key.tx_position))
             .skip_while(|(txid, _, _)| {
                 // skip until we reach the last_seen_txid
@@ -546,8 +561,6 @@ impl ChainQuery {
                 self.tx_confirming_block(&txid)
                     .map(|b| (txid, info, b.height, b.time, tx_position))
             });
-
-        // collate utxo funding/spending events by transaction
         let mut map: HashMap<Txid, TxHistorySummary> = HashMap::new();
         for (txid, info, height, time, tx_position) in rows {
             if !map.contains_key(&txid) && map.len() == limit {
@@ -606,7 +619,6 @@ impl ChainQuery {
                 _ => {}
             }
         }
-
         let mut tx_summaries = map.into_values().collect::<Vec<TxHistorySummary>>();
         tx_summaries.sort_by(|a, b| {
             if a.height == b.height {
@@ -622,54 +634,93 @@ impl ChainQuery {
         tx_summaries
     }
 
-    pub fn history(
+    pub fn summary(
         &self,
         scripthash: &[u8],
         last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        // scripthash lookup
+        self._summary(b'H', scripthash, last_seen_txid, start_height, limit)
+    }
+
+    fn _summary(
+        &self,
+        code: u8,
+        hash: &[u8],
+        last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        let _timer_scan = self.start_timer("address_summary");
+        let rows = self
+            .history_iter_scan_reverse(code, hash, start_height)
+            .map(TxHistoryRow::from_row);
+
+        self.collate_summaries(rows, last_seen_txid, limit)
+    }
+
+    pub fn summary_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> Vec<TxHistorySummary> {
+        // scripthash lookup
+        let _timer_scan = self.start_timer("address_group_summary");
+        let rows = self
+            .history_iter_scan_group_reverse(b'H', scripthashes, start_height)
+            .map(TxHistoryRow::from_row);
+
+        self.collate_summaries(rows, last_seen_txid, limit)
+    }
+
+    pub fn history<'a>(
+        &'a self,
+        scripthash: &[u8],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
         // scripthash lookup
-        self._history(b'H', scripthash, last_seen_txid, limit)
+        self._history(b'H', scripthash, last_seen_txid, start_height, limit)
     }
 
     pub fn history_txids_iter<'a>(&'a self, scripthash: &[u8]) -> impl Iterator<Item = Txid> + 'a {
-        self.history_iter_scan_reverse(b'H', scripthash)
+        self.history_iter_scan_reverse(b'H', scripthash, None)
             .map(|row| TxHistoryRow::from_row(row).get_txid())
             .unique()
     }
 
-    fn _history(
-        &self,
+    fn _history<'a>(
+        &'a self,
         code: u8,
         hash: &[u8],
-        last_seen_txid: Option<&Txid>,
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
         let _timer_scan = self.start_timer("history");
-        let txs_conf = self
-            .history_iter_scan_reverse(code, hash)
-            .map(|row| TxHistoryRow::from_row(row).get_txid())
-            // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
-            .unique()
-            // TODO seek directly to last seen tx without reading earlier rows
-            .skip_while(|txid| {
-                // skip until we reach the last_seen_txid
-                last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
-            })
-            .skip(match last_seen_txid {
-                Some(_) => 1, // skip the last_seen_txid itself
-                None => 0,
-            })
-            .filter_map(|txid| self.tx_confirming_block(&txid).map(|b| (txid, b)))
-            .take(limit)
-            .collect::<Vec<_>>();
-        self.lookup_txns(&txs_conf)
-            .expect("failed looking up txs in history index")
-            .into_iter()
-            .zip(txs_conf)
-            .map(|(tx, (_, blockid))| (tx, blockid))
-            .collect()
+        self.lookup_txns(
+            self.history_iter_scan_reverse(code, hash, start_height)
+                .map(|row| TxHistoryRow::from_row(row).get_txid())
+                // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
+                .unique()
+                // TODO seek directly to last seen tx without reading earlier rows
+                .skip_while(move |txid| {
+                    // skip until we reach the last_seen_txid
+                    last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+                })
+                .skip(match last_seen_txid {
+                    Some(_) => 1, // skip the last_seen_txid itself
+                    None => 0,
+                })
+                .filter_map(move |txid| self.tx_confirming_block(&txid).map(|b| (txid, b))),
+            limit,
+        )
     }
 
     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, BlockId)> {
@@ -687,6 +738,57 @@ impl ChainQuery {
             .collect()
     }
 
+    pub fn history_group<'a>(
+        &'a self,
+        scripthashes: &[[u8; 32]],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        // scripthash lookup
+        self._history_group(b'H', scripthashes, last_seen_txid, start_height, limit)
+    }
+
+    pub fn history_txids_iter_group(
+        &self,
+        scripthashes: &[[u8; 32]],
+        start_height: Option<u32>,
+    ) -> impl Iterator<Item = Txid> + '_ {
+        self.history_iter_scan_group_reverse(b'H', scripthashes, start_height)
+            .map(|row| TxHistoryRow::from_row(row).get_txid())
+            .unique()
+    }
+
+    fn _history_group<'a>(
+        &'a self,
+        code: u8,
+        hashes: &[[u8; 32]],
+        last_seen_txid: Option<&'a Txid>,
+        start_height: Option<u32>,
+        limit: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        debug!("limit {} | last_seen {:?}", limit, last_seen_txid);
+        let _timer_scan = self.start_timer("history_group");
+
+        self.lookup_txns(
+            self.history_iter_scan_group_reverse(code, hashes, start_height)
+                .map(|row| TxHistoryRow::from_row(row).get_txid())
+                // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
+                .unique()
+                .skip_while(move |txid| {
+                    // we already seeked to the last txid at this height
+                    // now skip just past the last_seen_txid itself
+                    last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid)
+                })
+                .skip(match last_seen_txid {
+                    Some(_) => 1, // skip the last_seen_txid itself
+                    None => 0,
+                })
+                .filter_map(move |txid| self.tx_confirming_block(&txid).map(|b| (txid, b))),
+            limit,
+        )
+    }
+
     // TODO: avoid duplication with stats/stats_delta?
     pub fn utxo(&self, scripthash: &[u8], limit: usize, flush: DBFlush) -> Result<Vec<Utxo>> {
         let _timer = self.start_timer("utxo");
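`_history` and `_history_group` now return lazy parallel iterators instead of collecting: dedupe txids, skip through `last_seen_txid`, then hand the stream to `lookup_txns`, which applies the limit. The resume logic in isolation, as a runnable sketch (`itertools` is already a dependency of this crate; integers stand in for txids):

```rust
use itertools::Itertools;

// Resume a deduplicated, newest-first stream after `last_seen`, taking `limit` items.
fn resume_after(history: &[u32], last_seen: Option<u32>, limit: usize) -> Vec<u32> {
    history
        .iter()
        .copied()
        .unique() // keeps an in-memory set, as the XXX comment in the diff notes
        .skip_while(move |v| last_seen.map_or(false, |seen| seen != *v))
        .skip(last_seen.map_or(0, |_| 1)) // skip the last-seen item itself
        .take(limit)
        .collect()
}

fn main() {
    // 7 appears twice (a tx can both fund and spend in one history), hence unique()
    let history = [9, 8, 7, 7, 5, 3];
    assert_eq!(resume_after(&history, None, 2), vec![9, 8]);
    assert_eq!(resume_after(&history, Some(8), 2), vec![7, 5]);
    println!("ok");
}
```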
@@ -983,15 +1085,24 @@ impl ChainQuery {
     // TODO: can we pass txids as a "generic iterable"?
     // TODO: should also use a custom ThreadPoolBuilder?
-    pub fn lookup_txns(&self, txids: &[(Txid, BlockId)]) -> Result<Vec<Transaction>> {
-        let _timer = self.start_timer("lookup_txns");
+    pub fn lookup_txns<'a, I>(
+        &'a self,
+        txids: I,
+        take: usize,
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a
+    where
+        I: Iterator<Item = (Txid, BlockId)> + Send + rayon::iter::ParallelBridge + 'a,
+    {
         txids
-            .par_iter()
-            .map(|(txid, blockid)| {
-                self.lookup_txn(txid, Some(&blockid.hash))
-                    .chain_err(|| "missing tx")
+            .take(take)
+            .par_bridge()
+            .map(move |(txid, blockid)| -> Result<_> {
+                Ok((
+                    self.lookup_txn(&txid, Some(&blockid.hash))
+                        .chain_err(|| "missing tx")?,
+                    blockid,
+                ))
             })
-            .collect::<Result<Vec<Transaction>>>()
     }
 
     pub fn lookup_txn(&self, txid: &Txid, blockhash: Option<&BlockHash>) -> Option<Transaction> {
@@ -1102,13 +1213,19 @@
     }
 
     #[cfg(feature = "liquid")]
-    pub fn asset_history(
-        &self,
-        asset_id: &AssetId,
-        last_seen_txid: Option<&Txid>,
+    pub fn asset_history<'a>(
+        &'a self,
+        asset_id: &'a AssetId,
+        last_seen_txid: Option<&'a Txid>,
         limit: usize,
-    ) -> Vec<(Transaction, BlockId)> {
-        self._history(b'I', &asset_id.into_inner()[..], last_seen_txid, limit)
+    ) -> impl rayon::iter::ParallelIterator<Item = Result<(Transaction, BlockId)>> + 'a {
+        self._history(
+            b'I',
+            &asset_id.into_inner()[..],
+            last_seen_txid,
+            None,
+            limit,
+        )
     }
 
     #[cfg(feature = "liquid")]
@@ -1661,6 +1778,12 @@ impl TxHistoryRow {
         bincode_util::serialize_big(&(code, full_hash(hash), height)).unwrap()
     }
 
+    // prefix representing the end of a given block (used for reverse scans)
+    fn prefix_height_end(code: u8, hash: &[u8], height: u32) -> Bytes {
+        // u16::MAX for the tx_position ensures we get all transactions at this height
+        bincode_util::serialize_big(&(code, full_hash(hash), height, u16::MAX)).unwrap()
+    }
+
     pub fn into_row(self) -> DBRow {
         DBRow {
             key: bincode_util::serialize_big(&self.key).unwrap(),
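`prefix_height_end` works because `bincode_util::serialize_big` encodes fields big-endian, so lexicographic byte order on keys equals field-by-field numeric order; `seek_for_prev` on `(code, hash, height, u16::MAX)` therefore lands on the newest row at or below that height. A sketch of the ordering argument, with a hand-rolled encoding standing in for `serialize_big`:

```rust
// Hand-rolled big-endian key encoding, standing in for bincode_util::serialize_big.
fn history_key(code: u8, hash: &[u8; 32], height: u32, tx_position: u16) -> Vec<u8> {
    let mut key = Vec::with_capacity(1 + 32 + 4 + 2);
    key.push(code);
    key.extend_from_slice(hash);
    key.extend_from_slice(&height.to_be_bytes());
    key.extend_from_slice(&tx_position.to_be_bytes());
    key
}

fn main() {
    let hash = [0xAA; 32];
    let bound = history_key(b'H', &hash, 500_000, u16::MAX); // prefix_height_end analogue
    // Every row at the bounded height sorts at or below the bound...
    assert!(history_key(b'H', &hash, 500_000, 0) <= bound);
    assert!(history_key(b'H', &hash, 500_000, 1234) <= bound);
    // ...while rows at later heights sort above it, so seek_for_prev(bound)
    // positions the iterator on the newest row at or below height 500_000.
    assert!(history_key(b'H', &hash, 500_001, 0) > bound);
    println!("bound ordering holds");
}
```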
diff --git a/src/rest.rs b/src/rest.rs
index 82dd3e68..e503191e 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -19,6 +19,7 @@ use hex::{self, FromHexError};
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Response, Server, StatusCode};
 use prometheus::{HistogramOpts, HistogramVec};
+use rayon::iter::ParallelIterator;
 use tokio::sync::oneshot;
 
 use hyperlocal::UnixServerExt;
@@ -42,6 +43,8 @@ use std::thread;
 use url::form_urlencoded;
 
 const ADDRESS_SEARCH_LIMIT: usize = 10;
+// Maximum number of addresses (or scripthashes) per multi-address request
+const MULTI_ADDRESS_LIMIT: usize = 300;
 
 #[cfg(feature = "liquid")]
 const ASSETS_PER_PAGE: usize = 25;
@@ -539,6 +542,27 @@ fn ttl_by_depth(height: Option<usize>, query: &Query) -> u32 {
     })
 }
 
+enum TxidLocation {
+    Mempool,
+    Chain(u32), // contains height
+    None,
+}
+
+#[inline]
+fn find_txid(
+    txid: &Txid,
+    mempool: &crate::new_index::Mempool,
+    chain: &crate::new_index::ChainQuery,
+) -> TxidLocation {
+    if mempool.lookup_txn(txid).is_some() {
+        TxidLocation::Mempool
+    } else if let Some(block) = chain.tx_confirming_block(txid) {
+        TxidLocation::Chain(block.height as u32)
+    } else {
+        TxidLocation::None
+    }
+}
+
 /// Prepare transactions to be serialized in a JSON response
 ///
 /// Any transactions with missing prevouts will be filtered out of the response, rather than returned with incorrect data.
@@ -887,26 +911,31 @@
 
         let mut txs = vec![];
 
-        if let Some(given_txid) = &after_txid {
-            let is_mempool = query
-                .mempool()
-                .history_txids_iter(&script_hash[..])
-                .any(|txid| given_txid == &txid);
-            let is_confirmed = if is_mempool {
-                false
-            } else {
-                query
-                    .chain()
-                    .history_txids_iter(&script_hash[..])
-                    .any(|txid| given_txid == &txid)
-            };
-            if !is_mempool && !is_confirmed {
+        let after_txid_location = if let Some(txid) = &after_txid {
+            find_txid(txid, &query.mempool(), query.chain())
+        } else {
+            TxidLocation::Mempool
+        };
+
+        let confirmed_block_height = match after_txid_location {
+            TxidLocation::Mempool => {
+                txs.extend(
+                    query
+                        .mempool()
+                        .history(&script_hash[..], after_txid.as_ref(), max_txs)
+                        .into_iter()
+                        .map(|tx| (tx, None)),
+                );
+                None
+            }
+            TxidLocation::None => {
                 return Err(HttpError(
                     StatusCode::UNPROCESSABLE_ENTITY,
                     String::from("after_txid not found"),
                 ));
             }
-        }
+            TxidLocation::Chain(height) => Some(height),
+        };
 
         txs.extend(
             query
@@ -927,9 +956,107 @@
             txs.extend(
                 query
                     .chain()
-                    .history(&script_hash[..], after_txid_ref, max_txs - txs.len())
-                    .into_iter()
-                    .map(|(tx, blockid)| (tx, Some(blockid))),
+                    .history(
+                        &script_hash[..],
+                        after_txid_ref,
+                        confirmed_block_height,
+                        max_txs - txs.len(),
+                    )
+                    .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                    .collect::<Result<Vec<_>, _>>()?,
             );
         }
+
+        json_response(prepare_txs(txs, query, config), TTL_SHORT)
+    }
+
+    (&Method::POST, Some(script_types @ &"addresses"), Some(&"txs"), None, None, None)
+    | (&Method::POST, Some(script_types @ &"scripthashes"), Some(&"txs"), None, None, None) => {
+        let script_type = match *script_types {
+            "addresses" => "address",
+            "scripthashes" => "scripthash",
+            _ => "",
+        };
+
+        if multi_address_too_long(&body) {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<String> =
+            serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+        if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<[u8; 32]> = script_hashes
+            .iter()
+            .filter_map(|script_str| {
+                to_scripthash(script_type, script_str, config.network_type).ok()
+            })
+            .collect();
+
+        let max_txs = query_params
+            .get("max_txs")
+            .and_then(|s| s.parse::<usize>().ok())
+            .unwrap_or(config.rest_default_max_mempool_txs);
+        let after_txid = query_params
+            .get("after_txid")
+            .and_then(|s| s.parse::<Txid>().ok());
+
+        let mut txs = vec![];
+
+        let after_txid_location = if let Some(txid) = &after_txid {
+            find_txid(txid, &query.mempool(), query.chain())
+        } else {
+            TxidLocation::Mempool
+        };
+
+        let confirmed_block_height = match after_txid_location {
+            TxidLocation::Mempool => {
+                txs.extend(
+                    query
+                        .mempool()
+                        .history_group(&script_hashes, after_txid.as_ref(), max_txs)
+                        .into_iter()
+                        .map(|tx| (tx, None)),
+                );
+                None
+            }
+            TxidLocation::None => {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("after_txid not found"),
+                ));
+            }
+            TxidLocation::Chain(height) => Some(height),
+        };
+
+        if txs.len() < max_txs {
+            let after_txid_ref = if !txs.is_empty() {
+                // If there are any txs, we know mempool found the
+                // after_txid IF it exists... so always return None.
+                None
+            } else {
+                after_txid.as_ref()
+            };
+            txs.extend(
+                query
+                    .chain()
+                    .history_group(
+                        &script_hashes,
+                        after_txid_ref,
+                        confirmed_block_height,
+                        max_txs - txs.len(),
+                    )
+                    .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                    .collect::<Result<Vec<_>, _>>()?,
            );
        }
 
        json_response(prepare_txs(txs, query, config), TTL_SHORT)
    }
@@ -961,10 +1088,9 @@
 
         let txs = query
             .chain()
-            .history(&script_hash[..], last_seen_txid.as_ref(), max_txs)
-            .into_iter()
-            .map(|(tx, blockid)| (tx, Some(blockid)))
-            .collect();
+            .history(&script_hash[..], last_seen_txid.as_ref(), None, max_txs)
+            .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+            .collect::<Result<Vec<_>, _>>()?;
 
         json_response(prepare_txs(txs, query, config), TTL_SHORT)
     }
@@ -994,9 +1120,110 @@
                 .unwrap_or(config.rest_default_max_address_summary_txs),
         );
 
-        let summary = query
-            .chain()
-            .summary(&script_hash[..], last_seen_txid.as_ref(), max_txs);
+        let last_seen_txid_location = if let Some(txid) = &last_seen_txid {
+            find_txid(txid, &query.mempool(), query.chain())
+        } else {
+            TxidLocation::Mempool
+        };
+
+        let confirmed_block_height = match last_seen_txid_location {
+            TxidLocation::Mempool => None,
+            TxidLocation::None => {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("last_seen_txid not found"),
+                ));
+            }
+            TxidLocation::Chain(height) => Some(height),
+        };
+
+        let summary = query.chain().summary(
+            &script_hash[..],
+            last_seen_txid.as_ref(),
+            confirmed_block_height,
+            max_txs,
+        );
 
         json_response(summary, TTL_SHORT)
     }
+    (
+        &Method::POST,
+        Some(script_types @ &"addresses"),
+        Some(&"txs"),
+        Some(&"summary"),
+        last_seen_txid,
+        None,
+    )
+    | (
+        &Method::POST,
+        Some(script_types @ &"scripthashes"),
+        Some(&"txs"),
+        Some(&"summary"),
+        last_seen_txid,
+        None,
+    ) => {
+        let script_type = match *script_types {
+            "addresses" => "address",
+            "scripthashes" => "scripthash",
+            _ => "",
+        };
+
+        if multi_address_too_long(&body) {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<String> =
+            serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+        if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<[u8; 32]> = script_hashes
+            .iter()
+            .filter_map(|script_str| {
+                to_scripthash(script_type, script_str, config.network_type).ok()
+            })
+            .collect();
+
+        let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok());
+        let max_txs = cmp::min(
+            config.rest_default_max_address_summary_txs,
+            query_params
+                .get("max_txs")
+                .and_then(|s| s.parse::<usize>().ok())
+                .unwrap_or(config.rest_default_max_address_summary_txs),
+        );
+
+        let last_seen_txid_location = if let Some(txid) = &last_seen_txid {
+            find_txid(txid, &query.mempool(), query.chain())
+        } else {
+            TxidLocation::Mempool
+        };
+
+        let confirmed_block_height = match last_seen_txid_location {
+            TxidLocation::Mempool => None,
+            TxidLocation::None => {
+                return Err(HttpError(
+                    StatusCode::UNPROCESSABLE_ENTITY,
+                    String::from("last_seen_txid not found"),
+                ));
+            }
+            TxidLocation::Chain(height) => Some(height),
+        };
+
+        let summary = query.chain().summary_group(
+            &script_hashes,
+            last_seen_txid.as_ref(),
+            confirmed_block_height,
+            max_txs,
+        );
 
         json_response(summary, TTL_SHORT)
     }
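From the client side, both new endpoints take a JSON array of addresses or scripthashes in the POST body; `/addresses/txs` pages via the `after_txid` query parameter. A hypothetical pagination loop: `reqwest` (with its `blocking` and `json` features enabled) and the base URL are assumptions for illustration, not part of this codebase:

```rust
// Hypothetical client for the new POST /addresses/txs endpoint.
use serde_json::Value;

const BASE: &str = "http://localhost:3000"; // stand-in for a real instance

fn page(addresses: &[&str], after_txid: Option<&str>) -> reqwest::Result<Vec<Value>> {
    let url = match after_txid {
        // after_txid resumes the listing just past the given txid
        Some(txid) => format!("{}/addresses/txs?after_txid={}", BASE, txid),
        None => format!("{}/addresses/txs", BASE),
    };
    reqwest::blocking::Client::new()
        .post(url)
        .json(&addresses) // JSON array body, at most MULTI_ADDRESS_LIMIT entries
        .send()?
        .json()
}

fn main() -> reqwest::Result<()> {
    let addrs = ["bc1q...", "bc1p..."]; // placeholder addresses
    let mut last_txid: Option<String> = None;
    loop {
        let txs = page(&addrs, last_txid.as_deref())?;
        if txs.is_empty() {
            break; // no more pages
        }
        last_txid = txs.last().and_then(|tx| tx["txid"].as_str().map(String::from));
        println!("got {} txs", txs.len());
    }
    Ok(())
}
```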
@@ -1495,8 +1722,8 @@
             query
                 .chain()
                 .asset_history(&asset_id, None, config.rest_default_chain_txs_per_page)
-                .into_iter()
-                .map(|(tx, blockid)| (tx, Some(blockid))),
+                .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+                .collect::<Result<Vec<_>, _>>()?,
         );
 
         json_response(prepare_txs(txs, query, config), TTL_SHORT)
@@ -1521,9 +1748,8 @@
                 last_seen_txid.as_ref(),
                 config.rest_default_chain_txs_per_page,
             )
-            .into_iter()
-            .map(|(tx, blockid)| (tx, Some(blockid)))
-            .collect();
+            .map(|res| res.map(|(tx, blockid)| (tx, Some(blockid))))
+            .collect::<Result<Vec<_>, _>>()?;
 
         json_response(prepare_txs(txs, query, config), TTL_SHORT)
     }
@@ -1704,6 +1930,15 @@ fn parse_scripthash(scripthash: &str) -> Result<FullHash> {
     }
 }
 
+#[inline]
+fn multi_address_too_long(body: &hyper::body::Bytes) -> bool {
+    // ("",) (3) (quotes and comma between each entry)
+    // (\n    ) (5) (allows for pretty-printed JSON with a 4-space indent)
+    // The opening [] and whatnot don't need to be accounted for; we give more than enough leeway.
+    // p2tr and p2wsh addresses are 62 chars, scripthashes are 64.
+    body.len() > (8 + 64) * MULTI_ADDRESS_LIMIT
+}
+
 #[derive(Debug)]
 struct HttpError(StatusCode, String);
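Sanity-checking the arithmetic in `multi_address_too_long`: a 64-character scripthash rendered as one pretty-printed array element at 4-space indent costs at most newline + indent + quotes + hash + comma, which is exactly the `8 + 64` bytes budgeted per entry:

```rust
const MULTI_ADDRESS_LIMIT: usize = 300;

fn main() {
    // one pretty-printed JSON array element: newline, 4-space indent, quoted hash, comma
    let entry = format!("\n    {:?},", "a".repeat(64));
    assert_eq!(entry.len(), 8 + 64); // 1 + 4 + 2 + 64 + 1 = 72 bytes
    println!(
        "body budget for {} entries: {} bytes",
        MULTI_ADDRESS_LIMIT,
        (8 + 64) * MULTI_ADDRESS_LIMIT
    );
}
```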