Remove clones and limit max address count
junderw committed Oct 7, 2024
1 parent 12599e7 commit 0495706
Showing 3 changed files with 67 additions and 23 deletions.
14 changes: 7 additions & 7 deletions src/new_index/mempool.rs
@@ -179,7 +179,7 @@ impl Mempool {
 
     pub fn history_group(
         &self,
-        scripthashes: Vec<[u8; 32]>,
+        scripthashes: &[[u8; 32]],
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<Transaction> {
@@ -188,7 +188,7 @@ impl Mempool {
             .with_label_values(&["history_group"])
             .start_timer();
         scripthashes
-            .into_iter()
+            .iter()
             .filter_map(|scripthash| self.history.get(&scripthash[..]))
             .flat_map(|entries| entries.iter())
             .map(|e| e.get_txid())
@@ -208,12 +208,12 @@ impl Mempool {
             .collect()
     }
 
-    pub fn history_txids_iter_group(
-        &self,
-        scripthashes: Vec<[u8; 32]>,
-    ) -> impl Iterator<Item = Txid> + '_ {
+    pub fn history_txids_iter_group<'a>(
+        &'a self,
+        scripthashes: &'a [[u8; 32]],
+    ) -> impl Iterator<Item = Txid> + 'a {
         scripthashes
-            .into_iter()
+            .iter()
             .filter_map(move |scripthash| self.history.get(&scripthash[..]))
             .flat_map(|entries| entries.iter())
             .map(|entry| entry.get_txid())
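The signature change above is the core of the commit: history_txids_iter_group now borrows the scripthash slice instead of taking the Vec by value, so the returned iterator has to name one lifetime 'a covering both &self and scripthashes (the old '_ only tied the iterator to &self). A minimal, self-contained sketch of the same pattern, with a hypothetical History type and txids stubbed as u64:

use std::collections::HashMap;

struct History {
    // Hypothetical stand-in for the mempool's scripthash -> entries index.
    map: HashMap<Vec<u8>, Vec<u64>>,
}

impl History {
    fn txids_iter_group<'a>(
        &'a self,
        scripthashes: &'a [[u8; 32]],
    ) -> impl Iterator<Item = u64> + 'a {
        scripthashes
            .iter()
            .filter_map(move |sh| self.map.get(&sh[..]))
            .flat_map(|entries| entries.iter().copied())
    }
}

fn main() {
    let history = History { map: HashMap::new() };
    let hashes: Vec<[u8; 32]> = vec![[0u8; 32]];
    // The caller keeps ownership of the Vec; no clone is needed to iterate.
    assert_eq!(history.txids_iter_group(&hashes).count(), 0);
}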
14 changes: 7 additions & 7 deletions src/new_index/schema.rs
@@ -650,14 +650,14 @@ impl ChainQuery {
 
     pub fn summary_group(
         &self,
-        scripthashes: Vec<[u8; 32]>,
+        scripthashes: &[[u8; 32]],
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<TxHistorySummary> {
         // scripthash lookup
         let _timer_scan = self.start_timer("address_group_summary");
         let rows = self
-            .history_iter_scan_group_reverse(b'H', &scripthashes)
+            .history_iter_scan_group_reverse(b'H', scripthashes)
             .map(TxHistoryRow::from_row);
 
         self.collate_summaries(rows, last_seen_txid, limit)
@@ -730,7 +730,7 @@ impl ChainQuery {
 
     pub fn history_group(
         &self,
-        scripthashes: Vec<[u8; 32]>,
+        scripthashes: &[[u8; 32]],
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<(Transaction, BlockId)> {
@@ -740,24 +740,24 @@ impl ChainQuery {
 
     pub fn history_txids_iter_group(
         &self,
-        scripthashes: Vec<[u8; 32]>,
+        scripthashes: &[[u8; 32]],
     ) -> impl Iterator<Item = Txid> + '_ {
-        self.history_iter_scan_group_reverse(b'H', &scripthashes)
+        self.history_iter_scan_group_reverse(b'H', scripthashes)
             .map(|row| TxHistoryRow::from_row(row).get_txid())
             .unique()
     }
 
     fn _history_group(
         &self,
         code: u8,
-        hashes: Vec<[u8; 32]>,
+        hashes: &[[u8; 32]],
         last_seen_txid: Option<&Txid>,
         limit: usize,
     ) -> Vec<(Transaction, BlockId)> {
         print!("limit {} | last_seen {:?}", limit, last_seen_txid);
         let _timer_scan = self.start_timer("history_group");
         let txs_conf = self
-            .history_iter_scan_group_reverse(code, &hashes)
+            .history_iter_scan_group_reverse(code, hashes)
             .map(|row| TxHistoryRow::from_row(row).get_txid())
             // XXX: unique() requires keeping an in-memory list of all txids, can we avoid that?
             .unique()
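As shown in the rest.rs hunks below, the practical effect of these borrowed-slice signatures is at the call sites: one Vec<[u8; 32]> can back the mempool lookup, the chain lookup, and the final history query without any .clone(). A small sketch of that pattern, with hypothetical closures standing in for the query methods:

fn count_hits(
    mempool_txids: impl Fn(&[[u8; 32]]) -> usize,
    chain_txids: impl Fn(&[[u8; 32]]) -> usize,
) -> usize {
    let script_hashes: Vec<[u8; 32]> = vec![[0u8; 32]; 3];
    // Before this commit the same code needed `script_hashes.clone()` for every
    // call after the first, because the methods took the Vec by value.
    mempool_txids(&script_hashes) + chain_txids(&script_hashes)
}

fn main() {
    let total = count_hits(|hashes| hashes.len(), |hashes| hashes.len());
    assert_eq!(total, 6);
}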
62 changes: 53 additions & 9 deletions src/rest.rs
@@ -42,6 +42,8 @@ use std::thread;
 use url::form_urlencoded;
 
 const ADDRESS_SEARCH_LIMIT: usize = 10;
+// Limit to 300 addresses
+const MULTI_ADDRESS_LIMIT: usize = 300;
 
 #[cfg(feature = "liquid")]
 const ASSETS_PER_PAGE: usize = 25;
@@ -944,8 +946,24 @@ fn handle_request(
             _ => "",
         };
 
-        let script_hashes: Vec<[u8; 32]> = serde_json::from_slice::<Vec<String>>(&body)
-            .map_err(|err| HttpError::from(err.to_string()))?
+        if multi_address_too_long(&body) {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<String> =
+            serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+        if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<[u8; 32]> = script_hashes
             .iter()
             .filter_map(|script_str| {
                 to_scripthash(script_type, script_str, config.network_type).ok()
@@ -965,14 +983,14 @@ fn handle_request(
         if let Some(given_txid) = &after_txid {
             let is_mempool = query
                 .mempool()
-                .history_txids_iter_group(script_hashes.clone())
+                .history_txids_iter_group(&script_hashes)
                 .any(|txid| given_txid == &txid);
             let is_confirmed = if is_mempool {
                 false
             } else {
                 query
                     .chain()
-                    .history_txids_iter_group(script_hashes.clone())
+                    .history_txids_iter_group(&script_hashes)
                     .any(|txid| given_txid == &txid)
             };
             if !is_mempool && !is_confirmed {
@@ -985,7 +1003,7 @@ fn handle_request(
         txs.extend(
             query
                 .mempool()
-                .history_group(script_hashes.clone(), after_txid.as_ref(), max_txs)
+                .history_group(&script_hashes, after_txid.as_ref(), max_txs)
                 .into_iter()
                 .map(|tx| (tx, None)),
         );
@@ -1001,7 +1019,7 @@ fn handle_request(
         txs.extend(
             query
                 .chain()
-                .history_group(script_hashes, after_txid_ref, max_txs - txs.len())
+                .history_group(&script_hashes, after_txid_ref, max_txs - txs.len())
                 .into_iter()
                 .map(|(tx, blockid)| (tx, Some(blockid))),
         );
@@ -1095,8 +1113,25 @@ fn handle_request(
             "scripthashes" => "scripthash",
             _ => "",
         };
-        let script_hashes: Vec<[u8; 32]> = serde_json::from_slice::<Vec<String>>(&body)
-            .map_err(|err| HttpError::from(err.to_string()))?
+
+        if multi_address_too_long(&body) {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<String> =
+            serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+        if script_hashes.len() > MULTI_ADDRESS_LIMIT {
+            return Err(HttpError(
+                StatusCode::UNPROCESSABLE_ENTITY,
+                String::from("body too long"),
+            ));
+        }
+
+        let script_hashes: Vec<[u8; 32]> = script_hashes
             .iter()
             .filter_map(|script_str| {
                 to_scripthash(script_type, script_str, config.network_type).ok()
@@ -1115,7 +1150,7 @@ fn handle_request(
         let summary =
             query
                 .chain()
-                .summary_group(script_hashes, last_seen_txid.as_ref(), max_txs);
+                .summary_group(&script_hashes, last_seen_txid.as_ref(), max_txs);
 
         json_response(summary, TTL_SHORT)
     }
@@ -1823,6 +1858,15 @@ fn parse_scripthash(scripthash: &str) -> Result<FullHash, HttpError> {
     }
 }
 
+#[inline]
+fn multi_address_too_long(body: &hyper::body::Bytes) -> bool {
+    // ("",) (3) (quotes and comma between each entry)
+    // (\n    ) (5) (allows for pretty printed JSON with 4 space indent)
+    // The opening [] and whatnot don't need to be accounted for, we give more than enough leeway
+    // p2tr and p2wsh are 55 length, scripthashes are 64.
+    body.len() > (8 + 64) * MULTI_ADDRESS_LIMIT
+}
+
 #[derive(Debug)]
 struct HttpError(StatusCode, String);
 
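The multi_address_too_long pre-check rejects oversized bodies before serde_json parses them. Its budget per entry is 8 bytes of overhead (quotes, comma, newline, and a 4-space indent for pretty-printed JSON) plus 64 bytes for the longest expected item (a 64-character hex scripthash; the comment notes bech32 p2tr/p2wsh addresses are shorter), so 300 entries may occupy at most (8 + 64) * 300 = 21,600 bytes. A small sketch of the arithmetic against a hypothetical pretty-printed body (using &[u8] in place of hyper::body::Bytes):

// Sketch of the pre-parse size check, assuming the same constants as the diff.
const MULTI_ADDRESS_LIMIT: usize = 300;

fn multi_address_too_long(body: &[u8]) -> bool {
    // 8 bytes of per-entry overhead + 64 bytes of payload, times 300 entries = 21_600.
    body.len() > (8 + 64) * MULTI_ADDRESS_LIMIT
}

fn main() {
    // A pretty-printed entry: 4-space indent + quoted 64-char scripthash + comma + newline
    // = 4 + 1 + 64 + 1 + 1 + 1 = 72 bytes, exactly the (8 + 64) per-entry budget.
    let entry = format!("    \"{}\",\n", "ab".repeat(32));
    assert_eq!(entry.len(), 72);

    let ok_body: String = std::iter::repeat(entry.clone()).take(300).collect();
    let big_body: String = std::iter::repeat(entry).take(301).collect();
    assert!(!multi_address_too_long(ok_body.as_bytes()));
    assert!(multi_address_too_long(big_body.as_bytes()));
}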
