From ab7cb3df119c58015366d86f7199929176e218ef Mon Sep 17 00:00:00 2001
From: junderw
Date: Fri, 29 Sep 2023 22:54:18 -0700
Subject: [PATCH 01/51] Fix: Make error messages clearer

---
 src/electrum/server.rs  |  5 ++++-
 src/errors.rs           | 11 ++++++++---
 src/new_index/schema.rs |  2 +-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/src/electrum/server.rs b/src/electrum/server.rs
index 167532d5..6c18aadb 100644
--- a/src/electrum/server.rs
+++ b/src/electrum/server.rs
@@ -614,7 +614,10 @@ fn get_history(
 ) -> Result<Vec<(Txid, Option<BlockId>)>> {
     // to avoid silently trunacting history entries, ask for one extra more than the limit and fail if it exists
     let history_txids = query.history_txids(scripthash, txs_limit + 1);
-    ensure!(history_txids.len() <= txs_limit, ErrorKind::TooPopular);
+    ensure!(
+        history_txids.len() <= txs_limit,
+        ErrorKind::TooManyTxs(txs_limit)
+    );
     Ok(history_txids)
 }
 
diff --git a/src/errors.rs b/src/errors.rs
index cec50cce..48274fbb 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -14,9 +14,14 @@ error_chain! {
             display("Iterrupted by signal {}", sig)
         }
 
-        TooPopular {
-            description("Too many history entries")
-            display("Too many history entries")
+        TooManyUtxos(limit: usize) {
+            description("Too many unspent transaction outputs. Contact support to raise limits.")
+            display("Too many unspent transaction outputs (>{}). Contact support to raise limits.", limit)
+        }
+
+        TooManyTxs(limit: usize) {
+            description("Too many history transactions. Contact support to raise limits.")
+            display("Too many history transactions (>{}). Contact support to raise limits.", limit)
         }
 
         #[cfg(feature = "electrum-discovery")]
diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs
index f3eccc9c..318b6209 100644
--- a/src/new_index/schema.rs
+++ b/src/new_index/schema.rs
@@ -671,7 +671,7 @@ impl ChainQuery {
 
             // abort if the utxo set size excedees the limit at any point in time
             if utxos.len() > limit {
-                bail!(ErrorKind::TooPopular)
+                bail!(ErrorKind::TooManyUtxos(limit))
             }
         }
 
From ff4e4530e764cc8de33769206d255b7940fcc17e Mon Sep 17 00:00:00 2001
From: junderw
Date: Sun, 1 Oct 2023 19:12:31 -0700
Subject: [PATCH 02/51] REST API blocking async: Solution A, block_in_place
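
handle_request() does synchronous RocksDB and daemon I/O, so calling it
directly from the async hyper service blocks a Tokio worker thread and
stalls every other in-flight request scheduled on it.
tokio::task::block_in_place tells the multi-thread runtime to hand the
worker's queued tasks to another thread before running the blocking
closure inline.

A minimal, self-contained sketch of the pattern (illustrative only;
`expensive_sync_lookup` is a stand-in for handle_request's synchronous
work, not a real electrs function):

    // Requires Tokio's multi-thread runtime (the #[tokio::main] default);
    // block_in_place panics on a current-thread runtime.
    fn expensive_sync_lookup() -> u64 {
        std::thread::sleep(std::time::Duration::from_millis(100));
        42
    }

    #[tokio::main]
    async fn main() {
        // The worker yields its task queue, then runs the closure inline.
        let answer = tokio::task::block_in_place(expensive_sync_lookup);
        println!("{}", answer);
    }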
---
 src/rest.rs | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/src/rest.rs b/src/rest.rs
index 56567393..1fbb6560 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -570,16 +570,18 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
                     let uri = req.uri().clone();
                     let body = hyper::body::to_bytes(req.into_body()).await?;
 
-                    let mut resp = handle_request(method, uri, body, &query, &config)
-                        .unwrap_or_else(|err| {
-                            warn!("{:?}", err);
-                            Response::builder()
-                                .status(err.0)
-                                .header("Content-Type", "text/plain")
-                                .header("X-Powered-By", &**VERSION_STRING)
-                                .body(Body::from(err.1))
-                                .unwrap()
-                        });
+                    let mut resp = tokio::task::block_in_place(|| {
+                        handle_request(method, uri, body, &query, &config)
+                    })
+                    .unwrap_or_else(|err| {
+                        warn!("{:?}", err);
+                        Response::builder()
+                            .status(err.0)
+                            .header("Content-Type", "text/plain")
+                            .header("X-Powered-By", &**VERSION_STRING)
+                            .body(Body::from(err.1))
+                            .unwrap()
+                    });
                     if let Some(ref origins) = config.cors {
                         resp.headers_mut()
                             .insert("Access-Control-Allow-Origin", origins.parse().unwrap());
 
From 2263cb05947ce6b99a534915171772af9dd82608 Mon Sep 17 00:00:00 2001
From: junderw
Date: Mon, 2 Oct 2023 21:18:06 -0700
Subject: [PATCH 03/51] Add metrics for REST response times
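
Time every REST response with a prometheus HistogramVec. The vector is
keyed by a "method" label (for now every request is recorded under the
single label value "all_methods"); start_timer() returns a guard whose
observe_duration() records the elapsed seconds into the histogram.

A self-contained sketch of the same pattern (illustrative; in electrs
the registry wiring goes through the Metrics helper instead):

    use prometheus::{HistogramOpts, HistogramVec, Registry};

    fn main() {
        let registry = Registry::new();
        let timings = HistogramVec::new(
            HistogramOpts::new("electrs_rest_api", "Electrs REST API response timings"),
            &["method"],
        )
        .unwrap();
        registry.register(Box::new(timings.clone())).unwrap();

        let timer = timings.with_label_values(&["all_methods"]).start_timer();
        // ... handle a request here ...
        timer.observe_duration();
    }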
---
 src/bin/electrs.rs |  2 +-
 src/rest.rs        | 20 +++++++++++++++++---
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs
index 76373d2b..9a301be4 100644
--- a/src/bin/electrs.rs
+++ b/src/bin/electrs.rs
@@ -99,7 +99,7 @@ fn run_server(config: Arc<Config>) -> Result<()> {
     ));
 
     // TODO: configuration for which servers to start
-    let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query));
+    let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query), &metrics);
     let electrum_server = ElectrumRPC::start(Arc::clone(&config), Arc::clone(&query), &metrics);
 
     loop {
diff --git a/src/rest.rs b/src/rest.rs
index 56567393..55b1d119 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1,6 +1,7 @@
 use crate::chain::{address, BlockHash, Network, OutPoint, Script, Transaction, TxIn, TxOut, Txid};
 use crate::config::{Config, VERSION_STRING};
 use crate::errors;
+use crate::metrics::Metrics;
 use crate::new_index::{compute_script_hash, Query, SpendingInput, Utxo};
 use crate::util::{
     create_socket, electrum_merkle, extract_tx_prevouts, full_hash, get_innerscripts, get_tx_fee,
@@ -17,6 +18,7 @@ use bitcoin::hashes::Error as HashError;
 use hex::{self, FromHexError};
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Response, Server, StatusCode};
+use prometheus::{HistogramOpts, HistogramVec};
 use tokio::sync::oneshot;
 
 use hyperlocal::UnixServerExt;
@@ -549,7 +551,12 @@ fn prepare_txs(
 }
 
 #[tokio::main]
-async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receiver<()>) {
+async fn run_server(
+    config: Arc<Config>,
+    query: Arc<Query>,
+    rx: oneshot::Receiver<()>,
+    metric: HistogramVec,
+) {
     let addr = &config.http_addr;
     let socket_file = &config.http_socket_file;
 
@@ -559,11 +566,13 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
     let make_service_fn_inn = || {
         let query = Arc::clone(&query);
         let config = Arc::clone(&config);
+        let metric = metric.clone();
 
         async move {
             Ok::<_, hyper::Error>(service_fn(move |req| {
                 let query = Arc::clone(&query);
                 let config = Arc::clone(&config);
+                let timer = metric.with_label_values(&["all_methods"]).start_timer();
 
                 async move {
                     let method = req.method().clone();
@@ -584,6 +593,7 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
                         resp.headers_mut()
                             .insert("Access-Control-Allow-Origin", origins.parse().unwrap());
                     }
+                    timer.observe_duration();
                     Ok::<_, hyper::Error>(resp)
                 }
             }))
@@ -630,13 +640,17 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
     }
 }
 
-pub fn start(config: Arc<Config>, query: Arc<Query>) -> Handle {
+pub fn start(config: Arc<Config>, query: Arc<Query>, metrics: &Metrics) -> Handle {
     let (tx, rx) = oneshot::channel::<()>();
+    let response_timer = metrics.histogram_vec(
+        HistogramOpts::new("electrs_rest_api", "Electrs REST API response timings"),
+        &["method"],
+    );
 
     Handle {
         tx,
         thread: crate::util::spawn_thread("rest-server", move || {
-            run_server(config, query, rx);
+            run_server(config, query, rx, response_timer);
        }),
    }
 }
 
From 83301a2d33a405c85cff94e9bcf8a28172f32660 Mon Sep 17 00:00:00 2001
From: junderw
Date: Mon, 2 Oct 2023 21:18:06 -0700
Subject: [PATCH 04/51] Add metrics for REST response times

---
 src/bin/electrs.rs |  2 +-
 src/rest.rs        | 20 +++++++++++++++++---
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs
index 76373d2b..9a301be4 100644
--- a/src/bin/electrs.rs
+++ b/src/bin/electrs.rs
@@ -99,7 +99,7 @@ fn run_server(config: Arc<Config>) -> Result<()> {
     ));
 
     // TODO: configuration for which servers to start
-    let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query));
+    let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query), &metrics);
     let electrum_server = ElectrumRPC::start(Arc::clone(&config), Arc::clone(&query), &metrics);
 
     loop {
diff --git a/src/rest.rs b/src/rest.rs
index 1fbb6560..883d7530 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1,6 +1,7 @@
 use crate::chain::{address, BlockHash, Network, OutPoint, Script, Transaction, TxIn, TxOut, Txid};
 use crate::config::{Config, VERSION_STRING};
 use crate::errors;
+use crate::metrics::Metrics;
 use crate::new_index::{compute_script_hash, Query, SpendingInput, Utxo};
 use crate::util::{
     create_socket, electrum_merkle, extract_tx_prevouts, full_hash, get_innerscripts, get_tx_fee,
@@ -17,6 +18,7 @@ use bitcoin::hashes::Error as HashError;
 use hex::{self, FromHexError};
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Method, Response, Server, StatusCode};
+use prometheus::{HistogramOpts, HistogramVec};
 use tokio::sync::oneshot;
 
 use hyperlocal::UnixServerExt;
@@ -549,7 +551,12 @@ fn prepare_txs(
 }
 
 #[tokio::main]
-async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receiver<()>) {
+async fn run_server(
+    config: Arc<Config>,
+    query: Arc<Query>,
+    rx: oneshot::Receiver<()>,
+    metric: HistogramVec,
+) {
     let addr = &config.http_addr;
     let socket_file = &config.http_socket_file;
 
@@ -559,11 +566,13 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
     let make_service_fn_inn = || {
         let query = Arc::clone(&query);
         let config = Arc::clone(&config);
+        let metric = metric.clone();
 
         async move {
             Ok::<_, hyper::Error>(service_fn(move |req| {
                 let query = Arc::clone(&query);
                 let config = Arc::clone(&config);
+                let timer = metric.with_label_values(&["all_methods"]).start_timer();
 
                 async move {
                     let method = req.method().clone();
@@ -586,6 +595,7 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
                         resp.headers_mut()
                             .insert("Access-Control-Allow-Origin", origins.parse().unwrap());
                     }
+                    timer.observe_duration();
                     Ok::<_, hyper::Error>(resp)
                 }
             }))
@@ -632,13 +642,17 @@ async fn run_server(config: Arc<Config>, query: Arc<Query>, rx: oneshot::Receive
     }
 }
 
-pub fn start(config: Arc<Config>, query: Arc<Query>) -> Handle {
+pub fn start(config: Arc<Config>, query: Arc<Query>, metrics: &Metrics) -> Handle {
     let (tx, rx) = oneshot::channel::<()>();
+    let response_timer = metrics.histogram_vec(
+        HistogramOpts::new("electrs_rest_api", "Electrs REST API response timings"),
+        &["method"],
+    );
 
     Handle {
         tx,
         thread: crate::util::spawn_thread("rest-server", move || {
-            run_server(config, query, rx);
+            run_server(config, query, rx, response_timer);
        }),
    }
 }
 
From d0749f37d1765d866b3256da21b3237754a98729 Mon Sep 17 00:00:00 2001
From: Felipe Knorr Kuhn
Date: Thu, 12 Oct 2023 07:50:30 -0700
Subject: [PATCH 05/51] Add a Docker image builder workflow

---
 .github/workflows/on-tag.yml | 97 ++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 .github/workflows/on-tag.yml

diff --git a/.github/workflows/on-tag.yml b/.github/workflows/on-tag.yml
new file mode 100644
index 00000000..94d6adcf
--- /dev/null
+++ b/.github/workflows/on-tag.yml
@@ -0,0 +1,97 @@
+name: Docker build on tag
+env:
+  DOCKER_CLI_EXPERIMENTAL: enabled
+  TAG_FMT: "^refs/tags/(((.?[0-9]+){3,4}))$"
+  DOCKER_BUILDKIT: 0
+  COMPOSE_DOCKER_CLI_BUILD: 0
+
+on:
+  push:
+    tags:
+      - v[0-9]+.[0-9]+.[0-9]+
+      - v[0-9]+.[0-9]+.[0-9]+-*
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    timeout-minutes: 120
+    name: Build and push to DockerHub
+    steps:
+      # Workaround based on JonasAlfredsson/docker-on-tmpfs@v1.0.1
+      - name:
Replace the current swap file + shell: bash + run: | + sudo swapoff /mnt/swapfile + sudo rm -v /mnt/swapfile + sudo fallocate -l 13G /mnt/swapfile + sudo chmod 600 /mnt/swapfile + sudo mkswap /mnt/swapfile + sudo swapon /mnt/swapfile + + - name: Show current memory and swap status + shell: bash + run: | + sudo free -h + echo + sudo swapon --show + + - name: Mount a tmpfs over /var/lib/docker + shell: bash + run: | + if [ ! -d "/var/lib/docker" ]; then + echo "Directory '/var/lib/docker' not found" + exit 1 + fi + sudo mount -t tmpfs -o size=10G tmpfs /var/lib/docker + sudo systemctl restart docker + sudo df -h | grep docker + + - name: Set env variables + run: echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV + + - name: Show set environment variables + run: | + printf " TAG: %s\n" "$TAG" + + - name: Add SHORT_SHA env property with commit short sha + run: echo "SHORT_SHA=`echo ${GITHUB_SHA} | cut -c1-8`" >> $GITHUB_ENV + + - name: Login to Docker for building + run: echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin + + - name: Checkout project + uses: actions/checkout@v3 + + # - name: Set up QEMU + # uses: docker/setup-qemu-action@v3 + # id: qemu + + - name: Setup Docker buildx action + uses: docker/setup-buildx-action@v3 + id: buildx + + - name: Available platforms + run: echo ${{ steps.buildx.outputs.platforms }} + + - name: Cache Docker layers + uses: actions/cache@v3 + id: cache + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx + restore-keys: | + ${{ runner.os }}-buildx + + - name: Run Docker buildx against tag + run: | + docker buildx build \ + --cache-from "type=local,src=/tmp/.buildx-cache" \ + --cache-to "type=local,dest=/tmp/.buildx-cache" \ + --platform linux/amd64 \ + --tag ${{ secrets.DOCKER_USERNAME }}/electrs:$TAG \ + --tag ${{ secrets.DOCKER_USERNAME }}/electrs:latest \ + --output "type=registry" . \ + --build-arg commitHash=$SORT_SHA From c5c6d17814817a734830f0926f14c33900983e95 Mon Sep 17 00:00:00 2001 From: Felipe Knorr Kuhn Date: Thu, 12 Oct 2023 09:58:02 -0700 Subject: [PATCH 06/51] Fix typo --- .github/workflows/on-tag.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/on-tag.yml b/.github/workflows/on-tag.yml index 94d6adcf..77bb5ed0 100644 --- a/.github/workflows/on-tag.yml +++ b/.github/workflows/on-tag.yml @@ -94,4 +94,4 @@ jobs: --tag ${{ secrets.DOCKER_USERNAME }}/electrs:$TAG \ --tag ${{ secrets.DOCKER_USERNAME }}/electrs:latest \ --output "type=registry" . 
\ - --build-arg commitHash=$SORT_SHA + --build-arg commitHash=$SHORT_SHA From dceb659a3d27ae8285edc971561c9a1ee6e16e26 Mon Sep 17 00:00:00 2001 From: junderw Date: Sat, 14 Oct 2023 16:14:59 -0700 Subject: [PATCH 07/51] Update README and rename lib to mempool-electrs --- .github/workflows/ci.yml | 2 +- Cargo.lock | 86 ++++++++++++++++++++-------------------- Cargo.toml | 14 +++++-- README.md | 20 +++++----- scripts/checks.sh | 2 +- src/config.rs | 2 +- 6 files changed, 66 insertions(+), 60 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6f96b4b4..6b318ff2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,7 +51,7 @@ jobs: ~/.cargo/git target key: ${{ runner.os }}-cargo-test-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} - - run: cargo test --package electrs --lib --all-features + - run: cargo test --lib --all-features clippy: name: Linter diff --git a/Cargo.lock b/Cargo.lock index 23b2bb57..db6d368a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -397,49 +397,6 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" -[[package]] -name = "electrs" -version = "3.0.0-dev" -dependencies = [ - "arrayref", - "base64 0.13.0", - "bincode", - "bitcoin 0.28.0", - "bounded-vec-deque", - "clap", - "crossbeam-channel", - "dirs", - "electrum-client", - "elements", - "error-chain", - "glob", - "hex", - "hyper", - "hyperlocal", - "itertools", - "lazy_static", - "libc", - "log", - "num_cpus", - "page_size", - "prometheus", - "rayon", - "rocksdb", - "serde", - "serde_derive", - "serde_json", - "sha2", - "signal-hook", - "socket2", - "stderrlog", - "sysconf", - "tempfile", - "time 0.3.9", - "tiny_http", - "tokio", - "url", -] - [[package]] name = "electrum-client" version = "0.8.0" @@ -863,6 +820,49 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "mempool-electrs" +version = "3.0.0-dev" +dependencies = [ + "arrayref", + "base64 0.13.0", + "bincode", + "bitcoin 0.28.0", + "bounded-vec-deque", + "clap", + "crossbeam-channel", + "dirs", + "electrum-client", + "elements", + "error-chain", + "glob", + "hex", + "hyper", + "hyperlocal", + "itertools", + "lazy_static", + "libc", + "log", + "num_cpus", + "page_size", + "prometheus", + "rayon", + "rocksdb", + "serde", + "serde_derive", + "serde_json", + "sha2", + "signal-hook", + "socket2", + "stderrlog", + "sysconf", + "tempfile", + "time 0.3.9", + "tiny_http", + "tokio", + "url", +] + [[package]] name = "minimal-lexical" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index 5872d6ba..99b963ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,16 +1,24 @@ [package] -name = "electrs" +name = "mempool-electrs" version = "3.0.0-dev" -authors = ["Roman Zeyde "] +authors = [ + "Roman Zeyde ", + "Nadav Ivgi ", + "wiz ", + "junderw " +] description = "An efficient re-implementation of Electrum Server in Rust" license = "MIT" homepage = "https://github.com/mempool/electrs" repository = "https://github.com/mempool/electrs" +publish = false keywords = ["bitcoin", "electrum", "server", "index", "database"] -documentation = "https://docs.rs/electrs/" readme = "README.md" edition = "2018" +[lib] +name = "electrs" + [features] default = [] liquid = [ "elements" ] diff --git a/README.md b/README.md index 5cf685ff..d684dc48 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# Esplora - Electrs backend API +# Mempool - Electrs backend API -A block chain index engine and HTTP 
API written in Rust based on [romanz/electrs](https://github.com/romanz/electrs). +A block chain index engine and HTTP API written in Rust based on [romanz/electrs](https://github.com/romanz/electrs) and [Blockstream/electrs](https://github.com/Blockstream/electrs). -Used as the backend for the [Esplora block explorer](https://github.com/Blockstream/esplora) powering [blockstream.info](https://blockstream.info/). +Used as the backend for the [mempool block explorer](https://github.com/mempool/mempool) powering [mempool.space](https://mempool.space/). -API documentation [is available here](https://github.com/blockstream/esplora/blob/master/API.md). +API documentation [is available here](https://mempool.space/docs/api/rest). Documentation for the database schema and indexing process [is available here](doc/schema.md). @@ -13,8 +13,8 @@ Documentation for the database schema and indexing process [is available here](d Install Rust, Bitcoin Core (no `txindex` needed) and the `clang` and `cmake` packages, then: ```bash -$ git clone https://github.com/blockstream/electrs && cd electrs -$ git checkout new-index +$ git clone https://github.com/mempool/electrs && cd electrs +$ git checkout mempool $ cargo run --release --bin electrs -- -vvvv --daemon-dir ~/.bitcoin # Or for liquid: @@ -24,11 +24,9 @@ $ cargo run --features liquid --release --bin electrs -- -vvvv --network liquid See [electrs's original documentation](https://github.com/romanz/electrs/blob/master/doc/usage.md) for more detailed instructions. Note that our indexes are incompatible with electrs's and has to be created separately. -The indexes require 610GB of storage after running compaction (as of June 2020), but you'll need to have +The indexes require 1.3TB of storage after running compaction (as of October 2023), but you'll need to have free space of about double that available during the index compaction process. -Creating the indexes should take a few hours on a beefy machine with SSD. - -To deploy with Docker, follow the [instructions here](https://github.com/Blockstream/esplora#how-to-build-the-docker-image). +Creating the indexes should take a few hours on a beefy machine with high speed NVMe SSD(s). ### Light mode @@ -78,7 +76,7 @@ Additional options with the `electrum-discovery` feature: - `--electrum-hosts ` - a json map of the public hosts where the electrum server is reachable, in the [`server.features` format](https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server.features). - `--electrum-announce` - announce the electrum server on the electrum p2p server discovery network. -See `$ cargo run --release --bin electrs -- --help` for the full list of options. +See `$ cargo run --bin electrs -- --help` for the full list of options. 
## License diff --git a/scripts/checks.sh b/scripts/checks.sh index aeda1261..a80db52e 100755 --- a/scripts/checks.sh +++ b/scripts/checks.sh @@ -59,4 +59,4 @@ cargo clippy $@ -q -F electrum-discovery,liquid TESTNAME="Running cargo test with all features" echo "$TESTNAME" -cargo test $@ -q --package electrs --lib --all-features +cargo test $@ -q --lib --all-features diff --git a/src/config.rs b/src/config.rs index ff81d4ad..9c8cfc26 100644 --- a/src/config.rs +++ b/src/config.rs @@ -85,7 +85,7 @@ impl Config { pub fn from_args() -> Config { let network_help = format!("Select network type ({})", Network::names().join(", ")); - let args = App::new("Electrum Rust Server") + let args = App::new("Mempool Electrum Rust Server") .version(crate_version!()) .arg( Arg::with_name("version") From 7f828cd86dd83e7f2bc0d47b5e318a7a0e9ee4a4 Mon Sep 17 00:00:00 2001 From: junderw Date: Tue, 31 Oct 2023 22:41:10 -0700 Subject: [PATCH 08/51] Use Composite Action to reduce duplication --- .github/actions/ci-rust-setup/action.yml | 50 ++++++++++++++++++++++++ .github/workflows/ci.yml | 43 ++++++-------------- rust-toolchain | 1 + 3 files changed, 63 insertions(+), 31 deletions(-) create mode 100644 .github/actions/ci-rust-setup/action.yml create mode 100644 rust-toolchain diff --git a/.github/actions/ci-rust-setup/action.yml b/.github/actions/ci-rust-setup/action.yml new file mode 100644 index 00000000..03b548bb --- /dev/null +++ b/.github/actions/ci-rust-setup/action.yml @@ -0,0 +1,50 @@ +name: CI Rust Setup +description: 'Sets up the environment for Rust jobs during CI workflow' + +inputs: + cache-name: + description: 'Name of cache artifacts (same name is same cache key) empty to disable cache' + required: false + targets: + description: 'A comma separated list of extra targets you want to install' + required: false + components: + description: 'A comma separated list of extra components you want to install' + required: false + toolchain: + description: 'The toolchain to use. If not specified, the rust-toolchain file will be used' + required: false + +runs: + using: composite + steps: + - name: Get toolchain from input OR rust-toolchain file + id: gettoolchain + shell: bash + run: |- + RUST_TOOLCHAIN="${{ inputs.toolchain }}" + if [ ! 
-f rust-toolchain ] && [ -z "${RUST_TOOLCHAIN}" ]; then + echo "***ERROR*** NEED toolchain INPUT OR rust-toolchain FILE IN ROOT OF REPOSITORY" >&2 + exit 1 + fi + if [ -z "${RUST_TOOLCHAIN}" ]; then + RUST_TOOLCHAIN="$(cat rust-toolchain)" + fi + echo "toolchain=\"${RUST_TOOLCHAIN}\"" >> $GITHUB_OUTPUT + - name: Install ${{ steps.gettoolchain.outputs.toolchain }} Rust toolchain + id: toolchain + # Commit date is Sep 19, 2023 + uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 + with: + toolchain: ${{ steps.gettoolchain.outputs.toolchain }} + targets: ${{ inputs.targets }} + components: ${{ inputs.components }} + - name: Cache dependencies + uses: actions/cache@v3 + if: inputs.cache-name != '' + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ inputs.cache-name }}-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6b318ff2..c9ef33c5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,16 +13,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - id: toolchain - uses: dtolnay/rust-toolchain@1.70 - - name: Cache dependencies - uses: actions/cache@v3 + - name: Setup Rust + uses: './.github/actions/ci-rust-setup' with: - path: | - ~/.cargo/registry - ~/.cargo/git - target - key: ${{ runner.os }}-cargo-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} + cache-name: dev - run: cargo check --all-features fmt: @@ -30,8 +24,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - id: toolchain - uses: dtolnay/rust-toolchain@1.70 + - name: Setup Rust + uses: './.github/actions/ci-rust-setup' with: components: rustfmt - run: cargo fmt --all -- --check @@ -41,16 +35,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - id: toolchain - uses: dtolnay/rust-toolchain@1.70 - - name: Cache dependencies - uses: actions/cache@v3 - with: # test cache key is different (adding test cfg is a re-compile) - path: | - ~/.cargo/registry - ~/.cargo/git - target - key: ${{ runner.os }}-cargo-test-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} + - name: Setup Rust + uses: './.github/actions/ci-rust-setup' + with: + cache-name: test - run: cargo test --lib --all-features clippy: @@ -67,17 +55,10 @@ jobs: ] steps: - uses: actions/checkout@v3 - - id: toolchain - uses: dtolnay/rust-toolchain@1.70 + - name: Setup Rust + uses: './.github/actions/ci-rust-setup' with: + cache-name: dev components: clippy - - name: Cache dependencies - uses: actions/cache@v3 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - target - key: ${{ runner.os }}-cargo-${{ steps.toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.lock') }} - name: Clippy with Features = ${{ matrix.features }} run: cargo clippy ${{ matrix.features }} -- -D warnings diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 00000000..bfe79d0b --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +1.70 From 45912058340de499aef5f6f7c83a8ea23f369382 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Wed, 16 Aug 2023 17:59:04 +0900 Subject: [PATCH 09/51] Protect internal bulk API endpoints behind /internal-api prefix --- src/rest.rs | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/rest.rs b/src/rest.rs index 56567393..b376a52a 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -51,6 +51,9 @@ const TTL_SHORT: u32 = 10; // ttl for volatie 
resources
 const TTL_MEMPOOL_RECENT: u32 = 5; // ttl for GET /mempool/recent
 const CONF_FINAL: usize = 10; // reorgs deeper than this are considered unlikely
 
+// internal api prefix
+const INTERNAL_PREFIX: &str = "internal-api";
+
 #[derive(Serialize, Deserialize)]
 struct BlockValue {
     id: String,
@@ -1165,7 +1168,14 @@ fn handle_request(
         (&Method::GET, Some(&"mempool"), Some(&"txids"), None, None, None) => {
             json_response(query.mempool().txids(), TTL_SHORT)
         }
-        (&Method::GET, Some(&"mempool"), Some(&"txs"), Some(&"all"), None, None) => {
+        (
+            &Method::GET,
+            Some(&INTERNAL_PREFIX),
+            Some(&"mempool"),
+            Some(&"txs"),
+            Some(&"all"),
+            None,
+        ) => {
             let txs = query
                 .mempool()
                 .txs()
@@ -1175,7 +1185,7 @@ fn handle_request(
 
             json_response(prepare_txs(txs, query, config), TTL_SHORT)
         }
-        (&Method::POST, Some(&"mempool"), Some(&"txs"), None, None, None) => {
+        (&Method::POST, Some(&INTERNAL_PREFIX), Some(&"mempool"), Some(&"txs"), None, None) => {
             let txid_strings: Vec<String> =
                 serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
 
@@ -1198,7 +1208,14 @@ fn handle_request(
                 Err(err) => http_message(StatusCode::BAD_REQUEST, err.to_string(), 0),
             }
         }
-        (&Method::GET, Some(&"mempool"), Some(&"txs"), last_seen_txid, None, None) => {
+        (
+            &Method::GET,
+            Some(&INTERNAL_PREFIX),
+            Some(&"mempool"),
+            Some(&"txs"),
+            last_seen_txid,
+            None,
+        ) => {
             let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok());
             let txs = query
                 .mempool()
 
From bb7e1ec4771acd8e1e07a2c31eeb9c599db46a29 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Mon, 28 Aug 2023 16:45:20 +0900
Subject: [PATCH 10/51] Change internal api prefix to "internal"

---
 src/rest.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rest.rs b/src/rest.rs
index b376a52a..b9e6115b 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -52,7 +52,7 @@ const TTL_MEMPOOL_RECENT: u32 = 5; // ttl for GET /mempool/recent
 const CONF_FINAL: usize = 10; // reorgs deeper than this are considered unlikely
 
 // internal api prefix
-const INTERNAL_PREFIX: &str = "internal-api";
+const INTERNAL_PREFIX: &str = "internal";
 
 #[derive(Serialize, Deserialize)]
 struct BlockValue {
From 281b6994087aec6e42c40587f33ab0bc419f9bd1 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Wed, 6 Sep 2023 08:26:29 +0900
Subject: [PATCH 11/51] Move bulk /block/:hash/txs behind /internal prefix

---
 src/rest.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rest.rs b/src/rest.rs
index b9e6115b..6bc8cc8f 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -729,7 +729,7 @@ fn handle_request(
             .ok_or_else(|| HttpError::not_found("Block not found".to_string()))?;
         json_response(txids, TTL_LONG)
     }
-    (&Method::GET, Some(&"block"), Some(hash), Some(&"txs"), None, None) => {
+    (&Method::GET, Some(&INTERNAL_PREFIX), Some(&"block"), Some(hash), Some(&"txs"), None) => {
         let hash = BlockHash::from_hex(hash)?;
         let txs = query
             .chain()
From 0695ada6699907b33f5ec766e3d3e1204b97c32d Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Sat, 5 Aug 2023 16:12:29 +0900
Subject: [PATCH 12/51] Add a POST /txs bulk query-by-txid endpoint
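
The new endpoint accepts a JSON array of txid strings in the POST body
and returns the corresponding transactions; ids that fail to parse
produce a 400, while well-formed but unknown txids are silently skipped
by the lookup. (Two commits later in this series the route moves behind
the internal prefix.)

A sketch of a request body, built with serde_json (already a project
dependency); the txids here are well-known mainnet transactions used
purely as examples:

    fn main() {
        let body = serde_json::json!([
            // first peer-to-peer bitcoin transfer (block 170)
            "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16",
            // block 1 coinbase
            "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
        ]);
        println!("{}", body); // POST this as the body of /txs
    }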
---
 src/rest.rs | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/src/rest.rs b/src/rest.rs
index 6bc8cc8f..3a262656 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1023,6 +1023,32 @@ fn handle_request(
                 json_response(tx.remove(0), ttl)
             }
         }
+        (&Method::POST, Some(&"txs"), None, None, None, None) => {
+            let txid_strings: Vec<String> =
+                serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+            match txid_strings
+                .into_iter()
+                .map(|txid| Txid::from_hex(&txid))
+                .collect::<Result<Vec<Txid>, _>>()
+            {
+                Ok(txids) => {
+                    let txs: Vec<(Transaction, Option<BlockId>)> = {
+                        txids
+                            .iter()
+                            .filter_map(|txid| {
+                                query
+                                    .lookup_txn(txid)
+                                    .map(|tx| (tx, query.chain().tx_confirming_block(txid)))
+                            })
+                            .collect()
+                    };
+
+                    json_response(prepare_txs(txs, query, config), 0)
+                }
+                Err(err) => http_message(StatusCode::BAD_REQUEST, err.to_string(), 0),
+            }
+        }
         (&Method::GET, Some(&"tx"), Some(hash), Some(out_type @ &"hex"), None, None)
         | (&Method::GET, Some(&"tx"), Some(hash), Some(out_type @ &"raw"), None, None) => {
             let hash = Txid::from_hex(hash)?;
From 0301e4c60fc4fa486c8a11e2d1567f2b2e6c7b24 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Sun, 6 Aug 2023 15:02:14 +0900
Subject: [PATCH 13/51] remove unnecessary scope

---
 src/rest.rs | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/src/rest.rs b/src/rest.rs
index 3a262656..320f5b51 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1033,17 +1033,14 @@ fn handle_request(
                 .collect::<Result<Vec<Txid>, _>>()
             {
                 Ok(txids) => {
-                    let txs: Vec<(Transaction, Option<BlockId>)> = {
-                        txids
-                            .iter()
-                            .filter_map(|txid| {
-                                query
-                                    .lookup_txn(txid)
-                                    .map(|tx| (tx, query.chain().tx_confirming_block(txid)))
-                            })
-                            .collect()
-                    };
-
+                    let txs: Vec<(Transaction, Option<BlockId>)> = txids
+                        .iter()
+                        .filter_map(|txid| {
+                            query
+                                .lookup_txn(txid)
+                                .map(|tx| (tx, query.chain().tx_confirming_block(txid)))
+                        })
+                        .collect();
                     json_response(prepare_txs(txs, query, config), 0)
                 }
                 Err(err) => http_message(StatusCode::BAD_REQUEST, err.to_string(), 0),
From 59be24853631844e0b16fb101a2ae8411c9f5e38 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Wed, 16 Aug 2023 18:05:19 +0900
Subject: [PATCH 14/51] Protect POST /txs bulk query-by-txid endpoint

---
 src/rest.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rest.rs b/src/rest.rs
index 320f5b51..09daea0c 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1023,7 +1023,7 @@ fn handle_request(
                 json_response(tx.remove(0), ttl)
             }
         }
-        (&Method::POST, Some(&"txs"), None, None, None, None) => {
+        (&Method::POST, Some(&INTERNAL_PREFIX), Some(&"txs"), None, None, None) => {
             let txid_strings: Vec<String> =
                 serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
 
From d1afa45bcbf8e559572e6b5b5b612d6e3fc584f9 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Fri, 18 Aug 2023 01:48:42 +0900
Subject: [PATCH 15/51] Add internal POST bulk outspends endpoints

---
 src/rest.rs | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/src/rest.rs b/src/rest.rs
index 09daea0c..42d7b172 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1184,6 +1184,69 @@ fn handle_request(
 
             json_response(spends, TTL_SHORT)
         }
+        (
+            &Method::POST,
+            Some(&INTERNAL_PREFIX),
+            Some(&"txs"),
+            Some(&"outspends"),
+            Some(&"by-txid"),
+            None,
+        ) => {
+            let txid_strings: Vec<String> =
+                serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+            let spends: Vec<Vec<SpendingValue>> = txid_strings
+                .into_iter()
+                .map(|txid_str| {
+                    Txid::from_hex(&txid_str)
+                        .ok()
+                        .and_then(|txid| query.lookup_txn(&txid))
+                        .map_or_else(Vec::new, |tx| {
+                            query
+                                .lookup_tx_spends(tx)
+                                .into_iter()
+                                .map(|spend| {
+                                    spend.map_or_else(SpendingValue::default, SpendingValue::from)
+                                })
+                                .collect()
+                        })
+                })
+                .collect();
+
+            json_response(spends, TTL_SHORT)
+        }
+        (
+            &Method::POST,
+            Some(&INTERNAL_PREFIX),
+            Some(&"txs"),
+            Some(&"outspends"),
+            Some(&"by-outpoint"),
+            None,
+        ) => {
+            let outpoint_strings: Vec<String> =
+                serde_json::from_slice(&body).map_err(|err| HttpError::from(err.to_string()))?;
+
+            let spends: Vec<SpendingValue> = outpoint_strings
+                .into_iter()
+                .map(|outpoint_str| {
+                    let mut parts = outpoint_str.split(':');
+                    let hash_part = parts.next();
+                    let index_part = parts.next();
+
+                    if let (Some(hash), Some(index)) = (hash_part, index_part) {
+                        if let (Ok(txid), Ok(vout)) = (Txid::from_hex(hash), index.parse::<u32>()) {
+                            let outpoint = OutPoint { txid, vout };
+                            return query
+                                .lookup_spend(&outpoint)
+                                .map_or_else(SpendingValue::default, SpendingValue::from);
+                        }
+                    }
+                    SpendingValue::default()
+                })
+                .collect();
+
+            json_response(spends, TTL_SHORT)
+        }
         (&Method::GET, Some(&"mempool"), None, None, None, None) => {
             json_response(query.mempool().backlog_stats(), TTL_SHORT)
 
From ae9fc9301f25ec3e7ac915d5324b7136cf9d036b Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Wed, 6 Sep 2023 08:34:42 +0900
Subject: [PATCH 16/51] Fix missing conf status & fix TTL for bulk block txs
 endpoint

---
 src/rest.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/rest.rs b/src/rest.rs
index 6bc8cc8f..4447cd36 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -731,15 +731,17 @@ fn handle_request(
     }
     (&Method::GET, Some(&INTERNAL_PREFIX), Some(&"block"), Some(hash), Some(&"txs"), None) => {
         let hash = BlockHash::from_hex(hash)?;
+        let block_id = query.chain().blockid_by_hash(&hash);
         let txs = query
             .chain()
             .get_block_txs(&hash)
             .ok_or_else(|| HttpError::not_found("Block not found".to_string()))?
             .into_iter()
-            .map(|tx| (tx, None))
+            .map(|tx| (tx, block_id.clone()))
             .collect();
 
-        json_response(prepare_txs(txs, query, config), TTL_SHORT)
+        let ttl = ttl_by_depth(block_id.map(|b| b.height), query);
+        json_response(prepare_txs(txs, query, config), ttl)
     }
     (&Method::GET, Some(&"block"), Some(hash), Some(&"header"), None, None) => {
         let hash = BlockHash::from_hex(hash)?;
From bc4ce1632f8262d3a61c1ad0320531edd08eb278 Mon Sep 17 00:00:00 2001
From: wiz
Date: Mon, 13 Nov 2023 15:08:20 +0900
Subject: [PATCH 17/51] ops: Use less threads for mainnet electrs pre-cache

---
 start | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/start b/start
index 1305a865..fe9844a9 100755
--- a/start
+++ b/start
@@ -37,7 +37,7 @@ esac
 
 # which network?
case "${1}" in mainnet) - THREADS=$((NPROC * 2)) + THREADS=$((NPROC * 1)) ;; testnet) NETWORK=testnet From 16e99b617f121f0dbf25a2490c087c1af9dfdb9b Mon Sep 17 00:00:00 2001 From: wiz Date: Mon, 13 Nov 2023 15:34:51 +0900 Subject: [PATCH 18/51] ops: Further reduce threads used for electrs pre-cache --- start | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/start b/start index fe9844a9..efb530cb 100755 --- a/start +++ b/start @@ -41,23 +41,23 @@ case "${1}" in ;; testnet) NETWORK=testnet - THREADS=$((NPROC * 1)) + THREADS=$((NPROC / 2)) ;; signet) NETWORK=signet - THREADS=$((NPROC * 1)) + THREADS=$((NPROC / 2)) ;; liquid) DAEMON=elements NETWORK=liquid FEATURES=liquid - THREADS=$((NPROC * 1)) + THREADS=$((NPROC / 2)) ;; liquidtestnet) DAEMON=elements NETWORK=liquidtestnet FEATURES=liquid - THREADS=$((NPROC * 1)) + THREADS=$((NPROC / 2)) ;; *) echo "Usage: $0 (mainnet|testnet|signet|liquid|liquidtestnet)" @@ -136,6 +136,6 @@ do --address-search \ --utxos-limit "${UTXOS_LIMIT}" \ --electrum-txs-limit "${ELECTRUM_TXS_LIMIT}" \ - -vvv + -vv sleep 1 done From 7ca62e44454130e4011c95bddae13dafe59aa9fe Mon Sep 17 00:00:00 2001 From: wiz Date: Mon, 13 Nov 2023 16:07:14 +0900 Subject: [PATCH 19/51] ops: Further reduce threads used for electrs pre-cache --- start | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/start b/start index efb530cb..827ee09a 100755 --- a/start +++ b/start @@ -37,27 +37,27 @@ esac # which network? case "${1}" in mainnet) - THREADS=$((NPROC * 1)) + THREADS=$((NPROC / 2)) ;; testnet) NETWORK=testnet - THREADS=$((NPROC / 2)) + THREADS=$((NPROC / 4)) ;; signet) NETWORK=signet - THREADS=$((NPROC / 2)) + THREADS=$((NPROC / 4)) ;; liquid) DAEMON=elements NETWORK=liquid FEATURES=liquid - THREADS=$((NPROC / 2)) + THREADS=$((NPROC / 4)) ;; liquidtestnet) DAEMON=elements NETWORK=liquidtestnet FEATURES=liquid - THREADS=$((NPROC / 2)) + THREADS=$((NPROC / 4)) ;; *) echo "Usage: $0 (mainnet|testnet|signet|liquid|liquidtestnet)" From 9190889eff6e0284c0a434f0b9b883988392768d Mon Sep 17 00:00:00 2001 From: wiz Date: Mon, 13 Nov 2023 16:24:20 +0900 Subject: [PATCH 20/51] ops: Further reduce threads used for electrs pre-cache --- start | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/start b/start index 827ee09a..c5daad1a 100755 --- a/start +++ b/start @@ -37,27 +37,27 @@ esac # which network? 
 case "${1}" in
     mainnet)
-        THREADS=$((NPROC / 2))
+        THREADS=$((NPROC / 3))
         ;;
     testnet)
         NETWORK=testnet
-        THREADS=$((NPROC / 4))
+        THREADS=$((NPROC / 6))
         ;;
     signet)
         NETWORK=signet
-        THREADS=$((NPROC / 4))
+        THREADS=$((NPROC / 6))
         ;;
     liquid)
         DAEMON=elements
         NETWORK=liquid
         FEATURES=liquid
-        THREADS=$((NPROC / 4))
+        THREADS=$((NPROC / 6))
         ;;
     liquidtestnet)
         DAEMON=elements
         NETWORK=liquidtestnet
         FEATURES=liquid
-        THREADS=$((NPROC / 4))
+        THREADS=$((NPROC / 6))
         ;;
     *)
         echo "Usage: $0 (mainnet|testnet|signet|liquid|liquidtestnet)"
 
From 93804931207208485649c837e3163d13f83ac3e6 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Mon, 13 Nov 2023 08:26:12 +0000
Subject: [PATCH 21/51] Relax error propagation for batch loading mempool txs
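
When the mempool is large, some txids fetched from bitcoind disappear
(evicted or replaced) between listing them and calling
getrawtransaction, and a single failed lookup used to abort the whole
batch. Failures are now tolerated up to a per-call threshold: only once
failed_requests exceeds round(failure_threshold * total_requests) does
the last error propagate. Mempool transaction loading passes 0.25; all
other callers pass 0.0 and keep the old fail-fast behavior.

The thresholding arithmetic, with assumed example values:

    fn main() {
        let failure_threshold = 0.25_f64;
        let total_requests = 10_000_usize; // e.g. txids in one batch
        let threshold = (failure_threshold * total_requests as f64).round() as u64;
        assert_eq!(threshold, 2_500); // the 2,501st failure aborts the batch
    }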
---
 src/daemon.rs | 49 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)

diff --git a/src/daemon.rs b/src/daemon.rs
index 0be3a8ff..b794a1f9 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -387,19 +387,46 @@ impl Daemon {
         Ok(result)
     }
 
-    fn handle_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
+    fn handle_request_batch(
+        &self,
+        method: &str,
+        params_list: &[Value],
+        failure_threshold: f64,
+    ) -> Result<Vec<Value>> {
         let id = self.message_id.next();
         let chunks = params_list
             .iter()
             .map(|params| json!({"method": method, "params": params, "id": id}))
             .chunks(50_000); // Max Amount of batched requests
         let mut results = vec![];
+        let total_requests = params_list.len();
+        let mut failed_requests: u64 = 0;
+        let threshold = (failure_threshold * total_requests as f64).round() as u64;
+        let mut n = 0;
+
         for chunk in &chunks {
             let reqs = chunk.collect();
             let mut replies = self.call_jsonrpc(method, &reqs)?;
             if let Some(replies_vec) = replies.as_array_mut() {
                 for reply in replies_vec {
-                    results.push(parse_jsonrpc_reply(reply.take(), method, id)?)
+                    n += 1;
+                    match parse_jsonrpc_reply(reply.take(), method, id) {
+                        Ok(parsed_reply) => results.push(parsed_reply),
+                        Err(e) => {
+                            failed_requests += 1;
+                            warn!(
+                                "batch request {} {}/{} failed: {}",
+                                method,
+                                n,
+                                total_requests,
+                                e.to_string()
+                            );
+                            // abort and return the last error once a threshold number of requests have failed
+                            if failed_requests > threshold {
+                                return Err(e);
+                            }
+                        }
+                    }
                 }
             } else {
                 bail!("non-array replies: {:?}", replies);
@@ -409,9 +436,14 @@ impl Daemon {
         Ok(results)
     }
 
-    fn retry_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
+    fn retry_request_batch(
+        &self,
+        method: &str,
+        params_list: &[Value],
+        failure_threshold: f64,
+    ) -> Result<Vec<Value>> {
         loop {
-            match self.handle_request_batch(method, params_list) {
+            match self.handle_request_batch(method, params_list, failure_threshold) {
                 Err(Error(ErrorKind::Connection(msg), _)) => {
                     warn!("reconnecting to bitcoind: {}", msg);
                     self.signal.wait(Duration::from_secs(3), false)?;
@@ -425,13 +457,13 @@ impl Daemon {
     }
 
     fn request(&self, method: &str, params: Value) -> Result<Value> {
-        let mut values = self.retry_request_batch(method, &[params])?;
+        let mut values = self.retry_request_batch(method, &[params], 0.0)?;
         assert_eq!(values.len(), 1);
         Ok(values.remove(0))
     }
 
     fn requests(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
-        self.retry_request_batch(method, params_list)
+        self.retry_request_batch(method, params_list, 0.0)
     }
 
     // bitcoind JSONRPC API:
@@ -506,13 +538,12 @@ impl Daemon {
             .iter()
             .map(|txhash| json!([txhash.to_hex(), /*verbose=*/ false]))
             .collect();
-
-        let values = self.requests("getrawtransaction", &params_list)?;
+        let values = self.retry_request_batch("getrawtransaction", &params_list, 0.25)?;
         let mut txs = vec![];
         for value in values {
             txs.push(tx_from_value(value)?);
         }
-        assert_eq!(txhashes.len(), txs.len());
+        // missing transactions are skipped, so the number of txs returned may be less than the number of txids requested
         Ok(txs)
     }
 
From 168862b6ae52a547e0e613948e9f3f024542ab73 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Wed, 15 Nov 2023 06:55:35 +0000
Subject: [PATCH 22/51] Configurable mempool txs page size
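
The paginated /internal/mempool/txs endpoint previously used a
hard-coded page size of 10,000. It now honors a max_txs query parameter
and falls back to the new --rest-max-mempool-page-size option (default
1000) when the parameter is missing or unparsable; note the option is a
fallback default rather than an upper bound on what a caller may
request.

The parse-with-fallback pattern as a standalone sketch (hypothetical
values; the real code reads the parsed query map from hyper):

    use std::collections::HashMap;

    fn main() {
        let mut query_params: HashMap<String, String> = HashMap::new();
        query_params.insert("max_txs".to_string(), "250".to_string());

        let default_page_size = 1000_usize; // --rest-max-mempool-page-size
        let max_txs = query_params
            .get("max_txs")
            .and_then(|s| s.parse::<usize>().ok())
            .unwrap_or(default_page_size);
        assert_eq!(max_txs, 250);
    }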
---
 src/config.rs | 8 ++++++++
 src/rest.rs   | 6 +++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/config.rs b/src/config.rs
index 9c8cfc26..c10f6d90 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -58,6 +58,7 @@ pub struct Config {
     pub rest_default_block_limit: usize,
     pub rest_default_chain_txs_per_page: usize,
     pub rest_default_max_mempool_txs: usize,
+    pub rest_max_mempool_page_size: usize,
 
     #[cfg(feature = "liquid")]
     pub parent_network: BNetwork,
@@ -231,6 +232,12 @@ impl Config {
                 .help("The default number of mempool transactions returned by the txs endpoints.")
                 .default_value("50")
         )
+        .arg(
+            Arg::with_name("rest_max_mempool_page_size")
+                .long("rest-max-mempool-page-size")
+                .help("The maximum number of transactions returned by the paginated /internal/mempool/txs endpoint.")
+                .default_value("1000")
+        )
         .arg(
             Arg::with_name("electrum_txs_limit")
                 .long("electrum-txs-limit")
@@ -484,6 +491,7 @@ impl Config {
                 "rest_default_max_mempool_txs",
                 usize
             ),
+            rest_max_mempool_page_size: value_t_or_exit!(m, "rest_max_mempool_page_size", usize),
             jsonrpc_import: m.is_present("jsonrpc_import"),
             light_mode: m.is_present("light_mode"),
             address_search: m.is_present("address_search"),
diff --git a/src/rest.rs b/src/rest.rs
index 145ba346..375d351a 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1305,9 +1305,13 @@ fn handle_request(
             None,
         ) => {
             let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok());
+            let max_txs = query_params
+                .get("max_txs")
+                .and_then(|s| s.parse::<usize>().ok())
+                .unwrap_or(config.rest_max_mempool_page_size);
             let txs = query
                 .mempool()
-                .txs_page(10_000, last_seen_txid)
+                .txs_page(max_txs, last_seen_txid)
                 .into_iter()
                 .map(|tx| (tx, None))
                 .collect();
 
From b379d24cb1972976d4b787d6e4c0f664f144bfa6 Mon Sep 17 00:00:00 2001
From: wiz
Date: Mon, 27 Nov 2023 20:28:49 +0900
Subject: [PATCH 23/51] Fix docker username environment variable in github
 workflow

---
 .github/workflows/on-tag.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/on-tag.yml b/.github/workflows/on-tag.yml
index 77bb5ed0..f3e200e0 100644
--- a/.github/workflows/on-tag.yml
+++ b/.github/workflows/on-tag.yml
@@ -91,7 +91,7 @@ jobs:
           --cache-from "type=local,src=/tmp/.buildx-cache" \
           --cache-to "type=local,dest=/tmp/.buildx-cache" \
           --platform linux/amd64 \
-          --tag ${{ secrets.DOCKER_USERNAME }}/electrs:$TAG \
-          --tag ${{ secrets.DOCKER_USERNAME }}/electrs:latest \
+          --tag ${{ secrets.DOCKER_HUB_USER }}/electrs:$TAG \
+          --tag ${{ secrets.DOCKER_HUB_USER }}/electrs:latest \
           --output "type=registry" . \
           --build-arg commitHash=$SHORT_SHA
From 9b7d7016ab24640339e49a9fe2a522841b82ff52 Mon Sep 17 00:00:00 2001
From: junderw
Date: Thu, 30 Nov 2023 01:59:48 -0700
Subject: [PATCH 24/51] Feature: Log every 10k blocks to help show progress
 during initial sync

---
 src/new_index/schema.rs | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs
index 1446d682..7efc9099 100644
--- a/src/new_index/schema.rs
+++ b/src/new_index/schema.rs
@@ -329,7 +329,12 @@ impl Indexer {
             .added_blockhashes
             .write()
             .unwrap()
-            .extend(blocks.iter().map(|b| b.entry.hash()));
+            .extend(blocks.iter().map(|b| {
+                if b.entry.height() % 10_000 == 0 {
+                    info!("Tx indexing is up to height={}", b.entry.height());
+                }
+                b.entry.hash()
+            }));
     }
 
     fn index(&self, blocks: &[BlockEntry]) {
@@ -342,6 +347,9 @@ impl Indexer {
         let _timer = self.start_timer("index_process");
         let added_blockhashes = self.store.added_blockhashes.read().unwrap();
         for b in blocks {
+            if b.entry.height() % 10_000 == 0 {
+                info!("History indexing is up to height={}", b.entry.height());
+            }
             let blockhash = b.entry.hash();
             // TODO: replace by lookup into txstore_db?
             if !added_blockhashes.contains(blockhash) {
 
From cbd8a29cd2faa5c6bca881ea5d690a29014858a3 Mon Sep 17 00:00:00 2001
From: junderw
Date: Thu, 7 Dec 2023 01:45:10 -0700
Subject: [PATCH 25/51] ThreadPoolBuilder can fail when resources are busy

---
 src/new_index/schema.rs | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs
index 1446d682..e92a429d 100644
--- a/src/new_index/schema.rs
+++ b/src/new_index/schema.rs
@@ -1087,11 +1087,23 @@ fn lookup_txos(
     outpoints: &BTreeSet<OutPoint>,
     allow_missing: bool,
 ) -> HashMap<OutPoint, TxOut> {
-    let pool = rayon::ThreadPoolBuilder::new()
-        .num_threads(16) // we need to saturate SSD IOPS
-        .thread_name(|i| format!("lookup-txo-{}", i))
-        .build()
-        .unwrap();
+    let mut loop_count = 10;
+    let pool = loop {
+        match rayon::ThreadPoolBuilder::new()
+            .num_threads(16) // we need to saturate SSD IOPS
+            .thread_name(|i| format!("lookup-txo-{}", i))
+            .build()
+        {
+            Ok(pool) => break pool,
+            Err(e) => {
+                if loop_count == 0 {
+                    panic!("schema::lookup_txos failed to create a ThreadPool: {}", e);
+                }
+                std::thread::sleep(std::time::Duration::from_millis(50));
+                loop_count -= 1;
+            }
+        }
+    };
     pool.install(|| {
         outpoints
             .par_iter()
From 93e447bb789e4664c13c3015749e2c11a10ccf51 Mon Sep 17 00:00:00 2001
From: junderw
Date: Tue, 12 Dec 2023 01:49:06 -0700
Subject: [PATCH 26/51] Add: main_loop_delay arg

---
 src/bin/electrs.rs | 2 +-
 src/config.rs      | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs
index 68fa47ff..b0329d98 100644
--- a/src/bin/electrs.rs
+++ b/src/bin/electrs.rs
@@ -107,7 +107,7 @@ fn run_server(config: Arc<Config>) -> Result<()> {
     }
 
     loop {
-        if let Err(err) = signal.wait(Duration::from_millis(500), true) {
+        if let Err(err) = signal.wait(Duration::from_millis(config.main_loop_delay), true) {
             info!("stopping server: {}", err);
 
             electrs::util::spawn_thread("shutdown-thread-checker", || {
diff --git a/src/config.rs b/src/config.rs
index c10f6d90..c236b837 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -45,6 +45,7 @@ pub struct Config {
     pub monitoring_addr: SocketAddr,
     pub jsonrpc_import: bool,
     pub light_mode: bool,
+    pub main_loop_delay: u64,
     pub address_search: bool,
     pub index_unspendables: bool,
     pub cors: Option<String>,
@@ -168,6 +169,12 @@ impl Config {
                 .long("lightmode")
                 .help("Enable light mode for reduced storage")
         )
+        .arg(
+            Arg::with_name("main_loop_delay")
+                .long("main-loop-delay")
+                .help("The number of milliseconds the main loop will wait between loops.
(Can be shortened with SIGUSR1)") + .default_value("500") + ) .arg( Arg::with_name("address_search") .long("address-search") @@ -494,6 +501,7 @@ impl Config { rest_max_mempool_page_size: value_t_or_exit!(m, "rest_max_mempool_page_size", usize), jsonrpc_import: m.is_present("jsonrpc_import"), light_mode: m.is_present("light_mode"), + main_loop_delay: value_t_or_exit!(m, "main_loop_delay", u64), address_search: m.is_present("address_search"), index_unspendables: m.is_present("index_unspendables"), cors: m.value_of("cors").map(|s| s.to_string()), From d430e167c6791bf315fe78f784fafcca1cec7a45 Mon Sep 17 00:00:00 2001 From: junderw Date: Tue, 12 Dec 2023 01:54:23 -0700 Subject: [PATCH 27/51] Feat: Use main loop delay in start script and set node201 to 14000 --- start | 3 +++ 1 file changed, 3 insertions(+) diff --git a/start b/start index c5daad1a..16139f7b 100755 --- a/start +++ b/start @@ -77,6 +77,7 @@ do # prepare run-time variables UTXOS_LIMIT=500 ELECTRUM_TXS_LIMIT=500 + MAIN_LOOP_DELAY=500 DAEMON_CONF="${HOME}/${DAEMON}.conf" HTTP_SOCKET_FILE="${HOME}/socket/esplora-${DAEMON}-${NETWORK}" RPC_SOCKET_FILE="${HOME}/socket/electrum-${DAEMON}-${NETWORK}" @@ -90,6 +91,7 @@ do if [ "${NODENAME}" = "node201" ];then UTXOS_LIMIT=9000 ELECTRUM_TXS_LIMIT=9000 + MAIN_LOOP_DELAY=14000 fi # Run the popular address txt file generator before each run @@ -127,6 +129,7 @@ do --network "${NETWORK}" \ --daemon-dir "${HOME}" \ --db-dir "${DB_FOLDER}" \ + --main-loop-delay "${MAIN_LOOP_DELAY}" \ --rpc-socket-file "${RPC_SOCKET_FILE}" \ --http-socket-file "${HTTP_SOCKET_FILE}" \ --precache-scripts "${POPULAR_SCRIPTS_FILE}" \ From f0c9fa7c5fd4720faaec2c0f6617b23f7163d61c Mon Sep 17 00:00:00 2001 From: junderw Date: Thu, 4 Jan 2024 22:19:23 +0900 Subject: [PATCH 28/51] Fix: Liquid sigops was trying to count for pegins. 
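
Liquid peg-in inputs spend outputs that exist on the parent chain, not
in the local UTXO set, so the prevout lookups used for sigop counting
can never resolve them. Treat any transaction with a peg-in input the
way coinbases are treated and skip prevout-based counting entirely.

The predicate this patch introduces, shown standalone with stand-in
types (not the elements crate's real ones):

    struct TxIn { is_pegin: bool }
    struct Tx { input: Vec<TxIn>, coinbase: bool }
    impl Tx { fn is_coinbase(&self) -> bool { self.coinbase } }

    fn main() {
        let tx = Tx { input: vec![TxIn { is_pegin: true }], coinbase: false };
        let is_coinbase_or_pegin = tx.is_coinbase() || tx.input.iter().any(|i| i.is_pegin);
        assert!(is_coinbase_or_pegin); // sigop counting skips prevout lookups here
    }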
---
 src/util/transaction.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/util/transaction.rs b/src/util/transaction.rs
index db9ba066..c9ff29ae 100644
--- a/src/util/transaction.rs
+++ b/src/util/transaction.rs
@@ -136,11 +136,11 @@ pub(super) mod sigops {
         let mut prevouts = Vec::with_capacity(input_count);
 
         #[cfg(not(feature = "liquid"))]
-        let is_coinbase = tx.is_coin_base();
+        let is_coinbase_or_pegin = tx.is_coin_base();
         #[cfg(feature = "liquid")]
-        let is_coinbase = tx.is_coinbase();
+        let is_coinbase_or_pegin = tx.is_coinbase() || tx.input.iter().any(|input| input.is_pegin);
 
-        if !is_coinbase {
+        if !is_coinbase_or_pegin {
             for idx in 0..input_count {
                 prevouts.push(
                     *prevout_map
@@ -311,7 +311,7 @@ pub(super) mod sigops {
             return Ok(n_sigop_cost);
         }
         #[cfg(feature = "liquid")]
-        if tx.is_coinbase() {
+        if tx.is_coinbase() || tx.input.iter().any(|input| input.is_pegin) {
             return Ok(n_sigop_cost);
         }
         if tx.input.len() != previous_outputs.len() {
 
From 579a6cc9eb8de9d4548d9fb4fb8815286a2a8351 Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Thu, 25 Jan 2024 20:28:35 +0000
Subject: [PATCH 29/51] Add paged mempool txids endpoint

---
 src/config.rs            | 12 ++++++++++++
 src/new_index/mempool.rs | 15 +++++++++++++++
 src/rest.rs              | 11 +++++++++++
 3 files changed, 38 insertions(+)

diff --git a/src/config.rs b/src/config.rs
index c236b837..2c428048 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -60,6 +60,7 @@ pub struct Config {
     pub rest_default_chain_txs_per_page: usize,
     pub rest_default_max_mempool_txs: usize,
     pub rest_max_mempool_page_size: usize,
+    pub rest_max_mempool_txid_page_size: usize,
 
     #[cfg(feature = "liquid")]
     pub parent_network: BNetwork,
@@ -245,6 +246,12 @@ impl Config {
                 .help("The maximum number of transactions returned by the paginated /internal/mempool/txs endpoint.")
                 .default_value("1000")
         )
+        .arg(
+            Arg::with_name("rest_max_mempool_txid_page_size")
+                .long("rest-max-mempool-txid-page-size")
+                .help("The maximum number of transactions returned by the paginated /mempool/txids/page endpoint.")
+                .default_value("10000")
+        )
         .arg(
             Arg::with_name("electrum_txs_limit")
                 .long("electrum-txs-limit")
@@ -499,6 +506,11 @@ impl Config {
             rest_max_mempool_page_size: value_t_or_exit!(m, "rest_max_mempool_page_size", usize),
+            rest_max_mempool_txid_page_size: value_t_or_exit!(
+                m,
+                "rest_max_mempool_txid_page_size",
+                usize
+            ),
             jsonrpc_import: m.is_present("jsonrpc_import"),
             light_mode: m.is_present("light_mode"),
diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs
index ccd50fe1..2d0908f5 100644
--- a/src/new_index/mempool.rs
+++ b/src/new_index/mempool.rs
@@ -288,6 +288,21 @@ impl Mempool {
         self.txstore.keys().collect()
     }
 
+    // Get n txids after the given txid in the mempool
+    pub fn txids_page(&self, n: usize, start: Option<Txid>) -> Vec<&Txid> {
+        let _timer = self.latency.with_label_values(&["txs"]).start_timer();
+        let start_bound = match start {
+            Some(txid) => Excluded(txid),
+            None => Unbounded,
+        };
+
+        self.txstore
+            .range((start_bound, Unbounded))
+            .take(n)
+            .map(|(k, _v)| k)
+            .collect()
+    }
+
     // Get all txs in the mempool
     pub fn txs(&self) -> Vec<Transaction> {
         let _timer = self.latency.with_label_values(&["txs"]).start_timer();
diff --git a/src/rest.rs b/src/rest.rs
index 375d351a..0ac32f60 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -1256,6 +1256,17 @@ fn handle_request(
         (&Method::GET, Some(&"mempool"), Some(&"txids"), None, None, None) => {
             json_response(query.mempool().txids(), TTL_SHORT)
         }
+        (&Method::GET, Some(&"mempool"), Some(&"txids"), Some(&"page"), last_seen_txid, None) => {
+            let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok());
+            let max_txs = query_params
+                .get("max_txs")
+                .and_then(|s| s.parse::<usize>().ok())
+                .unwrap_or(config.rest_max_mempool_txid_page_size);
+            json_response(
+                query.mempool().txids_page(max_txs, last_seen_txid),
+                TTL_SHORT,
+            )
+        }
         (
             &Method::GET,
             Some(&INTERNAL_PREFIX),
 
From 054b2511fd4ab6d71f97faa862bb503568a2e6cc Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Thu, 22 Feb 2024 22:24:53 +0000
Subject: [PATCH 30/51] Fix histogram timer labels

---
 src/new_index/mempool.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs
index 2d0908f5..c3841d52 100644
--- a/src/new_index/mempool.rs
+++ b/src/new_index/mempool.rs
@@ -290,7 +290,10 @@ impl Mempool {
 
     // Get n txids after the given txid in the mempool
     pub fn txids_page(&self, n: usize, start: Option<Txid>) -> Vec<&Txid> {
-        let _timer = self.latency.with_label_values(&["txs"]).start_timer();
+        let _timer = self
+            .latency
+            .with_label_values(&["txids_page"])
+            .start_timer();
         let start_bound = match start {
             Some(txid) => Excluded(txid),
             None => Unbounded,
@@ -311,7 +314,7 @@ impl Mempool {
 
     // Get n txs after the given txid in the mempool
     pub fn txs_page(&self, n: usize, start: Option<Txid>) -> Vec<Transaction> {
-        let _timer = self.latency.with_label_values(&["txs"]).start_timer();
+        let _timer = self.latency.with_label_values(&["txs_page"]).start_timer();
         let mut page = Vec::with_capacity(n);
         let start_bound = match start {
             Some(txid) => Excluded(txid),
 
From 378e036750ab6fa3e8baf5909153f5ba105bbb6a Mon Sep 17 00:00:00 2001
From: Mononaut
Date: Mon, 18 Mar 2024 07:23:02 +0000
Subject: [PATCH 31/51] Add /address/:addr/txs/summary endpoint
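
Walk an address's confirmed history newest-first (optionally resuming
after a last_seen_txid cursor) and collapse the per-output funding and
spending rows into one net-value entry per transaction: funding adds to
the entry, spending subtracts, both saturating. Collection stops after
`limit` distinct transactions, and IndexMap keeps the entries in
first-seen (newest-first) order. On Liquid, output values are
confidential, so the summary records 0.

A sketch of the per-txid netting over an assumed event stream (the real
code keys the map by Txid and also carries height and block time):

    use std::collections::HashMap;

    fn main() {
        // (txid, signed value): positive = funding, negative = spending
        let events = [("tx_b", 7_000_i64), ("tx_a", 5_000), ("tx_a", -2_000)];
        let mut net: HashMap<&str, i64> = HashMap::new();
        for (txid, delta) in events {
            *net.entry(txid).or_insert(0) += delta;
        }
        assert_eq!(net["tx_a"], 3_000); // one summary row per transaction
    }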
[ "elements" ] -electrum-discovery = [ "electrum-client"] +liquid = ["elements"] +electrum-discovery = ["electrum-client"] [dependencies] arrayref = "0.3.6" @@ -64,6 +64,7 @@ tokio = { version = "1", features = ["sync", "macros"] } # optional dependencies for electrum-discovery electrum-client = { version = "0.8", optional = true } +indexmap = "2.2.5" [dev-dependencies] diff --git a/src/config.rs b/src/config.rs index 2c428048..8278d985 100644 --- a/src/config.rs +++ b/src/config.rs @@ -59,6 +59,7 @@ pub struct Config { pub rest_default_block_limit: usize, pub rest_default_chain_txs_per_page: usize, pub rest_default_max_mempool_txs: usize, + pub rest_default_max_address_summary_txs: usize, pub rest_max_mempool_page_size: usize, pub rest_max_mempool_txid_page_size: usize, @@ -240,6 +241,12 @@ impl Config { .help("The default number of mempool transactions returned by the txs endpoints.") .default_value("50") ) + .arg( + Arg::with_name("rest_default_max_address_summary_txs") + .long("rest-default-max-address-summary-txs") + .help("The default number of transactions returned by the address summary endpoints.") + .default_value("5000") + ) .arg( Arg::with_name("rest_max_mempool_page_size") .long("rest-max-mempool-page-size") @@ -505,6 +512,11 @@ impl Config { "rest_default_max_mempool_txs", usize ), + rest_default_max_address_summary_txs: value_t_or_exit!( + m, + "rest_default_max_address_summary_txs", + usize + ), rest_max_mempool_page_size: value_t_or_exit!(m, "rest_max_mempool_page_size", usize), rest_max_mempool_txid_page_size: value_t_or_exit!( m, diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs index 1fbe4830..a776f782 100644 --- a/src/new_index/schema.rs +++ b/src/new_index/schema.rs @@ -2,6 +2,7 @@ use bitcoin::hashes::sha256d::Hash as Sha256dHash; #[cfg(not(feature = "liquid"))] use bitcoin::util::merkleblock::MerkleBlock; use bitcoin::VarInt; +use indexmap::IndexMap; use itertools::Itertools; use rayon::prelude::*; use sha2::{Digest, Sha256}; @@ -512,6 +513,100 @@ impl ChainQuery { ) } + pub fn summary( + &self, + scripthash: &[u8], + last_seen_txid: Option<&Txid>, + limit: usize, + ) -> Vec { + // scripthash lookup + self._summary(b'H', scripthash, last_seen_txid, limit) + } + + fn _summary( + &self, + code: u8, + hash: &[u8], + last_seen_txid: Option<&Txid>, + limit: usize, + ) -> Vec { + let _timer_scan = self.start_timer("address_summary"); + let rows = self + .history_iter_scan_reverse(code, hash) + .map(TxHistoryRow::from_row) + .map(|row| (row.get_txid(), row.key.txinfo)) + .skip_while(|(txid, _)| { + // skip until we reach the last_seen_txid + last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid != txid) + }) + .skip_while(|(txid, _)| { + // skip the last_seen_txid itself + last_seen_txid.map_or(false, |last_seen_txid| last_seen_txid == txid) + }) + .filter_map(|(txid, info)| { + self.tx_confirming_block(&txid) + .map(|b| (txid, info, b.height, b.time)) + }); + + // collate utxo funding/spending events by transaction + let mut map: IndexMap = IndexMap::new(); + for (txid, info, height, time) in rows { + if !map.contains_key(&txid) && map.len() == limit { + break; + } + match info { + #[cfg(not(feature = "liquid"))] + TxHistoryInfo::Funding(info) => { + map.entry(txid) + .and_modify(|tx| { + tx.value = tx.value.saturating_add(info.value.try_into().unwrap_or(0)) + }) + .or_insert(TxHistorySummary { + txid, + value: info.value.try_into().unwrap_or(0), + height, + time, + }); + } + #[cfg(not(feature = "liquid"))] + TxHistoryInfo::Spending(info) => { + 
map.entry(txid) + .and_modify(|tx| { + tx.value = tx.value.saturating_sub(info.value.try_into().unwrap_or(0)) + }) + .or_insert(TxHistorySummary { + txid, + value: 0_i64.saturating_sub(info.value.try_into().unwrap_or(0)), + height, + time, + }); + } + #[cfg(feature = "liquid")] + TxHistoryInfo::Funding(_info) => { + map.entry(txid).or_insert(TxHistorySummary { + txid, + value: 0, + height, + time, + }); + } + #[cfg(feature = "liquid")] + TxHistoryInfo::Spending(_info) => { + map.entry(txid).or_insert(TxHistorySummary { + txid, + value: 0, + height, + time, + }); + } + #[cfg(feature = "liquid")] + _ => {} + } + } + + map.into_values().collect() + } + pub fn history( &self, scripthash: &[u8], @@ -1573,6 +1668,14 @@ impl TxHistoryInfo { } } +#[derive(Serialize, Deserialize)] +pub struct TxHistorySummary { + txid: Txid, + height: usize, + value: i64, + time: u32, +} + #[derive(Serialize, Deserialize)] struct TxEdgeKey { code: u8, diff --git a/src/rest.rs b/src/rest.rs index fdddce6a..c0f46142 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -22,7 +22,7 @@ use prometheus::{HistogramOpts, HistogramVec}; use tokio::sync::oneshot; use hyperlocal::UnixServerExt; -use std::fs; +use std::{cmp, fs}; #[cfg(feature = "liquid")] use { crate::elements::{peg::PegoutValue, AssetSorting, IssuanceValue}, @@ -957,6 +957,38 @@ fn handle_request( json_response(prepare_txs(txs, query, config), TTL_SHORT) } + ( + &Method::GET, + Some(script_type @ &"address"), + Some(script_str), + Some(&"txs"), + Some(&"summary"), + last_seen_txid, + ) + | ( + &Method::GET, + Some(script_type @ &"scripthash"), + Some(script_str), + Some(&"txs"), + Some(&"summary"), + last_seen_txid, + ) => { + let script_hash = to_scripthash(script_type, script_str, config.network_type)?; + let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok()); + let max_txs = cmp::max( + config.rest_default_max_address_summary_txs, + query_params + .get("max_txs") + .and_then(|s| s.parse::().ok()) + .unwrap_or(config.rest_default_max_address_summary_txs), + ); + + let summary = query + .chain() + .summary(&script_hash[..], last_seen_txid.as_ref(), max_txs); + + json_response(summary, TTL_SHORT) + } ( &Method::GET, Some(script_type @ &"address"), From c5e308389b5427d09c6b26b5558bd106701320e2 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Tue, 19 Mar 2024 02:35:03 +0000 Subject: [PATCH 32/51] Fix max/min typo --- src/rest.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rest.rs b/src/rest.rs index c0f46142..df085441 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -975,7 +975,7 @@ fn handle_request( ) => { let script_hash = to_scripthash(script_type, script_str, config.network_type)?; let last_seen_txid = last_seen_txid.and_then(|txid| Txid::from_hex(txid).ok()); - let max_txs = cmp::max( + let max_txs = cmp::min( config.rest_default_max_address_summary_txs, query_params .get("max_txs") From 95557427c8b2ff30ae0c77ea16d67bd3a7986c8a Mon Sep 17 00:00:00 2001 From: Mononaut Date: Tue, 19 Mar 2024 08:20:05 +0000 Subject: [PATCH 33/51] Switch IndexMap to HashMap --- Cargo.lock | 23 ----------------------- Cargo.toml | 1 - src/new_index/schema.rs | 13 ++++++++++--- 3 files changed, 10 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48dd1c34..db6d368a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -426,12 +426,6 @@ dependencies = [ "slip21", ] -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - [[package]] name = "errno" version = "0.2.8" @@ -573,12 +567,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -[[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" - [[package]] name = "hermit-abi" version = "0.1.19" @@ -675,16 +663,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "indexmap" -version = "2.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" -dependencies = [ - "equivalent", - "hashbrown", -] - [[package]] name = "instant" version = "0.1.12" @@ -861,7 +839,6 @@ dependencies = [ "hex", "hyper", "hyperlocal", - "indexmap", "itertools", "lazy_static", "libc", diff --git a/Cargo.toml b/Cargo.toml index 4333b5aa..58c5b579 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,7 +64,6 @@ tokio = { version = "1", features = ["sync", "macros"] } # optional dependencies for electrum-discovery electrum-client = { version = "0.8", optional = true } -indexmap = "2.2.5" [dev-dependencies] diff --git a/src/new_index/schema.rs b/src/new_index/schema.rs index a776f782..00ee3e89 100644 --- a/src/new_index/schema.rs +++ b/src/new_index/schema.rs @@ -2,7 +2,6 @@ use bitcoin::hashes::sha256d::Hash as Sha256dHash; #[cfg(not(feature = "liquid"))] use bitcoin::util::merkleblock::MerkleBlock; use bitcoin::VarInt; -use indexmap::IndexMap; use itertools::Itertools; use rayon::prelude::*; use sha2::{Digest, Sha256}; @@ -549,7 +548,7 @@ impl ChainQuery { }); // collate utxo funding/spending events by transaction - let mut map: IndexMap = IndexMap::new(); + let mut map: HashMap = HashMap::new(); for (txid, info, height, time) in rows { if !map.contains_key(&txid) && map.len() == limit { break; @@ -604,7 +603,15 @@ impl ChainQuery { } } - map.into_values().collect() + let mut tx_summaries = map.into_values().collect::>(); + tx_summaries.sort_by(|a, b| { + if a.height == b.height { + a.value.cmp(&b.value) + } else { + b.height.cmp(&a.height) + } + }); + tx_summaries } pub fn history( From 055ac9f4ab88aef71f9d9451dfbfaeb8563d185d Mon Sep 17 00:00:00 2001 From: Mononaut Date: Sat, 23 Mar 2024 09:08:45 +0000 Subject: [PATCH 34/51] Add testmempoolaccept endpoint --- src/daemon.rs | 26 ++++++++++++++++++++++++++ src/new_index/query.rs | 6 +++++- src/rest.rs | 12 ++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/daemon.rs b/src/daemon.rs index b794a1f9..01215a59 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -117,6 +117,26 @@ struct NetworkInfo { relayfee: f64, // in BTC/kB } +#[derive(Serialize, Deserialize, Debug)] +struct MempoolFees { + base: f64, + #[serde(rename = "effective-feerate")] + effective_feerate: f64, + #[serde(rename = "effective-includes")] + effective_includes: Vec, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct MempoolAcceptResult { + txid: String, + wtxid: String, + allowed: Option, + vsize: Option, + fees: Option, + #[serde(rename = "reject-reason")] + reject_reason: Option, +} + pub trait CookieGetter: Send + Sync { fn get(&self) -> Result>; } @@ -582,6 +602,12 @@ impl Daemon { .chain_err(|| "failed to parse txid") } + pub fn test_mempool_accept(&self, txhex: Vec) -> Result> { + let result = 
self.request("testmempoolaccept", json!([txhex]))?; + serde_json::from_value::>(result) + .chain_err(|| "invalid testmempoolaccept reply") + } + // Get estimated feerates for the provided confirmation targets using a batch RPC request // Missing estimates are logged but do not cause a failure, whatever is available is returned #[allow(clippy::float_cmp)] diff --git a/src/new_index/query.rs b/src/new_index/query.rs index 3003a256..f7d2c78d 100644 --- a/src/new_index/query.rs +++ b/src/new_index/query.rs @@ -6,7 +6,7 @@ use std::time::{Duration, Instant}; use crate::chain::{Network, OutPoint, Transaction, TxOut, Txid}; use crate::config::Config; -use crate::daemon::Daemon; +use crate::daemon::{Daemon, MempoolAcceptResult}; use crate::errors::*; use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo}; use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus}; @@ -87,6 +87,10 @@ impl Query { Ok(txid) } + pub fn test_mempool_accept(&self, txhex: Vec) -> Result> { + self.daemon.test_mempool_accept(txhex) + } + pub fn utxo(&self, scripthash: &[u8]) -> Result> { let mut utxos = self.chain.utxo( scripthash, diff --git a/src/rest.rs b/src/rest.rs index df085441..f75f34a0 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1202,6 +1202,18 @@ fn handle_request( .map_err(|err| HttpError::from(err.description().to_string()))?; http_message(StatusCode::OK, txid.to_hex(), 0) } + (&Method::POST, Some(&"txs"), Some(&"test"), None, None, None) => { + let txhexes: Vec = String::from_utf8(body.to_vec())? + .split(',') + .map(|s| s.to_string()) + .collect(); + + let result = query + .test_mempool_accept(txhexes) + .map_err(|err| HttpError::from(err.description().to_string()))?; + + json_response(result, TTL_SHORT) + } (&Method::GET, Some(&"txs"), Some(&"outspends"), None, None, None) => { let txid_strings: Vec<&str> = query_params .get("txids") From 946ea714eda165d6c77e5066ef8bfefff9c24622 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Sun, 24 Mar 2024 05:30:24 +0000 Subject: [PATCH 35/51] testmempoolaccept add maxfeerate param --- src/daemon.rs | 12 ++++++++++-- src/new_index/query.rs | 8 ++++++-- src/rest.rs | 9 ++++++++- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/daemon.rs b/src/daemon.rs index 01215a59..7d4dafa3 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -602,8 +602,16 @@ impl Daemon { .chain_err(|| "failed to parse txid") } - pub fn test_mempool_accept(&self, txhex: Vec) -> Result> { - let result = self.request("testmempoolaccept", json!([txhex]))?; + pub fn test_mempool_accept( + &self, + txhex: Vec, + maxfeerate: Option, + ) -> Result> { + let params = match maxfeerate { + Some(rate) => json!([txhex, format!("{:.8}", rate)]), + None => json!([txhex]), + }; + let result = self.request("testmempoolaccept", params)?; serde_json::from_value::>(result) .chain_err(|| "invalid testmempoolaccept reply") } diff --git a/src/new_index/query.rs b/src/new_index/query.rs index f7d2c78d..c64cd6f9 100644 --- a/src/new_index/query.rs +++ b/src/new_index/query.rs @@ -87,8 +87,12 @@ impl Query { Ok(txid) } - pub fn test_mempool_accept(&self, txhex: Vec) -> Result> { - self.daemon.test_mempool_accept(txhex) + pub fn test_mempool_accept( + &self, + txhex: Vec, + maxfeerate: Option, + ) -> Result> { + self.daemon.test_mempool_accept(txhex, maxfeerate) } pub fn utxo(&self, scripthash: &[u8]) -> Result> { diff --git a/src/rest.rs b/src/rest.rs index f75f34a0..cb620d4a 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1207,9 +1207,16 @@ fn handle_request( .split(',') .map(|s| 
s.to_string()) .collect(); + let maxfeerate = query_params + .get("maxfeerate") + .map(|s| { + s.parse::() + .map_err(|_| HttpError::from("Invalid maxfeerate".to_string())) + }) + .transpose()?; let result = query - .test_mempool_accept(txhexes) + .test_mempool_accept(txhexes, maxfeerate) .map_err(|err| HttpError::from(err.description().to_string()))?; json_response(result, TTL_SHORT) From 569af75849cc9e359325b0ba25606e4c6310de95 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Sun, 24 Mar 2024 06:34:15 +0000 Subject: [PATCH 36/51] Add testmempoolaccept pre-checks --- src/rest.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/rest.rs b/src/rest.rs index cb620d4a..5ab38b7a 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1215,6 +1215,19 @@ fn handle_request( }) .transpose()?; + // pre-checks + txhexes.iter().try_for_each(|txhex| { + // each transaction must be of reasonable size (more than 60 bytes, within 400kWU standardness limit) + if !(120..800_000).contains(&txhex.len()) { + Result::Err(HttpError::from("Invalid transaction size".to_string())) + } else { + // must be a valid hex string + Vec::::from_hex(txhex) + .map_err(|_| HttpError::from("Invalid transaction hex".to_string())) + .map(|_| ()) + } + })?; + let result = query .test_mempool_accept(txhexes, maxfeerate) .map_err(|err| HttpError::from(err.description().to_string()))?; From f7385ce392472e3cc6e49a3bac72b1b0cf298c1a Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 25 Mar 2024 05:03:38 +0000 Subject: [PATCH 37/51] testmempoolaccept JSON input, tx limit, error indices --- src/rest.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/rest.rs b/src/rest.rs index 5ab38b7a..71478312 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1203,10 +1203,15 @@ fn handle_request( http_message(StatusCode::OK, txid.to_hex(), 0) } (&Method::POST, Some(&"txs"), Some(&"test"), None, None, None) => { - let txhexes: Vec = String::from_utf8(body.to_vec())? - .split(',') - .map(|s| s.to_string()) - .collect(); + let txhexes: Vec = + serde_json::from_str(String::from_utf8(body.to_vec())?.as_str())?; + + if txhexes.len() > 25 { + Result::Err(HttpError::from( + "Exceeded maximum of 25 transactions".to_string(), + ))? 
+ } + let maxfeerate = query_params .get("maxfeerate") .map(|s| { @@ -1216,14 +1221,19 @@ fn handle_request( .transpose()?; // pre-checks - txhexes.iter().try_for_each(|txhex| { + txhexes.iter().enumerate().try_for_each(|(index, txhex)| { // each transaction must be of reasonable size (more than 60 bytes, within 400kWU standardness limit) if !(120..800_000).contains(&txhex.len()) { - Result::Err(HttpError::from("Invalid transaction size".to_string())) + Result::Err(HttpError::from(format!( + "Invalid transaction size for item {}", + index + ))) } else { // must be a valid hex string Vec::::from_hex(txhex) - .map_err(|_| HttpError::from("Invalid transaction hex".to_string())) + .map_err(|_| { + HttpError::from(format!("Invalid transaction hex for item {}", index)) + }) .map(|_| ()) } })?; From ac32e4b1c3053c4a86fe4415e062ff48a1e5775e Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 25 Mar 2024 05:54:33 +0000 Subject: [PATCH 38/51] testmempoolaccept maxfeerate f32 -> f64 --- src/daemon.rs | 2 +- src/new_index/query.rs | 2 +- src/rest.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/daemon.rs b/src/daemon.rs index 7d4dafa3..254c168e 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -605,7 +605,7 @@ impl Daemon { pub fn test_mempool_accept( &self, txhex: Vec, - maxfeerate: Option, + maxfeerate: Option, ) -> Result> { let params = match maxfeerate { Some(rate) => json!([txhex, format!("{:.8}", rate)]), diff --git a/src/new_index/query.rs b/src/new_index/query.rs index c64cd6f9..3e314fd1 100644 --- a/src/new_index/query.rs +++ b/src/new_index/query.rs @@ -90,7 +90,7 @@ impl Query { pub fn test_mempool_accept( &self, txhex: Vec, - maxfeerate: Option, + maxfeerate: Option, ) -> Result> { self.daemon.test_mempool_accept(txhex, maxfeerate) } diff --git a/src/rest.rs b/src/rest.rs index 71478312..eab06de5 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1215,7 +1215,7 @@ fn handle_request( let maxfeerate = query_params .get("maxfeerate") .map(|s| { - s.parse::() + s.parse::() .map_err(|_| HttpError::from("Invalid maxfeerate".to_string())) }) .transpose()?; From e6b0d9ffaa8af71ca3d53e20c99369219ff51d22 Mon Sep 17 00:00:00 2001 From: wiz Date: Tue, 16 Apr 2024 14:03:05 +0900 Subject: [PATCH 39/51] ops: Adjust limits on Fremont site --- start | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/start b/start index 16139f7b..945c6b7c 100755 --- a/start +++ b/start @@ -93,6 +93,10 @@ do ELECTRUM_TXS_LIMIT=9000 MAIN_LOOP_DELAY=14000 fi + if [ "${LOCATION}" = "fmt" ];then + UTXOS_LIMIT=9000 + ELECTRUM_TXS_LIMIT=9000 + fi # Run the popular address txt file generator before each run POPULAR_SCRIPTS_FOLDER="${HOME}/popular-scripts/${NETWORK}" From 1abe9c4e51498abe613e4bd390498d49a2eb6a15 Mon Sep 17 00:00:00 2001 From: Gustavo Spier Landtreter Date: Sat, 20 Apr 2024 01:54:17 -0300 Subject: [PATCH 40/51] Fixed regression introduced by #51 Commit merged as part of #51 introduced a regression that prevents the daemon from breaking from its startup wait loop when running in `regtest` mode, and the blockchain already contains 1 or more blocks (apart from genesis). This commit fixes the regression by only checking the equivalence between blocks and headers as the wait condition when running in `regtest` mode. 
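
Note on the fix: the corrected wait predicate can be exercised in isolation. The sketch below is illustrative rather than code from this patch: `BlockchainInfo` is pared down to the three fields the startup loop inspects, and `ibd_done` restates the post-fix condition shown in the diff that follows.

```rust
/// Simplified stand-in for the daemon's getblockchaininfo reply;
/// only the fields consulted by the startup wait loop are modeled.
struct BlockchainInfo {
    blocks: u64,
    headers: u64,
    initialblockdownload: Option<bool>,
}

/// The wait predicate after this patch: on regtest, IBD is treated as done
/// once the block count has caught up with the header count; on all other
/// networks we defer to bitcoind's own `initialblockdownload` flag.
fn ibd_done(is_regtest: bool, info: &BlockchainInfo) -> bool {
    if is_regtest {
        info.blocks == info.headers
    } else {
        !info.initialblockdownload.unwrap_or(false)
    }
}

fn main() {
    // Regtest chain that already has blocks beyond genesis: the old
    // expression (`blocks == 0 && headers == 0`, OR'd with the IBD flag)
    // only matched a completely fresh chain, and regtest bitcoind can keep
    // `initialblockdownload` set, so the loop never broke out.
    let regtest = BlockchainInfo {
        blocks: 2,
        headers: 2,
        initialblockdownload: Some(true),
    };
    assert!(ibd_done(true, &regtest));

    // Mainnet mid-sync: headers ahead of blocks, IBD flag still set.
    let mainnet = BlockchainInfo {
        blocks: 100_000,
        headers: 840_000,
        initialblockdownload: Some(true),
    };
    assert!(!ibd_done(false, &mainnet));
}
```

On regtest there is no reliable IBD signal to defer to, so block/header equality is the natural readiness check; other networks continue to trust bitcoind's flag.
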
--- src/daemon.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/daemon.rs b/src/daemon.rs index 254c168e..3f370bf8 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -341,10 +341,10 @@ impl Daemon { let mempool = daemon.getmempoolinfo()?; let ibd_done = if network.is_regtest() { - info.blocks == 0 && info.headers == 0 + info.blocks == info.headers } else { - false - } || !info.initialblockdownload.unwrap_or(false); + !info.initialblockdownload.unwrap_or(false) + }; if mempool.loaded && ibd_done && info.blocks == info.headers { break; From cd9efdff46f51325e4922d04cfb4c2c3b64c2a79 Mon Sep 17 00:00:00 2001 From: wiz Date: Fri, 26 Apr 2024 00:19:53 +0900 Subject: [PATCH 41/51] ops: Increase limits on node213/node214 --- start | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/start b/start index 945c6b7c..25330d24 100755 --- a/start +++ b/start @@ -93,6 +93,14 @@ do ELECTRUM_TXS_LIMIT=9000 MAIN_LOOP_DELAY=14000 fi + if [ "${NODENAME}" = "node213" ];then + UTXOS_LIMIT=9000 + ELECTRUM_TXS_LIMIT=9000 + fi + if [ "${NODENAME}" = "node214" ];then + UTXOS_LIMIT=9000 + ELECTRUM_TXS_LIMIT=9000 + fi if [ "${LOCATION}" = "fmt" ];then UTXOS_LIMIT=9000 ELECTRUM_TXS_LIMIT=9000 From c2445a3462d5c46a91d0578af6ec438218dce887 Mon Sep 17 00:00:00 2001 From: junderw Date: Wed, 1 May 2024 21:08:03 +0900 Subject: [PATCH 42/51] Shorten mempool lock holding for update --- src/bin/electrs.rs | 21 +++++++++- src/new_index/mempool.rs | 91 ++++++++++++++++++++++++---------------- 2 files changed, 75 insertions(+), 37 deletions(-) diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs index 91a51eef..ec4b387a 100644 --- a/src/bin/electrs.rs +++ b/src/bin/electrs.rs @@ -74,7 +74,18 @@ fn run_server(config: Arc) -> Result<()> { &metrics, Arc::clone(&config), ))); - mempool.write().unwrap().update(&daemon)?; + loop { + match Mempool::update(&mempool, &daemon) { + Ok(_) => break, + Err(e) => { + warn!( + "Error performing initial mempool update, trying again in 5 seconds: {}", + e.display_chain() + ); + signal.wait(Duration::from_secs(5), false)?; + } + } + } #[cfg(feature = "liquid")] let asset_db = config.asset_db_path.as_ref().map(|db_dir| { @@ -136,7 +147,13 @@ fn run_server(config: Arc) -> Result<()> { }; // Update mempool - mempool.write().unwrap().update(&daemon)?; + if let Err(e) = Mempool::update(&mempool, &daemon) { + // Log the error if the result is an Err + warn!( + "Error updating mempool, skipping mempool update: {}", + e.display_chain() + ); + } // Update subscribed clients electrum_server.notify(); diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs index c3841d52..b706b3d3 100644 --- a/src/new_index/mempool.rs +++ b/src/new_index/mempool.rs @@ -9,7 +9,7 @@ use elements::{encode::serialize, AssetId}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::iter::FromIterator; use std::ops::Bound::{Excluded, Unbounded}; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use crate::chain::{deserialize, Network, OutPoint, Transaction, TxOut, Txid}; @@ -343,46 +343,67 @@ impl Mempool { &self.backlog_stats.0 } - pub fn update(&mut self, daemon: &Daemon) -> Result<()> { - let _timer = self.latency.with_label_values(&["update"]).start_timer(); - let new_txids = daemon + pub fn unique_txids(&self) -> HashSet { + return HashSet::from_iter(self.txstore.keys().cloned()); + } + + pub fn update(mempool: &RwLock, daemon: &Daemon) -> Result<()> { + // 1. 
Start the metrics timer and get the current mempool txids + // [LOCK] Takes read lock for whole scope. + let (_timer, old_txids) = { + let mempool = mempool.read().unwrap(); + ( + mempool.latency.with_label_values(&["update"]).start_timer(), + mempool.unique_txids(), + ) + }; + + // 2. Get all the mempool txids from the RPC. + // [LOCK] No lock taken. Wait for RPC request. Get lists of remove/add txes. + let all_txids = daemon .getmempooltxids() .chain_err(|| "failed to update mempool from daemon")?; - let old_txids = HashSet::from_iter(self.txstore.keys().cloned()); - let to_remove: HashSet<&Txid> = old_txids.difference(&new_txids).collect(); - - // Download and add new transactions from bitcoind's mempool - let txids: Vec<&Txid> = new_txids.difference(&old_txids).collect(); - let to_add = match daemon.gettransactions(&txids) { - Ok(txs) => txs, - Err(err) => { - warn!("failed to get {} transactions: {}", txids.len(), err); // e.g. new block or RBF - return Ok(()); // keep the mempool until next update() + let txids_to_remove: HashSet<&Txid> = old_txids.difference(&all_txids).collect(); + let txids_to_add: Vec<&Txid> = all_txids.difference(&old_txids).collect(); + + // 3. Remove missing transactions. Even if we are unable to download new transactions from + // the daemon, we still want to remove the transactions that are no longer in the mempool. + // [LOCK] Write lock is released at the end of the call to remove(). + mempool.write().unwrap().remove(txids_to_remove); + + // 4. Download the new transactions from the daemon's mempool + // [LOCK] No lock taken, waiting for RPC response. + let txs_to_add = daemon + .gettransactions(&txids_to_add) + .chain_err(|| format!("failed to get {} transactions", txids_to_add.len()))?; + + // 4. Update local mempool to match daemon's state + // [LOCK] Takes Write lock for whole scope. 
+ { + let mut mempool = mempool.write().unwrap(); + // Add new transactions + if txs_to_add.len() > mempool.add(txs_to_add) { + debug!("Mempool update added less transactions than expected"); } - }; - // Add new transactions - if to_add.len() > self.add(to_add) { - debug!("Mempool update added less transactions than expected"); - } - // Remove missing transactions - self.remove(to_remove); - self.count - .with_label_values(&["txs"]) - .set(self.txstore.len() as f64); + mempool + .count + .with_label_values(&["txs"]) + .set(mempool.txstore.len() as f64); + + // Update cached backlog stats (if expired) + if mempool.backlog_stats.1.elapsed() + > Duration::from_secs(mempool.config.mempool_backlog_stats_ttl) + { + let _timer = mempool + .latency + .with_label_values(&["update_backlog_stats"]) + .start_timer(); + mempool.backlog_stats = (BacklogStats::new(&mempool.feeinfo), Instant::now()); + } - // Update cached backlog stats (if expired) - if self.backlog_stats.1.elapsed() - > Duration::from_secs(self.config.mempool_backlog_stats_ttl) - { - let _timer = self - .latency - .with_label_values(&["update_backlog_stats"]) - .start_timer(); - self.backlog_stats = (BacklogStats::new(&self.feeinfo), Instant::now()); + Ok(()) } - - Ok(()) } pub fn add_by_txid(&mut self, daemon: &Daemon, txid: &Txid) -> Result<()> { From 9e0ecad4d4f07c10dad35b91d4aa5b8355052e34 Mon Sep 17 00:00:00 2001 From: junderw Date: Wed, 1 May 2024 23:53:39 +0900 Subject: [PATCH 43/51] Prevent duplicate history events --- src/new_index/mempool.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs index b706b3d3..8a94962f 100644 --- a/src/new_index/mempool.rs +++ b/src/new_index/mempool.rs @@ -439,8 +439,12 @@ impl Mempool { // Phase 1: add to txstore for tx in txs { let txid = tx.txid(); - txids.push(txid); - self.txstore.insert(txid, tx); + // Only push if it doesn't already exist. + // This is important now that update doesn't lock during + // the entire function body. 
+ if self.txstore.insert(txid, tx).is_none() { + txids.push(txid); + } } // Phase 2: index history and spend edges (some txos can be missing) From d486a36539d5c3eac4ce87d78906ceafd85eed0d Mon Sep 17 00:00:00 2001 From: wiz Date: Mon, 6 May 2024 22:24:09 +0900 Subject: [PATCH 44/51] Add Testnet4 support --- src/chain.rs | 9 +++++++++ src/config.rs | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/src/chain.rs b/src/chain.rs index de726186..0627512e 100644 --- a/src/chain.rs +++ b/src/chain.rs @@ -32,6 +32,8 @@ pub enum Network { #[cfg(not(feature = "liquid"))] Testnet, #[cfg(not(feature = "liquid"))] + Testnet4, + #[cfg(not(feature = "liquid"))] Regtest, #[cfg(not(feature = "liquid"))] Signet, @@ -135,6 +137,8 @@ pub fn bitcoin_genesis_hash(network: BNetwork) -> bitcoin::BlockHash { genesis_block(BNetwork::Bitcoin).block_hash(); static ref TESTNET_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Testnet).block_hash(); + static ref TESTNET4_GENESIS: bitcoin::BlockHash = + genesis_block(BNetwork::Testnet4).block_hash(); static ref REGTEST_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Regtest).block_hash(); static ref SIGNET_GENESIS: bitcoin::BlockHash = @@ -143,6 +147,7 @@ pub fn bitcoin_genesis_hash(network: BNetwork) -> bitcoin::BlockHash { match network { BNetwork::Bitcoin => *BITCOIN_GENESIS, BNetwork::Testnet => *TESTNET_GENESIS, + BNetwork::Testnet4 => *TESTNET4_GENESIS, BNetwork::Regtest => *REGTEST_GENESIS, BNetwork::Signet => *SIGNET_GENESIS, } @@ -174,6 +179,8 @@ impl From<&str> for Network { #[cfg(not(feature = "liquid"))] "testnet" => Network::Testnet, #[cfg(not(feature = "liquid"))] + "testnet4" => Network::Testnet4, + #[cfg(not(feature = "liquid"))] "regtest" => Network::Regtest, #[cfg(not(feature = "liquid"))] "signet" => Network::Signet, @@ -196,6 +203,7 @@ impl From for BNetwork { match network { Network::Bitcoin => BNetwork::Bitcoin, Network::Testnet => BNetwork::Testnet, + Network::Testnet4 => BNetwork::Testnet4, Network::Regtest => BNetwork::Regtest, Network::Signet => BNetwork::Signet, } @@ -208,6 +216,7 @@ impl From for Network { match network { BNetwork::Bitcoin => Network::Bitcoin, BNetwork::Testnet => Network::Testnet, + BNetwork::Testnet4 => Network::Testnet4, BNetwork::Regtest => Network::Regtest, BNetwork::Signet => Network::Signet, } diff --git a/src/config.rs b/src/config.rs index 8278d985..a27bbf28 100644 --- a/src/config.rs +++ b/src/config.rs @@ -353,6 +353,8 @@ impl Config { Network::Regtest => 18443, #[cfg(not(feature = "liquid"))] Network::Signet => 38332, + #[cfg(not(feature = "liquid"))] + Network::Testnet4 => 48332, #[cfg(feature = "liquid")] Network::Liquid => 7041, @@ -365,6 +367,8 @@ impl Config { #[cfg(not(feature = "liquid"))] Network::Testnet => 60001, #[cfg(not(feature = "liquid"))] + Network::Testnet4 => 40001, + #[cfg(not(feature = "liquid"))] Network::Regtest => 60401, #[cfg(not(feature = "liquid"))] Network::Signet => 60601, @@ -385,6 +389,8 @@ impl Config { Network::Regtest => 3002, #[cfg(not(feature = "liquid"))] Network::Signet => 3003, + #[cfg(not(feature = "liquid"))] + Network::Testnet4 => 3004, #[cfg(feature = "liquid")] Network::Liquid => 3000, @@ -401,6 +407,8 @@ impl Config { #[cfg(not(feature = "liquid"))] Network::Regtest => 24224, #[cfg(not(feature = "liquid"))] + Network::Testnet4 => 44224, + #[cfg(not(feature = "liquid"))] Network::Signet => 54224, #[cfg(feature = "liquid")] @@ -449,6 +457,8 @@ impl Config { #[cfg(not(feature = "liquid"))] Network::Testnet => daemon_dir.push("testnet3"), 
#[cfg(not(feature = "liquid"))] + Network::Testnet4 => daemon_dir.push("testnet4"), + #[cfg(not(feature = "liquid"))] Network::Regtest => daemon_dir.push("regtest"), #[cfg(not(feature = "liquid"))] Network::Signet => daemon_dir.push("signet"), From 14e427d8e8c5a33b29bc3b1a7cef0292a5c7f243 Mon Sep 17 00:00:00 2001 From: wiz Date: Mon, 6 May 2024 23:00:54 +0900 Subject: [PATCH 45/51] Update start script for testnet4 instance --- start | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/start b/start index 25330d24..867590cd 100755 --- a/start +++ b/start @@ -43,6 +43,10 @@ case "${1}" in NETWORK=testnet THREADS=$((NPROC / 6)) ;; + testnet4) + NETWORK=testnet4 + THREADS=$((NPROC / 6)) + ;; signet) NETWORK=signet THREADS=$((NPROC / 6)) @@ -60,7 +64,7 @@ case "${1}" in THREADS=$((NPROC / 6)) ;; *) - echo "Usage: $0 (mainnet|testnet|signet|liquid|liquidtestnet)" + echo "Usage: $0 (mainnet|testnet|testnet4|signet|liquid|liquidtestnet)" exit 1 ;; esac From cdb60c948a6c54009d5c5a355da1be92590f99e5 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 6 May 2024 15:01:03 +0000 Subject: [PATCH 46/51] Configurable network magic --- src/bin/electrs.rs | 1 + src/bin/tx-fingerprint-stats.rs | 1 + src/config.rs | 10 ++++++++++ src/daemon.rs | 6 +++++- 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs index 91a51eef..a4c0408b 100644 --- a/src/bin/electrs.rs +++ b/src/bin/electrs.rs @@ -50,6 +50,7 @@ fn run_server(config: Arc) -> Result<()> { config.daemon_rpc_addr, config.cookie_getter(), config.network_type, + config.magic, signal.clone(), &metrics, )?); diff --git a/src/bin/tx-fingerprint-stats.rs b/src/bin/tx-fingerprint-stats.rs index 55cb6797..5b38561f 100644 --- a/src/bin/tx-fingerprint-stats.rs +++ b/src/bin/tx-fingerprint-stats.rs @@ -35,6 +35,7 @@ fn main() { config.daemon_rpc_addr, config.cookie_getter(), config.network_type, + config.magic, signal, &metrics, ) diff --git a/src/config.rs b/src/config.rs index a27bbf28..ac87c598 100644 --- a/src/config.rs +++ b/src/config.rs @@ -33,6 +33,7 @@ pub struct Config { // See below for the documentation of each field: pub log: stderrlog::StdErrLog, pub network_type: Network, + pub magic: Option, pub db_path: PathBuf, pub daemon_dir: PathBuf, pub blocks_dir: PathBuf, @@ -137,6 +138,11 @@ impl Config { .help(&network_help) .takes_value(true), ) + .arg( + Arg::with_name("magic") + .long("magic") + .takes_value(true), + ) .arg( Arg::with_name("electrum_rpc_addr") .long("electrum-rpc-addr") @@ -328,6 +334,9 @@ impl Config { let network_name = m.value_of("network").unwrap_or("mainnet"); let network_type = Network::from(network_name); + let magic: Option = m + .value_of("magic") + .map(|s| u32::from_str_radix(s, 16).expect("invalid network magic")); let db_dir = Path::new(m.value_of("db_dir").unwrap_or("./db")); let db_path = db_dir.join(network_name); @@ -496,6 +505,7 @@ impl Config { let config = Config { log, network_type, + magic, db_path, daemon_dir, blocks_dir, diff --git a/src/daemon.rs b/src/daemon.rs index 3f370bf8..b8bde690 100644 --- a/src/daemon.rs +++ b/src/daemon.rs @@ -284,6 +284,7 @@ pub struct Daemon { daemon_dir: PathBuf, blocks_dir: PathBuf, network: Network, + magic: Option, conn: Mutex, message_id: Counter, // for monotonic JSONRPC 'id' signal: Waiter, @@ -300,6 +301,7 @@ impl Daemon { daemon_rpc_addr: SocketAddr, cookie_getter: Arc, network: Network, + magic: Option, signal: Waiter, metrics: &Metrics, ) -> Result { @@ -307,6 +309,7 @@ impl Daemon { daemon_dir, blocks_dir, 
network, + magic, conn: Mutex::new(Connection::new( daemon_rpc_addr, cookie_getter, @@ -367,6 +370,7 @@ impl Daemon { daemon_dir: self.daemon_dir.clone(), blocks_dir: self.blocks_dir.clone(), network: self.network, + magic: self.magic, conn: Mutex::new(self.conn.lock().unwrap().reconnect()?), message_id: Counter::new(), signal: self.signal.clone(), @@ -387,7 +391,7 @@ impl Daemon { } pub fn magic(&self) -> u32 { - self.network.magic() + self.magic.unwrap_or_else(|| self.network.magic()) } fn call_jsonrpc(&self, method: &str, request: &Value) -> Result { From 69bfa5beff609f218753f459e6a8892e65a09714 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 6 May 2024 15:01:15 +0000 Subject: [PATCH 47/51] Hardcode testnet4 genesis hash --- src/chain.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/chain.rs b/src/chain.rs index 0627512e..8abf9a4a 100644 --- a/src/chain.rs +++ b/src/chain.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + #[cfg(not(feature = "liquid"))] // use regular Bitcoin data structures pub use bitcoin::{ blockdata::{opcodes, script, witness::Witness}, @@ -131,25 +133,25 @@ pub fn genesis_hash(network: Network) -> BlockHash { return liquid_genesis_hash(network); } -pub fn bitcoin_genesis_hash(network: BNetwork) -> bitcoin::BlockHash { +pub fn bitcoin_genesis_hash(network: Network) -> bitcoin::BlockHash { lazy_static! { static ref BITCOIN_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Bitcoin).block_hash(); static ref TESTNET_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Testnet).block_hash(); static ref TESTNET4_GENESIS: bitcoin::BlockHash = - genesis_block(BNetwork::Testnet4).block_hash(); + BlockHash::from_str("00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be350b0da8bf043").unwrap(); static ref REGTEST_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Regtest).block_hash(); static ref SIGNET_GENESIS: bitcoin::BlockHash = genesis_block(BNetwork::Signet).block_hash(); } match network { - BNetwork::Bitcoin => *BITCOIN_GENESIS, - BNetwork::Testnet => *TESTNET_GENESIS, - BNetwork::Testnet4 => *TESTNET4_GENESIS, - BNetwork::Regtest => *REGTEST_GENESIS, - BNetwork::Signet => *SIGNET_GENESIS, + Network::Bitcoin => *BITCOIN_GENESIS, + Network::Testnet => *TESTNET_GENESIS, + Network::Testnet4 => *TESTNET4_GENESIS, + Network::Regtest => *REGTEST_GENESIS, + Network::Signet => *SIGNET_GENESIS, } } @@ -203,7 +205,7 @@ impl From for BNetwork { match network { Network::Bitcoin => BNetwork::Bitcoin, Network::Testnet => BNetwork::Testnet, - Network::Testnet4 => BNetwork::Testnet4, + Network::Testnet4 => BNetwork::Testnet, Network::Regtest => BNetwork::Regtest, Network::Signet => BNetwork::Signet, } @@ -216,7 +218,6 @@ impl From for Network { match network { BNetwork::Bitcoin => Network::Bitcoin, BNetwork::Testnet => Network::Testnet, - BNetwork::Testnet4 => Network::Testnet4, BNetwork::Regtest => Network::Regtest, BNetwork::Signet => Network::Signet, } From c6b8be94b7c576117d74ef3397a90a1d659a99c6 Mon Sep 17 00:00:00 2001 From: wiz Date: Tue, 7 May 2024 00:08:59 +0900 Subject: [PATCH 48/51] ops: Add magic bytes for testnet4 to start script --- start | 2 ++ 1 file changed, 2 insertions(+) diff --git a/start b/start index 867590cd..ae4ee6b0 100755 --- a/start +++ b/start @@ -45,6 +45,7 @@ case "${1}" in ;; testnet4) NETWORK=testnet4 + MAGIC=283f161c THREADS=$((NPROC / 6)) ;; signet) @@ -152,6 +153,7 @@ do --precache-threads "${THREADS}" \ --cookie "${RPC_USER}:${RPC_PASS}" \ --cors '*' \ + --magic "${MAGIC}" \ --address-search \ 
--utxos-limit "${UTXOS_LIMIT}" \ --electrum-txs-limit "${ELECTRUM_TXS_LIMIT}" \ From 181785b5c1cb6a9280e8212ddfcc99bd508fabe4 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 6 May 2024 15:16:00 +0000 Subject: [PATCH 49/51] Allow empty magic --- src/config.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/config.rs b/src/config.rs index ac87c598..a5e903ce 100644 --- a/src/config.rs +++ b/src/config.rs @@ -141,6 +141,7 @@ impl Config { .arg( Arg::with_name("magic") .long("magic") + .default_value("") .takes_value(true), ) .arg( @@ -336,6 +337,7 @@ impl Config { let network_type = Network::from(network_name); let magic: Option = m .value_of("magic") + .filter(|s| !s.is_empty()) .map(|s| u32::from_str_radix(s, 16).expect("invalid network magic")); let db_dir = Path::new(m.value_of("db_dir").unwrap_or("./db")); let db_path = db_dir.join(network_name); From 17dc10e0edea401e3f663530d1f057e7a6ae52a5 Mon Sep 17 00:00:00 2001 From: Mononaut Date: Mon, 6 May 2024 16:05:13 +0000 Subject: [PATCH 50/51] Fix testnet4 address lookups --- src/rest.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rest.rs b/src/rest.rs index eab06de5..2e061b11 100644 --- a/src/rest.rs +++ b/src/rest.rs @@ -1668,7 +1668,7 @@ fn address_to_scripthash(addr: &str, network: Network) -> Result Date: Tue, 7 May 2024 01:39:27 +0900 Subject: [PATCH 51/51] ops: Remove UTXO limit for testnet4 --- start | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/start b/start index ae4ee6b0..7e5cd80f 100755 --- a/start +++ b/start @@ -102,7 +102,11 @@ do UTXOS_LIMIT=9000 ELECTRUM_TXS_LIMIT=9000 fi - if [ "${NODENAME}" = "node214" ];then + if [ "${NODENAME}" = "node213" ];then + UTXOS_LIMIT=9000 + ELECTRUM_TXS_LIMIT=9000 + fi + if [ "${NETWORK}" = "testnet4" ];then UTXOS_LIMIT=9000 ELECTRUM_TXS_LIMIT=9000 fi