diff --git a/.env b/.env index 9f6393cee7..5a5fa4bd3e 100644 --- a/.env +++ b/.env @@ -43,6 +43,10 @@ PG_APPLICATION_NAME=stacks-blockchain-api # Limit to how many concurrent connections can be created, defaults to 10 # PG_CONNECTION_POOL_MAX=10 +# Insert concurrency when processing new blocks +# If your PostgreSQL is operating on SSD and has multiple CPU cores, consider raising this value, for instance, to 8 or 16. +# STACKS_BLOCK_DATA_INSERT_CONCURRENCY=4 + # If specified, controls the Stacks Blockchain API mode. The possible values are: # * `readonly`: Runs the API endpoints without an Event Server that listens to events from a node and # writes them to the local database. The API will only read data from the PG database diff --git a/.nvmrc b/.nvmrc index 3f430af82b..9a2a0e219c 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v18 +v20 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c587dd712..89a25bcf9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,68 @@ +## [7.8.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.1...v7.8.2) (2024-02-19) + + +### Bug Fixes + +* report placeholder in prom metrics for invalid request paths ([#1867](https://github.com/hirosystems/stacks-blockchain-api/issues/1867)) ([7921488](https://github.com/hirosystems/stacks-blockchain-api/commit/79214883a5c58724ddc3e7d7b57381317cb6e27d)) + +## [7.8.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0...v7.8.1) (2024-02-02) + + +### Bug Fixes + +* **rosetta:** use /v2/fees/transaction for fee estimation ([b287b7b](https://github.com/hirosystems/stacks-blockchain-api/commit/b287b7bb3426719553e9ffa3b88178fb24207a6b)) +* sql transactional consistency bug with fetching chaintip in various areas ([#1853](https://github.com/hirosystems/stacks-blockchain-api/issues/1853)) ([07339c0](https://github.com/hirosystems/stacks-blockchain-api/commit/07339c08f3f42bc7b08c2e830939bfadcd308cb0)) + +## [7.8.0](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.7.2...v7.8.0) (2024-01-23) + + +### Features + +* add `/extended/v2/smart-contracts/status` endpoint ([#1833](https://github.com/hirosystems/stacks-blockchain-api/issues/1833)) ([3535c11](https://github.com/hirosystems/stacks-blockchain-api/commit/3535c113e0d3b730b3e0d9df630c51b04e516a7e)) +* run inserts in batch and in parallel when processing new block ([#1818](https://github.com/hirosystems/stacks-blockchain-api/issues/1818)) ([86dfdb5](https://github.com/hirosystems/stacks-blockchain-api/commit/86dfdb5d536fee8d7490ca5213f7005a8800f9fa)) +* upgrade to node 20, use bookworm-slim image ([#1832](https://github.com/hirosystems/stacks-blockchain-api/issues/1832)) ([0a42109](https://github.com/hirosystems/stacks-blockchain-api/commit/0a42109242ab5804004e01338f236f61ef07651b)) + + +### Bug Fixes + +* change all HASH indexes to BTREE to optimize writes ([#1825](https://github.com/hirosystems/stacks-blockchain-api/issues/1825)) ([234936b](https://github.com/hirosystems/stacks-blockchain-api/commit/234936b430640fb7108e6cb57bdb21d1085a65b2)) +* log block event counts after processing ([#1820](https://github.com/hirosystems/stacks-blockchain-api/issues/1820)) ([9c39743](https://github.com/hirosystems/stacks-blockchain-api/commit/9c397439e6eb2830186cda90a213b3ab3d5a4301)), closes [#1819](https://github.com/hirosystems/stacks-blockchain-api/issues/1819) [#1819](https://github.com/hirosystems/stacks-blockchain-api/issues/1819) +* optimize re-org queries and indexes 
([#1821](https://github.com/hirosystems/stacks-blockchain-api/issues/1821)) ([5505d35](https://github.com/hirosystems/stacks-blockchain-api/commit/5505d354ecae6e52c751b3b634752fd56d24642f)) +* parallelize re-org update queries ([#1835](https://github.com/hirosystems/stacks-blockchain-api/issues/1835)) ([340a304](https://github.com/hirosystems/stacks-blockchain-api/commit/340a3043529ca12316198d8f4605128396f02560)) + +## [7.8.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.3...v7.8.0-beta.4) (2024-01-16) + + +### Features + +* upgrade to node 20, use bookworm-slim image ([#1832](https://github.com/hirosystems/stacks-blockchain-api/issues/1832)) ([0a42109](https://github.com/hirosystems/stacks-blockchain-api/commit/0a42109242ab5804004e01338f236f61ef07651b)) + +## [7.8.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.2...v7.8.0-beta.3) (2024-01-12) + + +### Bug Fixes + +* change all HASH indexes to BTREE to optimize writes ([#1825](https://github.com/hirosystems/stacks-blockchain-api/issues/1825)) ([234936b](https://github.com/hirosystems/stacks-blockchain-api/commit/234936b430640fb7108e6cb57bdb21d1085a65b2)) + +## [7.8.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.1...v7.8.0-beta.2) (2024-01-12) + + +### Bug Fixes + +* optimize re-org queries and indexes ([#1821](https://github.com/hirosystems/stacks-blockchain-api/issues/1821)) ([5505d35](https://github.com/hirosystems/stacks-blockchain-api/commit/5505d354ecae6e52c751b3b634752fd56d24642f)) + +## [7.8.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.7.1...v7.8.0-beta.1) (2024-01-11) + + +### Features + +* run inserts in batch and in parallel when processing new block ([#1818](https://github.com/hirosystems/stacks-blockchain-api/issues/1818)) ([86dfdb5](https://github.com/hirosystems/stacks-blockchain-api/commit/86dfdb5d536fee8d7490ca5213f7005a8800f9fa)) + +### Bug Fixes + +* log block event counts after processing ([#1820](https://github.com/hirosystems/stacks-blockchain-api/issues/1820)) ([9c39743](https://github.com/hirosystems/stacks-blockchain-api/commit/9c397439e6eb2830186cda90a213b3ab3d5a4301)), closes [#1819](https://github.com/hirosystems/stacks-blockchain-api/issues/1819) [#1819](https://github.com/hirosystems/stacks-blockchain-api/issues/1819) + + ## [7.7.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.7.1...v7.7.2) (2024-01-16) diff --git a/Dockerfile b/Dockerfile index c9ec7724ac..672eae2fa2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ -FROM node:18-bullseye +FROM node:20-bookworm-slim WORKDIR /app COPY . . 
COPY --from=qldrsc/duckdb /usr/local/bin/duckdb /bin/duckdb RUN apt-get update && \ - apt-get install -y git openjdk-11-jre && \ + apt-get install -y git openjdk-17-jre && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* RUN echo "GIT_TAG=$(git tag --points-at HEAD)" >> .env diff --git a/docker/rosetta.Dockerfile b/docker/rosetta.Dockerfile index 24fa3db5f9..09636a41ff 100644 --- a/docker/rosetta.Dockerfile +++ b/docker/rosetta.Dockerfile @@ -12,7 +12,7 @@ ARG ARCHIVE_VERSION=latest ####################################################################### ## Build the stacks-blockchain-api -FROM node:18-buster as stacks-blockchain-api-build +FROM node:20-bookworm-slim as stacks-blockchain-api-build ARG STACKS_API_VERSION ENV STACKS_API_REPO=hirosystems/stacks-blockchain-api ENV STACKS_API_VERSION=${STACKS_API_VERSION} @@ -20,9 +20,10 @@ ENV DEBIAN_FRONTEND noninteractive WORKDIR /app RUN apt-get update -y \ && apt-get install -y \ + git \ curl \ jq \ - openjdk-11-jre-headless \ + openjdk-17-jre-headless \ cmake \ && git clone -b ${STACKS_API_VERSION} https://github.com/${STACKS_API_REPO} . \ && echo "GIT_TAG=$(git tag --points-at HEAD)" >> .env \ @@ -102,7 +103,7 @@ RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen RUN curl -sL https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" > /etc/apt/sources.list.d/pgsql.list \ - && curl -sL https://deb.nodesource.com/setup_16.x | bash - + && curl -sL https://deb.nodesource.com/setup_20.x | bash - RUN apt-get update \ && apt-get install -y \ postgresql-${PG_VERSION} \ diff --git a/docker/standalone-regtest.Dockerfile b/docker/standalone-regtest.Dockerfile index 33532c5d4e..17c7c6ae94 100644 --- a/docker/standalone-regtest.Dockerfile +++ b/docker/standalone-regtest.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -FROM node:18-bullseye as api-builder +FROM node:20-bookworm-slim as api-builder ARG API_GIT_COMMIT ARG STACKS_API_VERSION diff --git a/docs/api/smart-contracts/get-smart-contracts-status.example.json b/docs/api/smart-contracts/get-smart-contracts-status.example.json new file mode 100644 index 0000000000..15f66ee27c --- /dev/null +++ b/docs/api/smart-contracts/get-smart-contracts-status.example.json @@ -0,0 +1,31 @@ +{ + "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1": { + "found": true, + "result": { + "contract_id": "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1", + "status": "success", + "tx_id": "0x8542d28e427256ea3c29dcd8793222891999ceff4ef1bb062e2f21cb6def6884", + "block_height": 111021 + } + }, + "SP1JTCR202ECC6333N7ZXD7MK7E3ZTEEE1MJ73C60.name-registrar": { + "found": true, + "result": { + "contract_id": "SP1JTCR202ECC6333N7ZXD7MK7E3ZTEEE1MJ73C60.name-registrar", + "status": "success", + "tx_id": "0x6e1114cce8c6f2e9c8130f9acd75d67bb667ae584f882acdd2db6dd74e6cbe5e", + "block_height": 113010 + } + }, + "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core-v1": { + "found": true, + "result": { + "contract_id": "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core-v1", + "status": "pending", + "tx_id": "0x10bdcf10ffee72994f493ac36760f4e95a76c8471370182fd4705c2153dc173d" + } + }, + "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core": { + "found": false + } +} diff --git a/docs/api/smart-contracts/get-smart-contracts-status.schema.json b/docs/api/smart-contracts/get-smart-contracts-status.schema.json new file mode 100644 index 0000000000..2385c6afa0 --- 
/dev/null +++ b/docs/api/smart-contracts/get-smart-contracts-status.schema.json @@ -0,0 +1,15 @@ +{ + "description": "GET request that returns the deployment status of multiple smart contracts", + "title": "SmartContractsStatusResponse", + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "./smart-contract-found.schema.json" + }, + { + "$ref": "./smart-contract-not-found.schema.json" + } + ] + } +} diff --git a/docs/api/smart-contracts/smart-contract-found.schema.json b/docs/api/smart-contracts/smart-contract-found.schema.json new file mode 100644 index 0000000000..6d781711df --- /dev/null +++ b/docs/api/smart-contracts/smart-contract-found.schema.json @@ -0,0 +1,15 @@ +{ + "type": "object", + "title": "SmartContractFound", + "additionalProperties": false, + "required": ["found", "result"], + "properties": { + "found": { + "type": "boolean", + "enum": [true] + }, + "result": { + "$ref": "../../entities/smart-contracts/smart-contract-status.schema.json" + } + } +} diff --git a/docs/api/smart-contracts/smart-contract-not-found.schema.json b/docs/api/smart-contracts/smart-contract-not-found.schema.json new file mode 100644 index 0000000000..4a4d63514c --- /dev/null +++ b/docs/api/smart-contracts/smart-contract-not-found.schema.json @@ -0,0 +1,12 @@ +{ + "type": "object", + "title": "SmartContractNotFound", + "additionalProperties": false, + "properties": { + "found": { + "type": "boolean", + "enum": [false] + } + }, + "required": ["found"] +} diff --git a/docs/entities/smart-contracts/smart-contract-status.example.json b/docs/entities/smart-contracts/smart-contract-status.example.json new file mode 100644 index 0000000000..ac5cb17bbb --- /dev/null +++ b/docs/entities/smart-contracts/smart-contract-status.example.json @@ -0,0 +1,6 @@ +{ + "contract_id": "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1", + "status": "success", + "tx_id": "0x8542d28e427256ea3c29dcd8793222891999ceff4ef1bb062e2f21cb6def6884", + "block_height": 111021 +} diff --git a/docs/entities/smart-contracts/smart-contract-status.schema.json b/docs/entities/smart-contracts/smart-contract-status.schema.json new file mode 100644 index 0000000000..116cf8c65b --- /dev/null +++ b/docs/entities/smart-contracts/smart-contract-status.schema.json @@ -0,0 +1,29 @@ +{ + "title": "SmartContractStatus", + "description": "Deployment status of a smart contract", + "type": "object", + "additionalProperties": false, + "required": [ + "status", + "tx_id", + "contract_id" + ], + "properties": { + "status": { + "type": "string", + "description": "Smart contract deployment transaction status" + }, + "tx_id": { + "type": "string", + "description": "Deployment transaction ID" + }, + "contract_id": { + "type": "string", + "description": "Smart contract ID" + }, + "block_height": { + "type": "integer", + "description": "Height of the transaction confirmation block" + } + } +} diff --git a/docs/generated.d.ts b/docs/generated.d.ts index f9952d5cfa..095800439f 100644 --- a/docs/generated.d.ts +++ b/docs/generated.d.ts @@ -93,6 +93,9 @@ export type SchemaMergeRootStub = | SearchSuccessResult | TxSearchResult | SearchResult + | SmartContractsStatusResponse + | SmartContractFound + | SmartContractNotFound | PoolDelegationsResponse | { [k: string]: unknown | undefined; @@ -177,6 +180,7 @@ export type SchemaMergeRootStub = | RosettaSyncStatus | TransactionIdentifier | RosettaTransaction + | SmartContractStatus | PoolDelegation | NonFungibleTokenHistoryEventWithTxId | NonFungibleTokenHistoryEventWithTxMetadata @@ 
-3059,6 +3063,40 @@ export interface TxSearchResult { metadata?: Transaction; }; } +/** + * GET request that returns the deployment status of multiple smart contracts + */ +export interface SmartContractsStatusResponse { + [k: string]: (SmartContractFound | SmartContractNotFound) | undefined; +} +export interface SmartContractFound { + found: true; + result: SmartContractStatus; +} +/** + * Deployment status of a smart contract + */ +export interface SmartContractStatus { + /** + * Smart contract deployment transaction status + */ + status: string; + /** + * Deployment transaction ID + */ + tx_id: string; + /** + * Smart contract ID + */ + contract_id: string; + /** + * Height of the transaction confirmation block + */ + block_height?: number; +} +export interface SmartContractNotFound { + found: false; +} /** * GET request that returns stacking pool member details for a given pool (delegator) principal */ diff --git a/docs/openapi.yaml b/docs/openapi.yaml index ad618f9946..345d98c844 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -834,6 +834,36 @@ paths: example: $ref: ./api/transaction/get-transactions.example.json + /extended/v2/smart-contracts/status: + get: + summary: Get smart contracts status + description: | + Retrieves the deployment status of multiple smart contracts. + tags: + - Smart Contracts + operationId: get_smart_contracts_status + parameters: + - name: contract_id + in: query + description: contract ids to fetch status for + required: true + style: form + explode: true + schema: + type: array + example: "SPQZF23W7SEYBFG5JQ496NMY0G7379SRYEDREMSV.Candy" + items: + type: string + responses: + 200: + description: List of smart contract status + content: + application/json: + schema: + $ref: ./api/smart-contracts/get-smart-contracts-status.schema.json + example: + $ref: ./api/smart-contracts/get-smart-contracts-status.example.json + /extended/v1/block: get: summary: Get recent blocks @@ -3367,10 +3397,12 @@ paths: endpoint with an estimation of the final length (in bytes) of the transaction, including any post-conditions and signatures + If the node cannot provide an estimate for the transaction (e.g., if the node has never seen a contract-call for the given contract and function) or if estimation is not configured on this node, a 400 response is returned. + The 400 response will be a JSON error containing a `reason` field which can be one of the following: * `DatabaseError` - this Stacks node has had an internal @@ -3382,6 +3414,7 @@ paths: * `CostEstimationDisabled` - this Stacks node does not perform fee or cost estimation, and it cannot respond on this endpoint. + The 200 response contains the following data: * `estimated_cost` - the estimated multi-dimensional cost of executing the Clarity VM on the provided transaction. @@ -3410,6 +3443,7 @@ paths: If the estimated fees are less than the minimum relay fee `(1 ustx x estimated_len)`, then that minimum relay fee will be returned here instead. 
+ Note: If the final transaction's byte size is larger than supplied to `estimated_len`, then applications should increase this fee amount by: diff --git a/migrations/1705013096459_update-re-org-indexes.js b/migrations/1705013096459_update-re-org-indexes.js new file mode 100644 index 0000000000..64d8ec649d --- /dev/null +++ b/migrations/1705013096459_update-re-org-indexes.js @@ -0,0 +1,91 @@ +/* eslint-disable camelcase */ + +exports.shorthands = undefined; + +exports.up = pgm => { + pgm.dropIndex('txs', 'index_block_hash'); + pgm.createIndex('txs', ['index_block_hash', 'canonical']); + + pgm.dropIndex('miner_rewards', 'index_block_hash'); + pgm.createIndex('miner_rewards', ['index_block_hash', 'canonical']); + + pgm.dropIndex('stx_lock_events', 'index_block_hash'); + pgm.createIndex('stx_lock_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('stx_events', 'index_block_hash'); + pgm.createIndex('stx_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('ft_events', 'index_block_hash'); + pgm.createIndex('ft_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('nft_events', 'index_block_hash'); + pgm.createIndex('nft_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox2_events', 'index_block_hash'); + pgm.createIndex('pox2_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox3_events', 'index_block_hash'); + pgm.createIndex('pox3_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox4_events', 'index_block_hash'); + pgm.createIndex('pox4_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('contract_logs', 'index_block_hash'); + pgm.createIndex('contract_logs', ['index_block_hash', 'canonical']); + + pgm.dropIndex('smart_contracts', 'index_block_hash'); + pgm.createIndex('smart_contracts', ['index_block_hash', 'canonical']); + + pgm.dropIndex('names', 'index_block_hash'); + pgm.createIndex('names', ['index_block_hash', 'canonical']); + + pgm.dropIndex('namespaces', 'index_block_hash'); + pgm.createIndex('namespaces', ['index_block_hash', 'canonical']); + + pgm.dropIndex('subdomains', 'index_block_hash'); + pgm.createIndex('subdomains', ['index_block_hash', 'canonical']); +}; + +exports.down = pgm => { + pgm.dropIndex('txs', ['index_block_hash', 'canonical']); + pgm.createIndex('txs', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('miner_rewards', ['index_block_hash', 'canonical']); + pgm.createIndex('miner_rewards', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('stx_lock_events', ['index_block_hash', 'canonical']); + pgm.createIndex('stx_lock_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('stx_events', ['index_block_hash', 'canonical']); + pgm.createIndex('stx_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('ft_events', ['index_block_hash', 'canonical']); + pgm.createIndex('ft_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('nft_events', ['index_block_hash', 'canonical']); + pgm.createIndex('nft_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox2_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox2_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox3_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox3_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox4_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox4_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('contract_logs', ['index_block_hash', 
'canonical']); + pgm.createIndex('contract_logs', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('smart_contracts', ['index_block_hash', 'canonical']); + pgm.createIndex('smart_contracts', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('names', ['index_block_hash', 'canonical']); + pgm.createIndex('names', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('namespaces', ['index_block_hash', 'canonical']); + pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('subdomains', ['index_block_hash', 'canonical']); + pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' }); +}; diff --git a/migrations/1705077567281_remove-hash-indexes.js b/migrations/1705077567281_remove-hash-indexes.js new file mode 100644 index 0000000000..2b07d6954c --- /dev/null +++ b/migrations/1705077567281_remove-hash-indexes.js @@ -0,0 +1,108 @@ +/* eslint-disable camelcase */ + +exports.shorthands = undefined; + +function replaceIndex(pgm, table, column, method = 'btree') { + pgm.dropIndex(table, column); + pgm.createIndex(table, column, { method: method }); +} + +exports.up = pgm => { + pgm.dropIndex('txs', [{ name: 'tx_index', sort: 'DESC' }], { ifExists: true }); + pgm.dropIndex('txs', 'tx_id', { ifExists: true }); + replaceIndex(pgm, 'txs', 'token_transfer_recipient_address'); + replaceIndex(pgm, 'txs', 'sponsor_address'); + replaceIndex(pgm, 'txs', 'smart_contract_contract_id'); + replaceIndex(pgm, 'txs', 'sender_address'); + replaceIndex(pgm, 'txs', 'microblock_hash'); + replaceIndex(pgm, 'txs', 'contract_call_contract_id'); + + replaceIndex(pgm, 'stx_events', 'tx_id'); + replaceIndex(pgm, 'stx_events', 'sender'); + replaceIndex(pgm, 'stx_events', 'recipient'); + replaceIndex(pgm, 'stx_events', 'microblock_hash'); + + replaceIndex(pgm, 'miner_rewards', 'recipient'); + + pgm.dropIndex('stx_lock_events', 'block_height', { ifExists: true }); + replaceIndex(pgm, 'stx_lock_events', 'tx_id'); + replaceIndex(pgm, 'stx_lock_events', 'microblock_hash'); + replaceIndex(pgm, 'stx_lock_events', 'locked_address'); + + replaceIndex(pgm, 'ft_events', 'tx_id'); + replaceIndex(pgm, 'ft_events', 'sender'); + replaceIndex(pgm, 'ft_events', 'recipient'); + replaceIndex(pgm, 'ft_events', 'microblock_hash'); + + replaceIndex(pgm, 'nft_events', 'tx_id'); + replaceIndex(pgm, 'nft_events', 'sender'); + replaceIndex(pgm, 'nft_events', 'recipient'); + replaceIndex(pgm, 'nft_events', 'microblock_hash'); + replaceIndex(pgm, 'nft_events', 'asset_identifier'); + + replaceIndex(pgm, 'contract_logs', 'tx_id'); + replaceIndex(pgm, 'contract_logs', 'microblock_hash'); + + replaceIndex(pgm, 'smart_contracts', 'contract_id'); + replaceIndex(pgm, 'smart_contracts', 'microblock_hash'); + + pgm.dropIndex('principal_stx_txs', 'principal', { ifExists: true }); + replaceIndex(pgm, 'principal_stx_txs', 'tx_id'); + + pgm.dropIndex('mempool_txs', 'tx_id', { ifExists: true }); + replaceIndex(pgm, 'mempool_txs', 'token_transfer_recipient_address'); + replaceIndex(pgm, 'mempool_txs', 'sponsor_address'); + replaceIndex(pgm, 'mempool_txs', 'smart_contract_contract_id'); + replaceIndex(pgm, 'mempool_txs', 'sender_address'); + replaceIndex(pgm, 'mempool_txs', 'contract_call_contract_id'); +}; + +exports.down = pgm => { + pgm.createIndex('txs', [{ name: 'tx_index', sort: 'DESC' }]); + pgm.createIndex('txs', 'tx_id', { method: 'hash' }); + replaceIndex(pgm, 'txs', 'token_transfer_recipient_address', 'hash'); + replaceIndex(pgm, 'txs', 'sponsor_address', 'hash'); + replaceIndex(pgm, 'txs', 
'smart_contract_contract_id', 'hash'); + replaceIndex(pgm, 'txs', 'sender_address', 'hash'); + replaceIndex(pgm, 'txs', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'txs', 'contract_call_contract_id', 'hash'); + + replaceIndex(pgm, 'stx_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'stx_events', 'sender', 'hash'); + replaceIndex(pgm, 'stx_events', 'recipient', 'hash'); + replaceIndex(pgm, 'stx_events', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'miner_rewards', 'recipient', 'hash'); + + pgm.createIndex('stx_lock_events', [{ name: 'block_height', sort: 'DESC' }]); + replaceIndex(pgm, 'stx_lock_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'stx_lock_events', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'stx_lock_events', 'locked_address', 'hash'); + + replaceIndex(pgm, 'ft_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'ft_events', 'sender', 'hash'); + replaceIndex(pgm, 'ft_events', 'recipient', 'hash'); + replaceIndex(pgm, 'ft_events', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'nft_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'nft_events', 'sender', 'hash'); + replaceIndex(pgm, 'nft_events', 'recipient', 'hash'); + replaceIndex(pgm, 'nft_events', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'nft_events', 'asset_identifier', 'hash'); + + replaceIndex(pgm, 'contract_logs', 'tx_id', 'hash'); + replaceIndex(pgm, 'contract_logs', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'smart_contracts', 'contract_id', 'hash'); + replaceIndex(pgm, 'smart_contracts', 'microblock_hash', 'hash'); + + pgm.createIndex('principal_stx_txs', 'principal', { method: 'hash' }); + replaceIndex(pgm, 'principal_stx_txs', 'tx_id', 'hash'); + + pgm.createIndex('mempool_txs', 'tx_id', { method: 'hash' }); + replaceIndex(pgm, 'mempool_txs', 'token_transfer_recipient_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'sponsor_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'smart_contract_contract_id', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'sender_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'contract_call_contract_id', 'hash'); +}; diff --git a/package-lock.json b/package-lock.json index 94e15a732c..922bb629b1 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16,10 +16,10 @@ "@promster/types": "3.2.3", "@scure/base": "1.1.1", "@sinclair/typebox": "0.31.28", - "@stacks/common": "6.8.1", - "@stacks/network": "6.8.1", - "@stacks/stacking": "6.9.0", - "@stacks/transactions": "6.9.0", + "@stacks/common": "6.10.0", + "@stacks/network": "6.11.3", + "@stacks/stacking": "6.11.3", + "@stacks/transactions": "6.11.3", "@types/express-list-endpoints": "4.0.1", "@types/lru-cache": "5.1.1", "@types/ws": "7.4.7", @@ -87,7 +87,7 @@ "@types/express": "4.17.13", "@types/is-ci": "3.0.0", "@types/jest": "29.5.6", - "@types/node": "18.13.0", + "@types/node": "20.11.4", "@types/node-fetch": "2.5.12", "@types/pg": "7.14.11", "@types/pg-copy-streams": "1.2.1", @@ -121,7 +121,7 @@ "why-is-node-running": "2.2.0" }, "engines": { - "node": ">=18" + "node": ">=20" }, "optionalDependencies": { "bufferutil": "4.0.5", @@ -2542,23 +2542,31 @@ "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==" }, "node_modules/@stacks/common": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.8.1.tgz", - "integrity": "sha512-ewL9GLZNQYa5a/3K4xSHlHIgHkD4rwWW/QEaPId8zQIaL+1O9qCaF4LX9orNQeOmEk8kvG0x2xGV54fXKCZeWQ==", + "version": "6.10.0", + "resolved": 
"https://registry.npmjs.org/@stacks/common/-/common-6.10.0.tgz", + "integrity": "sha512-6x5Z7AKd9/kj3+DYE9xIDIkFLHihBH614i2wqrZIjN02WxVo063hWSjIlUxlx8P4gl6olVzlOy5LzhLJD9OP0A==", "dependencies": { "@types/bn.js": "^5.1.0", "@types/node": "^18.0.4" } }, + "node_modules/@stacks/common/node_modules/@types/node": { + "version": "18.19.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.7.tgz", + "integrity": "sha512-IGRJfoNX10N/PfrReRZ1br/7SQ+2vF/tK3KXNwzXz82D32z5dMQEoOlFew18nLSN+vMNcLY4GrKfzwi/yWI8/w==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, "node_modules/@stacks/encryption": { - "version": "6.9.0", - "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.9.0.tgz", - "integrity": "sha512-hbpZ47eYgw9ZH5ly+GSgvw2Ffsu9L6d++2XIhvYSzL7yxYl4m1+FV5QYdJthJ2AS3vi8cI5otE254HTfCrhKzg==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.11.3.tgz", + "integrity": "sha512-nUA/21L8NnCw1vPetczWz3fjBCleqRgYfNGJX98AIDs9sjRQkxUfUGYz+3PlbpYgHWHIeRZafitQhMRpVhsbkQ==", "dependencies": { "@noble/hashes": "1.1.5", "@noble/secp256k1": "1.7.1", "@scure/bip39": "1.1.0", - "@stacks/common": "^6.8.1", + "@stacks/common": "^6.10.0", "@types/node": "^18.0.4", "base64-js": "^1.5.1", "bs58": "^5.0.0", @@ -2566,6 +2574,14 @@ "varuint-bitcoin": "^1.1.2" } }, + "node_modules/@stacks/encryption/node_modules/@types/node": { + "version": "18.19.14", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.14.tgz", + "integrity": "sha512-EnQ4Us2rmOS64nHDWr0XqAD8DsO6f3XR6lf9UIIrZQpUzPVdN/oPuEzfDWNHSyXLvoGgjuEm/sPwFGSSs35Wtg==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, "node_modules/@stacks/eslint-config": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@stacks/eslint-config/-/eslint-config-1.2.0.tgz", @@ -2876,11 +2892,11 @@ } }, "node_modules/@stacks/network": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.8.1.tgz", - "integrity": "sha512-n8M25pPbLqpSBctabtsLOTBlmPvm9EPQpTI//x7HLdt5lEjDXxauEQt0XGSvDUZwecrmztqt9xNxlciiGApRBw==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.11.3.tgz", + "integrity": "sha512-c4ClCU/QUwuu8NbHtDKPJNa0M5YxauLN3vYaR0+S4awbhVIKFQSxirm9Q9ckV1WBh7FtD6u2S0x+tDQGAODjNg==", "dependencies": { - "@stacks/common": "^6.8.1", + "@stacks/common": "^6.10.0", "cross-fetch": "^3.1.5" } }, @@ -2906,16 +2922,16 @@ } }, "node_modules/@stacks/stacking": { - "version": "6.9.0", - "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.9.0.tgz", - "integrity": "sha512-nxTGwaVBE/M06P8bTlXPXlzeV/bOLaxbMd3ftAnQUu8ubX2UB/iEHywnAkrg/Bj2Qy9ZjKyi6QKw+aG38rVZaw==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.11.3.tgz", + "integrity": "sha512-b9SQ2KO2JFlQ+tnfmrVnmKtFe1b883NvVflKDV88EFOXbFreKjVr1FrRKOOM4x5GxlHDkcsYjgHGHm9hiNxwrg==", "dependencies": { "@scure/base": "1.1.1", - "@stacks/common": "^6.8.1", - "@stacks/encryption": "^6.9.0", - "@stacks/network": "^6.8.1", + "@stacks/common": "^6.10.0", + "@stacks/encryption": "^6.11.3", + "@stacks/network": "^6.11.3", "@stacks/stacks-blockchain-api-types": "^0.61.0", - "@stacks/transactions": "^6.9.0", + "@stacks/transactions": "^6.11.3", "bs58": "^5.0.0" } }, @@ -2925,14 +2941,14 @@ "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" }, "node_modules/@stacks/transactions": { - "version": "6.9.0", - "resolved": 
"https://registry.npmjs.org/@stacks/transactions/-/transactions-6.9.0.tgz", - "integrity": "sha512-hSs9+0Ew++GwMZMgPObOx0iVCQRxkiCqI+DHdPEikAmg2utpyLh2/txHOjfSIkQHvcBfJJ6O5KphmxDP4gUqiA==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.11.3.tgz", + "integrity": "sha512-Zb7ONYt8OJPTTdXQHobWqZ2mwTALpGt43PEsy2FpDgQzOodGk1lWDo1Jhzs3hhw/2ib5FE3iDMc6jptKe9miCg==", "dependencies": { "@noble/hashes": "1.1.5", "@noble/secp256k1": "1.7.1", - "@stacks/common": "^6.8.1", - "@stacks/network": "^6.8.1", + "@stacks/common": "^6.10.0", + "@stacks/network": "^6.11.3", "c32check": "^2.0.0", "lodash.clonedeep": "^4.5.0" } @@ -3221,9 +3237,12 @@ "dev": true }, "node_modules/@types/node": { - "version": "18.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", - "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==" + "version": "20.11.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.4.tgz", + "integrity": "sha512-6I0fMH8Aoy2lOejL3s4LhyIYX34DPwY8bl5xlNjBvUEk8OHrcuzsFt+Ied4LvJihbtXPM+8zUqdydfIti86v9g==", + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/node-fetch": { "version": "2.5.12", @@ -13669,6 +13688,11 @@ "node": ">=14.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unique-filename": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-3.0.0.tgz", diff --git a/package.json b/package.json index 3a3e0cf6a9..19043c5f4c 100644 --- a/package.json +++ b/package.json @@ -70,7 +70,7 @@ "homepage": "https://github.com/hirosystems/stacks-blockchain-api#readme", "prettier": "@stacks/prettier-config", "engines": { - "node": ">=18" + "node": ">=20" }, "engineStrict": true, "commitlint": { @@ -91,10 +91,10 @@ "@promster/types": "3.2.3", "@scure/base": "1.1.1", "@sinclair/typebox": "0.31.28", - "@stacks/common": "6.8.1", - "@stacks/network": "6.8.1", - "@stacks/stacking": "6.9.0", - "@stacks/transactions": "6.9.0", + "@stacks/common": "6.10.0", + "@stacks/network": "6.11.3", + "@stacks/stacking": "6.11.3", + "@stacks/transactions": "6.11.3", "@types/express-list-endpoints": "4.0.1", "@types/lru-cache": "5.1.1", "@types/ws": "7.4.7", @@ -162,7 +162,7 @@ "@types/express": "4.17.13", "@types/is-ci": "3.0.0", "@types/jest": "29.5.6", - "@types/node": "18.13.0", + "@types/node": "20.11.4", "@types/node-fetch": "2.5.12", "@types/pg": "7.14.11", "@types/pg-copy-streams": "1.2.1", diff --git a/src/api/controllers/cache-controller.ts b/src/api/controllers/cache-controller.ts index 470aeceba7..d660e5e60a 100644 --- a/src/api/controllers/cache-controller.ts +++ b/src/api/controllers/cache-controller.ts @@ -252,7 +252,7 @@ async function calculateETag( switch (etagType) { case ETagType.chainTip: try { - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); if (chainTip.block_height === 0) { // This should never happen unless the API is serving requests before it has synced any // blocks. 
diff --git a/src/api/controllers/db-controller.ts b/src/api/controllers/db-controller.ts index fe9399b09d..b2132286f7 100644 --- a/src/api/controllers/db-controller.ts +++ b/src/api/controllers/db-controller.ts @@ -149,7 +149,9 @@ export function getTxTypeId(typeString: Transaction['tx_type']): DbTxTypeId[] { } } -function getTxStatusString(txStatus: DbTxStatus): TransactionStatus | MempoolTransactionStatus { +export function getTxStatusString( + txStatus: DbTxStatus +): TransactionStatus | MempoolTransactionStatus { switch (txStatus) { case DbTxStatus.Pending: return 'pending'; diff --git a/src/api/init.ts b/src/api/init.ts index 2376235500..6c7915a095 100644 --- a/src/api/init.ts +++ b/src/api/init.ts @@ -53,6 +53,7 @@ import { createV2BlocksRouter } from './routes/v2/blocks'; import { getReqQuery } from './query-helpers'; import { createV2BurnBlocksRouter } from './routes/v2/burn-blocks'; import { createMempoolRouter } from './routes/v2/mempool'; +import { createV2SmartContractsRouter } from './routes/v2/smart-contracts'; export interface ApiServer { expressApp: express.Express; @@ -109,17 +110,16 @@ export async function startApiServer(opts: { // Get the url pathname without a query string or fragment // (note base url doesn't matter, but required by URL constructor) try { - let pathTemplate = new URL(path, 'http://x').pathname; + const pathTemplate = new URL(path, 'http://x').pathname; // Match request url to the Express route, e.g.: // `/extended/v1/address/ST26DR4VGV507V1RZ1JNM7NN4K3DTGX810S62SBBR/stx` to // `/extended/v1/address/:stx_address/stx` for (const pathRegex of routes) { if (pathRegex.regexp.test(pathTemplate)) { - pathTemplate = pathRegex.path; - break; + return pathRegex.path; } } - return pathTemplate; + return ''; } catch (error) { logger.warn(`Warning: ${error}`); return path; @@ -234,6 +234,7 @@ export async function startApiServer(opts: { const v2 = express.Router(); v2.use('/blocks', createV2BlocksRouter(datastore)); v2.use('/burn-blocks', createV2BurnBlocksRouter(datastore)); + v2.use('/smart-contracts', createV2SmartContractsRouter(datastore)); v2.use('/mempool', createMempoolRouter(datastore)); return v2; })() diff --git a/src/api/routes/rosetta/construction.ts b/src/api/routes/rosetta/construction.ts index 011f2ee13d..6af0285a5c 100644 --- a/src/api/routes/rosetta/construction.ts +++ b/src/api/routes/rosetta/construction.ts @@ -37,6 +37,7 @@ import { MessageSignature, noneCV, OptionalCV, + principalCV, someCV, StacksTransaction, standardPrincipalCV, @@ -310,6 +311,8 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): const request: RosettaConstructionMetadataRequest = req.body; const options: RosettaOptions = request.options; + let dummyTransaction: StacksTransaction; + if (options?.sender_address && !isValidC32Address(options.sender_address)) { res.status(400).json(RosettaErrors[RosettaErrorsTypes.invalidSender]); return; @@ -337,6 +340,20 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): res.status(400).json(RosettaErrors[RosettaErrorsTypes.invalidRecipient]); return; } + + // dummy transaction to calculate fee + const dummyTokenTransferTx: UnsignedTokenTransferOptions = { + recipient: recipientAddress, + amount: 1n, // placeholder + publicKey: '000000000000000000000000000000000000000000000000000000000000000000', // placeholder + network: getStacksNetwork(), + nonce: 0, // placeholder + memo: '123456', // placeholder + anchorMode: AnchorMode.Any, + }; + // Do not set fee so that the fee is 
calculated + dummyTransaction = await makeUnsignedSTXTokenTransfer(dummyTokenTransferTx); + break; case RosettaOperationType.StackStx: { // Getting PoX info @@ -353,6 +370,27 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): options.contract_address = contractAddress; options.contract_name = contractName; options.burn_block_height = burnBlockHeight; + + // dummy transaction to calculate fee + const dummyStackingTx: UnsignedContractCallOptions = { + publicKey: '000000000000000000000000000000000000000000000000000000000000000000', + contractAddress: contractAddress, + contractName: contractName, + functionName: 'stack-stx', + functionArgs: [ + uintCV(0), + poxAddressToTuple('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4'), // placeholder + uintCV(0), + uintCV(1), + ], + validateWithAbi: false, + network: getStacksNetwork(), + nonce: 0, + anchorMode: AnchorMode.Any, + }; + // Do not set fee so that the fee is calculated + dummyTransaction = await makeUnsignedContractCall(dummyStackingTx); + break; } case RosettaOperationType.DelegateStx: { @@ -361,6 +399,27 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): const [contractAddress, contractName] = contract.split('.'); options.contract_address = contractAddress; options.contract_name = contractName; + + // dummy transaction to calculate fee + const dummyDelegateStxTx: UnsignedContractCallOptions = { + publicKey: '000000000000000000000000000000000000000000000000000000000000000000', + contractAddress: 'ST000000000000000000002AMW42H', + contractName: 'pox', + functionName: 'delegate-stx', + functionArgs: [ + uintCV(1), // placeholder + principalCV('SP3FGQ8Z7JY9BWYZ5WM53E0M9NK7WHJF0691NZ159.some-contract-name-v1-2-3-4'), // placeholder, + someCV(uintCV(1)), // placeholder + someCV(poxAddressToTuple('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4')), // placeholder + ], + validateWithAbi: false, + network: getStacksNetwork(), + nonce: 0, + anchorMode: AnchorMode.Any, + }; + // Do not set fee so that the fee is calculated + dummyTransaction = await makeUnsignedContractCall(dummyDelegateStxTx); + break; } default: @@ -392,8 +451,8 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): }; // Getting fee info if not operation fee was given in /preprocess - const feeInfo = await new StacksCoreRpcClient().getEstimatedTransferFee(); - if (feeInfo === undefined || feeInfo === '0') { + const feeValue = dummyTransaction.auth.spendingCondition.fee.toString(); + if (feeValue === undefined || feeValue === '0') { res.status(400).json(RosettaErrors[RosettaErrorsTypes.invalidFee]); return; } @@ -402,7 +461,7 @@ export function createRosettaConstructionRouter(db: PgStore, chainId: ChainID): res.status(400).json(RosettaErrorsTypes.missingTransactionSize); return; } - const feeValue = Math.round(Number(feeInfo) * Number(options.size) * 1.5).toString(); + const currency: RosettaCurrency = { symbol: RosettaConstants.symbol, decimals: RosettaConstants.decimals, diff --git a/src/api/routes/status.ts b/src/api/routes/status.ts index a56dd3e788..2e3e588146 100644 --- a/src/api/routes/status.ts +++ b/src/api/routes/status.ts @@ -19,7 +19,7 @@ export function createStatusRouter(db: PgStore): express.Router { response.pox_v2_unlock_height = poxForceUnlockHeights.result.pox2UnlockHeight as number; response.pox_v3_unlock_height = poxForceUnlockHeights.result.pox3UnlockHeight as number; } - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); if 
(chainTip.block_height > 0) { response.chain_tip = { block_height: chainTip.block_height, diff --git a/src/api/routes/v2/helpers.ts b/src/api/routes/v2/helpers.ts index 41d0ce2a2e..23ff0d18d3 100644 --- a/src/api/routes/v2/helpers.ts +++ b/src/api/routes/v2/helpers.ts @@ -1,6 +1,8 @@ -import { BurnBlock, NakamotoBlock } from 'docs/generated'; -import { DbBlock, DbBurnBlock } from '../../../datastore/common'; +import { BurnBlock, NakamotoBlock, SmartContractsStatusResponse } from 'docs/generated'; +import { DbBlock, DbBurnBlock, DbSmartContractStatus } from '../../../datastore/common'; import { unixEpochToIso } from '../../../helpers'; +import { SmartContractStatusParams } from './schemas'; +import { getTxStatusString } from '../../../api/controllers/db-controller'; export function parseDbNakamotoBlock(block: DbBlock): NakamotoBlock { const apiBlock: NakamotoBlock = { @@ -35,3 +37,27 @@ export function parseDbBurnBlock(block: DbBurnBlock): BurnBlock { }; return burnBlock; } + +export function parseDbSmartContractStatusArray( + params: SmartContractStatusParams, + status: DbSmartContractStatus[] +): SmartContractsStatusResponse { + const ids = new Set( + Array.isArray(params.contract_id) ? params.contract_id : [params.contract_id] + ); + const response: SmartContractsStatusResponse = {}; + for (const s of status) { + ids.delete(s.smart_contract_contract_id); + response[s.smart_contract_contract_id] = { + found: true, + result: { + contract_id: s.smart_contract_contract_id, + block_height: s.block_height, + status: getTxStatusString(s.status), + tx_id: s.tx_id, + }, + }; + } + for (const missingId of ids) response[missingId] = { found: false }; + return response; +} diff --git a/src/api/routes/v2/schemas.ts b/src/api/routes/v2/schemas.ts index 5be6a3b7e5..689c9bf7e7 100644 --- a/src/api/routes/v2/schemas.ts +++ b/src/api/routes/v2/schemas.ts @@ -125,3 +125,15 @@ const BlockParamsSchema = Type.Object( ); export type BlockParams = Static; export const CompiledBlockParams = ajv.compile(BlockParamsSchema); + +const SmartContractPrincipal = Type.RegExp( + /^[0123456789ABCDEFGHJKMNPQRSTVWXYZ]{28,41}\.[a-zA-Z]([a-zA-Z0-9]|[-_]){0,39}$/ +); +const SmartContractStatusParamsSchema = Type.Object( + { + contract_id: Type.Union([Type.Array(SmartContractPrincipal), SmartContractPrincipal]), + }, + { additionalProperties: false } +); +export type SmartContractStatusParams = Static; +export const CompiledSmartContractStatusParams = ajv.compile(SmartContractStatusParamsSchema); diff --git a/src/api/routes/v2/smart-contracts.ts b/src/api/routes/v2/smart-contracts.ts new file mode 100644 index 0000000000..81a57e5285 --- /dev/null +++ b/src/api/routes/v2/smart-contracts.ts @@ -0,0 +1,30 @@ +import * as express from 'express'; +import { PgStore } from '../../../datastore/pg-store'; +import { getETagCacheHandler, setETagCacheHeaders } from '../../controllers/cache-controller'; +import { asyncHandler } from '../../async-handler'; +import { + validRequestQuery, + CompiledSmartContractStatusParams, + SmartContractStatusParams, +} from './schemas'; +import { parseDbSmartContractStatusArray } from './helpers'; + +export function createV2SmartContractsRouter(db: PgStore): express.Router { + const router = express.Router(); + const cacheHandler = getETagCacheHandler(db); + + router.get( + '/status', + cacheHandler, + asyncHandler(async (req, res) => { + if (!validRequestQuery(req, res, CompiledSmartContractStatusParams)) return; + const query = req.query as SmartContractStatusParams; + + const result = await 
db.v2.getSmartContractStatus(query); + setETagCacheHeaders(res); + res.json(parseDbSmartContractStatusArray(query, result)); + }) + ); + + return router; +} diff --git a/src/datastore/common.ts b/src/datastore/common.ts index 452e6005fa..7e6bb5abad 100644 --- a/src/datastore/common.ts +++ b/src/datastore/common.ts @@ -1555,3 +1555,10 @@ export enum IndexesState { Off = 0, On = 1, } + +export interface DbSmartContractStatus { + smart_contract_contract_id: string; + tx_id: string; + status: DbTxStatus; + block_height?: number; +} diff --git a/src/datastore/helpers.ts b/src/datastore/helpers.ts index a5aabbaa1e..fba14fdaa8 100644 --- a/src/datastore/helpers.ts +++ b/src/datastore/helpers.ts @@ -1,4 +1,4 @@ -import { parseEnum, unwrapOptionalProp } from '../helpers'; +import { getUintEnvOrDefault, parseEnum, unwrapOptionalProp } from '../helpers'; import { BlockQueryResult, ContractTxQueryResult, @@ -66,6 +66,7 @@ import { PgStoreEventEmitter } from './pg-store-event-emitter'; import { SyntheticPoxEventName } from '../pox-helpers'; import { logger } from '../logger'; import { PgSqlClient } from '@hirosystems/api-toolkit'; +import PQueue from 'p-queue'; export const TX_COLUMNS = [ 'tx_id', @@ -1335,3 +1336,21 @@ export function newReOrgUpdatedEntities(): ReOrgUpdatedEntities { restoredMempoolTxs: 0, }; } + +/** + * Priority queue for parallel Postgres write query execution. This helps performance because it + * parallelizes the work postgres.js has to do when serializing JS types to PG types. + */ +export class PgWriteQueue { + readonly queue: PQueue; + constructor() { + const concurrency = Math.max(1, getUintEnvOrDefault('STACKS_BLOCK_DATA_INSERT_CONCURRENCY', 4)); + this.queue = new PQueue({ concurrency, autoStart: true }); + } + enqueue(task: Parameters[0]): void { + void this.queue.add(task); + } + done(): Promise { + return this.queue.onIdle(); + } +} diff --git a/src/datastore/pg-store-v2.ts b/src/datastore/pg-store-v2.ts index 53b63f4aca..1e7554594b 100644 --- a/src/datastore/pg-store-v2.ts +++ b/src/datastore/pg-store-v2.ts @@ -6,6 +6,7 @@ import { TransactionLimitParamSchema, BlockParams, BlockPaginationQueryParams, + SmartContractStatusParams, } from '../api/routes/v2/schemas'; import { InvalidRequestError, InvalidRequestErrorType } from '../errors'; import { normalizeHashString } from '../helpers'; @@ -16,6 +17,9 @@ import { DbTx, TxQueryResult, DbBurnBlock, + DbTxTypeId, + DbSmartContractStatus, + DbTxStatus, } from './common'; import { BLOCK_COLUMNS, parseBlockQueryResult, TX_COLUMNS, parseTxQueryResult } from './helpers'; @@ -230,4 +234,39 @@ export class PgStoreV2 extends BasePgStoreModule { if (blockQuery.count > 0) return blockQuery[0]; }); } + + async getSmartContractStatus(args: SmartContractStatusParams): Promise { + return await this.sqlTransaction(async sql => { + const statusArray: DbSmartContractStatus[] = []; + const contractArray = Array.isArray(args.contract_id) ? args.contract_id : [args.contract_id]; + + // Search confirmed txs. 
+ const confirmed = await sql` + SELECT DISTINCT ON (smart_contract_contract_id) smart_contract_contract_id, tx_id, block_height, status + FROM txs + WHERE type_id IN ${sql([DbTxTypeId.SmartContract, DbTxTypeId.VersionedSmartContract])} + AND smart_contract_contract_id IN ${sql(contractArray)} + AND canonical = TRUE + AND microblock_canonical = TRUE + ORDER BY smart_contract_contract_id, block_height DESC, microblock_sequence DESC, tx_index DESC, status + `; + statusArray.push(...confirmed); + if (confirmed.count < contractArray.length) { + // Search mempool txs. + const confirmedIds = confirmed.map(c => c.smart_contract_contract_id); + const remainingIds = contractArray.filter(c => !confirmedIds.includes(c)); + const mempool = await sql` + SELECT DISTINCT ON (smart_contract_contract_id) smart_contract_contract_id, tx_id, status + FROM mempool_txs + WHERE pruned = FALSE + AND type_id IN ${sql([DbTxTypeId.SmartContract, DbTxTypeId.VersionedSmartContract])} + AND smart_contract_contract_id IN ${sql(remainingIds)} + ORDER BY smart_contract_contract_id, nonce + `; + statusArray.push(...mempool); + } + + return statusArray; + }); + } } diff --git a/src/datastore/pg-store.ts b/src/datastore/pg-store.ts index 887025bbad..6e2648840c 100644 --- a/src/datastore/pg-store.ts +++ b/src/datastore/pg-store.ts @@ -204,8 +204,8 @@ export class PgStore extends BasePgStore { }); } - async getChainTip(): Promise { - const tipResult = await this.sql`SELECT * FROM chain_tip`; + async getChainTip(sql: PgSqlClient): Promise { + const tipResult = await sql`SELECT * FROM chain_tip`; const tip = tipResult[0]; return { block_height: tip?.block_height ?? 0, @@ -607,7 +607,7 @@ export class PgStore extends BasePgStore { async getUnanchoredTxsInternal(sql: PgSqlClient): Promise<{ txs: DbTx[] }> { // Get transactions that have been streamed in microblocks but not yet accepted or rejected in an anchor block. - const { block_height } = await this.getChainTip(); + const { block_height } = await this.getChainTip(sql); const unanchoredBlockHeight = block_height + 1; const query = await sql` SELECT ${unsafeCols(sql, [...TX_COLUMNS, abiColumn()])} @@ -1402,7 +1402,7 @@ export class PgStore extends BasePgStore { sql: PgSqlClient, { includeUnanchored }: { includeUnanchored: boolean } ): Promise { - const chainTip = await this.getChainTip(); + const chainTip = await this.getChainTip(sql); if (includeUnanchored) { return chainTip.block_height + 1; } else { @@ -2142,7 +2142,7 @@ export class PgStore extends BasePgStore { async getStxBalanceAtBlock(stxAddress: string, blockHeight: number): Promise { return await this.sqlTransaction(async sql => { - const chainTip = await this.getChainTip(); + const chainTip = await this.getChainTip(sql); const blockHeightToQuery = blockHeight > chainTip.block_height ? 
chainTip.block_height : blockHeight; const blockQuery = await this.getBlockByHeightInternal(sql, blockHeightToQuery); diff --git a/src/datastore/pg-write-store.ts index 6694aea04b..ed4c463381 100644 --- a/src/datastore/pg-write-store.ts +++ b/src/datastore/pg-write-store.ts @@ -1,3 +1,4 @@ +import * as assert from 'assert'; import { getOrAdd, I32_MAX, getIbdBlockHeight, getUintEnvOrDefault } from '../helpers'; import { DbBlock, @@ -73,6 +74,7 @@ import { TX_METADATA_TABLES, validateZonefileHash, newReOrgUpdatedEntities, + PgWriteQueue, } from './helpers'; import { PgNotifier } from './pg-notifier'; import { MIGRATIONS_DIR, PgStore } from './pg-store'; @@ -181,18 +183,14 @@ export class PgWriteStore extends PgStore { let batchedTxData: DataStoreTxEventData[] = []; await this.sqlWriteTransaction(async sql => { - const chainTip = await this.getChainTip(); + const chainTip = await this.getChainTip(sql); await this.handleReorg(sql, data.block, chainTip.block_height); const isCanonical = data.block.block_height > chainTip.block_height; if (!isCanonical) { markBlockUpdateDataAsNonCanonical(data); } else { const txIds = data.txs.map(d => d.tx.tx_id); - const pruneRes = await this.pruneMempoolTxs(sql, txIds); - if (pruneRes.removedTxs.length > 0) - logger.debug( - `Removed ${pruneRes.removedTxs.length} txs from mempool table during new block ingestion` - ); + await this.pruneMempoolTxs(sql, txIds); } setTotalBlockUpdateDataExecutionCost(data); @@ -203,8 +201,8 @@ export class PgWriteStore extends PgStore { return !insertedMicroblockHashes.has(entry.tx.microblock_hash); }); - // When processing an immediately-non-canonical block, do not orphan and possible existing microblocks - // which may be still considered canonical by the canonical block at this height. + // When processing an immediately-non-canonical block, do not orphan any possible existing + // microblocks which may still be considered canonical by the canonical block at this height. if (isCanonical) { const { acceptedMicroblockTxs, orphanedMicroblockTxs } = await this.updateMicroCanonical( sql, @@ -220,7 +218,8 @@ } ); - // Identify any micro-orphaned txs that also didn't make it into this anchor block, and restore them into the mempool + // Identify any micro-orphaned txs that also didn't make it into this anchor block, and + // restore them into the mempool const orphanedAndMissingTxs = orphanedMicroblockTxs.filter( tx => !data.txs.find(r => tx.tx_id === r.tx.tx_id) ); @@ -232,7 +231,8 @@ logger.info(`Restored micro-orphaned tx to mempool ${txId}`); }); - // Clear accepted microblock txs from the anchor-block update data to avoid duplicate inserts. + // Clear accepted microblock txs from the anchor-block update data to avoid duplicate + // inserts.
batchedTxData = batchedTxData.filter(entry => { const matchingTx = acceptedMicroblockTxs.find(tx => tx.tx_id === entry.tx.tx_id); return !matchingTx; @@ -251,27 +251,35 @@ export class PgWriteStore extends PgStore { } } if ((await this.updateBlock(sql, data.block)) !== 0) { - await this.updateMinerRewards(sql, data.minerRewards); - for (const entry of batchedTxData) { - await this.updateTx(sql, entry.tx); - await this.updateStxEvents(sql, entry.tx, entry.stxEvents); - await this.updatePrincipalStxTxs(sql, entry.tx, entry.stxEvents); - await this.updateSmartContractEvents(sql, entry.tx, entry.contractLogEvents); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox2_events', entry.pox2Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox3_events', entry.pox3Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox4_events', entry.pox4Events); - await this.updateStxLockEvents(sql, entry.tx, entry.stxLockEvents); - await this.updateFtEvents(sql, entry.tx, entry.ftEvents); - await this.updateNftEvents(sql, entry.tx, entry.nftEvents); - await this.updateSmartContracts(sql, entry.tx, entry.smartContracts); - await this.updateNamespaces(sql, entry.tx, entry.namespaces); - await this.updateNames(sql, entry.tx, entry.names); - } - const mempoolGarbageResults = await this.deleteGarbageCollectedMempoolTxs(sql); - if (mempoolGarbageResults.deletedTxs.length > 0) { - logger.debug(`Garbage collected ${mempoolGarbageResults.deletedTxs.length} mempool txs`); + const q = new PgWriteQueue(); + q.enqueue(() => this.updateMinerRewards(sql, data.minerRewards)); + if (batchedTxData.length > 0) { + q.enqueue(() => + this.updateTx( + sql, + batchedTxData.map(b => b.tx) + ) + ); + q.enqueue(() => this.updateStxEvents(sql, batchedTxData)); + q.enqueue(() => this.updatePrincipalStxTxs(sql, batchedTxData)); + q.enqueue(() => this.updateSmartContractEvents(sql, batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox2_events', batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox3_events', batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox4_events', batchedTxData)); + q.enqueue(() => this.updateStxLockEvents(sql, batchedTxData)); + q.enqueue(() => this.updateFtEvents(sql, batchedTxData)); + for (const entry of batchedTxData) { + q.enqueue(() => this.updateNftEvents(sql, entry.tx, entry.nftEvents)); + q.enqueue(() => this.updateSmartContracts(sql, entry.tx, entry.smartContracts)); + q.enqueue(() => this.updateNamespaces(sql, entry.tx, entry.namespaces)); + q.enqueue(() => this.updateNames(sql, entry.tx, entry.names)); + } } - garbageCollectedMempoolTxs = mempoolGarbageResults.deletedTxs; + q.enqueue(async () => { + const mempoolGarbageResults = await this.deleteGarbageCollectedMempoolTxs(sql); + garbageCollectedMempoolTxs = mempoolGarbageResults.deletedTxs; + }); + await q.done(); } if (!this.isEventReplay) { @@ -547,7 +555,7 @@ export class PgWriteStore extends PgStore { // Sanity check: ensure incoming microblocks have a `parent_index_block_hash` that matches the // API's current known canonical chain tip. We assume this holds true so incoming microblock // data is always treated as being built off the current canonical anchor block. 
- const chainTip = await this.getChainTip(); + const chainTip = await this.getChainTip(sql); const nonCanonicalMicroblock = data.microblocks.find( mb => mb.parent_index_block_hash !== chainTip.index_block_hash ); @@ -742,14 +750,37 @@ export class PgWriteStore extends PgStore { logger.info('Updated block zero boot data', tablesUpdates); } - async updatePoxSyntheticEvents( - sql: PgSqlClient, - tx: DbTx, - poxTable: PoxSyntheticEventTable, - events: DbPoxSyntheticEvent[] - ) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values = batch.map(event => { + async updatePoxSyntheticEvents< + T extends PoxSyntheticEventTable, + Entry extends { tx: DbTx } & ('pox2_events' extends T + ? { pox2Events: DbPoxSyntheticEvent[] } + : 'pox3_events' extends T + ? { pox3Events: DbPoxSyntheticEvent[] } + : 'pox4_events' extends T + ? { pox4Events: DbPoxSyntheticEvent[] } + : never) + >(sql: PgSqlClient, poxTable: T, entries: Entry[]) { + const values: PoxSyntheticEventInsertValues[] = []; + for (const entry of entries) { + let events: DbPoxSyntheticEvent[] | null = null; + switch (poxTable) { + case 'pox2_events': + assert('pox2Events' in entry); + events = entry.pox2Events; + break; + case 'pox3_events': + assert('pox3Events' in entry); + events = entry.pox3Events; + break; + case 'pox4_events': + assert('pox4Events' in entry); + events = entry.pox4Events; + break; + default: + throw new Error(`unknown pox table: ${poxTable}`); + } + const tx = entry.tx; + for (const event of events ?? []) { const value: PoxSyntheticEventInsertValues = { event_index: event.event_index, tx_id: event.tx_id, @@ -782,6 +813,7 @@ export class PgWriteStore extends PgStore { reward_cycle: null, amount_ustx: null, }; + // Set event-specific columns switch (event.name) { case SyntheticPoxEventName.HandleUnlock: { @@ -858,63 +890,78 @@ export class PgWriteStore extends PgStore { ); } } - return value; - }); - await sql` - INSERT INTO ${sql(poxTable)} ${sql(values)} + values.push(value); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO ${sql(String(poxTable))} ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } - async updateStxLockEvents(sql: PgSqlClient, tx: DbTx, events: DbStxLockEvent[]) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: StxLockEventInsertValues[] = batch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - locked_amount: event.locked_amount.toString(), - unlock_height: event.unlock_height, - locked_address: event.locked_address, - contract_name: event.contract_name, - })); - await sql` - INSERT INTO stx_lock_events ${sql(values)} + async updateStxLockEvents( + sql: PgSqlClient, + entries: { tx: DbTx; stxLockEvents: DbStxLockEvent[] }[] + ) { + const values: StxLockEventInsertValues[] = []; + for (const { tx, stxLockEvents } of entries) { + for (const event of stxLockEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: 
tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + locked_amount: event.locked_amount.toString(), + unlock_height: event.unlock_height, + locked_address: event.locked_address, + contract_name: event.contract_name, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO stx_lock_events ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } - async updateStxEvents(sql: PgSqlClient, tx: DbTx, events: DbStxEvent[]) { - for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: StxEventInsertValues[] = eventBatch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - amount: event.amount, - memo: event.memo ?? null, - })); + async updateStxEvents(sql: PgSqlClient, entries: { tx: DbTx; stxEvents: DbStxEvent[] }[]) { + const values: StxEventInsertValues[] = []; + for (const { tx, stxEvents } of entries) { + for (const event of stxEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + asset_event_type_id: event.asset_event_type_id, + sender: event.sender ?? null, + recipient: event.recipient ?? null, + amount: event.amount, + memo: event.memo ?? null, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { const res = await sql` - INSERT INTO stx_events ${sql(values)} + INSERT INTO stx_events ${sql(batch)} `; - if (res.count !== eventBatch.length) { - throw new Error(`Expected ${eventBatch.length} inserts, got ${res.count}`); - } + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -922,45 +969,43 @@ export class PgWriteStore extends PgStore { * Update the `principal_stx_tx` table with the latest `tx_id`s that resulted in a STX * transfer relevant to a principal (stx address or contract id). 
   * @param sql - DB client
-   * @param tx - Transaction
-   * @param events - Transaction STX events
+   * @param entries - list of tx and stxEvents
    */
-  async updatePrincipalStxTxs(sql: PgSqlClient, tx: DbTx, events: DbStxEvent[]) {
-    const insertPrincipalStxTxs = async (principals: string[]) => {
-      principals = [...new Set(principals)]; // Remove duplicates
-      const values: PrincipalStxTxsInsertValues[] = principals.map(principal => ({
-        principal: principal,
-        tx_id: tx.tx_id,
-        block_height: tx.block_height,
-        index_block_hash: tx.index_block_hash,
-        microblock_hash: tx.microblock_hash,
-        microblock_sequence: tx.microblock_sequence,
-        tx_index: tx.tx_index,
-        canonical: tx.canonical,
-        microblock_canonical: tx.microblock_canonical,
-      }));
+  async updatePrincipalStxTxs(sql: PgSqlClient, entries: { tx: DbTx; stxEvents: DbStxEvent[] }[]) {
+    const values: PrincipalStxTxsInsertValues[] = [];
+    for (const { tx, stxEvents } of entries) {
+      const principals = new Set(
+        [
+          tx.sender_address,
+          tx.token_transfer_recipient_address,
+          tx.contract_call_contract_id,
+          tx.smart_contract_contract_id,
+        ].filter((p): p is string => !!p)
+      );
+      for (const event of stxEvents) {
+        if (event.sender) principals.add(event.sender);
+        if (event.recipient) principals.add(event.recipient);
+      }
+      for (const principal of principals) {
+        values.push({
+          principal: principal,
+          tx_id: tx.tx_id,
+          block_height: tx.block_height,
+          index_block_hash: tx.index_block_hash,
+          microblock_hash: tx.microblock_hash,
+          microblock_sequence: tx.microblock_sequence,
+          tx_index: tx.tx_index,
+          canonical: tx.canonical,
+          microblock_canonical: tx.microblock_canonical,
+        });
+      }
+    }
+
+    for (const eventBatch of batchIterate(values, INSERT_BATCH_SIZE)) {
       await sql`
-        INSERT INTO principal_stx_txs ${sql(values)}
+        INSERT INTO principal_stx_txs ${sql(eventBatch)}
         ON CONFLICT ON CONSTRAINT unique_principal_tx_id_index_block_hash_microblock_hash DO NOTHING
       `;
-    };
-    // Insert tx data
-    await insertPrincipalStxTxs(
-      [
-        tx.sender_address,
-        tx.token_transfer_recipient_address,
-        tx.contract_call_contract_id,
-        tx.smart_contract_contract_id,
-      ].filter((p): p is string => !!p) // Remove undefined
-    );
-    // Insert stx_event data
-    for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) {
-      const principals: string[] = [];
-      for (const event of eventBatch) {
-        if (event.sender) principals.push(event.sender);
-        if (event.recipient) principals.push(event.recipient);
-      }
-      await insertPrincipalStxTxs(principals);
     }
   }

@@ -999,9 +1044,10 @@ export class PgWriteStore extends PgStore {
       ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash
       DO UPDATE SET zonefile = EXCLUDED.zonefile
     `;
-    if (result.count !== zonefileValues.length) {
-      throw new Error(`Expected ${result.count} zonefile inserts, got ${zonefileValues.length}`);
-    }
+    assert(
+      result.count === zonefileValues.length,
+      `Expecting ${zonefileValues.length} zonefile inserts, got ${result.count}`
+    );
   }

   async updateBatchSubdomains(
@@ -1057,9 +1103,10 @@ export class PgWriteStore extends PgStore {
         microblock_sequence = EXCLUDED.microblock_sequence,
         microblock_canonical = EXCLUDED.microblock_canonical
     `;
-    if (result.count !== subdomainValues.length) {
-      throw new Error(`Expected ${subdomainValues.length} subdomain inserts, got ${result.count}`);
-    }
+    assert(
+      result.count === subdomainValues.length,
+      `Expecting ${subdomainValues.length} subdomain inserts, got ${result.count}`
+    );
   }

   async resolveBnsSubdomains(
@@ -1079,51 +1126,34 @@ export class PgWriteStore
extends PgStore { }); } - async updateStxEvent(sql: PgSqlClient, tx: DbTx, event: DbStxEvent) { - const values: StxEventInsertValues = { - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - amount: event.amount, - memo: event.memo ?? null, - }; - await sql` - INSERT INTO stx_events ${sql(values)} - `; - } - - async updateFtEvents(sql: PgSqlClient, tx: DbTx, events: DbFtEvent[]) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: FtEventInsertValues[] = batch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - asset_identifier: event.asset_identifier, - amount: event.amount.toString(), - })); - await sql` - INSERT INTO ft_events ${sql(values)} + async updateFtEvents(sql: PgSqlClient, entries: { tx: DbTx; ftEvents: DbFtEvent[] }[]) { + const values: FtEventInsertValues[] = []; + for (const { tx, ftEvents } of entries) { + for (const event of ftEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + asset_event_type_id: event.asset_event_type_id, + sender: event.sender ?? null, + recipient: event.recipient ?? 
null, + asset_identifier: event.asset_identifier, + amount: event.amount.toString(), + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO ft_events ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -1222,29 +1252,35 @@ export class PgWriteStore extends PgStore { } } - async updateSmartContractEvents(sql: PgSqlClient, tx: DbTx, events: DbSmartContractEvent[]) { - for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: SmartContractEventInsertValues[] = eventBatch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - contract_identifier: event.contract_identifier, - topic: event.topic, - value: event.value, - })); + async updateSmartContractEvents( + sql: PgSqlClient, + entries: { tx: DbTx; contractLogEvents: DbSmartContractEvent[] }[] + ) { + const values: SmartContractEventInsertValues[] = []; + for (const { tx, contractLogEvents } of entries) { + for (const event of contractLogEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + contract_identifier: event.contract_identifier, + topic: event.topic, + value: event.value, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { const res = await sql` - INSERT INTO contract_logs ${sql(values)} + INSERT INTO contract_logs ${sql(batch)} `; - if (res.count !== eventBatch.length) { - throw new Error(`Expected ${eventBatch.length} inserts, got ${res.count}`); - } + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -1371,8 +1407,10 @@ export class PgWriteStore extends PgStore { acceptedMicroblocks: string[]; orphanedMicroblocks: string[]; }> { - // Find the parent microblock if this anchor block points to one. If not, perform a sanity check for expected block headers in this case: - // > Anchored blocks that do not have parent microblock streams will have their parent microblock header hashes set to all 0's, and the parent microblock sequence number set to 0. + // Find the parent microblock if this anchor block points to one. If not, perform a sanity check + // for expected block headers in this case: Anchored blocks that do not have parent microblock + // streams will have their parent microblock header hashes set to all 0's, and the parent + // microblock sequence number set to 0. 
    let acceptedMicroblockTip: DbMicroblock | undefined;
     if (BigInt(blockData.parentMicroblockHash) === 0n) {
       if (blockData.parentMicroblockSequence !== 0) {
@@ -1530,8 +1568,9 @@ export class PgWriteStore extends PgStore {
     }
   }

-  async updateTx(sql: PgSqlClient, tx: DbTxRaw): Promise<number> {
-    const values: TxInsertValues = {
+  async updateTx(sql: PgSqlClient, txs: DbTxRaw | DbTxRaw[]): Promise<number> {
+    if (!Array.isArray(txs)) txs = [txs];
+    const values: TxInsertValues[] = txs.map(tx => ({
       tx_id: tx.tx_id,
       raw_tx: tx.raw_tx,
       tx_index: tx.tx_index,
@@ -1587,12 +1626,17 @@ export class PgWriteStore extends PgStore {
       execution_cost_runtime: tx.execution_cost_runtime,
       execution_cost_write_count: tx.execution_cost_write_count,
       execution_cost_write_length: tx.execution_cost_write_length,
-    };
-    const result = await sql`
-      INSERT INTO txs ${sql(values)}
-      ON CONFLICT ON CONSTRAINT unique_tx_id_index_block_hash_microblock_hash DO NOTHING
-    `;
-    return result.count;
+    }));
+
+    let count = 0;
+    for (const eventBatch of batchIterate(values, INSERT_BATCH_SIZE)) {
+      const res = await sql`
+        INSERT INTO txs ${sql(eventBatch)}
+        ON CONFLICT ON CONSTRAINT unique_tx_id_index_block_hash_microblock_hash DO NOTHING
+      `;
+      count += res.count;
+    }
+    return count;
   }

   async insertDbMempoolTxs(
@@ -1753,7 +1797,7 @@ export class PgWriteStore extends PgStore {
   async updateMempoolTxs({ mempoolTxs: txs }: { mempoolTxs: DbMempoolTxRaw[] }): Promise<string[]> {
     const updatedTxIds: string[] = [];
     await this.sqlWriteTransaction(async sql => {
-      const chainTip = await this.getChainTip();
+      const chainTip = await this.getChainTip(sql);
       updatedTxIds.push(...(await this.insertDbMempoolTxs(txs, chainTip, sql)));
     });
     if (!this.isEventReplay) {
@@ -1978,9 +2022,10 @@ export class PgWriteStore extends PgStore {
       const res = await sql`
         INSERT INTO token_offering_locked ${sql(lockedInfos, 'address', 'value', 'block')}
       `;
-      if (res.count !== lockedInfos.length) {
-        throw new Error(`Expected ${lockedInfos.length} inserts, got ${res.count}`);
-      }
+      assert(
+        res.count === lockedInfos.length,
+        `Expecting ${lockedInfos.length} inserts, got ${res.count}`
+      );
     } catch (e: any) {
       logger.error(e, `Locked Info errors ${e.message}`);
       throw e;
@@ -2107,26 +2152,33 @@ export class PgWriteStore extends PgStore {
       }
     }

-    for (const entry of txs) {
-      const rowsUpdated = await this.updateTx(sql, entry.tx);
-      if (rowsUpdated !== 1) {
-        throw new Error(
-          `Unexpected amount of rows updated for microblock tx insert: ${rowsUpdated}`
+    if (txs.length > 0) {
+      const q = new PgWriteQueue();
+      q.enqueue(async () => {
+        const rowsUpdated = await this.updateTx(
+          sql,
+          txs.map(t => t.tx)
         );
+        if (rowsUpdated !== txs.length)
+          throw new Error(
+            `Unexpected amount of rows updated for microblock tx insert: ${rowsUpdated}, expecting ${txs.length}`
+          );
+      });
+      q.enqueue(() => this.updateStxEvents(sql, txs));
+      q.enqueue(() => this.updatePrincipalStxTxs(sql, txs));
+      q.enqueue(() => this.updateSmartContractEvents(sql, txs));
+      q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox2_events', txs));
+      q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox3_events', txs));
+      q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox4_events', txs));
+      q.enqueue(() => this.updateStxLockEvents(sql, txs));
+      q.enqueue(() => this.updateFtEvents(sql, txs));
+      for (const entry of txs) {
+        q.enqueue(() => this.updateNftEvents(sql, entry.tx, entry.nftEvents, true));
+        q.enqueue(() => this.updateSmartContracts(sql, entry.tx, entry.smartContracts));
+        q.enqueue(() => this.updateNamespaces(sql,
entry.tx, entry.namespaces)); + q.enqueue(() => this.updateNames(sql, entry.tx, entry.names)); } - - await this.updateStxEvents(sql, entry.tx, entry.stxEvents); - await this.updatePrincipalStxTxs(sql, entry.tx, entry.stxEvents); - await this.updateSmartContractEvents(sql, entry.tx, entry.contractLogEvents); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox2_events', entry.pox2Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox3_events', entry.pox3Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox4_events', entry.pox4Events); - await this.updateStxLockEvents(sql, entry.tx, entry.stxLockEvents); - await this.updateFtEvents(sql, entry.tx, entry.ftEvents); - await this.updateNftEvents(sql, entry.tx, entry.nftEvents, true); - await this.updateSmartContracts(sql, entry.tx, entry.smartContracts); - await this.updateNamespaces(sql, entry.tx, entry.namespaces); - await this.updateNames(sql, entry.tx, entry.names); + await q.done(); } } @@ -2454,6 +2506,8 @@ export class PgWriteStore extends PgStore { ) SELECT tx_id FROM pruned `; + const txIds = deletedTxResults.map(r => r.tx_id); + if (txIds.length > 0) logger.debug(`Garbage collected ${txIds.length} mempool txs`); return { deletedTxs: deletedTxResults.map(r => r.tx_id) }; } @@ -2463,181 +2517,199 @@ export class PgWriteStore extends PgStore { canonical: boolean, updatedEntities: ReOrgUpdatedEntities ): Promise<{ txsMarkedCanonical: string[]; txsMarkedNonCanonical: string[] }> { - const txResult = await sql` - UPDATE txs - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - RETURNING ${sql(TX_COLUMNS)} - `; - const txIds = txResult.map(row => parseTxQueryResult(row)); - if (canonical) { - updatedEntities.markedCanonical.txs += txResult.length; - } else { - updatedEntities.markedNonCanonical.txs += txResult.length; - } - for (const txId of txIds) { - logger.debug(`Marked tx as ${canonical ? 
'canonical' : 'non-canonical'}: ${txId.tx_id}`); - } - if (txIds.length) { - await sql` - UPDATE principal_stx_txs + const result: { txsMarkedCanonical: string[]; txsMarkedNonCanonical: string[] } = { + txsMarkedCanonical: [], + txsMarkedNonCanonical: [], + }; + + const q = new PgWriteQueue(); + q.enqueue(async () => { + const txResult = await sql<{ tx_id: string }[]>` + UPDATE txs SET canonical = ${canonical} - WHERE tx_id IN ${sql(txIds.map(tx => tx.tx_id))} - AND index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + RETURNING tx_id `; - } - - const minerRewardResults = await sql` - UPDATE miner_rewards - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.minerRewards += minerRewardResults.count; - } else { - updatedEntities.markedNonCanonical.minerRewards += minerRewardResults.count; - } - - const stxLockResults = await sql` - UPDATE stx_lock_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.stxLockEvents += stxLockResults.count; - } else { - updatedEntities.markedNonCanonical.stxLockEvents += stxLockResults.count; - } - - const stxResults = await sql` - UPDATE stx_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.stxEvents += stxResults.count; - } else { - updatedEntities.markedNonCanonical.stxEvents += stxResults.count; - } - - const ftResult = await sql` - UPDATE ft_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.ftEvents += ftResult.count; - } else { - updatedEntities.markedNonCanonical.ftEvents += ftResult.count; - } - - const nftResult = await sql` - UPDATE nft_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.nftEvents += nftResult.count; - } else { - updatedEntities.markedNonCanonical.nftEvents += nftResult.count; - } - await this.updateNftCustodyFromReOrg(sql, { - index_block_hash: indexBlockHash, - microblocks: [], + const txIds = txResult.map(row => row.tx_id); + if (canonical) { + updatedEntities.markedCanonical.txs += txResult.count; + result.txsMarkedCanonical = txIds; + } else { + updatedEntities.markedNonCanonical.txs += txResult.count; + result.txsMarkedNonCanonical = txIds; + } + if (txResult.count) + await sql` + UPDATE principal_stx_txs + SET canonical = ${canonical} + WHERE tx_id IN ${sql(txIds)} + AND index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; }); + q.enqueue(async () => { + const minerRewardResults = await sql` + UPDATE miner_rewards + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.minerRewards += minerRewardResults.count; + } else { + updatedEntities.markedNonCanonical.minerRewards += minerRewardResults.count; + } + }); + q.enqueue(async () => { + const stxLockResults = await sql` + UPDATE stx_lock_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + 
updatedEntities.markedCanonical.stxLockEvents += stxLockResults.count; + } else { + updatedEntities.markedNonCanonical.stxLockEvents += stxLockResults.count; + } + }); + q.enqueue(async () => { + const stxResults = await sql` + UPDATE stx_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.stxEvents += stxResults.count; + } else { + updatedEntities.markedNonCanonical.stxEvents += stxResults.count; + } + }); + q.enqueue(async () => { + const ftResult = await sql` + UPDATE ft_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.ftEvents += ftResult.count; + } else { + updatedEntities.markedNonCanonical.ftEvents += ftResult.count; + } + }); + q.enqueue(async () => { + const nftResult = await sql` + UPDATE nft_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.nftEvents += nftResult.count; + } else { + updatedEntities.markedNonCanonical.nftEvents += nftResult.count; + } + if (nftResult.count) + await this.updateNftCustodyFromReOrg(sql, { + index_block_hash: indexBlockHash, + microblocks: [], + }); + }); + q.enqueue(async () => { + const pox2Result = await sql` + UPDATE pox2_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.pox2Events += pox2Result.count; + } else { + updatedEntities.markedNonCanonical.pox2Events += pox2Result.count; + } + }); + q.enqueue(async () => { + const pox3Result = await sql` + UPDATE pox3_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.pox3Events += pox3Result.count; + } else { + updatedEntities.markedNonCanonical.pox3Events += pox3Result.count; + } + }); + q.enqueue(async () => { + const pox4Result = await sql` + UPDATE pox4_events + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.pox4Events += pox4Result.count; + } else { + updatedEntities.markedNonCanonical.pox4Events += pox4Result.count; + } + }); + q.enqueue(async () => { + const contractLogResult = await sql` + UPDATE contract_logs + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.contractLogs += contractLogResult.count; + } else { + updatedEntities.markedNonCanonical.contractLogs += contractLogResult.count; + } + }); + q.enqueue(async () => { + const smartContractResult = await sql` + UPDATE smart_contracts + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.smartContracts += smartContractResult.count; + } else { + updatedEntities.markedNonCanonical.smartContracts += smartContractResult.count; + } + }); + q.enqueue(async () => { + const nameResult = await sql` + UPDATE names + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.names += nameResult.count; + } else { + 
updatedEntities.markedNonCanonical.names += nameResult.count; + } + }); + q.enqueue(async () => { + const namespaceResult = await sql` + UPDATE namespaces + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.namespaces += namespaceResult.count; + } else { + updatedEntities.markedNonCanonical.namespaces += namespaceResult.count; + } + }); + q.enqueue(async () => { + const subdomainResult = await sql` + UPDATE subdomains + SET canonical = ${canonical} + WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} + `; + if (canonical) { + updatedEntities.markedCanonical.subdomains += subdomainResult.count; + } else { + updatedEntities.markedNonCanonical.subdomains += subdomainResult.count; + } + }); + await q.done(); - const pox2Result = await sql` - UPDATE pox2_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.pox2Events += pox2Result.count; - } else { - updatedEntities.markedNonCanonical.pox2Events += pox2Result.count; - } - - const pox3Result = await sql` - UPDATE pox3_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.pox3Events += pox3Result.count; - } else { - updatedEntities.markedNonCanonical.pox3Events += pox3Result.count; - } - - const pox4Result = await sql` - UPDATE pox4_events - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.pox4Events += pox4Result.count; - } else { - updatedEntities.markedNonCanonical.pox4Events += pox4Result.count; - } - - const contractLogResult = await sql` - UPDATE contract_logs - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.contractLogs += contractLogResult.count; - } else { - updatedEntities.markedNonCanonical.contractLogs += contractLogResult.count; - } - - const smartContractResult = await sql` - UPDATE smart_contracts - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.smartContracts += smartContractResult.count; - } else { - updatedEntities.markedNonCanonical.smartContracts += smartContractResult.count; - } - - const nameResult = await sql` - UPDATE names - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.names += nameResult.count; - } else { - updatedEntities.markedNonCanonical.names += nameResult.count; - } - - const namespaceResult = await sql` - UPDATE namespaces - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.namespaces += namespaceResult.count; - } else { - updatedEntities.markedNonCanonical.namespaces += namespaceResult.count; - } - - const subdomainResult = await sql` - UPDATE subdomains - SET canonical = ${canonical} - WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical} - `; - if (canonical) { - updatedEntities.markedCanonical.subdomains += subdomainResult.count; - } else { - 
updatedEntities.markedNonCanonical.subdomains += subdomainResult.count;
-    }
-
-    return {
-      txsMarkedCanonical: canonical ? txIds.map(t => t.tx_id) : [],
-      txsMarkedNonCanonical: canonical ? [] : txIds.map(t => t.tx_id),
-    };
+    return result;
   }

   async restoreOrphanedChain(
diff --git a/src/event-replay/parquet-based/importers/new-block-importer.ts b/src/event-replay/parquet-based/importers/new-block-importer.ts
index ff7e76a005..a0b3ba4f82 100644
--- a/src/event-replay/parquet-based/importers/new-block-importer.ts
+++ b/src/event-replay/parquet-based/importers/new-block-importer.ts
@@ -376,7 +376,7 @@ const populateBatchInserters = (db: PgWriteStore) => {

   const insertStxLockEvents = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updateStxLockEvents(db.sql, entry.tx, entry.stxLockEvents);
+      await db.updateStxLockEvents(db.sql, [entry]);
     }
   };

@@ -386,19 +386,19 @@ const populateBatchInserters = (db: PgWriteStore) => {

   const insertPox2Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox2_events', entry.pox2Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox2_events', [entry]);
     }
   };

   const insertPox3Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox3_events', entry.pox3Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox3_events', [entry]);
     }
   };

   const insertPox4Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox4_events', entry.pox4Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox4_events', [entry]);
     }
   };

diff --git a/src/event-stream/event-server.ts b/src/event-stream/event-server.ts
index 56b6b648d9..2fb0309e50 100644
--- a/src/event-stream/event-server.ts
+++ b/src/event-stream/event-server.ts
@@ -46,6 +46,7 @@ import {
   CoreNodeMsgBlockData,
   parseMicroblocksFromTxs,
   isPoxPrintEvent,
+  newCoreNoreBlockEventCounts,
 } from './reader';
 import {
   decodeTransaction,
@@ -230,6 +231,7 @@ async function handleBlockMessage(
   db: PgWriteStore
 ): Promise<void> {
   const ingestionTimer = stopwatch();
+  const counts = newCoreNoreBlockEventCounts();
   const parsedTxs: CoreNodeParsedTxMessage[] = [];
   const blockData: CoreNodeMsgBlockData = {
     ...msg,
@@ -238,8 +240,42 @@
     const parsedTx = parseMessageTransaction(chainId, item, blockData, msg.events);
     if (parsedTx) {
       parsedTxs.push(parsedTx);
+      counts.tx_total += 1;
+      switch (parsedTx.parsed_tx.payload.type_id) {
+        case TxPayloadTypeID.Coinbase:
+          counts.txs.coinbase += 1;
+          break;
+        case TxPayloadTypeID.CoinbaseToAltRecipient:
+          counts.txs.coinbase_to_alt_recipient += 1;
+          break;
+        case TxPayloadTypeID.ContractCall:
+          counts.txs.contract_call += 1;
+          break;
+        case TxPayloadTypeID.NakamotoCoinbase:
+          counts.txs.nakamoto_coinbase += 1;
+          break;
+        case TxPayloadTypeID.PoisonMicroblock:
+          counts.txs.poison_microblock += 1;
+          break;
+        case TxPayloadTypeID.SmartContract:
+          counts.txs.smart_contract += 1;
+          break;
+        case TxPayloadTypeID.TenureChange:
+          counts.txs.tenure_change += 1;
+          break;
+        case TxPayloadTypeID.TokenTransfer:
+          counts.txs.token_transfer += 1;
+          break;
+        case TxPayloadTypeID.VersionedSmartContract:
+          counts.txs.versioned_smart_contract += 1;
+          break;
+      }
     }
   });
+  for (const event of msg.events) {
+    counts.event_total += 1;
+    counts.events[event.type] += 1;
+  }
   const dbBlock: DbBlock
= { canonical: true, @@ -281,6 +317,7 @@ async function handleBlockMessage( tx_fees_streamed_produced: BigInt(minerReward.tx_fees_streamed_produced), }; dbMinerRewards.push(dbMinerReward); + counts.miner_rewards += 1; } logger.debug(`Received ${dbMinerRewards.length} matured miner rewards`); @@ -304,18 +341,10 @@ async function handleBlockMessage( index_block_hash: msg.index_block_hash, block_hash: msg.block_hash, }; + counts.microblocks += 1; return microblock; }); - parsedTxs.forEach(tx => { - logger.debug(`Received anchor block mined tx: ${tx.core_tx.txid}`); - logger.info('Transaction confirmed', { - txid: tx.core_tx.txid, - in_microblock: tx.microblock_hash != '', - stacks_height: dbBlock.block_height, - }); - }); - const dbData: DataStoreBlockUpdateData = { block: dbBlock, microblocks: dbMicroblocks, @@ -328,7 +357,10 @@ async function handleBlockMessage( await db.update(dbData); const ingestionTime = ingestionTimer.getElapsed(); - logger.info(`Ingested block ${msg.block_height} (${msg.block_hash}) in ${ingestionTime}ms`); + logger.info( + counts, + `Ingested block ${msg.block_height} (${msg.block_hash}) in ${ingestionTime}ms` + ); } function parseDataStoreTxEventData( @@ -856,7 +888,7 @@ export async function startEventServer(opts: { if (ibdHeight) { app.use(IBD_PRUNABLE_ROUTES, async (req, res, next) => { try { - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); if (chainTip.block_height > ibdHeight) { next(); } else { diff --git a/src/event-stream/reader.ts b/src/event-stream/reader.ts index 1a2eb06374..7ebb922b3e 100644 --- a/src/event-stream/reader.ts +++ b/src/event-stream/reader.ts @@ -847,3 +847,67 @@ export function isPoxPrintEvent(event: SmartContractEvent): boolean { if (event.contract_event.topic !== 'print') return false; return PoxContractIdentifiers.includes(event.contract_event.contract_identifier); } + +interface CoreNodeBlockEventCounts { + microblocks: number; + tx_total: number; + txs: { + token_transfer: number; + smart_contract: number; + contract_call: number; + poison_microblock: number; + coinbase: number; + coinbase_to_alt_recipient: number; + versioned_smart_contract: number; + tenure_change: number; + nakamoto_coinbase: number; + }; + event_total: number; + events: { + contract_event: number; + stx_transfer_event: number; + stx_mint_event: number; + stx_burn_event: number; + stx_lock_event: number; + nft_transfer_event: number; + nft_mint_event: number; + nft_burn_event: number; + ft_transfer_event: number; + ft_mint_event: number; + ft_burn_event: number; + }; + miner_rewards: number; +} + +export function newCoreNoreBlockEventCounts(): CoreNodeBlockEventCounts { + return { + microblocks: 0, + tx_total: 0, + txs: { + token_transfer: 0, + smart_contract: 0, + contract_call: 0, + poison_microblock: 0, + coinbase: 0, + coinbase_to_alt_recipient: 0, + versioned_smart_contract: 0, + tenure_change: 0, + nakamoto_coinbase: 0, + }, + event_total: 0, + events: { + contract_event: 0, + stx_transfer_event: 0, + stx_mint_event: 0, + stx_burn_event: 0, + stx_lock_event: 0, + nft_transfer_event: 0, + nft_mint_event: 0, + nft_burn_event: 0, + ft_transfer_event: 0, + ft_mint_event: 0, + ft_burn_event: 0, + }, + miner_rewards: 0, + }; +} diff --git a/src/tests-event-replay/import-export-tests.ts b/src/tests-event-replay/import-export-tests.ts index ed2fe9d03f..cdb8f29a8f 100644 --- a/src/tests-event-replay/import-export-tests.ts +++ b/src/tests-event-replay/import-export-tests.ts @@ -28,7 +28,7 @@ describe('import/export tests', () 
=> { test('event import and export cycle', async () => { // Import from mocknet TSV await importEventsFromTsv('src/tests-event-replay/tsv/mocknet.tsv', 'archival', true, true); - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); expect(chainTip.block_height).toBe(28); expect(chainTip.index_block_hash).toBe( '0x76cd67a65c0dfd5ea450bb9efe30da89fa125bfc077c953802f718353283a533' @@ -50,7 +50,7 @@ describe('import/export tests', () => { // Re-import with exported TSV and check that chain tip matches. try { await importEventsFromTsv(`${tmpDir}/export.tsv`, 'archival', true, true); - const newChainTip = await db.getChainTip(); + const newChainTip = await db.getChainTip(db.sql); expect(newChainTip.block_height).toBe(28); expect(newChainTip.index_block_hash).toBe( '0x76cd67a65c0dfd5ea450bb9efe30da89fa125bfc077c953802f718353283a533' @@ -196,14 +196,14 @@ describe('IBD', () => { process.env.IBD_MODE_UNTIL_BLOCK = '1000'; // TSV has 1 microblock message. await expect(getIbdInterceptCountFromTsvEvents()).resolves.toBe(1); - await expect(db.getChainTip()).resolves.toHaveProperty('block_height', 28); + await expect(db.getChainTip(db.sql)).resolves.toHaveProperty('block_height', 28); }); test('IBD mode does NOT block certain API routes once the threshold number of blocks are ingested', async () => { process.env.IBD_MODE_UNTIL_BLOCK = '1'; // Microblock processed normally. await expect(getIbdInterceptCountFromTsvEvents()).resolves.toBe(0); - await expect(db.getChainTip()).resolves.toHaveProperty('block_height', 28); + await expect(db.getChainTip(db.sql)).resolves.toHaveProperty('block_height', 28); }); test('IBD mode covers prune mode', async () => { diff --git a/src/tests-event-replay/poison-microblock-tests.ts b/src/tests-event-replay/poison-microblock-tests.ts index 5bd511ae56..9c403c9c43 100644 --- a/src/tests-event-replay/poison-microblock-tests.ts +++ b/src/tests-event-replay/poison-microblock-tests.ts @@ -25,7 +25,7 @@ describe('poison microblock for height 80743', () => { true ); const poisonTxId = '0x58ffe62029f94f7101b959536ea4953b9bce0ec3f6e2a06254c511bdd5cfa9e7'; - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); // query the txs table and check the transaction type const searchResult = await db.searchHash({ hash: poisonTxId }); let entityData: any; diff --git a/src/tests-rosetta-construction/construction.ts b/src/tests-rosetta-construction/construction.ts index 274fbb8e11..833b6b0660 100644 --- a/src/tests-rosetta-construction/construction.ts +++ b/src/tests-rosetta-construction/construction.ts @@ -349,7 +349,7 @@ describe('Rosetta Construction', () => { expect(result.type).toBe('application/json'); expect(JSON.parse(result.text)).toHaveProperty('metadata'); expect(JSON.parse(result.text)).toHaveProperty('suggested_fee'); - expect(JSON.parse(result.text).suggested_fee[0].value).toBe('270'); + expect(parseInt(JSON.parse(result.text).suggested_fee[0].value)).toBeGreaterThan(100); expect(JSON.parse(result.text).metadata.memo).toBe('SAMPLE MEMO'); }); @@ -1584,7 +1584,7 @@ describe('Rosetta Construction', () => { expect(JSON.parse(result.text).metadata).toHaveProperty('contract_address'); expect(JSON.parse(result.text).metadata).toHaveProperty('contract_name'); expect(JSON.parse(result.text).metadata).toHaveProperty('burn_block_height'); - expect(JSON.parse(result.text).suggested_fee[0].value).toBe('390'); + expect(parseInt(JSON.parse(result.text).suggested_fee[0].value)).toBeGreaterThan(100); }); 
test('construction/metadata - delegate_stacking', async () => { @@ -1638,7 +1638,9 @@ describe('Rosetta Construction', () => { account_sequence: nonce, recent_block_hash: '0x969e494d5aee0166016836f97bbeb3d9473bea8427e477e9de253f78d3212354', }, - suggested_fee: [{ value: '390', currency: { symbol: 'STX', decimals: 6 } }], + suggested_fee: [ + { value: expect.stringMatching(/^\d+$/), currency: { symbol: 'STX', decimals: 6 } }, + ], }; expect(result.body).toHaveProperty('metadata'); @@ -1808,7 +1810,7 @@ describe('Rosetta Construction', () => { expect(JSON.parse(resultMetadata.text).metadata).toHaveProperty('contract_address'); expect(JSON.parse(resultMetadata.text).metadata).toHaveProperty('contract_name'); expect(JSON.parse(resultMetadata.text).metadata).toHaveProperty('burn_block_height'); - expect(JSON.parse(resultMetadata.text).suggested_fee[0].value).toBe('390'); + expect(parseInt(JSON.parse(resultMetadata.text).suggested_fee[0].value)).toBeGreaterThan(100); //payloads const contract_address = resultMetadata.body.metadata.contract_address; @@ -2255,7 +2257,9 @@ describe('Rosetta Construction', () => { account_sequence: nonce, recent_block_hash: '0x969e494d5aee0166016836f97bbeb3d9473bea8427e477e9de253f78d3212354', }, - suggested_fee: [{ value: '380', currency: { symbol: 'STX', decimals: 6 } }], + suggested_fee: [ + { value: expect.stringMatching(/^\d+$/), currency: { symbol: 'STX', decimals: 6 } }, + ], }; expect(resultMetadata.body).toHaveProperty('metadata'); expect(resultMetadata.body.suggested_fee).toStrictEqual(metadataResponse.suggested_fee); diff --git a/src/tests/cache-control-tests.ts b/src/tests/cache-control-tests.ts index 4b1f5e3450..b35b6e6fa4 100644 --- a/src/tests/cache-control-tests.ts +++ b/src/tests/cache-control-tests.ts @@ -321,7 +321,7 @@ describe('cache-control tests', () => { ], }); - const chainTip2 = await db.getChainTip(); + const chainTip2 = await db.getChainTip(db.sql); expect(chainTip2.block_hash).toBe(block1.block_hash); expect(chainTip2.block_height).toBe(block1.block_height); expect(chainTip2.index_block_hash).toBe(block1.index_block_hash); diff --git a/src/tests/datastore-tests.ts b/src/tests/datastore-tests.ts index f5466b7762..e5ac20a2fa 100644 --- a/src/tests/datastore-tests.ts +++ b/src/tests/datastore-tests.ts @@ -167,7 +167,7 @@ describe('postgres datastore', () => { createStxEvent('addrA', 'addrC', 35), ]; for (const event of events) { - await db.updateStxEvent(client, tx, event); + await db.updateStxEvents(client, [{ tx, stxEvents: [event] }]); } const createStxLockEvent = ( @@ -195,7 +195,7 @@ describe('postgres datastore', () => { createStxLockEvent('addrA', 222n, 1), createStxLockEvent('addrB', 333n, 1), ]; - await db.updateStxLockEvents(client, tx, stxLockEvents); + await db.updateStxLockEvents(client, [{ tx, stxLockEvents }]); await db.updateTx(client, tx); await db.updateTx(client, tx2); @@ -3546,7 +3546,7 @@ describe('postgres datastore', () => { } // insert stx lock events directly - await db.updateStxLockEvents(client, tx1, [stxLockEvent1]); + await db.updateStxLockEvents(client, [{ tx: tx1, stxLockEvents: [stxLockEvent1] }]); const block5: DbBlock = { block_hash: '0x55', @@ -4101,7 +4101,7 @@ describe('postgres datastore', () => { const blockQuery1 = await db.getBlock({ hash: block2b.block_hash }); expect(blockQuery1.result?.canonical).toBe(false); - const chainTip1 = await db.getChainTip(); + const chainTip1 = await db.getChainTip(db.sql); expect(chainTip1).toEqual({ block_hash: '0x33', block_height: 3, @@ -4169,7 +4169,7 @@ 
describe('postgres datastore', () => { const blockQuery2 = await db.getBlock({ hash: block3b.block_hash }); expect(blockQuery2.result?.canonical).toBe(false); // Chain tip doesn't change yet. - const chainTip2 = await db.getChainTip(); + const chainTip2 = await db.getChainTip(db.sql); expect(chainTip2).toEqual({ block_hash: '0x33', block_height: 3, @@ -4220,7 +4220,7 @@ describe('postgres datastore', () => { const blockQuery3 = await db.getBlock({ hash: block3b.block_hash }); expect(blockQuery3.result?.canonical).toBe(true); - const chainTip3 = await db.getChainTip(); + const chainTip3 = await db.getChainTip(db.sql); expect(chainTip3).toEqual({ block_count: 4, block_hash: '0x44bb', diff --git a/src/tests/mempool-tests.ts b/src/tests/mempool-tests.ts index 1436ce9aa5..91073598b0 100644 --- a/src/tests/mempool-tests.ts +++ b/src/tests/mempool-tests.ts @@ -1666,7 +1666,7 @@ describe('mempool tests', () => { // Simulate the bug with a txs being in the mempool at confirmed at the same time by // directly inserting the mempool-tx and mined-tx, bypassing the normal update functions. await db.updateBlock(db.sql, dbBlock1); - const chainTip = await db.getChainTip(); + const chainTip = await db.getChainTip(db.sql); await db.insertDbMempoolTxs([mempoolTx], chainTip, db.sql); await db.updateTx(db.sql, dbTx1); @@ -1828,7 +1828,7 @@ describe('mempool tests', () => { await db.updateMempoolTxs({ mempoolTxs: [mempoolTx] }); - let chainTip = await db.getChainTip(); + let chainTip = await db.getChainTip(db.sql); expect(chainTip.mempool_tx_count).toBe(1); // Verify tx shows up in mempool (non-pruned) @@ -1852,7 +1852,7 @@ describe('mempool tests', () => { expect(mempoolResult2.body.results).toHaveLength(0); const mempoolCount2 = await supertest(api.server).get(`/extended/v1/tx/mempool`); expect(mempoolCount2.body.total).toBe(0); - chainTip = await db.getChainTip(); + chainTip = await db.getChainTip(db.sql); expect(chainTip.mempool_tx_count).toBe(0); // Re-broadcast mempool tx @@ -1865,7 +1865,7 @@ describe('mempool tests', () => { expect(mempoolResult3.body.results[0].tx_id).toBe(txId); const mempoolCount3 = await supertest(api.server).get(`/extended/v1/tx/mempool`); expect(mempoolCount3.body.total).toBe(1); - chainTip = await db.getChainTip(); + chainTip = await db.getChainTip(db.sql); expect(chainTip.mempool_tx_count).toBe(1); // Mine tx in block to prune from mempool @@ -1898,7 +1898,7 @@ describe('mempool tests', () => { expect(mempoolResult4.body.results).toHaveLength(0); const mempoolCount4 = await supertest(api.server).get(`/extended/v1/tx/mempool`); expect(mempoolCount4.body.total).toBe(0); - chainTip = await db.getChainTip(); + chainTip = await db.getChainTip(db.sql); expect(chainTip.mempool_tx_count).toBe(0); // Verify tx is mined @@ -1931,7 +1931,7 @@ describe('mempool tests', () => { expect(mempoolResult5.body.results[0].tx_id).toBe(txId); const mempoolCount5 = await supertest(api.server).get(`/extended/v1/tx/mempool`); expect(mempoolCount5.body.total).toBe(1); - chainTip = await db.getChainTip(); + chainTip = await db.getChainTip(db.sql); expect(chainTip.mempool_tx_count).toBe(1); // Re-broadcast mempool tx diff --git a/src/tests/microblock-tests.ts b/src/tests/microblock-tests.ts index 2f6bfe176d..5d123687cd 100644 --- a/src/tests/microblock-tests.ts +++ b/src/tests/microblock-tests.ts @@ -386,7 +386,7 @@ describe('microblock tests', () => { ], }); - const chainTip1 = await db.getChainTip(); + const chainTip1 = await db.getChainTip(db.sql); expect(chainTip1.block_hash).toBe(block1.block_hash); 
expect(chainTip1.block_height).toBe(block1.block_height); expect(chainTip1.index_block_hash).toBe(block1.index_block_hash); @@ -549,7 +549,7 @@ describe('microblock tests', () => { ], }); - const chainTip2 = await db.getChainTip(); + const chainTip2 = await db.getChainTip(db.sql); expect(chainTip2.block_hash).toBe(block1.block_hash); expect(chainTip2.block_height).toBe(block1.block_height); expect(chainTip2.index_block_hash).toBe(block1.index_block_hash); diff --git a/src/tests/other-tests.ts b/src/tests/other-tests.ts index d64bf877de..19489d4d47 100644 --- a/src/tests/other-tests.ts +++ b/src/tests/other-tests.ts @@ -157,7 +157,7 @@ describe('other tests', () => { event_type: DbEventTypeId.StxAsset, amount: 10_000_000_000_000n, }; - await db.updateStxEvent(client, tx, stxBurnEvent1); + await db.updateStxEvents(client, [{ tx, stxEvents: [stxBurnEvent1] }]); const expectedTotalStx2 = stxMintEvent1.amount + stxMintEvent2.amount - stxBurnEvent1.amount; const result2 = await supertest(api.server).get(`/extended/v1/stx_supply`); expect(result2.status).toBe(200); diff --git a/src/tests/search-tests.ts b/src/tests/search-tests.ts index 5135630497..ada372a422 100644 --- a/src/tests/search-tests.ts +++ b/src/tests/search-tests.ts @@ -717,7 +717,7 @@ describe('search tests', () => { recipient: addr3, sender: 'none', }; - await db.updateStxEvent(client, stxTx1, stxEvent1); + await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent1] }]); // test address as a stx event recipient const searchResult3 = await supertest(api.server).get(`/extended/v1/search/${addr3}`); @@ -745,7 +745,7 @@ describe('search tests', () => { sender: addr4, }; - await db.updateStxEvent(client, stxTx1, stxEvent2); + await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent2] }]); // test address as a stx event sender const searchResult4 = await supertest(api.server).get(`/extended/v1/search/${addr4}`); @@ -773,7 +773,7 @@ describe('search tests', () => { recipient: addr5, sender: 'none', }; - await db.updateFtEvents(client, stxTx1, [ftEvent1]); + await db.updateFtEvents(client, [{ tx: stxTx1, ftEvents: [ftEvent1] }]); // test address as a ft event recipient const searchResult5 = await supertest(api.server).get(`/extended/v1/search/${addr5}`); @@ -801,7 +801,7 @@ describe('search tests', () => { recipient: 'none', sender: addr6, }; - await db.updateFtEvents(client, stxTx1, [ftEvent2]); + await db.updateFtEvents(client, [{ tx: stxTx1, ftEvents: [ftEvent2] }]); // test address as a ft event sender const searchResult6 = await supertest(api.server).get(`/extended/v1/search/${addr6}`); diff --git a/src/tests/smart-contract-tests.ts b/src/tests/smart-contract-tests.ts index d3b32161af..2cab93e7ec 100644 --- a/src/tests/smart-contract-tests.ts +++ b/src/tests/smart-contract-tests.ts @@ -13,6 +13,7 @@ import { I32_MAX } from '../helpers'; import { PgWriteStore } from '../datastore/pg-write-store'; import { bufferToHex, PgSqlClient, waiter } from '@hirosystems/api-toolkit'; import { migrate } from '../test-utils/test-helpers'; +import { TestBlockBuilder, testMempoolTx } from '../test-utils/test-builders'; describe('smart contract tests', () => { let db: PgWriteStore; @@ -1715,4 +1716,112 @@ describe('smart contract tests', () => { ); expect(query.status).toBe(431); }); + + test('status for multiple contracts', async () => { + const block1 = new TestBlockBuilder({ block_height: 1, index_block_hash: '0x01' }) + .addTx({ + tx_id: '0x1234', + type_id: DbTxTypeId.SmartContract, + smart_contract_contract_id: 
'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+        smart_contract_source_code: '(some-contract-src)',
+      })
+      .addTxSmartContract({
+        tx_id: '0x1234',
+        block_height: 1,
+        contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+        contract_source: '(some-contract-src)',
+      })
+      .build();
+    await db.update(block1);
+    const block2 = new TestBlockBuilder({
+      block_height: 2,
+      index_block_hash: '0x02',
+      parent_index_block_hash: '0x01',
+    })
+      .addTx({
+        tx_id: '0x1222',
+        type_id: DbTxTypeId.SmartContract,
+        smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+        smart_contract_source_code: '(some-contract-src)',
+      })
+      .addTxSmartContract({
+        tx_id: '0x1222',
+        block_height: 2,
+        contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+        contract_source: '(some-contract-src)',
+      })
+      .build();
+    await db.update(block2);
+
+    // Contracts are found
+    let query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1&contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2`
+    );
+    expect(query.status).toBe(200);
+    let json = JSON.parse(query.text);
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1': {
+        found: true,
+        result: {
+          block_height: 1,
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+          status: 'success',
+          tx_id: '0x1234',
+        },
+      },
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2': {
+        found: true,
+        result: {
+          block_height: 2,
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+          status: 'success',
+          tx_id: '0x1222',
+        },
+      },
+    });
+
+    // Assume two contract attempts in the mempool
+    const mempoolTx1 = testMempoolTx({
+      tx_id: '0x111111',
+      type_id: DbTxTypeId.SmartContract,
+      nonce: 5,
+      smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+    });
+    const mempoolTx2 = testMempoolTx({
+      tx_id: '0x111122',
+      type_id: DbTxTypeId.SmartContract,
+      nonce: 6,
+      smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+    });
+    await db.updateMempoolTxs({ mempoolTxs: [mempoolTx1, mempoolTx2] });
+    query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3`
+    );
+    expect(query.status).toBe(200);
+    json = JSON.parse(query.text);
+    // Only the first one is reported.
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3': {
+        found: true,
+        result: {
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+          status: 'pending',
+          tx_id: '0x111111',
+        },
+      },
+    });
+
+    // Check found = false
+    query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.abcde`
+    );
+    expect(query.status).toBe(200);
+    json = JSON.parse(query.text);
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.abcde': {
+        found: false,
+      },
+    });
+  });
 });
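
Reviewer note on helpers used above: `PgWriteQueue` (the `q.enqueue(...)` / `await q.done()` pattern in `update`, `updateMicroblocks`, and the re-org marking code) and `batchIterate` (used to chunk insert values into `INSERT_BATCH_SIZE` groups) are not defined anywhere in this patch. The sketch below only illustrates, under stated assumptions, the behavior those call sites rely on; the `Sketch` suffixes, the default concurrency of 4, and the generator shape are placeholders, not the project's actual implementations.

// A minimal sketch, assuming a bounded-concurrency queue: tasks start
// immediately while fewer than `concurrency` are in flight, otherwise they
// wait in a backlog; `done()` settles once every enqueued task has finished
// (rejecting on the first task error it encounters).
class PgWriteQueueSketch {
  private inFlight = 0;
  private readonly backlog: (() => void)[] = [];
  private readonly settled: Promise<void>[] = [];

  constructor(private readonly concurrency: number = 4) {}

  enqueue(task: () => Promise<void>): void {
    const start = () => {
      this.inFlight++;
      const p = task().finally(() => {
        this.inFlight--;
        // Promote the next waiting task into the freed slot, if any.
        this.backlog.shift()?.();
      });
      this.settled.push(p);
    };
    if (this.inFlight < this.concurrency) start();
    else this.backlog.push(start);
  }

  async done(): Promise<void> {
    // `settled` can grow while we await, so iterate by index until drained.
    for (let i = 0; i < this.settled.length; i++) await this.settled[i];
  }
}

// Likewise an illustrative sketch of what the call sites assume `batchIterate`
// does: yield fixed-size slices of `items` so each INSERT stays bounded.
function* batchIterateSketch<T>(items: T[], batchSize: number): Generator<T[]> {
  for (let i = 0; i < items.length; i += batchSize) {
    yield items.slice(i, i + batchSize);
  }
}

If the real helpers match this shape, the call sites read as in the patch: construct a queue, enqueue each per-table insert as a closure over the same `sql` transaction client, then `await q.done()` so the block update only proceeds once every parallel insert has completed.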