diff --git a/.github/workflows/manual-deploy-obscuro-gateway.yml b/.github/workflows/manual-deploy-obscuro-gateway.yml
index 39376b734e..20f059305e 100644
--- a/.github/workflows/manual-deploy-obscuro-gateway.yml
+++ b/.github/workflows/manual-deploy-obscuro-gateway.yml
@@ -137,5 +137,5 @@ jobs:
&& docker run -d -p 80:80 -p 81:81 --name ${{ github.event.inputs.testnet_type }}-OG-${{ GITHUB.RUN_NUMBER }} \
-e OBSCURO_GATEWAY_VERSION="${{ GITHUB.RUN_NUMBER }}-${{ GITHUB.SHA }}" \
${{ vars.DOCKER_BUILD_TAG_GATEWAY }} \
- ./wallet_extension_linux -host=0.0.0.0 -port=80 -portWS=81 -nodeHost=${{ vars.L2_RPC_URL_VALIDATOR }} \
+ -host=0.0.0.0 -port=8080 -portWS=81 -nodeHost=${{ vars.L2_RPC_URL_VALIDATOR }} \
-logPath=sys_out -dbType=mariaDB -dbConnectionURL="obscurouser:${{ secrets.OBSCURO_GATEWAY_MARIADB_USER_PWD }}@tcp(obscurogateway-mariadb-${{ github.event.inputs.testnet_type }}.uksouth.cloudapp.azure.com:3306)/ogdb"'
diff --git a/.github/workflows/manual-deploy-testnet-l2.yml b/.github/workflows/manual-deploy-testnet-l2.yml
index 14a09b9948..ecbb900159 100644
--- a/.github/workflows/manual-deploy-testnet-l2.yml
+++ b/.github/workflows/manual-deploy-testnet-l2.yml
@@ -112,6 +112,17 @@ jobs:
inlineScript: |
$(az resource list --tag ${{ vars.AZURE_DEPLOY_GROUP_L2 }}=true --query '[]."id"' -o tsv | xargs -n1 az resource delete --verbose -g Testnet --ids) || true
+ # Delete old database tables from previous deployment
+ - name: 'Delete host databases'
+ uses: azure/CLI@v1
+ with:
+ inlineScript: |
+ databases=$(az postgres flexible-server db list --resource-group Testnet --server-name postgres-ten-${{ github.event.inputs.testnet_type }} --query "[?starts_with(name, 'host_')].[name]" -o tsv)
+
+ for db in $databases; do
+ az postgres flexible-server db delete --database-name "$db" --resource-group Testnet --server-name postgres-ten-${{ github.event.inputs.testnet_type }} --yes
+ done
+
- name: 'Upload L1 deployer container logs'
uses: actions/upload-artifact@v3
with:
@@ -249,6 +260,7 @@ jobs:
-max_batch_interval=${{ vars.L2_MAX_BATCH_INTERVAL }} \
-rollup_interval=${{ vars.L2_ROLLUP_INTERVAL }} \
-l1_chain_id=${{ vars.L1_CHAIN_ID }} \
+ -postgres_db_host=postgres://tenuser:${{ secrets.TEN_POSTGRES_USER_PWD }}@postgres-ten-${{ github.event.inputs.testnet_type }}.postgres.database.azure.com:5432/ \
start'
check-obscuro-is-healthy:
diff --git a/README.md b/README.md
index 2608827dc9..7c07a3db16 100644
--- a/README.md
+++ b/README.md
@@ -209,7 +209,7 @@ root
│ │ ├── erc20contractlib: Understand ERC20 transactions.
│ │ └── mgmtcontractlib: Understand Ten Management contract transactions.
│ ├── host: The standalone host process.
-│ │ ├── db: The host's database.
+│ │ ├── db: The host's database.
│ │ ├── hostrunner: The entry point.
│ │ ├── main: Main
│ │ ├── node: The host implementation.
diff --git a/design/host/host_db_requirements.md b/design/host/host_db_requirements.md
new file mode 100644
index 0000000000..95a556f701
--- /dev/null
+++ b/design/host/host_db_requirements.md
@@ -0,0 +1,154 @@
+# Moving Host DB to SQL
+
+The current implementation uses the `ethdb.KeyValueStore`, which provides fast access but is not sufficient for the
+querying capabilities required by Tenscan. We want to move to an SQL implementation similar to what the Enclave uses.
+
+## Current Storage
+### Schema Keys
+```go
+var (
+ blockHeaderPrefix = []byte("b")
+ blockNumberHeaderPrefix = []byte("bnh")
+ batchHeaderPrefix = []byte("ba")
+ batchHashPrefix = []byte("bh")
+ batchNumberPrefix = []byte("bn")
+ batchPrefix = []byte("bp")
+ batchHashForSeqNoPrefix = []byte("bs")
+ batchTxHashesPrefix = []byte("bt")
+ headBatch = []byte("hb")
+ totalTransactionsKey = []byte("t")
+ rollupHeaderPrefix = []byte("rh")
+ rollupHeaderBlockPrefix = []byte("rhb")
+ tipRollupHash = []byte("tr")
+ blockHeadedAtTip = []byte("bht")
+)
+```
+Some of the schema keys are dummy keys for singleton entries that are updated in place, such as totals or tip data.
+The rest of the schema keys are used as prefixes, appended with the `[]byte` representation of the key.
+
+| Data Type | Description | Schema | Key | Value (Encoded) |
+|------------------|---------------------------------|--------|------------------------------|--------------------|
+| **Batch** | Batch hash to headers | ba | BatchHeader.Hash() | BatchHeader |
+| **Batch** | Batch hash to ExtBatch | bp | ExtBatch.Hash() | ExtBatch |
+| **Batch** | Batch hash to TX hashes | bt | ExtBatch.Hash() | ExtBatch.TxHashes |
+| **Batch** | Batch number to batch hash | bh | BatchHeader.Number | BatchHeader.Hash() |
+| **Batch** | Batch seq no to batch hash | bs | BatchHeader.SequencerOrderNo | BatchHeader.Hash() |
+| **Batch** | TX hash to batch number | bn | ExtBatch.TxHashes[i] | BatchHeader.Number |
+| **Batch** | Head Batch | hb | "hb" | ExtBatch.Hash() |
+| **Block** | L1 Block hash to block header | b | Header.Hash() | Header |
+| **Block** | L1 Block height to block header | bnh | Header.Number | Header |
+| **Block** | Latest Block | bht | "bht" | Header.Hash() |
+| **Rollup** | Rollup hash to header | rh | RollupHeader.Hash() | RollupHeader |
+| **Rollup** | L1 Block hash to rollup header | rhb | L1Block.Hash() | RollupHeader |
+| **Rollup** | Tip rollup header | tr | "tr" | RollupHeader |
+| **Transactions** | Total number of transactions | t | "t" | Int |
+
+## Tenscan Functionality Requirements
+
+### Mainnet Features
+#### Currently supported
+* Return the list of batches in descending order
+* View details within the batch (BatchHeader and ExtBatch)
+* Return the number of transactions within the batch
+* Return the list of transactions in descending order
+
+#### Not currently supported
+* Return a list of rollups in descending order
+* View details of the rollup (probably needs to be ExtBatch for the user)
+* Navigate to the L1 block on Etherscan from the rollup
+* Return the list of batches within the rollup
+* Navigate from the transaction to the batch it was included in
+* Navigate from the batch to the rollup it was included in
+* TODO: Cross-chain messaging - Arbiscan shows L1>L2 and L2>L1
+
+### Testnet-Only Features
+#### Currently supported
+* Copy the encrypted TX blob to a new page and decrypt there
+
+#### Not currently supported
+* From the batch you should be able to optionally decrypt the transactions within the batch
+* Navigate into the transaction details from the decrypted transaction
+* We want to be able to navigate up the chain from TX to batch to rollup (see the query sketch in the Transactions section below)
+
+## SQL Schema
+
+There are some considerations here around the behaviour of Tenscan on testnet vs mainnet. Because we can decrypt the
+encrypted blob on testnet, we can derive the number of transactions from it, but on mainnet this won't be possible,
+so we need to store the transaction count explicitly (see the Transactions section below).
+
+### Rollup
+```sql
+create table if not exists rollup_host
+(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ hash binary(16) NOT NULL UNIQUE,
+ start_seq int NOT NULL,
+ end_seq int NOT NULL,
+ time_stamp int NOT NULL,
+ ext_rollup blob NOT NULL,
+ compression_block binary(32) NOT NULL
+);
+
+create index IDX_ROLLUP_HASH_HOST on rollup_host (hash);
+create index IDX_ROLLUP_PROOF_HOST on rollup_host (compression_block);
+create index IDX_ROLLUP_SEQ_HOST on rollup_host (start_seq, end_seq);
+```
+
+Calculating the `L1BlockHeight` as is done in `calculateL1HeightsFromDeltas` would be quite computationally expensive,
+so we can simply order rollups by `end_seq`.
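+
+A paginated rollup listing then becomes a single query ordered by `end_seq`. A sketch against the schema above (the
+page size and offset values are illustrative):
+
+```sql
+-- Latest rollups first, using end_seq as the ordering instead of L1 height.
+SELECT id, hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block
+FROM rollup_host
+ORDER BY end_seq DESC
+LIMIT 10 OFFSET 0;
+```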
+
+### Batch
+We store the encoded `ExtBatch` so that we can provide rich data to the UI, including gas, receipts, cross-chain hash, etc.
+```sql
+create table if not exists batch_host
+(
+ sequence int primary key,
+ full_hash binary(32) NOT NULL,
+ hash binary(16) NOT NULL unique,
+ height int NOT NULL,
+ ext_batch mediumblob NOT NULL
+);
+
+create index IDX_BATCH_HEIGHT_HOST on batch_host (height);
+```
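+
+The batch listing for Tenscan is then a straightforward descending query over the indexed `height` column. A sketch
+(pagination values again illustrative):
+
+```sql
+-- Latest batches first; ext_batch is decoded host-side back into an ExtBatch.
+SELECT sequence, full_hash, hash, height, ext_batch
+FROM batch_host
+ORDER BY height DESC
+LIMIT 10 OFFSET 0;
+```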
+
+### Transactions
+
+We need to store these separately for efficient lookup of the batch by tx hash and vice versa.
+
+Because we can decrypt the encrypted blob on testnet, we can retrieve the number of transactions that way, but on
+mainnet this won't be possible, so we need to store the `tx_count` in this table. There is a plan to remove
+`ExtBatch.TxHashes` and expose a new Enclave API to retrieve this.
+
+```sql
+create table if not exists transactions_host
+(
+ hash binary(32) primary key,
+ b_sequence int REFERENCES batch_host
+);
+
+create table if not exists transaction_count
+(
+ id int NOT NULL primary key,
+ total int NOT NULL
+);
+
+```
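+
+With batches and rollups keyed by sequence number, the TX > batch > rollup navigation described above reduces to two
+joins. A sketch, assuming a batch belongs to the rollup whose `start_seq`/`end_seq` range contains its sequence number
+(the `?` placeholder style is illustrative):
+
+```sql
+-- Resolve the batch containing a transaction, then the rollup containing that batch.
+SELECT b.sequence, b.hash AS batch_hash, r.hash AS rollup_hash
+FROM transactions_host t
+JOIN batch_host b ON b.sequence = t.b_sequence
+JOIN rollup_host r ON b.sequence BETWEEN r.start_seq AND r.end_seq
+WHERE t.hash = ?;
+
+-- Per-batch transaction count, needed for the batch listing on mainnet.
+SELECT COUNT(*) FROM transactions_host WHERE b_sequence = ?;
+```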
+
+## Database Choice
+
+The obvious choice is MariaDB, as this is what the gateway uses, so we would have consistency across the stack. It
+would also make deployment simpler, as the scripts already exist. The main benefits of MariaDB:
+
+* Offers performance improvements through the Aria storage engine, which is not available in MySQL
+* Strong security focus, with RBAC and data-at-rest encryption
+* Supports a large number of concurrent connections
+
+Postgres would be the obvious alternative, but given that it is favoured for advanced data types, complex queries and
+geospatial capabilities, it doesn't offer any benefit over MariaDB for this use case.
+
+## Cross Chain Messages
+
+We want to display L2 > L1 and L1 > L2 transaction data. We will expose an API to retrieve these; the implementation
+will either subscribe to the events API or store the messages in the database. TBC
\ No newline at end of file
diff --git a/dockerfiles/host.Dockerfile b/dockerfiles/host.Dockerfile
index f0f68361dc..e50a99c9c7 100644
--- a/dockerfiles/host.Dockerfile
+++ b/dockerfiles/host.Dockerfile
@@ -36,7 +36,11 @@ FROM alpine:3.18
# Copy over just the binary from the previous build stage into this one.
COPY --from=build-host \
/home/obscuro/go-obscuro/go/host/main /home/obscuro/go-obscuro/go/host/main
-
+
+# Workaround to fix Postgres filepath issue
+COPY --from=build-host \
+ /home/obscuro/go-obscuro/go/host/storage/init/postgres /home/obscuro/go-obscuro/go/host/storage/init/postgres
+
WORKDIR /home/obscuro/go-obscuro/go/host/main
# expose the http and the ws ports to the host
diff --git a/go.mod b/go.mod
index 06516ba0ce..b0e7e6ca13 100644
--- a/go.mod
+++ b/go.mod
@@ -107,6 +107,7 @@ require (
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
+ github.com/lib/pq v1.10.9 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
diff --git a/go.sum b/go.sum
index f1251d5bde..99b58b8aae 100644
--- a/go.sum
+++ b/go.sum
@@ -302,6 +302,8 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
diff --git a/go/common/batches.go b/go/common/batches.go
index cccacdb283..914cf14872 100644
--- a/go/common/batches.go
+++ b/go/common/batches.go
@@ -12,7 +12,7 @@ import (
// todo (#718) - expand this structure to contain the required fields.
type ExtBatch struct {
Header *BatchHeader
- // todo - remove
+ // todo - remove and replace with enclave API
TxHashes []TxHash // The hashes of the transactions included in the batch.
EncryptedTxBlob EncryptedTransactions
hash atomic.Value
@@ -32,7 +32,7 @@ func (b *ExtBatch) Hash() L2BatchHash {
func (b *ExtBatch) Encoded() ([]byte, error) {
return rlp.EncodeToBytes(b)
}
-
+func (b *ExtBatch) SeqNo() *big.Int { return new(big.Int).Set(b.Header.SequencerOrderNo) }
func DecodeExtBatch(encoded []byte) (*ExtBatch, error) {
var batch ExtBatch
if err := rlp.DecodeBytes(encoded, &batch); err != nil {
diff --git a/go/common/host/host.go b/go/common/host/host.go
index 08799ab104..f016b1d95b 100644
--- a/go/common/host/host.go
+++ b/go/common/host/host.go
@@ -4,7 +4,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ten-protocol/go-ten/go/common"
"github.com/ten-protocol/go-ten/go/config"
- "github.com/ten-protocol/go-ten/go/host/db"
+ "github.com/ten-protocol/go-ten/go/host/storage"
"github.com/ten-protocol/go-ten/go/responses"
"github.com/ten-protocol/go-ten/lib/gethfork/rpc"
)
@@ -12,9 +12,8 @@ import (
// Host is the half of the Obscuro node that lives outside the enclave.
type Host interface {
Config() *config.HostConfig
- DB() *db.DB
EnclaveClient() common.Enclave
-
+ Storage() storage.Storage
// Start initializes the main loop of the host.
Start() error
// SubmitAndBroadcastTx submits an encrypted transaction to the enclave, and broadcasts it to the other hosts on the network.
diff --git a/go/common/query_types.go b/go/common/query_types.go
index 63f127beb5..5d16362d9e 100644
--- a/go/common/query_types.go
+++ b/go/common/query_types.go
@@ -25,11 +25,21 @@ type BatchListingResponse struct {
Total uint64
}
+type BatchListingResponseDeprecated struct {
+ BatchesData []PublicBatchDeprecated
+ Total uint64
+}
+
type BlockListingResponse struct {
BlocksData []PublicBlock
Total uint64
}
+type RollupListingResponse struct {
+ RollupsData []PublicRollup
+ Total uint64
+}
+
type PublicTransaction struct {
TransactionHash TxHash
BatchHeight *big.Int
@@ -38,10 +48,31 @@ type PublicTransaction struct {
}
type PublicBatch struct {
+ SequencerOrderNo *big.Int `json:"sequence"`
+ Hash []byte `json:"hash"`
+ FullHash common.Hash `json:"fullHash"`
+ Height *big.Int `json:"height"`
+ TxCount *big.Int `json:"txCount"`
+ Header *BatchHeader `json:"header"`
+ EncryptedTxBlob EncryptedTransactions `json:"encryptedTxBlob"`
+}
+
+// TODO (@will) remove when tenscan UI has been updated
+type PublicBatchDeprecated struct {
BatchHeader
TxHashes []TxHash `json:"txHashes"`
}
+type PublicRollup struct {
+ ID *big.Int
+ Hash []byte
+ FirstSeq *big.Int
+ LastSeq *big.Int
+ Timestamp uint64
+ Header *RollupHeader
+ L1Hash []byte
+}
+
type PublicBlock struct {
BlockHeader types.Header `json:"blockHeader"`
RollupHash common.Hash `json:"rollupHash"`
diff --git a/tools/walletextension/storage/database/migration.go b/go/common/storage/migration.go
similarity index 98%
rename from tools/walletextension/storage/database/migration.go
rename to go/common/storage/migration.go
index a89bb0a4b9..b0c3be5ad5 100644
--- a/tools/walletextension/storage/database/migration.go
+++ b/go/common/storage/migration.go
@@ -1,4 +1,4 @@
-package database
+package storage
import (
"database/sql"
diff --git a/go/config/host_config.go b/go/config/host_config.go
index 0367b3ef11..cdb569e2d3 100644
--- a/go/config/host_config.go
+++ b/go/config/host_config.go
@@ -75,8 +75,8 @@ type HostInputConfig struct {
// UseInMemoryDB sets whether the host should use in-memory or persistent storage
UseInMemoryDB bool
- // LevelDBPath path for the levelDB persistence dir (can be empty if a throwaway file in /tmp/ is acceptable, or if using InMemory DB)
- LevelDBPath string
+ // PostgresDBHost db url for connecting to Postgres host database
+ PostgresDBHost string
// DebugNamespaceEnabled enables the debug namespace handler in the host rpc server
DebugNamespaceEnabled bool
@@ -132,7 +132,7 @@ func (p HostInputConfig) ToHostConfig() *HostConfig {
MetricsEnabled: p.MetricsEnabled,
MetricsHTTPPort: p.MetricsHTTPPort,
UseInMemoryDB: p.UseInMemoryDB,
- LevelDBPath: p.LevelDBPath,
+ PostgresDBHost: p.PostgresDBHost,
DebugNamespaceEnabled: p.DebugNamespaceEnabled,
BatchInterval: p.BatchInterval,
MaxBatchInterval: p.MaxBatchInterval,
@@ -191,8 +191,11 @@ type HostConfig struct {
LogPath string
// Whether the host should use in-memory or persistent storage
UseInMemoryDB bool
- // filepath for the levelDB persistence dir (can be empty if a throwaway file in /tmp/ is acceptable, or if using InMemory DB)
- LevelDBPath string
+ // Host address for Postgres DB instance (can be empty if using InMemory DB or if attestation is disabled)
+ PostgresDBHost string
+ // filepath for the sqlite DB persistence file (can be empty if a throwaway file in /tmp/ is acceptable or
+ // if using InMemory DB)
+ SqliteDBPath string
//////
// NODE NETWORKING
diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql
index a56fa96cf7..96afc4906b 100644
--- a/go/enclave/storage/init/sqlite/001_init.sql
+++ b/go/enclave/storage/init/sqlite/001_init.sql
@@ -2,13 +2,13 @@ create table if not exists keyvalue
(
ky varbinary(64) primary key,
val mediumblob NOT NULL
-);
+ );
create table if not exists config
(
ky varchar(64) primary key,
val mediumblob NOT NULL
-);
+ );
insert into config
values ('CURRENT_SEQ', -1);
@@ -18,7 +18,7 @@ create table if not exists attestation_key
-- party binary(20) primary key, // todo -pk
party binary(20),
ky binary(33) NOT NULL
-);
+ );
create table if not exists block
(
@@ -27,9 +27,9 @@ create table if not exists block
is_canonical boolean NOT NULL,
header blob NOT NULL,
height int NOT NULL
--- the unique constraint is commented for now because there might be multiple non-canonical blocks for the same height
+ -- the unique constraint is commented for now because there might be multiple non-canonical blocks for the same height
-- unique (height, is_canonical)
-);
+ );
create index IDX_BLOCK_HEIGHT on block (height);
create table if not exists l1_msg
@@ -38,7 +38,7 @@ create table if not exists l1_msg
message varbinary(1024) NOT NULL,
block binary(16) NOT NULL REFERENCES block,
is_transfer boolean
-);
+ );
create table if not exists rollup
(
@@ -48,7 +48,7 @@ create table if not exists rollup
time_stamp int NOT NULL,
header blob NOT NULL,
compression_block binary(16) NOT NULL REFERENCES block
-);
+ );
create table if not exists batch_body
(
@@ -69,9 +69,9 @@ create table if not exists batch
body int NOT NULL REFERENCES batch_body,
l1_proof binary(16) NOT NULL, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch
is_executed boolean NOT NULL
--- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height
+ -- the unique constraint is commented for now because there might be multiple non-canonical batches for the same height
-- unique (height, is_canonical, is_executed)
-);
+ );
create index IDX_BATCH_HASH on batch (hash);
create index IDX_BATCH_HEIGHT on batch (height, is_canonical);
create index IDX_BATCH_Block on batch (l1_proof);
@@ -85,18 +85,18 @@ create table if not exists tx
nonce int NOT NULL,
idx int NOT NULL,
body int REFERENCES batch_body
-);
+ );
create table if not exists exec_tx
(
id binary(16) PRIMARY KEY, -- batch_hash||tx_hash
created_contract_address binary(20),
receipt mediumblob,
--- commenting out the fk until synthetic transactions are also stored
+ -- commenting out the fk until synthetic transactions are also stored
-- tx binary(16) REFERENCES tx,
tx binary(16) NOT NULL,
batch int NOT NULL REFERENCES batch
-);
+ );
create index IX_EX_TX1 on exec_tx (tx);
-- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it
@@ -116,7 +116,7 @@ create table if not exists events
rel_address3 binary(20),
rel_address4 binary(20),
exec_tx_id binary(16) REFERENCES exec_tx
-);
+ );
create index IDX_AD on events (address);
create index IDX_RAD1 on events (rel_address1);
create index IDX_RAD2 on events (rel_address2);
diff --git a/go/enclave/storage/init/sqlite/README.md b/go/enclave/storage/init/sqlite/README.md
deleted file mode 100644
index cb88357b3e..0000000000
--- a/go/enclave/storage/init/sqlite/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Sqlite is used for testing.
-
-We (need to) make sure that a production node can't be running on top of sqlite.
\ No newline at end of file
diff --git a/go/enclave/storage/init/sqlite/sqlite.go b/go/enclave/storage/init/sqlite/sqlite.go
index 94c2851b5b..f566de1766 100644
--- a/go/enclave/storage/init/sqlite/sqlite.go
+++ b/go/enclave/storage/init/sqlite/sqlite.go
@@ -21,7 +21,7 @@ import (
)
const (
- tempDirName = "obscuro-persistence"
+ tempDirName = "ten-persistence"
initFile = "001_init.sql"
)
diff --git a/go/host/container/cli.go b/go/host/container/cli.go
index 4a8efe67d8..51af5b8cee 100644
--- a/go/host/container/cli.go
+++ b/go/host/container/cli.go
@@ -45,7 +45,7 @@ type HostConfigToml struct {
MetricsEnabled bool
MetricsHTTPPort uint
UseInMemoryDB bool
- LevelDBPath string
+ PostgresDBHost string
DebugNamespaceEnabled bool
BatchInterval string
MaxBatchInterval string
@@ -87,7 +87,7 @@ func ParseConfig() (*config.HostInputConfig, error) {
metricsEnabled := flag.Bool(metricsEnabledName, cfg.MetricsEnabled, flagUsageMap[metricsEnabledName])
metricsHTPPPort := flag.Uint(metricsHTTPPortName, cfg.MetricsHTTPPort, flagUsageMap[metricsHTTPPortName])
useInMemoryDB := flag.Bool(useInMemoryDBName, cfg.UseInMemoryDB, flagUsageMap[useInMemoryDBName])
- levelDBPath := flag.String(levelDBPathName, cfg.LevelDBPath, flagUsageMap[levelDBPathName])
+ postgresDBHost := flag.String(postgresDBHostName, cfg.PostgresDBHost, flagUsageMap[postgresDBHostName])
debugNamespaceEnabled := flag.Bool(debugNamespaceEnabledName, cfg.DebugNamespaceEnabled, flagUsageMap[debugNamespaceEnabledName])
batchInterval := flag.String(batchIntervalName, cfg.BatchInterval.String(), flagUsageMap[batchIntervalName])
maxBatchInterval := flag.String(maxBatchIntervalName, cfg.MaxBatchInterval.String(), flagUsageMap[maxBatchIntervalName])
@@ -133,7 +133,7 @@ func ParseConfig() (*config.HostInputConfig, error) {
cfg.MetricsEnabled = *metricsEnabled
cfg.MetricsHTTPPort = *metricsHTPPPort
cfg.UseInMemoryDB = *useInMemoryDB
- cfg.LevelDBPath = *levelDBPath
+ cfg.PostgresDBHost = *postgresDBHost
cfg.DebugNamespaceEnabled = *debugNamespaceEnabled
cfg.BatchInterval, err = time.ParseDuration(*batchInterval)
if err != nil {
@@ -210,7 +210,7 @@ func fileBasedConfig(configPath string) (*config.HostInputConfig, error) {
MetricsEnabled: tomlConfig.MetricsEnabled,
MetricsHTTPPort: tomlConfig.MetricsHTTPPort,
UseInMemoryDB: tomlConfig.UseInMemoryDB,
- LevelDBPath: tomlConfig.LevelDBPath,
+ PostgresDBHost: tomlConfig.PostgresDBHost,
BatchInterval: batchInterval,
MaxBatchInterval: maxBatchInterval,
RollupInterval: rollupInterval,
diff --git a/go/host/container/cli_flags.go b/go/host/container/cli_flags.go
index db64a90f65..72112609d4 100644
--- a/go/host/container/cli_flags.go
+++ b/go/host/container/cli_flags.go
@@ -29,7 +29,7 @@ const (
metricsEnabledName = "metricsEnabled"
metricsHTTPPortName = "metricsHTTPPort"
useInMemoryDBName = "useInMemoryDB"
- levelDBPathName = "levelDBPath"
+ postgresDBHostName = "postgresDBHost"
debugNamespaceEnabledName = "debugNamespaceEnabled"
batchIntervalName = "batchInterval"
maxBatchIntervalName = "maxBatchInterval"
@@ -69,7 +69,7 @@ func getFlagUsageMap() map[string]string {
metricsEnabledName: "Whether the metrics are enabled (Defaults to true)",
metricsHTTPPortName: "The port on which the metrics are served (Defaults to 0.0.0.0:14000)",
useInMemoryDBName: "Whether the host will use an in-memory DB rather than persist data",
- levelDBPathName: "Filepath for the levelDB persistence dir (can be empty if a throwaway file in /tmp/ is acceptable or if using InMemory DB)",
+ postgresDBHostName: "The host for the Postgres DB instance",
debugNamespaceEnabledName: "Whether the debug namespace is enabled",
batchIntervalName: "Duration between each batch. Can be put down as 1.0s",
maxBatchIntervalName: "Max interval between each batch, if greater than batchInterval then some empty batches will be skipped. Can be put down as 1.0s",
diff --git a/go/host/container/host_container.go b/go/host/container/host_container.go
index 2dd8367fe7..c42ffbe6d4 100644
--- a/go/host/container/host_container.go
+++ b/go/host/container/host_container.go
@@ -30,7 +30,6 @@ const (
APIVersion1 = "1.0"
APINamespaceObscuro = "obscuro"
APINamespaceEth = "eth"
- APINamespaceTenScan = "tenscan"
APINamespaceScan = "scan"
APINamespaceNetwork = "net"
APINamespaceTest = "test"
@@ -180,8 +179,10 @@ func NewHostContainer(cfg *config.HostConfig, services *host.ServicesRegistry, p
Service: clientapi.NewEthereumAPI(h, logger),
},
{
- Namespace: APINamespaceTenScan,
- Service: clientapi.NewTenScanAPI(h),
+ Namespace: APINamespaceScan,
+ Version: APIVersion1,
+ Service: clientapi.NewScanAPI(h, logger),
+ Public: true,
},
{
Namespace: APINamespaceNetwork,
@@ -195,10 +196,6 @@ func NewHostContainer(cfg *config.HostConfig, services *host.ServicesRegistry, p
Namespace: APINamespaceEth,
Service: filterAPI,
},
- {
- Namespace: APINamespaceScan,
- Service: clientapi.NewScanAPI(h, logger),
- },
})
if cfg.DebugNamespaceEnabled {
diff --git a/go/host/db/batches.go b/go/host/db/batches.go
deleted file mode 100644
index 8df76cc6b0..0000000000
--- a/go/host/db/batches.go
+++ /dev/null
@@ -1,388 +0,0 @@
-package db
-
-import (
- "bytes"
- "fmt"
- "math/big"
-
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/pkg/errors"
-
- "github.com/ten-protocol/go-ten/go/common/errutil"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ten-protocol/go-ten/go/common"
-)
-
-// DB methods relating to batches.
-
-// GetHeadBatchHeader returns the header of the node's current head batch.
-func (db *DB) GetHeadBatchHeader() (*common.BatchHeader, error) {
- headBatchHash, err := db.readHeadBatchHash()
- if err != nil {
- return nil, err
- }
- return db.readBatchHeader(*headBatchHash)
-}
-
-// GetBatchHeader returns the batch header given the hash.
-func (db *DB) GetBatchHeader(hash gethcommon.Hash) (*common.BatchHeader, error) {
- return db.readBatchHeader(hash)
-}
-
-// AddBatch adds a batch and its header to the DB
-func (db *DB) AddBatch(batch *common.ExtBatch) error {
- // We check if the batch is already stored, to avoid incrementing the total transaction count twice for one batch.
- _, err := db.GetBatchHeader(batch.Hash())
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return fmt.Errorf("4. could not retrieve batch header. Cause: %w", err)
- }
- if err == nil {
- // The batch is already stored, so we return early.
- return errutil.ErrAlreadyExists
- }
-
- b := db.kvStore.NewBatch()
-
- if err := db.writeBatchHeader(b, batch.Header); err != nil {
- return fmt.Errorf("could not write batch header. Cause: %w", err)
- }
- if err := db.writeBatch(b, batch); err != nil {
- return fmt.Errorf("could not write batch. Cause: %w", err)
- }
- if err := db.writeBatchTxHashes(b, batch.Hash(), batch.TxHashes); err != nil {
- return fmt.Errorf("could not write batch transaction hashes. Cause: %w", err)
- }
- if err := db.writeBatchHash(b, batch.Header); err != nil {
- return fmt.Errorf("could not write batch hash. Cause: %w", err)
- }
- if err := db.writeBatchSeqNo(b, batch.Header); err != nil {
- return fmt.Errorf("could not write batch hash. Cause: %w", err)
- }
- for _, txHash := range batch.TxHashes {
- if err := db.writeBatchNumber(b, batch.Header, txHash); err != nil {
- return fmt.Errorf("could not write batch number. Cause: %w", err)
- }
- }
-
- // Update the total number of transactions. There's a potential race here, but absolute accuracy of the number of
- // transactions is not required.
- currentTotal, err := db.readTotalTransactions()
- if err != nil {
- return fmt.Errorf("could not retrieve total transactions. Cause: %w", err)
- }
- newTotal := big.NewInt(0).Add(currentTotal, big.NewInt(int64(len(batch.TxHashes))))
- err = db.writeTotalTransactions(b, newTotal)
- if err != nil {
- return fmt.Errorf("could not write total transactions. Cause: %w", err)
- }
-
- // Update the head if the new height is greater than the existing one.
- headBatchHeader, err := db.GetHeadBatchHeader()
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return fmt.Errorf("could not retrieve head batch header. Cause: %w", err)
- }
- if headBatchHeader == nil || headBatchHeader.Number.Cmp(batch.Header.Number) == -1 {
- err = db.writeHeadBatchHash(b, batch.Hash())
- if err != nil {
- return fmt.Errorf("could not write new head batch hash. Cause: %w", err)
- }
- }
-
- if err = b.Write(); err != nil {
- return fmt.Errorf("could not write batch to DB. Cause: %w", err)
- }
- return nil
-}
-
-// GetBatchHash returns the hash of a batch given its number.
-func (db *DB) GetBatchHash(number *big.Int) (*gethcommon.Hash, error) {
- return db.readBatchHash(number)
-}
-
-// GetBatchTxs returns the transaction hashes of the batch with the given hash.
-func (db *DB) GetBatchTxs(batchHash gethcommon.Hash) ([]gethcommon.Hash, error) {
- return db.readBatchTxHashes(batchHash)
-}
-
-// GetBatchNumber returns the number of the batch containing the given transaction hash.
-func (db *DB) GetBatchNumber(txHash gethcommon.Hash) (*big.Int, error) {
- return db.readBatchNumber(txHash)
-}
-
-// GetTotalTransactions returns the total number of batched transactions.
-func (db *DB) GetTotalTransactions() (*big.Int, error) {
- return db.readTotalTransactions()
-}
-
-// GetBatch returns the batch with the given hash.
-func (db *DB) GetBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error) {
- db.batchReads.Inc(1)
- return db.readBatch(batchHash)
-}
-
-// GetBatchBySequenceNumber returns the batch with the given sequence number.
-func (db *DB) GetBatchBySequenceNumber(sequenceNumber *big.Int) (*common.ExtBatch, error) {
- db.batchReads.Inc(1)
- batchHash, err := db.readBatchHashBySequenceNumber(sequenceNumber)
- if err != nil {
- return nil, fmt.Errorf("could not retrieve batch hash for seqNo=%d: %w", sequenceNumber, err)
- }
- return db.GetBatch(*batchHash)
-}
-
-// GetBatchListing returns latest batches given a pagination.
-// For example, page 0, size 10 will return the latest 10 batches.
-// todo change this when the db changes - this is not super performant
-func (db *DB) GetBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
- // fetch the total batches so we can paginate
- header, err := db.GetHeadBatchHeader()
- if err != nil {
- return nil, err
- }
-
- batchesFrom := header.SequencerOrderNo.Uint64() - pagination.Offset
- batchesToInclusive := int(batchesFrom) - int(pagination.Size) + 1
- // batchesToInclusive can't go below zero
- if batchesToInclusive < 0 {
- batchesToInclusive = 0
- }
-
- var batches []common.PublicBatch
- // fetch requested batches - looping backwards from the latest batch subtracting any pagination offset
- // (e.g. front-end showing latest batches first, page 3 of size 10 would be skipping the 30 most recent batches)
- for i := batchesFrom; i >= uint64(batchesToInclusive); i-- {
- extBatch, err := db.GetBatchBySequenceNumber(big.NewInt(int64(i)))
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return nil, err
- }
- if extBatch != nil {
- batches = append(batches, common.PublicBatch{BatchHeader: *extBatch.Header, TxHashes: extBatch.TxHashes})
- }
- }
-
- return &common.BatchListingResponse{
- BatchesData: batches,
- Total: header.Number.Uint64(),
- }, nil
-}
-
-// headerKey = batchHeaderPrefix + hash
-func batchHeaderKey(hash gethcommon.Hash) []byte {
- return append(batchHeaderPrefix, hash.Bytes()...)
-}
-
-// headerKey = batchPrefix + hash
-func batchKey(hash gethcommon.Hash) []byte {
- return append(batchPrefix, hash.Bytes()...)
-}
-
-// headerKey = batchHashPrefix + number
-func batchHashKey(num *big.Int) []byte {
- return append(batchHashPrefix, []byte(num.String())...)
-}
-
-// headerKey = batchTxHashesPrefix + batch hash
-func batchTxHashesKey(hash gethcommon.Hash) []byte {
- return append(batchTxHashesPrefix, hash.Bytes()...)
-}
-
-// headerKey = batchNumberPrefix + hash
-func batchNumberKey(txHash gethcommon.Hash) []byte {
- return append(batchNumberPrefix, txHash.Bytes()...)
-}
-
-// hashKey = batchHashForSeqNoPrefix + seqNo
-func batchHashBySeqNoKey(seqNo *big.Int) []byte {
- return append(batchHashForSeqNoPrefix, []byte(seqNo.String())...)
-}
-
-// Retrieves the batch header corresponding to the hash.
-func (db *DB) readBatchHeader(hash gethcommon.Hash) (*common.BatchHeader, error) {
- data, err := db.kvStore.Get(batchHeaderKey(hash))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- header := new(common.BatchHeader)
- if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
- return nil, err
- }
- return header, nil
-}
-
-// Retrieves the hash of the head batch.
-func (db *DB) readHeadBatchHash() (*gethcommon.Hash, error) {
- value, err := db.kvStore.Get(headBatch)
- if err != nil {
- return nil, err
- }
- h := gethcommon.BytesToHash(value)
- return &h, nil
-}
-
-// Stores a batch header into the database.
-func (db *DB) writeBatchHeader(w ethdb.KeyValueWriter, header *common.BatchHeader) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- key := batchHeaderKey(header.Hash())
-
- return w.Put(key, data)
-}
-
-// Stores the head batch header hash into the database.
-func (db *DB) writeHeadBatchHash(w ethdb.KeyValueWriter, val gethcommon.Hash) error {
- err := w.Put(headBatch, val.Bytes())
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stores a batch's hash in the database, keyed by the batch's number.
-func (db *DB) writeBatchHash(w ethdb.KeyValueWriter, header *common.BatchHeader) error {
- key := batchHashKey(header.Number)
-
- return w.Put(key, header.Hash().Bytes())
-}
-
-// Stores a batch's hash in the database, keyed by the batch's sequencer number.
-func (db *DB) writeBatchSeqNo(w ethdb.KeyValueWriter, header *common.BatchHeader) error {
- key := batchHashBySeqNoKey(header.SequencerOrderNo)
-
- return w.Put(key, header.Hash().Bytes())
-}
-
-// Retrieves the hash for the batch with the given number..
-func (db *DB) readBatchHash(number *big.Int) (*gethcommon.Hash, error) {
- data, err := db.kvStore.Get(batchHashKey(number))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- hash := gethcommon.BytesToHash(data)
- return &hash, nil
-}
-
-// Returns the transaction hashes in the batch with the given hash.
-func (db *DB) readBatchTxHashes(batchHash common.L2BatchHash) ([]gethcommon.Hash, error) {
- data, err := db.kvStore.Get(batchTxHashesKey(batchHash))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
-
- var txHashes []gethcommon.Hash
- if err = rlp.Decode(bytes.NewReader(data), &txHashes); err != nil {
- return nil, err
- }
-
- return txHashes, nil
-}
-
-// Stores a batch's number in the database, keyed by the hash of a transaction in that rollup.
-func (db *DB) writeBatchNumber(w ethdb.KeyValueWriter, header *common.BatchHeader, txHash gethcommon.Hash) error {
- key := batchNumberKey(txHash)
-
- return w.Put(key, header.Number.Bytes())
-}
-
-// Writes the transaction hashes against the batch containing them.
-func (db *DB) writeBatchTxHashes(w ethdb.KeyValueWriter, batchHash common.L2BatchHash, txHashes []gethcommon.Hash) error {
- data, err := rlp.EncodeToBytes(txHashes)
- if err != nil {
- return err
- }
- key := batchTxHashesKey(batchHash)
-
- return w.Put(key, data)
-}
-
-// Retrieves the number of the batch containing the transaction with the given hash.
-func (db *DB) readBatchNumber(txHash gethcommon.Hash) (*big.Int, error) {
- data, err := db.kvStore.Get(batchNumberKey(txHash))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- return big.NewInt(0).SetBytes(data), nil
-}
-
-func (db *DB) readBatchHashBySequenceNumber(seqNum *big.Int) (*gethcommon.Hash, error) {
- data, err := db.kvStore.Get(batchHashBySeqNoKey(seqNum))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- h := gethcommon.BytesToHash(data)
- return &h, nil
-}
-
-// Retrieves the total number of rolled-up transactions - returns 0 if no tx count is found
-func (db *DB) readTotalTransactions() (*big.Int, error) {
- data, err := db.kvStore.Get(totalTransactionsKey)
- if err != nil {
- if errors.Is(err, errutil.ErrNotFound) {
- return big.NewInt(0), nil
- }
- return nil, err
- }
- if len(data) == 0 {
- return big.NewInt(0), nil
- }
- return big.NewInt(0).SetBytes(data), nil
-}
-
-// Stores the total number of transactions in the database.
-func (db *DB) writeTotalTransactions(w ethdb.KeyValueWriter, newTotal *big.Int) error {
- err := w.Put(totalTransactionsKey, newTotal.Bytes())
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stores a batch into the database.
-func (db *DB) writeBatch(w ethdb.KeyValueWriter, batch *common.ExtBatch) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(batch)
- if err != nil {
- return err
- }
- key := batchKey(batch.Hash())
- if err := w.Put(key, data); err != nil {
- return err
- }
- db.batchWrites.Inc(1)
- return nil
-}
-
-// Retrieves the batch corresponding to the hash.
-func (db *DB) readBatch(hash gethcommon.Hash) (*common.ExtBatch, error) {
- data, err := db.kvStore.Get(batchKey(hash))
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- batch := new(common.ExtBatch)
- if err := rlp.Decode(bytes.NewReader(data), batch); err != nil {
- return nil, err
- }
- return batch, nil
-}
diff --git a/go/host/db/batches_test.go b/go/host/db/batches_test.go
deleted file mode 100644
index 54289b6061..0000000000
--- a/go/host/db/batches_test.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package db
-
-import (
- "errors"
- "math/big"
- "testing"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ten-protocol/go-ten/go/common/errutil"
-
- "github.com/ten-protocol/go-ten/go/common"
-)
-
-func TestCanStoreAndRetrieveBatchHeader(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- batch := common.ExtBatch{
- Header: &header,
- }
-
- err := db.AddBatch(&batch)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchHeader, err := db.GetBatchHeader(header.Hash())
- if err != nil {
- t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
- }
- if batchHeader.Number.Cmp(header.Number) != 0 {
- t.Errorf("batch header was not stored correctly")
- }
-}
-
-func TestUnknownBatchHeaderReturnsNotFound(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := types.Header{}
-
- _, err := db.GetBatchHeader(header.Hash())
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("did not store batch header but was able to retrieve it")
- }
-}
-
-func TestHigherNumberBatchBecomesBatchHeader(t *testing.T) { //nolint:dupl
- db := NewInMemoryDB(nil, nil)
- headerOne := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- batchOne := common.ExtBatch{
- Header: &headerOne,
- }
-
- err := db.AddBatch(&batchOne)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- headerTwo := common.BatchHeader{
- // We give the second header a higher number, making it the head.
- Number: big.NewInt(0).Add(headerOne.Number, big.NewInt(1)),
- }
- batchTwo := common.ExtBatch{
- Header: &headerTwo,
- }
-
- err = db.AddBatch(&batchTwo)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchHeader, err := db.GetHeadBatchHeader()
- if err != nil {
- t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
- }
- if batchHeader.Number.Cmp(headerTwo.Number) != 0 {
- t.Errorf("head batch was not set correctly")
- }
-}
-
-func TestLowerNumberBatchDoesNotBecomeBatchHeader(t *testing.T) { //nolint:dupl
- db := NewInMemoryDB(nil, nil)
- headerOne := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- batchOne := common.ExtBatch{
- Header: &headerOne,
- }
-
- err := db.AddBatch(&batchOne)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- headerTwo := common.BatchHeader{
- // We give the second header a higher number, making it the head.
- Number: big.NewInt(0).Sub(headerOne.Number, big.NewInt(1)),
- }
- batchTwo := common.ExtBatch{
- Header: &headerTwo,
- }
-
- err = db.AddBatch(&batchTwo)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchHeader, err := db.GetHeadBatchHeader()
- if err != nil {
- t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
- }
- if batchHeader.Number.Cmp(headerOne.Number) != 0 {
- t.Errorf("head batch was not set correctly")
- }
-}
-
-func TestHeadBatchHeaderIsNotSetInitially(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
-
- _, err := db.GetHeadBatchHeader()
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("head batch was set, but no batchs had been written")
- }
-}
-
-func TestCanRetrieveBatchHashByNumber(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- batch := common.ExtBatch{
- Header: &header,
- }
-
- err := db.AddBatch(&batch)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchHash, err := db.GetBatchHash(header.Number)
- if err != nil {
- t.Errorf("stored batch but could not retrieve headers hash by number. Cause: %s", err)
- }
- if *batchHash != header.Hash() {
- t.Errorf("batch hash was not stored correctly against number")
- }
-}
-
-func TestUnknownBatchNumberReturnsNotFound(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := types.Header{}
-
- _, err := db.GetBatchHash(header.Number)
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("did not store batch hash but was able to retrieve it")
- }
-}
-
-func TestCanRetrieveBatchNumberByTxHash(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- txHash := gethcommon.BytesToHash([]byte("magicString"))
- batch := common.ExtBatch{
- Header: &header,
- TxHashes: []gethcommon.Hash{txHash},
- }
-
- err := db.AddBatch(&batch)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchNumber, err := db.GetBatchNumber(txHash)
- if err != nil {
- t.Errorf("stored batch but could not retrieve headers number by transaction hash. Cause: %s", err)
- }
- if batchNumber.Cmp(header.Number) != 0 {
- t.Errorf("batch number was not stored correctly against transaction hash")
- }
-}
-
-func TestUnknownBatchTxHashReturnsNotFound(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
-
- _, err := db.GetBatchNumber(gethcommon.BytesToHash([]byte("magicString")))
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("did not store batch number but was able to retrieve it")
- }
-}
-
-func TestCanRetrieveBatchTransactions(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- txHashes := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
- batch := common.ExtBatch{
- Header: &header,
- TxHashes: txHashes,
- }
-
- err := db.AddBatch(&batch)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- batchTxs, err := db.GetBatchTxs(header.Hash())
- if err != nil {
- t.Errorf("stored batch but could not retrieve headers transactions. Cause: %s", err)
- }
- if len(batchTxs) != len(txHashes) {
- t.Errorf("batch transactions were not stored correctly")
- }
- for idx, batchTx := range batchTxs {
- if batchTx != txHashes[idx] {
- t.Errorf("batch transactions were not stored correctly")
- }
- }
-}
-
-func TestTransactionsForUnknownBatchReturnsNotFound(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
-
- _, err := db.GetBatchNumber(gethcommon.BytesToHash([]byte("magicString")))
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("did not store batch number but was able to retrieve it")
- }
-}
-
-func TestCanRetrieveTotalNumberOfTransactions(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- headerOne := common.BatchHeader{
- Number: big.NewInt(batchNumber),
- }
- txHashesOne := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
- batchOne := common.ExtBatch{
- Header: &headerOne,
- TxHashes: txHashesOne,
- }
-
- err := db.AddBatch(&batchOne)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- headerTwo := common.BatchHeader{
- Number: big.NewInt(batchNumber + 1),
- }
- txHashesTwo := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringThree")), gethcommon.BytesToHash([]byte("magicStringFour"))}
- batchTwo := common.ExtBatch{
- Header: &headerTwo,
- TxHashes: txHashesTwo,
- }
-
- err = db.AddBatch(&batchTwo)
- if err != nil {
- t.Errorf("could not store batch. Cause: %s", err)
- }
-
- totalTxs, err := db.GetTotalTransactions()
- if err != nil {
- t.Errorf("was not able to read total number of transactions. Cause: %s", err)
- }
-
- if int(totalTxs.Int64()) != len(txHashesOne)+len(txHashesTwo) {
- t.Errorf("total number of batch transactions was not stored correctly")
- }
-}
-
-// todo (#718) - add tests of writing and reading extbatches.
diff --git a/go/host/db/blocks.go b/go/host/db/blocks.go
deleted file mode 100644
index a8596bcb2f..0000000000
--- a/go/host/db/blocks.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package db
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/big"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ten-protocol/go-ten/go/common"
- "github.com/ten-protocol/go-ten/go/common/errutil"
-)
-
-// DB methods relating to block headers.
-
-// GetBlockByHash returns the block header given the hash.
-func (db *DB) GetBlockByHash(hash gethcommon.Hash) (*types.Header, error) {
- return db.readBlock(db.kvStore, blockHashKey(hash))
-}
-
-// GetBlockByHeight returns the block header given the height
-func (db *DB) GetBlockByHeight(height *big.Int) (*types.Header, error) {
- return db.readBlock(db.kvStore, blockNumberKey(height))
-}
-
-// AddBlock adds a types.Header to the known headers
-func (db *DB) AddBlock(header *types.Header) error {
- b := db.kvStore.NewBatch()
- err := db.writeBlockByHash(header)
- if err != nil {
- return fmt.Errorf("could not write block header. Cause: %w", err)
- }
-
- err = db.writeBlockByHeight(header)
- if err != nil {
- return fmt.Errorf("could not write block header. Cause: %w", err)
- }
-
- // Update the tip if the new height is greater than the existing one.
- tipBlockHeader, err := db.GetBlockAtTip()
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return fmt.Errorf("could not retrieve block header at tip. Cause: %w", err)
- }
- if tipBlockHeader == nil || tipBlockHeader.Number.Cmp(header.Number) == -1 {
- err = db.writeBlockAtTip(b, header.Hash())
- if err != nil {
- return fmt.Errorf("could not write new block hash at tip. Cause: %w", err)
- }
- }
-
- if err = b.Write(); err != nil {
- return fmt.Errorf("could not write batch to DB. Cause: %w", err)
- }
-
- return nil
-}
-
-// GetBlockListing returns latest L1 blocks given the pagination.
-// For example, page 0, size 10 will return the latest 10 blocks.
-func (db *DB) GetBlockListing(pagination *common.QueryPagination) (*common.BlockListingResponse, error) {
- // fetch the total blocks so we can paginate
- tipHeader, err := db.GetBlockAtTip()
- if err != nil {
- return nil, err
- }
-
- blocksFrom := tipHeader.Number.Uint64() - pagination.Offset
- blocksToInclusive := int(blocksFrom) - int(pagination.Size) + 1
- // if blocksToInclusive would be negative, set it to 0
- if blocksToInclusive < 0 {
- blocksToInclusive = 0
- }
-
- // fetch requested batches
- var blocks []common.PublicBlock
- for i := blocksFrom; i > uint64(blocksToInclusive); i-- {
- header, err := db.GetBlockByHeight(big.NewInt(int64(i)))
- if err != nil {
- return nil, err
- }
-
- // check if the block has a rollup
- rollup, err := db.GetRollupHeaderByBlock(header.Hash())
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return nil, err
- }
-
- listedBlock := common.PublicBlock{BlockHeader: *header}
- if rollup != nil {
- listedBlock.RollupHash = rollup.Hash()
- fmt.Println("added at block: ", header.Number.Int64(), " - ", listedBlock.RollupHash)
- }
- blocks = append(blocks, listedBlock)
- }
-
- return &common.BlockListingResponse{
- BlocksData: blocks,
- Total: tipHeader.Number.Uint64(),
- }, nil
-}
-
-// GetBlockAtTip returns the block at current Head or Tip
-func (db *DB) GetBlockAtTip() (*types.Header, error) {
- value, err := db.kvStore.Get(blockHeadedAtTip)
- if err != nil {
- return nil, err
- }
- h := gethcommon.BytesToHash(value)
-
- return db.GetBlockByHash(h)
-}
-
-// Stores the hash of the block at tip
-func (db *DB) writeBlockAtTip(w ethdb.KeyValueWriter, hash gethcommon.Hash) error {
- err := w.Put(blockHeadedAtTip, hash.Bytes())
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stores a block header into the database using the hash as key
-func (db *DB) writeBlockByHash(header *types.Header) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- key := blockHashKey(header.Hash())
- if err := db.kvStore.Put(key, data); err != nil {
- return err
- }
- db.blockWrites.Inc(1)
- return nil
-}
-
-// Stores a block header into the database using the height as key
-func (db *DB) writeBlockByHeight(header *types.Header) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- key := blockNumberKey(header.Number)
- return db.kvStore.Put(key, data)
-}
-
-// Retrieves the block header corresponding to the key.
-func (db *DB) readBlock(r ethdb.KeyValueReader, key []byte) (*types.Header, error) {
- data, err := r.Get(key)
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- header := new(types.Header)
- if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
- return nil, err
- }
- db.blockReads.Inc(1)
- return header, nil
-}
-
-// headerKey = blockNumberHeaderPrefix + hash
-func blockNumberKey(height *big.Int) []byte {
- return append(blockNumberHeaderPrefix, height.Bytes()...)
-}
-
-// headerKey = blockHeaderPrefix + hash
-func blockHashKey(hash gethcommon.Hash) []byte {
- return append(blockHeaderPrefix, hash.Bytes()...)
-}
diff --git a/go/host/db/blocks_test.go b/go/host/db/blocks_test.go
deleted file mode 100644
index 0649ebca77..0000000000
--- a/go/host/db/blocks_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package db
-
-import (
- "errors"
- "math/big"
- "testing"
-
- "github.com/ten-protocol/go-ten/go/common/errutil"
-
- "github.com/ethereum/go-ethereum/core/types"
-)
-
-// An arbitrary number to put in the header, to check that the header is retrieved correctly from the DB.
-const batchNumber = 777
-
-func TestCanStoreAndRetrieveBlockHeader(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := types.Header{
- Number: big.NewInt(batchNumber),
- }
- err := db.AddBlock(&header)
- if err != nil {
- t.Errorf("could not add block header. Cause: %s", err)
- }
-
- blockHeader, err := db.GetBlockByHash(header.Hash())
- if err != nil {
- t.Errorf("stored block header but could not retrieve it. Cause: %s", err)
- }
- if blockHeader.Number.Cmp(header.Number) != 0 {
- t.Errorf("block header was not stored correctly")
- }
-}
-
-func TestUnknownBlockHeaderReturnsNotFound(t *testing.T) {
- db := NewInMemoryDB(nil, nil)
- header := types.Header{}
-
- _, err := db.GetBlockByHash(header.Hash())
- if !errors.Is(err, errutil.ErrNotFound) {
- t.Errorf("did not store block header but was able to retrieve it")
- }
-}
diff --git a/go/host/db/hostdb.go b/go/host/db/hostdb.go
deleted file mode 100644
index ee8fda2d49..0000000000
--- a/go/host/db/hostdb.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package db
-
-import (
- "fmt"
- "os"
-
- "github.com/ten-protocol/go-ten/go/config"
-
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/leveldb"
- gethlog "github.com/ethereum/go-ethereum/log"
- gethmetrics "github.com/ethereum/go-ethereum/metrics"
- "github.com/ten-protocol/go-ten/go/common/gethdb"
-)
-
-// Schema keys, in alphabetical order.
-var (
- blockHeaderPrefix = []byte("b")
- blockNumberHeaderPrefix = []byte("bnh")
- batchHeaderPrefix = []byte("ba")
- batchHashPrefix = []byte("bh")
- batchNumberPrefix = []byte("bn")
- batchPrefix = []byte("bp")
- batchHashForSeqNoPrefix = []byte("bs")
- batchTxHashesPrefix = []byte("bt")
- headBatch = []byte("hb")
- totalTransactionsKey = []byte("t")
- rollupHeaderPrefix = []byte("rh")
- rollupHeaderBlockPrefix = []byte("rhb")
- tipRollupHash = []byte("tr")
- blockHeadedAtTip = []byte("bht")
-)
-
-// DB allows to access the nodes public nodeDB
-type DB struct {
- kvStore ethdb.KeyValueStore
- logger gethlog.Logger
- batchWrites gethmetrics.Gauge
- batchReads gethmetrics.Gauge
- blockWrites gethmetrics.Gauge
- blockReads gethmetrics.Gauge
-}
-
-// Stop is especially important for graceful shutdown of LevelDB as it may flush data to disk that is currently in cache
-func (db *DB) Stop() error {
- db.logger.Info("Closing the host DB.")
- err := db.kvStore.Close()
- if err != nil {
- return err
- }
- return nil
-}
-
-func CreateDBFromConfig(cfg *config.HostConfig, regMetrics gethmetrics.Registry, logger gethlog.Logger) (*DB, error) {
- if err := validateDBConf(cfg); err != nil {
- return nil, err
- }
- if cfg.UseInMemoryDB {
- logger.Info("UseInMemoryDB flag is true, data will not be persisted. Creating in-memory database...")
- return NewInMemoryDB(regMetrics, logger), nil
- }
- return NewLevelDBBackedDB(cfg.LevelDBPath, regMetrics, logger)
-}
-
-func validateDBConf(cfg *config.HostConfig) error {
- if cfg.UseInMemoryDB && cfg.LevelDBPath != "" {
- return fmt.Errorf("useInMemoryDB=true so levelDB will not be used and no path is needed, but levelDBPath=%s", cfg.LevelDBPath)
- }
- return nil
-}
-
-// NewInMemoryDB returns a new instance of the Node DB
-func NewInMemoryDB(regMetrics gethmetrics.Registry, logger gethlog.Logger) *DB {
- return newDB(gethdb.NewMemDB(), regMetrics, logger)
-}
-
-// NewLevelDBBackedDB creates a persistent DB for the host, if dbPath == "" it will generate a temp file
-func NewLevelDBBackedDB(dbPath string, regMetrics gethmetrics.Registry, logger gethlog.Logger) (*DB, error) {
- var err error
- if dbPath == "" {
- // todo (#1618) - we should remove this option before prod, if you want a temp DB it should be wired in via the config
- dbPath, err = os.MkdirTemp("", "leveldb_*")
- if err != nil {
- return nil, fmt.Errorf("could not create temp leveldb directory - %w", err)
- }
- logger.Warn("dbPath was empty, created temp dir for persistence", "dbPath", dbPath)
- }
- // determine if a db file already exists, we don't want to overwrite it
- _, err = os.Stat(dbPath)
- dbDesc := "new"
- if err == nil {
- dbDesc = "existing"
- }
-
- // todo (#1618) - these should be configs
- cache := 128
- handles := 128
- db, err := leveldb.New(dbPath, cache, handles, "host", false)
- if err != nil {
- return nil, fmt.Errorf("could not create leveldb - %w", err)
- }
- logger.Info(fmt.Sprintf("Opened %s level db dir at %s", dbDesc, dbPath))
- return newDB(&ObscuroLevelDB{db: db}, regMetrics, logger), nil
-}
-
-func newDB(kvStore ethdb.KeyValueStore, regMetrics gethmetrics.Registry, logger gethlog.Logger) *DB {
- return &DB{
- kvStore: kvStore,
- logger: logger,
- batchWrites: gethmetrics.NewRegisteredGauge("host/db/batch/writes", regMetrics),
- batchReads: gethmetrics.NewRegisteredGauge("host/db/batch/reads", regMetrics),
- blockWrites: gethmetrics.NewRegisteredGauge("host/db/block/writes", regMetrics),
- blockReads: gethmetrics.NewRegisteredGauge("host/db/block/reads", regMetrics),
- }
-}
diff --git a/go/host/db/leveldb.go b/go/host/db/leveldb.go
deleted file mode 100644
index 49a3ec6cf3..0000000000
--- a/go/host/db/leveldb.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package db
-
-import (
- "errors"
-
- "github.com/ethereum/go-ethereum/ethdb"
- ethldb "github.com/ethereum/go-ethereum/ethdb/leveldb"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/ten-protocol/go-ten/go/common/errutil"
-)
-
-// ObscuroLevelDB is a very thin wrapper around a level DB database for compatibility with our internal interfaces
-// In particular, it overrides the Get method to return the obscuro ErrNotFound
-type ObscuroLevelDB struct {
- db *ethldb.Database
-}
-
-func (o *ObscuroLevelDB) NewBatchWithSize(int) ethdb.Batch {
- // TODO implement me
- panic("implement me")
-}
-
-func (o *ObscuroLevelDB) NewSnapshot() (ethdb.Snapshot, error) {
- // TODO implement me
- panic("implement me")
-}
-
-func (o *ObscuroLevelDB) Has(key []byte) (bool, error) {
- return o.db.Has(key)
-}
-
-// Get is overridden here to return our internal NotFound error
-func (o *ObscuroLevelDB) Get(key []byte) ([]byte, error) {
- d, err := o.db.Get(key)
- if err != nil {
- if errors.Is(err, leveldb.ErrNotFound) {
- return nil, errutil.ErrNotFound
- }
- return nil, err
- }
- return d, nil
-}
-
-func (o *ObscuroLevelDB) Put(key []byte, value []byte) error {
- return o.db.Put(key, value)
-}
-
-func (o *ObscuroLevelDB) Delete(key []byte) error {
- return o.db.Delete(key)
-}
-
-func (o *ObscuroLevelDB) NewBatch() ethdb.Batch {
- return o.db.NewBatch()
-}
-
-func (o *ObscuroLevelDB) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- return o.db.NewIterator(prefix, start)
-}
-
-func (o *ObscuroLevelDB) Stat(property string) (string, error) {
- return o.db.Stat(property)
-}
-
-func (o *ObscuroLevelDB) Compact(start []byte, limit []byte) error {
- return o.db.Compact(start, limit)
-}
-
-func (o *ObscuroLevelDB) Close() error {
- return o.db.Close()
-}
diff --git a/go/host/db/rollups.go b/go/host/db/rollups.go
deleted file mode 100644
index 41ac34b80d..0000000000
--- a/go/host/db/rollups.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package db
-
-import (
- "bytes"
- "fmt"
-
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/pkg/errors"
- "github.com/ten-protocol/go-ten/go/common"
- "github.com/ten-protocol/go-ten/go/common/errutil"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
-)
-
-// DB methods relating to rollup transactions.
-
-// AddRollupHeader adds a rollup to the DB
-func (db *DB) AddRollupHeader(rollup *common.ExtRollup, block *common.L1Block) error {
- // Check if the Header is already stored
- _, err := db.GetRollupHeader(rollup.Hash())
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return fmt.Errorf("could not retrieve rollup header. Cause: %w", err)
- }
- if err == nil {
- // The rollup is already stored, so we return early.
- return errutil.ErrAlreadyExists
- }
-
- b := db.kvStore.NewBatch()
-
- if err := db.writeRollupHeader(b, rollup.Header); err != nil {
- return fmt.Errorf("could not write rollup header. Cause: %w", err)
- }
-
- if err := db.writeRollupByBlockHash(b, rollup.Header, block.Hash()); err != nil {
- return fmt.Errorf("could not write rollup block. Cause: %w", err)
- }
-
- // Update the tip if the new height is greater than the existing one.
- tipRollupHeader, err := db.GetTipRollupHeader()
- if err != nil && !errors.Is(err, errutil.ErrNotFound) {
- return fmt.Errorf("could not retrieve rollup header at tip. Cause: %w", err)
- }
- if tipRollupHeader == nil || tipRollupHeader.LastBatchSeqNo < rollup.Header.LastBatchSeqNo {
- err = db.writeTipRollupHeader(b, rollup.Hash())
- if err != nil {
- return fmt.Errorf("could not write new rollup hash at tip. Cause: %w", err)
- }
- }
-
- if err = b.Write(); err != nil {
- return fmt.Errorf("could not write batch to DB. Cause: %w", err)
- }
- return nil
-}
-
-// GetRollupHeader returns the rollup with the given hash.
-func (db *DB) GetRollupHeader(hash gethcommon.Hash) (*common.RollupHeader, error) {
- return db.readRollupHeader(rollupHashKey(hash))
-}
-
-// GetTipRollupHeader returns the header of the node's current tip rollup.
-func (db *DB) GetTipRollupHeader() (*common.RollupHeader, error) {
- headBatchHash, err := db.readTipRollupHash()
- if err != nil {
- return nil, err
- }
- return db.readRollupHeader(rollupHashKey(*headBatchHash))
-}
-
-// GetRollupHeaderByBlock returns the rollup for the given block
-func (db *DB) GetRollupHeaderByBlock(blockHash gethcommon.Hash) (*common.RollupHeader, error) {
- return db.readRollupHeader(rollupBlockKey(blockHash))
-}
-
-// Retrieves the rollup corresponding to the hash.
-func (db *DB) readRollupHeader(key []byte) (*common.RollupHeader, error) {
- data, err := db.kvStore.Get(key)
- if err != nil {
- return nil, err
- }
- if len(data) == 0 {
- return nil, errutil.ErrNotFound
- }
- rollupHeader := new(common.RollupHeader)
- if err := rlp.Decode(bytes.NewReader(data), rollupHeader); err != nil {
- return nil, err
- }
- return rollupHeader, nil
-}
-
-// Stores a rollup header into the database.
-func (db *DB) writeRollupHeader(w ethdb.KeyValueWriter, header *common.RollupHeader) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- key := rollupHashKey(header.Hash())
-
- return w.Put(key, data)
-}
-
-// Retrieves the hash of the rollup at tip
-func (db *DB) readTipRollupHash() (*gethcommon.Hash, error) {
- value, err := db.kvStore.Get(tipRollupHash)
- if err != nil {
- return nil, err
- }
- h := gethcommon.BytesToHash(value)
- return &h, nil
-}
-
-// Stores the tip rollup header hash into the database
-func (db *DB) writeTipRollupHeader(w ethdb.KeyValueWriter, val gethcommon.Hash) error {
- err := w.Put(tipRollupHash, val.Bytes())
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stores if a rollup is in a block
-func (db *DB) writeRollupByBlockHash(w ethdb.KeyValueWriter, header *common.RollupHeader, blockHash gethcommon.Hash) error {
- // Write the encoded header
- data, err := rlp.EncodeToBytes(header)
- if err != nil {
- return err
- }
- key := rollupBlockKey(blockHash)
-
- return w.Put(key, data)
-}
-
-// rollupHashKey = rollupHeaderPrefix + hash
-func rollupHashKey(hash gethcommon.Hash) []byte {
- return append(rollupHeaderPrefix, hash.Bytes()...)
-}
-
-// rollupBlockKey = rollupHeaderBlockPrefix + hash
-func rollupBlockKey(hash gethcommon.Hash) []byte {
- return append(rollupHeaderBlockPrefix, hash.Bytes()...)
-}
diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go
index fa2a0975bd..8584a1dc43 100644
--- a/go/host/enclave/guardian.go
+++ b/go/host/enclave/guardian.go
@@ -7,6 +7,8 @@ import (
"sync"
"time"
+ "github.com/ten-protocol/go-ten/go/host/storage"
+
"github.com/ten-protocol/go-ten/go/common/stopcontrol"
gethcommon "github.com/ethereum/go-ethereum/common"
@@ -22,7 +24,6 @@ import (
"github.com/ten-protocol/go-ten/go/common/log"
"github.com/ten-protocol/go-ten/go/common/retry"
"github.com/ten-protocol/go-ten/go/config"
- "github.com/ten-protocol/go-ten/go/host/db"
"github.com/ten-protocol/go-ten/go/host/l1"
)
@@ -55,8 +56,8 @@ type Guardian struct {
state *StateTracker // state machine that tracks our view of the enclave's state
enclaveClient common.Enclave
- sl guardianServiceLocator
- db *db.DB
+ sl guardianServiceLocator
+ storage storage.Storage
submitDataLock sync.Mutex // we only submit one block, batch or transaction to enclave at a time
@@ -74,7 +75,7 @@ type Guardian struct {
enclaveID *common.EnclaveID
}
-func NewGuardian(cfg *config.HostConfig, hostData host.Identity, serviceLocator guardianServiceLocator, enclaveClient common.Enclave, db *db.DB, interrupter *stopcontrol.StopControl, logger gethlog.Logger) *Guardian {
+func NewGuardian(cfg *config.HostConfig, hostData host.Identity, serviceLocator guardianServiceLocator, enclaveClient common.Enclave, storage storage.Storage, interrupter *stopcontrol.StopControl, logger gethlog.Logger) *Guardian {
return &Guardian{
hostData: hostData,
state: NewStateTracker(logger),
@@ -86,7 +87,7 @@ func NewGuardian(cfg *config.HostConfig, hostData host.Identity, serviceLocator
l1StartHash: cfg.L1StartHash,
maxRollupSize: cfg.MaxRollupSize,
blockTime: cfg.L1BlockTime,
- db: db,
+ storage: storage,
hostInterrupter: interrupter,
logger: logger,
}
@@ -442,8 +443,6 @@ func (g *Guardian) submitL1Block(block *common.L1Block, isLatest bool) (bool, er
g.state.OnProcessedBlock(block.Hash())
g.processL1BlockTransactions(block)
- // todo (@matt) this should not be here, it is only used by the RPC API server for batch data which will eventually just use L1 repo
- err = g.db.AddBlock(block.Header())
if err != nil {
return false, fmt.Errorf("submitted block to enclave but could not store the block processing result. Cause: %w", err)
}
@@ -469,7 +468,13 @@ func (g *Guardian) processL1BlockTransactions(block *common.L1Block) {
if err != nil {
g.logger.Error("Could not decode rollup.", log.ErrKey, err)
}
- err = g.db.AddRollupHeader(r, block)
+
+ metaData, err := g.enclaveClient.GetRollupData(r.Header.Hash())
+ if err != nil {
+ g.logger.Error("Could not fetch rollup metadata from enclave.", log.ErrKey, err)
+ } else {
+ err = g.storage.AddRollup(r, metaData, block)
+ }
if err != nil {
if errors.Is(err, errutil.ErrAlreadyExists) {
g.logger.Info("Rollup already stored", log.RollupHashKey, r.Hash())
@@ -477,6 +482,11 @@ func (g *Guardian) processL1BlockTransactions(block *common.L1Block) {
g.logger.Error("Could not store rollup.", log.ErrKey, err)
}
}
+ // TODO (@will) this should be removed and pulled from the L1
+ err = g.storage.AddBlock(block.Header(), r.Header.Hash())
+ if err != nil {
+ g.logger.Error("Could not add block to host db.", log.ErrKey, err)
+ }
}
if len(contractAddressTxs) > 0 {
diff --git a/go/host/host.go b/go/host/host.go
index 4afbaaf0d7..e846e09420 100644
--- a/go/host/host.go
+++ b/go/host/host.go
@@ -19,8 +19,8 @@ import (
"github.com/ten-protocol/go-ten/go/config"
"github.com/ten-protocol/go-ten/go/ethadapter"
"github.com/ten-protocol/go-ten/go/ethadapter/mgmtcontractlib"
- "github.com/ten-protocol/go-ten/go/host/db"
"github.com/ten-protocol/go-ten/go/host/events"
+ "github.com/ten-protocol/go-ten/go/host/storage"
"github.com/ten-protocol/go-ten/go/responses"
"github.com/ten-protocol/go-ten/go/wallet"
"github.com/ten-protocol/go-ten/lib/gethfork/rpc"
@@ -39,7 +39,7 @@ type host struct {
// ignore incoming requests
stopControl *stopcontrol.StopControl
- db *db.DB // Stores the host's publicly-available data
+ storage storage.Storage // Stores the host's publicly-available data
logger gethlog.Logger
@@ -59,10 +59,7 @@ func (bl batchListener) HandleBatch(batch *common.ExtBatch) {
}
func NewHost(config *config.HostConfig, hostServices *ServicesRegistry, p2p hostcommon.P2PHostService, ethClient ethadapter.EthClient, l1Repo hostcommon.L1RepoService, enclaveClients []common.Enclave, ethWallet wallet.Wallet, mgmtContractLib mgmtcontractlib.MgmtContractLib, logger gethlog.Logger, regMetrics gethmetrics.Registry) hostcommon.Host {
- database, err := db.CreateDBFromConfig(config, regMetrics, logger)
- if err != nil {
- logger.Crit("unable to create database for host", log.ErrKey, err)
- }
+ hostStorage := storage.NewHostStorageFromConfig(config, logger)
hostIdentity := hostcommon.NewIdentity(config)
host := &host{
// config
@@ -73,7 +70,7 @@ func NewHost(config *config.HostConfig, hostServices *ServicesRegistry, p2p host
services: hostServices,
// Initialize the host DB
- db: database,
+ storage: hostStorage,
logger: logger,
metricRegistry: regMetrics,
@@ -91,12 +88,12 @@ func NewHost(config *config.HostConfig, hostServices *ServicesRegistry, p2p host
enclHostID.IsSequencer = false
enclHostID.IsGenesis = false
}
- enclGuardian := enclave.NewGuardian(config, enclHostID, hostServices, enclClient, database, host.stopControl, logger)
+ enclGuardian := enclave.NewGuardian(config, enclHostID, hostServices, enclClient, hostStorage, host.stopControl, logger)
enclGuardians = append(enclGuardians, enclGuardian)
}
enclService := enclave.NewService(hostIdentity, hostServices, enclGuardians, logger)
- l2Repo := l2.NewBatchRepository(config, hostServices, database, logger)
+ l2Repo := l2.NewBatchRepository(config, hostServices, hostStorage, logger)
subsService := events.NewLogEventManager(hostServices, logger)
l2Repo.Subscribe(batchListener{newHeads: host.newHeads})
@@ -154,10 +151,6 @@ func (h *host) Config() *config.HostConfig {
return h.config
}
-func (h *host) DB() *db.DB {
- return h.db
-}
-
func (h *host) EnclaveClient() common.Enclave {
return h.services.Enclaves().GetEnclaveClient()
}
@@ -196,7 +189,7 @@ func (h *host) Stop() error {
}
}
- if err := h.db.Stop(); err != nil {
+ if err := h.storage.Close(); err != nil {
h.logger.Error("Failed to stop DB", log.ErrKey, err)
}
@@ -246,6 +239,10 @@ func (h *host) ObscuroConfig() (*common.ObscuroNetworkInfo, error) {
}, nil
}
+func (h *host) Storage() storage.Storage {
+ return h.storage
+}
+
func (h *host) NewHeadsChan() chan *common.BatchHeader {
return h.newHeads
}
diff --git a/go/host/l2/batchrepository.go b/go/host/l2/batchrepository.go
index c6a7daa077..ed7cfee642 100644
--- a/go/host/l2/batchrepository.go
+++ b/go/host/l2/batchrepository.go
@@ -2,6 +2,7 @@ package l2
import (
"errors"
+ "fmt"
"math/big"
"sync"
"sync/atomic"
@@ -14,7 +15,7 @@ import (
"github.com/ten-protocol/go-ten/go/common/log"
"github.com/ten-protocol/go-ten/go/common/subscription"
"github.com/ten-protocol/go-ten/go/config"
- "github.com/ten-protocol/go-ten/go/host/db"
+ "github.com/ten-protocol/go-ten/go/host/storage"
)
const (
@@ -37,7 +38,7 @@ type Repository struct {
batchSubscribers *subscription.Manager[host.L2BatchHandler]
sl batchRepoServiceLocator
- db *db.DB
+ storage storage.Storage
isSequencer bool
// high watermark for batch sequence numbers seen so far. If we can't find batch for seq no < this, then we should ask peers for missing batches
@@ -55,11 +56,11 @@ type Repository struct {
logger gethlog.Logger
}
-func NewBatchRepository(cfg *config.HostConfig, hostService batchRepoServiceLocator, database *db.DB, logger gethlog.Logger) *Repository {
+func NewBatchRepository(cfg *config.HostConfig, hostService batchRepoServiceLocator, storage storage.Storage, logger gethlog.Logger) *Repository {
return &Repository{
batchSubscribers: subscription.NewManager[host.L2BatchHandler](),
sl: hostService,
- db: database,
+ storage: storage,
isSequencer: cfg.NodeType == common.Sequencer,
latestBatchSeqNo: big.NewInt(0),
running: atomic.Bool{},
@@ -122,7 +123,7 @@ func (r *Repository) HandleBatchRequest(requesterID string, fromSeqNo *big.Int)
batches := make([]*common.ExtBatch, 0)
nextSeqNum := fromSeqNo
for len(batches) <= _maxBatchesInP2PResponse {
- batch, err := r.db.GetBatchBySequenceNumber(nextSeqNum)
+ batch, err := r.storage.FetchBatchBySeqNo(nextSeqNum.Uint64())
if err != nil {
if !errors.Is(err, errutil.ErrNotFound) {
r.logger.Warn("unexpected error fetching batches for peer req", log.BatchSeqNoKey, nextSeqNum, log.ErrKey, err)
@@ -148,7 +149,7 @@ func (r *Repository) Subscribe(handler host.L2BatchHandler) func() {
}
func (r *Repository) FetchBatchBySeqNo(seqNo *big.Int) (*common.ExtBatch, error) {
- b, err := r.db.GetBatchBySequenceNumber(seqNo)
+ b, err := r.storage.FetchBatchBySeqNo(seqNo.Uint64())
if err != nil {
if errors.Is(err, errutil.ErrNotFound) && seqNo.Cmp(r.latestBatchSeqNo) < 0 {
if r.isSequencer {
@@ -171,10 +172,9 @@ func (r *Repository) FetchBatchBySeqNo(seqNo *big.Int) (*common.ExtBatch, error)
// If the repository already has the batch it returns an AlreadyExists error which is typically ignored.
func (r *Repository) AddBatch(batch *common.ExtBatch) error {
r.logger.Debug("Saving batch", log.BatchSeqNoKey, batch.Header.SequencerOrderNo, log.BatchHashKey, batch.Hash())
- // this returns an error if the batch already exists in the db
- err := r.db.AddBatch(batch)
+ err := r.storage.AddBatch(batch)
if err != nil {
- return err
+ return fmt.Errorf("could not add batch: %w", err)
}
// atomically compare and swap latest batch sequence number if successfully added batch is newer
r.latestSeqNoMutex.Lock()
diff --git a/go/host/rpc/clientapi/client_api_eth.go b/go/host/rpc/clientapi/client_api_eth.go
index 85bc103af0..9da97805af 100644
--- a/go/host/rpc/clientapi/client_api_eth.go
+++ b/go/host/rpc/clientapi/client_api_eth.go
@@ -39,7 +39,7 @@ func (api *EthereumAPI) ChainId() (*hexutil.Big, error) { //nolint:stylecheck,re
// BlockNumber returns the height of the current head batch.
func (api *EthereumAPI) BlockNumber() hexutil.Uint64 {
- header, err := api.host.DB().GetHeadBatchHeader()
+ header, err := api.host.Storage().FetchHeadBatchHeader()
if err != nil {
// This error may be nefarious, but unfortunately the Eth API doesn't allow us to return an error.
api.logger.Error("could not retrieve head batch header", log.ErrKey, err)
@@ -59,7 +59,7 @@ func (api *EthereumAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNu
// GetBlockByHash returns the header of the batch with the given hash.
func (api *EthereumAPI) GetBlockByHash(_ context.Context, hash gethcommon.Hash, _ bool) (*common.BatchHeader, error) {
- batchHeader, err := api.host.DB().GetBatchHeader(hash)
+ batchHeader, err := api.host.Storage().FetchBatchHeaderByHash(hash)
if err != nil {
return nil, err
}
@@ -68,7 +68,7 @@ func (api *EthereumAPI) GetBlockByHash(_ context.Context, hash gethcommon.Hash,
// GasPrice is a placeholder for an RPC method required by MetaMask/Remix.
func (api *EthereumAPI) GasPrice(context.Context) (*hexutil.Big, error) {
- header, err := api.host.DB().GetHeadBatchHeader()
+ header, err := api.host.Storage().FetchHeadBatchHeader()
if err != nil {
return nil, err
}
@@ -187,7 +187,7 @@ func (api *EthereumAPI) GetStorageAt(_ context.Context, encryptedParams common.E
// rpc.DecimalOrHex -> []byte
func (api *EthereumAPI) FeeHistory(context.Context, string, rpc.BlockNumber, []float64) (*FeeHistoryResult, error) {
// todo (#1621) - return a non-dummy fee history
- header, err := api.host.DB().GetHeadBatchHeader()
+ header, err := api.host.Storage().FetchHeadBatchHeader()
if err != nil {
api.logger.Error("Unable to retrieve header for fee history.", log.ErrKey, err)
return nil, fmt.Errorf("unable to retrieve fee history")
@@ -226,16 +226,15 @@ func (api *EthereumAPI) batchNumberToBatchHash(batchNumber rpc.BlockNumber) (*ge
// note: our API currently treats all these block statuses the same for obscuro batches
if batchNumber == rpc.LatestBlockNumber || batchNumber == rpc.PendingBlockNumber ||
batchNumber == rpc.FinalizedBlockNumber || batchNumber == rpc.SafeBlockNumber {
- batchHeader, err := api.host.DB().GetHeadBatchHeader()
+ batchHeader, err := api.host.Storage().FetchHeadBatchHeader()
if err != nil {
return nil, err
}
batchHash := batchHeader.Hash()
return &batchHash, nil
}
-
batchNumberBig := big.NewInt(batchNumber.Int64())
- batchHash, err := api.host.DB().GetBatchHash(batchNumberBig)
+ batchHash, err := api.host.Storage().FetchBatchHashByHeight(batchNumberBig)
if err != nil {
return nil, err
}
diff --git a/go/host/rpc/clientapi/client_api_obscuroscan.go b/go/host/rpc/clientapi/client_api_obscuroscan.go
deleted file mode 100644
index 41fa61cf74..0000000000
--- a/go/host/rpc/clientapi/client_api_obscuroscan.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package clientapi
-
-import (
- "errors"
- "fmt"
- "math/big"
-
- "github.com/ten-protocol/go-ten/go/common/errutil"
-
- "github.com/ten-protocol/go-ten/go/common/host"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ten-protocol/go-ten/go/common"
-)
-
-const txLimit = 100
-
-// TenScanAPI implements TenScan-specific JSON RPC operations.
-type TenScanAPI struct {
- host host.Host
-}
-
-func NewTenScanAPI(host host.Host) *TenScanAPI {
- return &TenScanAPI{
- host: host,
- }
-}
-
-// GetBlockHeaderByHash returns the header for the block with the given hash.
-func (api *TenScanAPI) GetBlockHeaderByHash(blockHash gethcommon.Hash) (*types.Header, error) {
- blockHeader, err := api.host.DB().GetBlockByHash(blockHash)
- if err != nil {
- if errors.Is(err, errutil.ErrNotFound) {
- return nil, fmt.Errorf("no block with hash %s is stored", blockHash)
- }
- return nil, fmt.Errorf("could not retrieve block with hash %s. Cause: %w", blockHash, err)
- }
- return blockHeader, nil
-}
-
-// GetBatch returns the batch with the given hash. Unlike `EthereumAPI.GetBlockByHash()`, returns the full
-// `ExtBatch`, and not just the header.
-func (api *TenScanAPI) GetBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error) {
- return api.host.DB().GetBatch(batchHash)
-}
-
-// GetBatchForTx returns the batch containing a given transaction hash.
-func (api *TenScanAPI) GetBatchForTx(txHash gethcommon.Hash) (*common.ExtBatch, error) {
- batchNumber, err := api.host.DB().GetBatchNumber(txHash)
- if err != nil {
- return nil, fmt.Errorf("could not retrieve batch containing a transaction with hash %s. Cause: %w", txHash, err)
- }
-
- batchHash, err := api.host.DB().GetBatchHash(batchNumber)
- if err != nil {
- return nil, fmt.Errorf("could not retrieve batch with number %d. Cause: %w", batchNumber.Int64(), err)
- }
-
- return api.GetBatch(*batchHash)
-}
-
-// GetLatestTransactions returns the hashes of the latest `num` transactions confirmed in batches (or all the
-// transactions if there are less than `num` total transactions).
-func (api *TenScanAPI) GetLatestTransactions(num int) ([]gethcommon.Hash, error) {
- // We prevent someone from requesting an excessive amount of transactions.
- if num > txLimit {
- return nil, fmt.Errorf("cannot request more than 100 latest transactions")
- }
-
- headBatchHeader, err := api.host.DB().GetHeadBatchHeader()
- if err != nil {
- return nil, err
- }
- currentBatchHash := headBatchHeader.Hash()
-
- // We walk the chain until we've collected the requested number of transactions.
- var txHashes []gethcommon.Hash
- for {
- batchHeader, err := api.host.DB().GetBatchHeader(currentBatchHash)
- if err != nil {
- return nil, fmt.Errorf("could not retrieve batch for hash %s. Cause: %w", currentBatchHash, err)
- }
-
- batchTxHashes, err := api.host.DB().GetBatchTxs(batchHeader.Hash())
- if err != nil {
- return nil, fmt.Errorf("could not retrieve transaction hashes for batch hash %s. Cause: %w", currentBatchHash, err)
- }
-
- for _, txHash := range batchTxHashes {
- txHashes = append(txHashes, txHash)
- if len(txHashes) >= num {
- return txHashes, nil
- }
- }
-
- // If we've reached the top of the chain, we stop walking.
- if batchHeader.Number.Uint64() == common.L2GenesisHeight {
- break
- }
- currentBatchHash = batchHeader.ParentHash
- }
-
- return txHashes, nil
-}
-
-// GetTotalTransactions returns the number of recorded transactions on the network.
-func (api *TenScanAPI) GetTotalTransactions() (*big.Int, error) {
- return api.host.DB().GetTotalTransactions()
-}
-
-// Attestation returns the node's attestation details.
-func (api *TenScanAPI) Attestation() (*common.AttestationReport, error) {
- return api.host.EnclaveClient().Attestation()
-}
diff --git a/go/host/rpc/clientapi/client_api_scan.go b/go/host/rpc/clientapi/client_api_scan.go
index 550696b104..c1124b5156 100644
--- a/go/host/rpc/clientapi/client_api_scan.go
+++ b/go/host/rpc/clientapi/client_api_scan.go
@@ -3,11 +3,11 @@ package clientapi
import (
"math/big"
+ gethcommon "github.com/ethereum/go-ethereum/common"
+
"github.com/ethereum/go-ethereum/log"
"github.com/ten-protocol/go-ten/go/common"
"github.com/ten-protocol/go-ten/go/common/host"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
)
// ScanAPI implements metric specific RPC endpoints
@@ -28,27 +28,62 @@ func (s *ScanAPI) GetTotalContractCount() (*big.Int, error) {
return s.host.EnclaveClient().GetTotalContractCount()
}
-// GetTotalTransactionCount returns the number of recorded transactions on the network.
+// GetTotalTransactionCount returns the total number of transactions recorded on the network.
func (s *ScanAPI) GetTotalTransactionCount() (*big.Int, error) {
- return s.host.DB().GetTotalTransactions()
+ return s.host.Storage().FetchTotalTxCount()
}
-func (s *ScanAPI) GetLatestRollupHeader() (*common.RollupHeader, error) {
- return s.host.DB().GetTipRollupHeader()
+// GetBatchListingNew returns a paginated list of batches
+func (s *ScanAPI) GetBatchListingNew(pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
+ return s.host.Storage().FetchBatchListing(pagination)
}
-func (s *ScanAPI) GetPublicTransactionData(pagination *common.QueryPagination) (*common.TransactionListingResponse, error) {
- return s.host.EnclaveClient().GetPublicTransactionData(pagination)
+// GetBatchListing returns the deprecated version of batch listing
+func (s *ScanAPI) GetBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponseDeprecated, error) {
+ return s.host.Storage().FetchBatchListingDeprecated(pagination)
+}
+
+// GetPublicBatchByHash returns the public batch
+func (s *ScanAPI) GetPublicBatchByHash(hash common.L2BatchHash) (*common.PublicBatch, error) {
+ return s.host.Storage().FetchPublicBatchByHash(hash)
+}
+
+// GetBatch returns the `ExtBatch` with the given hash
+func (s *ScanAPI) GetBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error) {
+ return s.host.Storage().FetchBatch(batchHash)
}
-func (s *ScanAPI) GetBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
- return s.host.DB().GetBatchListing(pagination)
+// GetBatchByTx returns the `ExtBatch` with the given tx hash
+func (s *ScanAPI) GetBatchByTx(txHash gethcommon.Hash) (*common.ExtBatch, error) {
+ return s.host.Storage().FetchBatchByTx(txHash)
}
-func (s *ScanAPI) GetBatchByHash(hash gethcommon.Hash) (*common.ExtBatch, error) {
- return s.host.DB().GetBatch(hash)
+// GetLatestBatch returns the head `BatchHeader`
+func (s *ScanAPI) GetLatestBatch() (*common.BatchHeader, error) {
+ return s.host.Storage().FetchLatestBatch()
+}
+
+// GetBatchByHeight returns the `BatchHeader` with the given height
+func (s *ScanAPI) GetBatchByHeight(height *big.Int) (*common.BatchHeader, error) {
+ return s.host.Storage().FetchBatchHeaderByHeight(height)
+}
+
+// GetRollupListing returns a paginated list of Rollups
+func (s *ScanAPI) GetRollupListing(pagination *common.QueryPagination) (*common.RollupListingResponse, error) {
+ return s.host.Storage().FetchRollupListing(pagination)
+}
+
+// GetLatestRollupHeader returns the head `RollupHeader`
+func (s *ScanAPI) GetLatestRollupHeader() (*common.RollupHeader, error) {
+ return s.host.Storage().FetchLatestRollupHeader()
+}
+
+// GetPublicTransactionData returns a paginated list of transaction data
+func (s *ScanAPI) GetPublicTransactionData(pagination *common.QueryPagination) (*common.TransactionListingResponse, error) {
+ return s.host.EnclaveClient().GetPublicTransactionData(pagination)
}
+// GetBlockListing returns a paginated list of blocks that include rollups
func (s *ScanAPI) GetBlockListing(pagination *common.QueryPagination) (*common.BlockListingResponse, error) {
- return s.host.DB().GetBlockListing(pagination)
+ return s.host.Storage().FetchBlockListing(pagination)
}
diff --git a/go/host/storage/db_init.go b/go/host/storage/db_init.go
new file mode 100644
index 0000000000..2cef13ec09
--- /dev/null
+++ b/go/host/storage/db_init.go
@@ -0,0 +1,47 @@
+package storage
+
+import (
+ "fmt"
+
+ "github.com/ten-protocol/go-ten/go/host/storage/hostdb"
+ "github.com/ten-protocol/go-ten/go/host/storage/init/sqlite"
+
+ gethlog "github.com/ethereum/go-ethereum/log"
+ "github.com/ten-protocol/go-ten/go/config"
+ "github.com/ten-protocol/go-ten/go/host/storage/init/postgres"
+)
+
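+// HOST is the prefix for the per-host database name: each host gets its own
+// logical database named HOST_<hostID>, so several hosts can share one server.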
+const HOST = "HOST_"
+
+// CreateDBFromConfig creates the appropriate HostDB instance for the given host config
+func CreateDBFromConfig(cfg *config.HostConfig, logger gethlog.Logger) (hostdb.HostDB, error) {
+ dbName := HOST + cfg.ID.String()
+ if err := validateDBConf(cfg); err != nil {
+ return nil, err
+ }
+ if cfg.UseInMemoryDB {
+ logger.Info("UseInMemoryDB flag is true, data will not be persisted. Creating in-memory database...")
+ sqliteDB, err := sqlite.CreateTemporarySQLiteHostDB(dbName, "mode=memory&cache=shared&_foreign_keys=on")
+ if err != nil {
+ return nil, fmt.Errorf("could not create in memory sqlite DB: %w", err)
+ }
+ return hostdb.NewHostDB(sqliteDB, hostdb.SQLiteSQLStatements())
+ }
+ logger.Info(fmt.Sprintf("Preparing Postgres DB connection to %s...", cfg.PostgresDBHost))
+ postgresDB, err := postgres.CreatePostgresDBConnection(cfg.PostgresDBHost, dbName)
+ if err != nil {
+ return nil, fmt.Errorf("could not create postresql connection: %w", err)
+ }
+ return hostdb.NewHostDB(postgresDB, hostdb.PostgresSQLStatements())
+}
+
+// validateDBConf performs high-level checks that the DB configuration is valid
+func validateDBConf(cfg *config.HostConfig) error {
+ if cfg.UseInMemoryDB && cfg.PostgresDBHost != "" {
+ return fmt.Errorf("invalid db config, useInMemoryDB=true so MariaDB host not expected, but PostgresDBHost=%s", cfg.PostgresDBHost)
+ }
+ if cfg.SqliteDBPath != "" && cfg.UseInMemoryDB {
+ return fmt.Errorf("useInMemoryDB=true so sqlite database will not be used and no path is needed, but sqliteDBPath=%s", cfg.SqliteDBPath)
+ }
+ return nil
+}
diff --git a/go/host/storage/hostdb/batch.go b/go/host/storage/hostdb/batch.go
new file mode 100644
index 0000000000..1eb0e7d740
--- /dev/null
+++ b/go/host/storage/hostdb/batch.go
@@ -0,0 +1,456 @@
+package hostdb
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "math/big"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ten-protocol/go-ten/go/common"
+ "github.com/ten-protocol/go-ten/go/common/errutil"
+)
+
+const (
+ selectTxCount = "SELECT total FROM transaction_count WHERE id = 1"
+ selectBatch = "SELECT sequence, full_hash, hash, height, ext_batch FROM batch_host"
+ selectExtBatch = "SELECT ext_batch FROM batch_host"
+ selectLatestBatch = "SELECT sequence, full_hash, hash, height, ext_batch FROM batch_host ORDER BY sequence DESC LIMIT 1"
+ selectTxsAndBatch = "SELECT t.hash FROM transactions_host t JOIN batch_host b ON t.b_sequence = b.sequence WHERE b.full_hash = "
+ selectBatchSeqByTx = "SELECT b_sequence FROM transactions_host WHERE hash = "
+ selectTxBySeq = "SELECT hash FROM transactions_host WHERE b_sequence = "
+)
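+
+// Most of the statements above are deliberately incomplete: call sites append a
+// WHERE condition using the dialect-specific placeholder (e.g. "?" for SQLite)
+// exposed via db.GetSQLStatement().Placeholder before executing them.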
+
+// AddBatch adds a batch and its header to the DB
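+// Writes are only staged on the supplied dbTransaction; they hit the DB once the
+// caller commits the transaction (see the dbtx.Write() calls in the tests).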
+func AddBatch(dbtx *dbTransaction, statements *SQLStatements, batch *common.ExtBatch) error {
+ extBatch, err := rlp.EncodeToBytes(batch)
+ if err != nil {
+ return fmt.Errorf("could not encode L2 transactions: %w", err)
+ }
+
+ _, err = dbtx.tx.Exec(statements.InsertBatch,
+ batch.SeqNo().Uint64(), // sequence
+ batch.Hash(), // full hash
+ truncTo16(batch.Hash()), // shortened hash
+ batch.Header.Number.Uint64(), // height
+ extBatch, // ext_batch
+ )
+ if err != nil {
+ return fmt.Errorf("host failed to insert batch: %w", err)
+ }
+
+ if len(batch.TxHashes) > 0 {
+ for _, transaction := range batch.TxHashes {
+ _, err = dbtx.tx.Exec(statements.InsertTransactions, transaction.Bytes(), batch.SeqNo().Uint64())
+ if err != nil {
+ return fmt.Errorf("failed to insert transaction with hash: %d", err)
+ }
+ }
+ }
+
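+ // transaction_count keeps a single running-total row (id = 1), so the
+ // network-wide tx count can be served without a COUNT(*) over all rows.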
+ var currentTotal int
+ err = dbtx.tx.QueryRow(selectTxCount).Scan(&currentTotal)
+ if err != nil {
+ return fmt.Errorf("failed to query transaction count: %w", err)
+ }
+
+ newTotal := currentTotal + len(batch.TxHashes)
+ _, err = dbtx.tx.Exec(statements.InsertTxCount, 1, newTotal)
+ if err != nil {
+ return fmt.Errorf("failed to update transaction count: %w", err)
+ }
+
+ return nil
+}
+
+// GetBatchListing returns latest batches given a pagination.
+// For example, page 0, size 10 will return the latest 10 batches.
+func GetBatchListing(db HostDB, pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
+ headBatch, err := GetCurrentHeadBatch(db.GetSQLDB())
+ if err != nil {
+ return nil, err
+ }
+ batchesFrom := headBatch.SequencerOrderNo.Uint64() - pagination.Offset
+ batchesTo := int(batchesFrom) - int(pagination.Size) + 1
+
+ if batchesTo <= 0 {
+ batchesTo = 1
+ }
+
+ var batches []common.PublicBatch
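+ // walk the sequence numbers down from the head batch, tolerating gaps (ErrNotFound)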
+ for i := batchesFrom; i >= uint64(batchesTo); i-- {
+ batch, err := GetPublicBatchBySequenceNumber(db, i)
+ if err != nil && !errors.Is(err, errutil.ErrNotFound) {
+ return nil, err
+ }
+ if batch != nil {
+ batches = append(batches, *batch)
+ }
+ }
+
+ return &common.BatchListingResponse{
+ BatchesData: batches,
+ Total: uint64(len(batches)),
+ }, nil
+}
+
+// GetBatchListingDeprecated returns latest batches given a pagination.
+// For example, page 0, size 10 will return the latest 10 batches.
+func GetBatchListingDeprecated(db HostDB, pagination *common.QueryPagination) (*common.BatchListingResponseDeprecated, error) {
+ headBatch, err := GetCurrentHeadBatch(db.GetSQLDB())
+ if err != nil {
+ return nil, err
+ }
+ batchesFrom := headBatch.SequencerOrderNo.Uint64() - pagination.Offset
+ batchesTo := int(batchesFrom) - int(pagination.Size) + 1
+
+ if batchesTo <= 0 {
+ batchesTo = 1
+ }
+
+ var batches []common.PublicBatchDeprecated
+ var txHashes []common.TxHash
+ for i := batchesFrom; i >= uint64(batchesTo); i-- {
+   batch, err := GetPublicBatchBySequenceNumber(db, i)
+   if err != nil && !errors.Is(err, errutil.ErrNotFound) {
+     return nil, fmt.Errorf("failed to get batch by seq no: %w", err)
+   }
+   // skip sequence numbers for which no batch was stored
+   if batch == nil || batch.Header == nil {
+     continue
+   }
+
+   txHashes, err = GetTxsBySequenceNumber(db, batch.Header.SequencerOrderNo.Uint64())
+   if err != nil {
+     return nil, fmt.Errorf("failed to get tx hashes by seq no: %w", err)
+   }
+   batches = append(batches, common.PublicBatchDeprecated{
+     BatchHeader: *batch.Header,
+     TxHashes:    txHashes,
+   })
+ }
+
+ return &common.BatchListingResponseDeprecated{
+ BatchesData: batches,
+ Total: uint64(len(batches)),
+ }, nil
+}
+
+// GetPublicBatchBySequenceNumber returns the batch with the given sequence number.
+func GetPublicBatchBySequenceNumber(db HostDB, seqNo uint64) (*common.PublicBatch, error) {
+ whereQuery := " WHERE sequence=" + db.GetSQLStatement().Placeholder
+ return fetchPublicBatch(db.GetSQLDB(), whereQuery, seqNo)
+}
+
+// GetTxsBySequenceNumber returns the transaction hashes with sequence number.
+func GetTxsBySequenceNumber(db HostDB, seqNo uint64) ([]common.TxHash, error) {
+ return fetchTx(db, seqNo)
+}
+
+// GetBatchBySequenceNumber returns the ext batch for a given sequence number.
+func GetBatchBySequenceNumber(db HostDB, seqNo uint64) (*common.ExtBatch, error) {
+ whereQuery := " WHERE sequence=" + db.GetSQLStatement().Placeholder
+ return fetchFullBatch(db.GetSQLDB(), whereQuery, seqNo)
+}
+
+// GetCurrentHeadBatch retrieves the current head batch with the largest sequence number (or height).
+func GetCurrentHeadBatch(db *sql.DB) (*common.PublicBatch, error) {
+ return fetchHeadBatch(db)
+}
+
+// GetBatchHeader returns the batch header given the hash.
+func GetBatchHeader(db HostDB, hash gethcommon.Hash) (*common.BatchHeader, error) {
+ whereQuery := " WHERE hash=" + db.GetSQLStatement().Placeholder
+ return fetchBatchHeader(db.GetSQLDB(), whereQuery, truncTo16(hash))
+}
+
+// GetBatchHashByNumber returns the hash of a batch given its number.
+func GetBatchHashByNumber(db HostDB, number *big.Int) (*gethcommon.Hash, error) {
+ whereQuery := " WHERE height=" + db.GetSQLStatement().Placeholder
+ batch, err := fetchBatchHeader(db.GetSQLDB(), whereQuery, number.Uint64())
+ if err != nil {
+ return nil, err
+ }
+ l2BatchHash := batch.Hash()
+ return &l2BatchHash, nil
+}
+
+// GetHeadBatchHeader returns the latest batch header.
+func GetHeadBatchHeader(db *sql.DB) (*common.BatchHeader, error) {
+ batch, err := fetchHeadBatch(db)
+ if err != nil {
+ return nil, err
+ }
+ return batch.Header, nil
+}
+
+// GetBatchNumber returns the height of the batch containing the given transaction hash.
+func GetBatchNumber(db HostDB, txHash gethcommon.Hash) (*big.Int, error) {
+ txBytes := txHash.Bytes()
+ batchHeight, err := fetchBatchNumber(db, txBytes)
+ if err != nil {
+ return nil, err
+ }
+ return batchHeight, nil
+}
+
+// GetBatchTxs returns the transaction hashes of the batch with the given hash.
+func GetBatchTxs(db HostDB, batchHash gethcommon.Hash) ([]gethcommon.Hash, error) {
+ query := selectTxsAndBatch + db.GetSQLStatement().Placeholder
+ rows, err := db.GetSQLDB().Query(query, batchHash)
+ if err != nil {
+ return nil, fmt.Errorf("query execution failed: %w", err)
+ }
+ defer rows.Close()
+
+ var transactions []gethcommon.Hash
+ for rows.Next() {
+ var txHashBytes []byte
+ if err := rows.Scan(&txHashBytes); err != nil {
+ return nil, fmt.Errorf("failed to scan transaction hash: %w", err)
+ }
+ txHash := gethcommon.BytesToHash(txHashBytes)
+ transactions = append(transactions, txHash)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error looping through transacion rows: %w", err)
+ }
+
+ return transactions, nil
+}
+
+// GetTotalTxCount returns the total number of batched transactions.
+func GetTotalTxCount(db *sql.DB) (*big.Int, error) {
+ var totalCount int
+ err := db.QueryRow(selectTxCount).Scan(&totalCount)
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve total transaction count: %w", err)
+ }
+ return big.NewInt(int64(totalCount)), nil
+}
+
+// GetPublicBatch returns the batch with the given hash.
+func GetPublicBatch(db HostDB, hash common.L2BatchHash) (*common.PublicBatch, error) {
+ whereQuery := " WHERE b.hash=" + db.GetSQLStatement().Placeholder
+ return fetchPublicBatch(db.GetSQLDB(), whereQuery, truncTo16(hash))
+}
+
+// GetBatchByTx returns the batch containing the transaction with the given hash.
+func GetBatchByTx(db HostDB, txHash gethcommon.Hash) (*common.ExtBatch, error) {
+ var seqNo uint64
+ query := selectBatchSeqByTx + db.GetSQLStatement().Placeholder
+ err := db.GetSQLDB().QueryRow(query, txHash).Scan(&seqNo)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, err
+ }
+ return GetBatchBySequenceNumber(db, seqNo)
+}
+
+// GetBatchByHash returns the batch with the given hash.
+func GetBatchByHash(db HostDB, hash common.L2BatchHash) (*common.ExtBatch, error) {
+ whereQuery := " WHERE hash=" + db.GetSQLStatement().Placeholder
+ return fetchFullBatch(db.GetSQLDB(), whereQuery, truncTo16(hash))
+}
+
+// GetLatestBatch returns the head batch header
+func GetLatestBatch(db *sql.DB) (*common.BatchHeader, error) {
+ headBatch, err := fetchHeadBatch(db)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch head batch: %w", err)
+ }
+ return headBatch.Header, nil
+}
+
+// GetBatchByHeight returns the batch header given the height
+func GetBatchByHeight(db HostDB, height *big.Int) (*common.BatchHeader, error) {
+ whereQuery := " WHERE height=" + db.GetSQLStatement().Placeholder
+ headBatch, err := fetchBatchHeader(db.GetSQLDB(), whereQuery, height.Uint64())
+ if err != nil {
+ return nil, fmt.Errorf("failed to batch header: %w", err)
+ }
+ return headBatch, nil
+}
+
+func fetchBatchHeader(db *sql.DB, whereQuery string, args ...any) (*common.BatchHeader, error) {
+ var extBatch []byte
+ query := selectExtBatch + " " + whereQuery
+ var err error
+ if len(args) > 0 {
+ err = db.QueryRow(query, args...).Scan(&extBatch)
+ } else {
+ err = db.QueryRow(query).Scan(&extBatch)
+ }
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, err
+ }
+ // Decode batch
+ var b common.ExtBatch
+ err = rlp.DecodeBytes(extBatch, &b)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode batch header. Cause: %w", err)
+ }
+ return b.Header, nil
+}
+
+func fetchBatchNumber(db HostDB, args ...any) (*big.Int, error) {
+ var seqNo uint64
+ query := selectBatchSeqByTx + db.GetSQLStatement().Placeholder
+ var err error
+ if len(args) > 0 {
+ err = db.GetSQLDB().QueryRow(query, args...).Scan(&seqNo)
+ } else {
+ err = db.GetSQLDB().QueryRow(query).Scan(&seqNo)
+ }
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, err
+ }
+ batch, err := GetPublicBatchBySequenceNumber(db, seqNo)
+ if err != nil {
+ return nil, fmt.Errorf("could not fetch batch by seq no. Cause: %w", err)
+ }
+ return batch.Height, nil
+}
+
+func fetchPublicBatch(db *sql.DB, whereQuery string, args ...any) (*common.PublicBatch, error) {
+ var sequenceInt64 uint64
+ var fullHash common.TxHash
+ var hash []byte
+ var heightInt64 int
+ var extBatch []byte
+
+ query := selectBatch + " " + whereQuery
+
+ var err error
+ if len(args) > 0 {
+ err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+ } else {
+ err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+ }
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, err
+ }
+ var b common.ExtBatch
+ err = rlp.DecodeBytes(extBatch, &b)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode ext batch. Cause: %w", err)
+ }
+
+ batch := &common.PublicBatch{
+ SequencerOrderNo: new(big.Int).SetInt64(int64(sequenceInt64)),
+ Hash: hash,
+ FullHash: fullHash,
+ Height: new(big.Int).SetInt64(int64(heightInt64)),
+ TxCount: new(big.Int).SetInt64(int64(len(b.TxHashes))),
+ Header: b.Header,
+ EncryptedTxBlob: b.EncryptedTxBlob,
+ }
+
+ return batch, nil
+}
+
+func fetchFullBatch(db *sql.DB, whereQuery string, args ...any) (*common.ExtBatch, error) {
+ var sequenceInt64 uint64
+ var fullHash common.TxHash
+ var hash []byte
+ var heightInt64 int
+ var extBatch []byte
+
+ query := selectBatch + whereQuery
+
+ var err error
+ if len(args) > 0 {
+ err = db.QueryRow(query, args...).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+ } else {
+ err = db.QueryRow(query).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+ }
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, err
+ }
+ var b common.ExtBatch
+ err = rlp.DecodeBytes(extBatch, &b)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode ext batch. Cause: %w", err)
+ }
+
+ return &b, nil
+}
+
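+// fetchHeadBatch relies on selectLatestBatch (ORDER BY sequence DESC LIMIT 1):
+// the head batch is simply the row with the highest sequence number.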
+func fetchHeadBatch(db *sql.DB) (*common.PublicBatch, error) {
+ var sequenceInt64 int
+ var fullHash gethcommon.Hash
+ var hash []byte
+ var heightInt64 int
+ var extBatch []byte
+
+ err := db.QueryRow(selectLatestBatch).Scan(&sequenceInt64, &fullHash, &hash, &heightInt64, &extBatch)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+ return nil, fmt.Errorf("failed to fetch current head batch: %w", err)
+ }
+
+ var b common.ExtBatch
+ err = rlp.DecodeBytes(extBatch, &b)
+ if err != nil {
+ return nil, fmt.Errorf("could not decode ext batch. Cause: %w", err)
+ }
+
+ batch := &common.PublicBatch{
+ SequencerOrderNo: new(big.Int).SetInt64(int64(sequenceInt64)),
+ Hash: hash,
+ FullHash: fullHash,
+ Height: new(big.Int).SetInt64(int64(heightInt64)),
+ TxCount: new(big.Int).SetInt64(int64(len(b.TxHashes))),
+ Header: b.Header,
+ EncryptedTxBlob: b.EncryptedTxBlob,
+ }
+
+ return batch, nil
+}
+
+func fetchTx(db HostDB, seqNo uint64) ([]common.TxHash, error) {
+ query := selectTxBySeq + db.GetSQLStatement().Placeholder
+ rows, err := db.GetSQLDB().Query(query, seqNo)
+ if err != nil {
+ return nil, fmt.Errorf("query execution for select txs failed: %w", err)
+ }
+ defer rows.Close()
+
+ var transactions []gethcommon.Hash
+ for rows.Next() {
+ var txHashBytes []byte
+ if err := rows.Scan(&txHashBytes); err != nil {
+ return nil, fmt.Errorf("failed to scan transaction hash: %w", err)
+ }
+ txHash := gethcommon.BytesToHash(txHashBytes)
+ transactions = append(transactions, txHash)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error looping through transacion rows: %w", err)
+ }
+
+ return transactions, nil
+}
diff --git a/go/host/storage/hostdb/batch_test.go b/go/host/storage/hostdb/batch_test.go
new file mode 100644
index 0000000000..86c861a2c9
--- /dev/null
+++ b/go/host/storage/hostdb/batch_test.go
@@ -0,0 +1,445 @@
+package hostdb
+
+import (
+ "errors"
+ "math/big"
+ "testing"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ _ "github.com/mattn/go-sqlite3"
+ "github.com/ten-protocol/go-ten/go/common/errutil"
+
+ "github.com/ten-protocol/go-ten/go/common"
+)
+
+// An arbitrary number to put in the header, to check that the header is retrieved correctly from the DB.
+
+func TestCanStoreAndRetrieveBatchHeader(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ batch := createBatch(batchNumber, []common.L2TxHash{})
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batch)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+ batchHeader, err := GetBatchHeader(db, batch.Header.Hash())
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
+ }
+ if batchHeader.Number.Cmp(batch.Header.Number) != 0 {
+ t.Errorf("batch header was not stored correctly")
+ }
+}
+
+func TestUnknownBatchHeaderReturnsNotFound(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ header := types.Header{}
+
+ _, err := GetBatchHeader(db, header.Hash())
+ if !errors.Is(err, errutil.ErrNotFound) {
+ t.Errorf("did not store batch header but was able to retrieve it")
+ }
+}
+
+func TestHigherNumberBatchBecomesBatchHeader(t *testing.T) { //nolint:dupl
+ db, _ := createSQLiteDB(t)
+ batchOne := createBatch(batchNumber, []common.L2TxHash{})
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ batchTwo := createBatch(batchNumber+1, []common.L2TxHash{})
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ dbtx.Write()
+
+ batchHeader, err := GetHeadBatchHeader(db.GetSQLDB())
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
+ }
+ if batchHeader.Number.Cmp(batchTwo.Header.Number) != 0 {
+ t.Errorf("head batch was not set correctly")
+ }
+}
+
+func TestLowerNumberBatchDoesNotBecomeBatchHeader(t *testing.T) { //nolint:dupl
+ db, _ := createSQLiteDB(t)
+ dbtx, _ := db.NewDBTransaction()
+ batchOne := createBatch(batchNumber, []common.L2TxHash{})
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ batchTwo := createBatch(batchNumber-1, []common.L2TxHash{})
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ batchHeader, err := GetHeadBatchHeader(db.GetSQLDB())
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve header. Cause: %s", err)
+ }
+ if batchHeader.Number.Cmp(batchOne.Header.Number) != 0 {
+ t.Errorf("head batch was not set correctly")
+ }
+}
+
+func TestHeadBatchHeaderIsNotSetInitially(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ _, err := GetHeadBatchHeader(db.GetSQLDB())
+ if !errors.Is(err, errutil.ErrNotFound) {
+ t.Errorf("head batch was set, but no batchs had been written")
+ }
+}
+
+func TestCanRetrieveBatchHashByNumber(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ batch := createBatch(batchNumber, []common.L2TxHash{})
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batch)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ batchHash, err := GetBatchHashByNumber(db, batch.Header.Number)
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve headers hash by number. Cause: %s", err)
+ }
+ if *batchHash != batch.Header.Hash() {
+ t.Errorf("batch hash was not stored correctly against number")
+ }
+}
+
+func TestUnknownBatchNumberReturnsNotFound(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ header := types.Header{Number: big.NewInt(10)}
+
+ _, err := GetBatchHashByNumber(db, header.Number)
+ if !errors.Is(err, errutil.ErrNotFound) {
+ t.Errorf("did not store batch hash but was able to retrieve it")
+ }
+}
+
+func TestCanRetrieveBatchNumberByTxHash(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHash := gethcommon.BytesToHash([]byte("magicString"))
+ batch := createBatch(batchNumber, []common.L2TxHash{txHash})
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batch)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ extBatch, err := GetBatchByTx(db, txHash)
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve batch by transaction hash. Cause: %s", err)
+ }
+ if extBatch.Header.Number.Cmp(batch.Header.Number) != 0 {
+ t.Errorf("batch number was not stored correctly against transaction hash")
+ }
+ batchNumber, err := GetBatchNumber(db, txHash)
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve number by transaction hash. Cause: %s", err)
+ }
+ if batchNumber.Cmp(batch.Header.Number) != 0 {
+ t.Errorf("batch number was not stored correctly against transaction hash")
+ }
+}
+
+func TestUnknownBatchTxHashReturnsNotFound(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+
+ _, err := GetBatchNumber(db, gethcommon.BytesToHash([]byte("magicString")))
+ if !errors.Is(err, errutil.ErrNotFound) {
+ t.Errorf("did not store batch number but was able to retrieve it")
+ }
+}
+
+func TestCanRetrieveBatchTransactions(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHashes := []common.L2TxHash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
+ batch := createBatch(batchNumber, txHashes)
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batch)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ batchTxs, err := GetBatchTxs(db, batch.Header.Hash())
+ if err != nil {
+ t.Errorf("stored batch but could not retrieve headers transactions. Cause: %s", err)
+ }
+ if len(batchTxs) != len(txHashes) {
+ t.Errorf("batch transactions were not stored correctly")
+ }
+ for idx, batchTx := range batchTxs {
+ if batchTx != txHashes[idx] {
+ t.Errorf("batch transactions were not stored correctly")
+ }
+ }
+}
+
+func TestTransactionsForUnknownBatchReturnsNotFound(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+
+ _, err := GetBatchNumber(db, gethcommon.BytesToHash([]byte("magicString")))
+ if !errors.Is(err, errutil.ErrNotFound) {
+ t.Errorf("did not store batch number but was able to retrieve it")
+ }
+}
+
+func TestCanRetrieveTotalNumberOfTransactions(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHashesOne := []common.L2TxHash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
+ batchOne := createBatch(batchNumber, txHashesOne)
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesTwo := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringThree")), gethcommon.BytesToHash([]byte("magicStringFour"))}
+ batchTwo := createBatch(batchNumber+1, txHashesTwo)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ totalTxs, err := GetTotalTxCount(db.GetSQLDB())
+ if err != nil {
+ t.Errorf("was not able to read total number of transactions. Cause: %s", err)
+ }
+
+ if int(totalTxs.Int64()) != len(txHashesOne)+len(txHashesTwo) {
+ t.Errorf("total number of batch transactions was not stored correctly")
+ }
+}
+
+func TestGetLatestBatch(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHashesOne := []common.L2TxHash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
+ batchOne := createBatch(batchNumber, txHashesOne)
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesTwo := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringThree")), gethcommon.BytesToHash([]byte("magicStringFour"))}
+ batchTwo := createBatch(batchNumber+1, txHashesTwo)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ batch, err := GetLatestBatch(db.GetSQLDB())
+ if err != nil {
+ t.Errorf("was not able to read total number of transactions. Cause: %s", err)
+ }
+
+ if int(batch.SequencerOrderNo.Uint64()) != int(batchTwo.SeqNo().Uint64()) {
+ t.Errorf("latest batch was not retrieved correctly")
+ }
+}
+
+func TestGetBatchListing(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHashesOne := []common.L2TxHash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
+ batchOne := createBatch(batchNumber, txHashesOne)
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesTwo := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringThree")), gethcommon.BytesToHash([]byte("magicStringFour"))}
+ batchTwo := createBatch(batchNumber+1, txHashesTwo)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesThree := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringFive")), gethcommon.BytesToHash([]byte("magicStringSix"))}
+ batchThree := createBatch(batchNumber+2, txHashesThree)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchThree)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ // page 1, size 2
+ batchListing, err := GetBatchListing(db, &common.QueryPagination{Offset: 1, Size: 2})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be two elements
+ if big.NewInt(int64(batchListing.Total)).Cmp(big.NewInt(2)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // first element should be the second batch
+ if batchListing.BatchesData[0].SequencerOrderNo.Cmp(batchTwo.SeqNo()) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // page 0, size 3
+ batchListing1, err := GetBatchListing(db, &common.QueryPagination{Offset: 0, Size: 3})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // first element should be the most recent batch since they're in descending order
+ if batchListing1.BatchesData[0].SequencerOrderNo.Cmp(batchThree.SeqNo()) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(batchListing1.Total)).Cmp(big.NewInt(3)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // page 0, size 4
+ batchListing2, err := GetBatchListing(db, &common.QueryPagination{Offset: 0, Size: 4})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(batchListing2.Total)).Cmp(big.NewInt(3)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // page 5, size 1
+ batchListing3, err := GetBatchListing(db, &common.QueryPagination{Offset: 5, Size: 1})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be 0 elements
+ if big.NewInt(int64(batchListing3.Total)).Cmp(big.NewInt(0)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+}
+
+func TestGetBatchListingDeprecated(t *testing.T) {
+ db, _ := createSQLiteDB(t)
+ txHashesOne := []common.L2TxHash{gethcommon.BytesToHash([]byte("magicStringOne")), gethcommon.BytesToHash([]byte("magicStringTwo"))}
+ batchOne := createBatch(batchNumber, txHashesOne)
+ dbtx, _ := db.NewDBTransaction()
+ err := AddBatch(dbtx, db.GetSQLStatement(), &batchOne)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesTwo := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringThree")), gethcommon.BytesToHash([]byte("magicStringFour"))}
+ batchTwo := createBatch(batchNumber+1, txHashesTwo)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchTwo)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+
+ txHashesThree := []gethcommon.Hash{gethcommon.BytesToHash([]byte("magicStringFive")), gethcommon.BytesToHash([]byte("magicStringSix"))}
+ batchThree := createBatch(batchNumber+2, txHashesThree)
+
+ err = AddBatch(dbtx, db.GetSQLStatement(), &batchThree)
+ if err != nil {
+ t.Errorf("could not store batch. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ // page 1, size 2
+ batchListing, err := GetBatchListingDeprecated(db, &common.QueryPagination{Offset: 1, Size: 2})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be two elements
+ if big.NewInt(int64(batchListing.Total)).Cmp(big.NewInt(2)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // first element should be the second batch
+ if batchListing.BatchesData[0].BatchHeader.SequencerOrderNo.Cmp(batchTwo.SeqNo()) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // page 0, size 3
+ batchListing1, err := GetBatchListingDeprecated(db, &common.QueryPagination{Offset: 0, Size: 3})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // first element should be the most recent batch since they're in descending order
+ if batchListing1.BatchesData[0].BatchHeader.SequencerOrderNo.Cmp(batchThree.SeqNo()) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(batchListing1.Total)).Cmp(big.NewInt(3)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // page 0, size 4
+ batchListing2, err := GetBatchListingDeprecated(db, &common.QueryPagination{Offset: 0, Size: 4})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(batchListing2.Total)).Cmp(big.NewInt(3)) != 0 {
+		t.Errorf("batch listing was not paginated correctly")
+ }
+
+ // page 5, size 1
+	batchListing3, err := GetBatchListingDeprecated(db, &common.QueryPagination{Offset: 5, Size: 1})
+ if err != nil {
+ t.Errorf("could not get batch listing. Cause: %s", err)
+ }
+
+ // should be 0 elements
+	if big.NewInt(int64(batchListing3.Total)).Cmp(big.NewInt(0)) != 0 {
+ t.Errorf("batch listing was not paginated correctly")
+ }
+}
+
+func createBatch(batchNum int64, txHashes []common.L2BatchHash) common.ExtBatch {
+ header := common.BatchHeader{
+ SequencerOrderNo: big.NewInt(batchNum),
+ Number: big.NewInt(batchNum),
+ }
+ batch := common.ExtBatch{
+ Header: &header,
+ TxHashes: txHashes,
+ }
+
+ return batch
+}
diff --git a/go/host/storage/hostdb/block.go b/go/host/storage/hostdb/block.go
new file mode 100644
index 0000000000..b1b01addda
--- /dev/null
+++ b/go/host/storage/hostdb/block.go
@@ -0,0 +1,75 @@
+package hostdb
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ten-protocol/go-ten/go/common"
+)
+
+// AddBlock stores a block header in the host DB, together with the hash of the rollup the block contains
+func AddBlock(dbtx *dbTransaction, statements *SQLStatements, b *types.Header, rollupHash common.L2RollupHash) error {
+ header, err := rlp.EncodeToBytes(b)
+ if err != nil {
+ return fmt.Errorf("could not encode block header. Cause: %w", err)
+ }
+
+ r, err := rlp.EncodeToBytes(rollupHash)
+ if err != nil {
+		return fmt.Errorf("could not encode rollup hash: %w", err)
+ }
+
+ _, err = dbtx.tx.Exec(statements.InsertBlock,
+ b.Hash(), // hash
+ header, // l1 block header
+ r, // rollup hash
+ )
+ if err != nil {
+ return fmt.Errorf("could not insert block. Cause: %w", err)
+ }
+
+ return nil
+}
+
+// GetBlockListing returns a paginated list of blocks, most recently added first
+func GetBlockListing(db HostDB, pagination *common.QueryPagination) (*common.BlockListingResponse, error) {
+ rows, err := db.GetSQLDB().Query(db.GetSQLStatement().SelectBlocks, pagination.Size, pagination.Offset)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var blocks []common.PublicBlock
+
+ for rows.Next() {
+ var id int
+ var hash, header, rollupHash []byte
+
+ err = rows.Scan(&id, &hash, &header, &rollupHash)
+ if err != nil {
+ return nil, err
+ }
+
+ blockHeader := new(types.Header)
+ if err := rlp.DecodeBytes(header, blockHeader); err != nil {
+ return nil, fmt.Errorf("could not decode block header. Cause: %w", err)
+ }
+ r := new(common.L2RollupHash)
+ if err := rlp.DecodeBytes(rollupHash, r); err != nil {
+ return nil, fmt.Errorf("could not decode rollup hash. Cause: %w", err)
+ }
+ block := common.PublicBlock{
+ BlockHeader: *blockHeader,
+ RollupHash: *r,
+ }
+ blocks = append(blocks, block)
+ }
+ if err = rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return &common.BlockListingResponse{
+ BlocksData: blocks,
+ Total: uint64(len(blocks)),
+ }, nil
+}
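+
+// A minimal usage sketch (illustrative only, assuming a HostDB built via NewHostDB):
+//
+//	listing, err := GetBlockListing(db, &common.QueryPagination{Offset: 0, Size: 10})
+//	if err != nil {
+//		return err
+//	}
+//	// listing.BlocksData then holds up to 10 of the most recently added blocks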
diff --git a/go/host/storage/hostdb/hostdb.go b/go/host/storage/hostdb/hostdb.go
new file mode 100644
index 0000000000..d63e63e1e8
--- /dev/null
+++ b/go/host/storage/hostdb/hostdb.go
@@ -0,0 +1,61 @@
+package hostdb
+
+import (
+ "database/sql"
+ "fmt"
+)
+
+type HostDB interface {
+ GetSQLDB() *sql.DB
+ NewDBTransaction() (*dbTransaction, error)
+ GetSQLStatement() *SQLStatements
+}
+
+type hostDB struct {
+ sqldb *sql.DB
+ statements *SQLStatements
+}
+
+func (db *hostDB) GetSQLStatement() *SQLStatements {
+ return db.statements
+}
+
+func NewHostDB(db *sql.DB, statements *SQLStatements) (HostDB, error) {
+ return &hostDB{
+ sqldb: db,
+ statements: statements,
+ }, nil
+}
+
+func (db *hostDB) GetSQLDB() *sql.DB {
+ return db.sqldb
+}
+
+func (db *hostDB) NewDBTransaction() (*dbTransaction, error) {
+ tx, err := db.sqldb.Begin()
+ if err != nil {
+ return nil, fmt.Errorf("failed to begin host db transaction. Cause: %w", err)
+ }
+
+ return &dbTransaction{
+ tx: tx,
+ }, nil
+}
+
+func (db *hostDB) Close() error {
+ if err := db.sqldb.Close(); err != nil {
+ return fmt.Errorf("failed to close host sql db - %w", err)
+ }
+ return nil
+}
+
+type dbTransaction struct {
+ tx *sql.Tx
+}
+
+func (b *dbTransaction) Write() error {
+ if err := b.tx.Commit(); err != nil {
+ return fmt.Errorf("failed to commit host db transaction. Cause: %w", err)
+ }
+ return nil
+}
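+
+// The intended write pattern is (a sketch; error handling elided for brevity):
+//
+//	dbtx, _ := db.NewDBTransaction()
+//	_ = AddBatch(dbtx, db.GetSQLStatement(), batch) // queue work on the transaction
+//	_ = dbtx.Write()                                // commits the underlying sql.Tx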
diff --git a/go/host/storage/hostdb/rollup.go b/go/host/storage/hostdb/rollup.go
new file mode 100644
index 0000000000..6310c615bd
--- /dev/null
+++ b/go/host/storage/hostdb/rollup.go
@@ -0,0 +1,161 @@
+package hostdb
+
+import (
+ "database/sql"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/pkg/errors"
+ "github.com/ten-protocol/go-ten/go/common"
+ "github.com/ten-protocol/go-ten/go/common/errutil"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+)
+
+const (
+	selectExtRollup    = "SELECT ext_rollup FROM rollup_host r"
+ selectLatestRollup = "SELECT ext_rollup FROM rollup_host ORDER BY time_stamp DESC LIMIT 1"
+)
+
+// AddRollup adds a rollup to the DB
+func AddRollup(dbtx *dbTransaction, statements *SQLStatements, rollup *common.ExtRollup, metadata *common.PublicRollupMetadata, block *common.L1Block) error {
+ extRollup, err := rlp.EncodeToBytes(rollup)
+ if err != nil {
+ return fmt.Errorf("could not encode rollup: %w", err)
+ }
+ _, err = dbtx.tx.Exec(statements.InsertRollup,
+ truncTo16(rollup.Header.Hash()), // short hash
+ metadata.FirstBatchSequence.Uint64(), // first batch sequence
+ rollup.Header.LastBatchSeqNo, // last batch sequence
+ metadata.StartTime, // timestamp
+ extRollup, // rollup blob
+ block.Hash(), // l1 block hash
+ )
+ if err != nil {
+ return fmt.Errorf("could not insert rollup. Cause: %w", err)
+ }
+ return nil
+}
+
+// GetRollupListing returns the latest rollups for the given pagination.
+// For example, offset 1 with size 10 returns the latest rollups 11-20.
+func GetRollupListing(db HostDB, pagination *common.QueryPagination) (*common.RollupListingResponse, error) {
+ rows, err := db.GetSQLDB().Query(db.GetSQLStatement().SelectRollups, pagination.Size, pagination.Offset)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var rollups []common.PublicRollup
+
+ for rows.Next() {
+ var id, startSeq, endSeq, timeStamp int
+ var hash, extRollup, compressionBlock []byte
+
+ var rollup common.PublicRollup
+ err = rows.Scan(&id, &hash, &startSeq, &endSeq, &timeStamp, &extRollup, &compressionBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ extRollupDecoded := new(common.ExtRollup)
+ if err := rlp.DecodeBytes(extRollup, extRollupDecoded); err != nil {
+ return nil, fmt.Errorf("could not decode rollup header. Cause: %w", err)
+ }
+
+ rollup = common.PublicRollup{
+ ID: big.NewInt(int64(id)),
+ Hash: hash,
+ FirstSeq: big.NewInt(int64(startSeq)),
+ LastSeq: big.NewInt(int64(endSeq)),
+ Timestamp: uint64(timeStamp),
+ Header: extRollupDecoded.Header,
+ L1Hash: compressionBlock,
+ }
+ rollups = append(rollups, rollup)
+ }
+ if err = rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return &common.RollupListingResponse{
+ RollupsData: rollups,
+ Total: uint64(len(rollups)),
+ }, nil
+}
+
+func GetExtRollup(db HostDB, hash gethcommon.Hash) (*common.ExtRollup, error) {
+ whereQuery := " WHERE r.hash=" + db.GetSQLStatement().Placeholder
+ return fetchExtRollup(db.GetSQLDB(), whereQuery, truncTo16(hash))
+}
+
+// GetRollupHeader returns the header of the rollup with the given hash.
+func GetRollupHeader(db HostDB, hash gethcommon.Hash) (*common.RollupHeader, error) {
+ whereQuery := " WHERE r.hash=" + db.GetSQLStatement().Placeholder
+ return fetchRollupHeader(db.GetSQLDB(), whereQuery, truncTo16(hash))
+}
+
+// GetRollupHeaderByBlock returns the header of the rollup contained in the given L1 block
+func GetRollupHeaderByBlock(db HostDB, blockHash gethcommon.Hash) (*common.RollupHeader, error) {
+ whereQuery := " WHERE r.compression_block=" + db.GetSQLStatement().Placeholder
+ return fetchRollupHeader(db.GetSQLDB(), whereQuery, blockHash)
+}
+
+// GetLatestRollup returns the latest rollup ordered by timestamp
+func GetLatestRollup(db *sql.DB) (*common.RollupHeader, error) {
+ extRollup, err := fetchHeadRollup(db)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch head rollup: %w", err)
+ }
+ return extRollup.Header, nil
+}
+
+func fetchRollupHeader(db *sql.DB, whereQuery string, args ...any) (*common.RollupHeader, error) {
+ rollup, err := fetchExtRollup(db, whereQuery, args...)
+ if err != nil {
+ return nil, err
+ }
+ return rollup.Header, nil
+}
+
+func fetchExtRollup(db *sql.DB, whereQuery string, args ...any) (*common.ExtRollup, error) {
+ var rollupBlob []byte
+ query := selectExtRollup + whereQuery
+ var err error
+ if len(args) > 0 {
+ err = db.QueryRow(query, args...).Scan(&rollupBlob)
+ } else {
+ err = db.QueryRow(query).Scan(&rollupBlob)
+ }
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+		return nil, fmt.Errorf("failed to fetch rollup: %w", err)
+ }
+ var rollup common.ExtRollup
+ err = rlp.DecodeBytes(rollupBlob, &rollup)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode rollup: %w", err)
+ }
+
+ return &rollup, nil
+}
+
+func fetchHeadRollup(db *sql.DB) (*common.ExtRollup, error) {
+ var extRollup []byte
+ err := db.QueryRow(selectLatestRollup).Scan(&extRollup)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, errutil.ErrNotFound
+ }
+		return nil, fmt.Errorf("failed to fetch latest rollup: %w", err)
+ }
+ var rollup common.ExtRollup
+ err = rlp.DecodeBytes(extRollup, &rollup)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode rollup: %w", err)
+ }
+
+ return &rollup, nil
+}
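+
+// Callers typically go through GetLatestRollup, e.g. (illustrative only):
+//
+//	header, err := GetLatestRollup(db.GetSQLDB())
+//	if err == nil {
+//		fmt.Println(header.LastBatchSeqNo)
+//	}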
diff --git a/go/host/storage/hostdb/rollup_test.go b/go/host/storage/hostdb/rollup_test.go
new file mode 100644
index 0000000000..1302966637
--- /dev/null
+++ b/go/host/storage/hostdb/rollup_test.go
@@ -0,0 +1,222 @@
+package hostdb
+
+import (
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ten-protocol/go-ten/go/common"
+)
+
+func TestCanStoreAndRetrieveRollup(t *testing.T) {
+ db, err := createSQLiteDB(t)
+ if err != nil {
+ t.Fatalf("unable to initialise test db: %s", err)
+ }
+
+ metadata := createRollupMetadata(batchNumber - 10)
+ rollup := createRollup(batchNumber)
+ block := common.L1Block{}
+ dbtx, _ := db.NewDBTransaction()
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup, &metadata, &block)
+ if err != nil {
+ t.Errorf("could not store rollup. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ extRollup, err := GetExtRollup(db, rollup.Header.Hash())
+ if err != nil {
+ t.Errorf("stored rollup but could not retrieve ext rollup. Cause: %s", err)
+ }
+
+ rollupHeader, err := GetRollupHeader(db, rollup.Header.Hash())
+ if err != nil {
+ t.Errorf("stored rollup but could not retrieve header. Cause: %s", err)
+ }
+ if big.NewInt(int64(rollupHeader.LastBatchSeqNo)).Cmp(big.NewInt(batchNumber)) != 0 {
+ t.Errorf("rollup header was not stored correctly")
+ }
+
+ if rollup.Hash() != extRollup.Hash() {
+ t.Errorf("rollup was not stored correctly")
+ }
+}
+
+func TestGetRollupByBlockHash(t *testing.T) {
+ db, err := createSQLiteDB(t)
+ if err != nil {
+ t.Fatalf("unable to initialise test db: %s", err)
+ }
+
+ metadata := createRollupMetadata(batchNumber - 10)
+ rollup := createRollup(batchNumber)
+ block := common.L1Block{}
+ dbtx, _ := db.NewDBTransaction()
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup, &metadata, &block)
+ if err != nil {
+ t.Errorf("could not store rollup. Cause: %s", err)
+ }
+ dbtx.Write()
+ rollupHeader, err := GetRollupHeaderByBlock(db, block.Hash())
+ if err != nil {
+ t.Errorf("stored rollup but could not retrieve header. Cause: %s", err)
+ }
+ if big.NewInt(int64(rollupHeader.LastBatchSeqNo)).Cmp(big.NewInt(batchNumber)) != 0 {
+ t.Errorf("rollup header was not stored correctly")
+ }
+}
+
+func TestGetLatestRollup(t *testing.T) {
+ db, err := createSQLiteDB(t)
+ if err != nil {
+ t.Fatalf("unable to initialise test db: %s", err)
+ }
+
+ rollup1FirstSeq := int64(batchNumber - 10)
+ rollup1LastSeq := int64(batchNumber)
+ metadata1 := createRollupMetadata(rollup1FirstSeq)
+ rollup1 := createRollup(rollup1LastSeq)
+ block := common.L1Block{}
+ dbtx, _ := db.NewDBTransaction()
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup1, &metadata1, &block)
+ if err != nil {
+ t.Errorf("could not store rollup. Cause: %s", err)
+ }
+ // Needed to increment the timestamp
+ time.Sleep(1 * time.Second)
+
+ rollup2FirstSeq := int64(batchNumber + 1)
+ rollup2LastSeq := int64(batchNumber + 10)
+ metadata2 := createRollupMetadata(rollup2FirstSeq)
+ rollup2 := createRollup(rollup2LastSeq)
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup2, &metadata2, &block)
+ if err != nil {
+ t.Errorf("could not store rollup 2. Cause: %s", err)
+ }
+ dbtx.Write()
+
+ latestHeader, err := GetLatestRollup(db.GetSQLDB())
+ if err != nil {
+ t.Errorf("could not get latest rollup. Cause: %s", err)
+ }
+
+ if latestHeader.LastBatchSeqNo != uint64(rollup2LastSeq) {
+ t.Errorf("latest rollup was not updated correctly")
+ }
+}
+
+func TestGetRollupListing(t *testing.T) {
+ db, err := createSQLiteDB(t)
+ if err != nil {
+ t.Fatalf("unable to initialise test db: %s", err)
+ }
+
+ rollup1FirstSeq := int64(batchNumber - 10)
+ rollup1LastSeq := int64(batchNumber)
+ metadata1 := createRollupMetadata(rollup1FirstSeq)
+ rollup1 := createRollup(rollup1LastSeq)
+ block := common.L1Block{}
+ dbtx, _ := db.NewDBTransaction()
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup1, &metadata1, &block)
+ if err != nil {
+ t.Errorf("could not store rollup. Cause: %s", err)
+ }
+
+ rollup2FirstSeq := int64(batchNumber + 1)
+ rollup2LastSeq := int64(batchNumber + 10)
+ metadata2 := createRollupMetadata(rollup2FirstSeq)
+ rollup2 := createRollup(rollup2LastSeq)
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup2, &metadata2, &block)
+ if err != nil {
+ t.Errorf("could not store rollup 2. Cause: %s", err)
+ }
+
+ rollup3FirstSeq := int64(batchNumber + 11)
+ rollup3LastSeq := int64(batchNumber + 20)
+ metadata3 := createRollupMetadata(rollup3FirstSeq)
+ rollup3 := createRollup(rollup3LastSeq)
+ err = AddRollup(dbtx, db.GetSQLStatement(), &rollup3, &metadata3, &block)
+	if err != nil {
+		t.Errorf("could not store rollup 3. Cause: %s", err)
+	}
+	dbtx.Write()
+
+ // page 1, size 2
+ rollupListing, err := GetRollupListing(db, &common.QueryPagination{Offset: 1, Size: 2})
+ if err != nil {
+ t.Errorf("could not get rollup listing. Cause: %s", err)
+ }
+
+ // should be two elements
+ if big.NewInt(int64(rollupListing.Total)).Cmp(big.NewInt(2)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // First element should be the second rollup
+ if rollupListing.RollupsData[0].LastSeq.Cmp(big.NewInt(rollup2LastSeq)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+ if rollupListing.RollupsData[0].FirstSeq.Cmp(big.NewInt(rollup2FirstSeq)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // page 0, size 3
+ rollupListing1, err := GetRollupListing(db, &common.QueryPagination{Offset: 0, Size: 3})
+ if err != nil {
+ t.Errorf("could not get rollup listing. Cause: %s", err)
+ }
+
+ // First element should be the most recent rollup since they're in descending order
+ if rollupListing1.RollupsData[0].LastSeq.Cmp(big.NewInt(rollup3LastSeq)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+ if rollupListing1.RollupsData[0].FirstSeq.Cmp(big.NewInt(rollup3FirstSeq)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(rollupListing1.Total)).Cmp(big.NewInt(3)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // page 0, size 4
+ rollupListing2, err := GetRollupListing(db, &common.QueryPagination{Offset: 0, Size: 4})
+ if err != nil {
+ t.Errorf("could not get rollup listing. Cause: %s", err)
+ }
+
+ // should be 3 elements
+ if big.NewInt(int64(rollupListing2.Total)).Cmp(big.NewInt(3)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+
+ // page 5, size 1
+ rollupListing3, err := GetRollupListing(db, &common.QueryPagination{Offset: 5, Size: 1})
+ if err != nil {
+ t.Errorf("could not get rollup listing. Cause: %s", err)
+ }
+
+ // should be 0 elements
+ if big.NewInt(int64(rollupListing3.Total)).Cmp(big.NewInt(0)) != 0 {
+ t.Errorf("rollup listing was not paginated correctly")
+ }
+}
+
+func createRollup(lastBatch int64) common.ExtRollup {
+ header := common.RollupHeader{
+ LastBatchSeqNo: uint64(lastBatch),
+ }
+
+ rollup := common.ExtRollup{
+ Header: &header,
+ }
+
+ return rollup
+}
+
+func createRollupMetadata(firstBatch int64) common.PublicRollupMetadata {
+ return common.PublicRollupMetadata{
+ FirstBatchSequence: big.NewInt(firstBatch),
+ StartTime: uint64(time.Now().Unix()),
+ }
+}
diff --git a/go/host/storage/hostdb/sql_statements.go b/go/host/storage/hostdb/sql_statements.go
new file mode 100644
index 0000000000..bf3d80c9f0
--- /dev/null
+++ b/go/host/storage/hostdb/sql_statements.go
@@ -0,0 +1,39 @@
+package hostdb
+
+// SQLStatements holds the SQL statements for a specific database type
+type SQLStatements struct {
+ InsertBatch string
+ InsertTransactions string
+ InsertTxCount string
+ InsertRollup string
+ InsertBlock string
+ SelectRollups string
+ SelectBlocks string
+ Placeholder string
+}
+
+func SQLiteSQLStatements() *SQLStatements {
+ return &SQLStatements{
+ InsertBatch: "INSERT INTO batch_host (sequence, full_hash, hash, height, ext_batch) VALUES (?, ?, ?, ?, ?)",
+ InsertTransactions: "REPLACE INTO transactions_host (hash, b_sequence) VALUES (?, ?)",
+ InsertTxCount: "INSERT INTO transaction_count (id, total) VALUES (?, ?) ON CONFLICT(id) DO UPDATE SET total = EXCLUDED.total",
+		InsertRollup: "INSERT INTO rollup_host (hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block) VALUES (?,?,?,?,?,?)",
+		InsertBlock: "REPLACE INTO block_host (hash, header, rollup_hash) VALUES (?,?,?)",
+ SelectRollups: "SELECT id, hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block FROM rollup_host ORDER BY id DESC LIMIT ? OFFSET ?",
+ SelectBlocks: "SELECT id, hash, header, rollup_hash FROM block_host ORDER BY id DESC LIMIT ? OFFSET ?",
+ Placeholder: "?",
+ }
+}
+
+func PostgresSQLStatements() *SQLStatements {
+ return &SQLStatements{
+ InsertBatch: "INSERT INTO batch_host (sequence, full_hash, hash, height, ext_batch) VALUES ($1, $2, $3, $4, $5)",
+ InsertTransactions: "INSERT INTO transactions_host (hash, b_sequence) VALUES ($1, $2) ON CONFLICT (hash) DO NOTHING",
+ InsertTxCount: "INSERT INTO transaction_count (id, total) VALUES ($1, $2) ON CONFLICT (id) DO UPDATE SET total = EXCLUDED.total",
+		InsertRollup: "INSERT INTO rollup_host (hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block) VALUES ($1, $2, $3, $4, $5, $6)",
+ InsertBlock: "INSERT INTO block_host (hash, header, rollup_hash) VALUES ($1, $2, $3) ON CONFLICT (hash) DO NOTHING",
+ SelectRollups: "SELECT id, hash, start_seq, end_seq, time_stamp, ext_rollup, compression_block FROM rollup_host ORDER BY id DESC LIMIT $1 OFFSET $2",
+ SelectBlocks: "SELECT id, hash, header, rollup_hash FROM block_host ORDER BY id DESC LIMIT $1 OFFSET $2",
+ Placeholder: "$1",
+ }
+}
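+
+// Note: Placeholder is appended when building ad-hoc WHERE clauses (see GetExtRollup),
+// so the two dialects compose differently. A sketch of the resulting queries:
+//
+//	sqlite:   SELECT ext_rollup FROM rollup_host r WHERE r.hash=?
+//	postgres: SELECT ext_rollup FROM rollup_host r WHERE r.hash=$1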
diff --git a/go/host/storage/hostdb/utils.go b/go/host/storage/hostdb/utils.go
new file mode 100644
index 0000000000..f98140a274
--- /dev/null
+++ b/go/host/storage/hostdb/utils.go
@@ -0,0 +1,68 @@
+package hostdb
+
+import (
+ "testing"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ten-protocol/go-ten/go/host/storage/init/sqlite"
+)
+
+const truncHash = 16
+
+// An arbitrary number to put in the header
+const batchNumber = 777
+
+// truncTo16 checks whether the leading half of the hash is all zeros and keeps the last 16 bytes if so, otherwise the first 16.
+func truncTo16(hash gethcommon.Hash) []byte {
+ hashBytes := hash.Bytes()
+ // Check if the first half of the hash is all zeros
+ if isLeadingHalfZeros(hashBytes) {
+ return truncLastTo16(hashBytes)
+ }
+ return truncFirstTo16(hashBytes)
+}
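+
+// For example (illustrative only):
+//
+//	h := gethcommon.HexToHash("0x0000000000000000000000000000000011111111111111111111111111111111")
+//	truncTo16(h) // the leading half is all zeros, so the last 16 bytes are kept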
+
+// isLeadingHalfZeros checks if the leading half of the hash is all zeros.
+func isLeadingHalfZeros(bytes []byte) bool {
+ halfLength := len(bytes) / 2
+ for i := 0; i < halfLength; i++ {
+ if bytes[i] != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// truncLastTo16 keeps the last 16 bytes of the hash.
+func truncLastTo16(bytes []byte) []byte {
+ if len(bytes) == 0 {
+ return bytes
+ }
+ start := len(bytes) - truncHash
+ if start < 0 {
+ start = 0
+ }
+ b := bytes[start:]
+ c := make([]byte, truncHash)
+ copy(c, b)
+ return c
+}
+
+// truncFirstTo16 keeps the first 16 bytes of the hash.
+func truncFirstTo16(bytes []byte) []byte {
+ if len(bytes) == 0 {
+ return bytes
+ }
+ b := bytes[0:truncHash]
+ c := make([]byte, truncHash)
+ copy(c, b)
+ return c
+}
+
+func createSQLiteDB(t *testing.T) (HostDB, error) {
+ hostDB, err := sqlite.CreateTemporarySQLiteHostDB("", "mode=memory")
+ if err != nil {
+ t.Fatalf("unable to create temp sql db: %s", err)
+ }
+ return NewHostDB(hostDB, SQLiteSQLStatements())
+}
diff --git a/go/host/storage/init/postgres/001_init.sql b/go/host/storage/init/postgres/001_init.sql
new file mode 100644
index 0000000000..a37ae04929
--- /dev/null
+++ b/go/host/storage/init/postgres/001_init.sql
@@ -0,0 +1,53 @@
+CREATE TABLE IF NOT EXISTS block_host
+(
+ id SERIAL PRIMARY KEY,
+ hash BYTEA NOT NULL UNIQUE,
+ header BYTEA NOT NULL,
+ rollup_hash BYTEA NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS IDX_BLOCK_HASH_HOST ON block_host USING HASH (hash);
+
+CREATE TABLE IF NOT EXISTS rollup_host
+(
+ id SERIAL PRIMARY KEY,
+ hash BYTEA NOT NULL UNIQUE,
+ start_seq INT NOT NULL,
+ end_seq INT NOT NULL,
+ time_stamp INT NOT NULL,
+ ext_rollup BYTEA NOT NULL,
+ compression_block BYTEA NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS IDX_ROLLUP_HASH_HOST ON rollup_host USING HASH (hash);
+CREATE INDEX IF NOT EXISTS IDX_ROLLUP_PROOF_HOST ON rollup_host (compression_block);
+CREATE INDEX IF NOT EXISTS IDX_ROLLUP_SEQ_HOST ON rollup_host (start_seq, end_seq);
+
+CREATE TABLE IF NOT EXISTS batch_host
+(
+ sequence INT PRIMARY KEY,
+ full_hash BYTEA NOT NULL,
+ hash BYTEA NOT NULL UNIQUE,
+ height INT NOT NULL,
+ ext_batch BYTEA NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS IDX_BATCH_HEIGHT_HOST ON batch_host (height);
+
+CREATE TABLE IF NOT EXISTS transactions_host
+(
+ hash BYTEA PRIMARY KEY,
+ b_sequence INT,
+ FOREIGN KEY (b_sequence) REFERENCES batch_host(sequence)
+);
+
+CREATE TABLE IF NOT EXISTS transaction_count
+(
+ id SERIAL PRIMARY KEY,
+ total INT NOT NULL
+);
+
+INSERT INTO transaction_count (id, total)
+VALUES (1, 0)
+ON CONFLICT (id) DO NOTHING;
diff --git a/go/host/storage/init/postgres/postgres.go b/go/host/storage/init/postgres/postgres.go
new file mode 100644
index 0000000000..7b2601e850
--- /dev/null
+++ b/go/host/storage/init/postgres/postgres.go
@@ -0,0 +1,66 @@
+package postgres
+
+import (
+ "database/sql"
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/ten-protocol/go-ten/go/common/storage"
+
+ _ "github.com/lib/pq"
+)
+
+const (
+ defaultDatabase = "postgres"
+ maxDBPoolSize = 100
+)
+
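+// CreatePostgresDBConnection connects to the PostgreSQL server behind baseURL, creates
+// the database dbName if it does not already exist, and returns a pooled connection to it.
+// baseURL must end in a trailing slash so a database name can be appended to it,
+// e.g. (illustrative only) "postgres://user:password@localhost:5432/".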
+func CreatePostgresDBConnection(baseURL string, dbName string) (*sql.DB, error) {
+ if baseURL == "" {
+ return nil, fmt.Errorf("failed to prepare PostgreSQL connection - DB URL was not set on host config")
+ }
+ dbURL := baseURL + defaultDatabase
+
+ dbName = strings.ToLower(dbName)
+
+ db, err := sql.Open("postgres", dbURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect to PostgreSQL server: %v", err)
+ }
+	defer db.Close() // closes the bootstrap connection when this function returns
+
+ rows, err := db.Query("SELECT 1 FROM pg_database WHERE datname = $1", dbName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to query database existence: %v", err)
+ }
+ defer rows.Close()
+
+ if !rows.Next() {
+ _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbName))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create database %s: %v", dbName, err)
+ }
+ }
+
+ dbURL = fmt.Sprintf("%s%s", baseURL, dbName)
+
+	db, err = sql.Open("postgres", dbURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to PostgreSQL database %s: %v", dbName, err)
+	}
+	db.SetMaxOpenConns(maxDBPoolSize)
+
+ _, filename, _, ok := runtime.Caller(0)
+ if !ok {
+ return nil, fmt.Errorf("failed to get current directory")
+ }
+ migrationsDir := filepath.Dir(filename)
+
+ if err = storage.ApplyMigrations(db, migrationsDir); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
diff --git a/go/host/storage/init/sqlite/host_sqlite_init.sql b/go/host/storage/init/sqlite/host_sqlite_init.sql
new file mode 100644
index 0000000000..1a31ca8d53
--- /dev/null
+++ b/go/host/storage/init/sqlite/host_sqlite_init.sql
@@ -0,0 +1,49 @@
+create table if not exists block_host
+(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ hash binary(32) NOT NULL UNIQUE,
+ header blob NOT NULL,
+ rollup_hash binary(32) NOT NULL
+);
+
+create index IDX_BLOCK_HASH_HOST on block_host (hash);
+
+create table if not exists rollup_host
+(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ hash binary(16) NOT NULL UNIQUE,
+ start_seq int NOT NULL,
+ end_seq int NOT NULL,
+ time_stamp int NOT NULL,
+ ext_rollup blob NOT NULL,
+ compression_block binary(32) NOT NULL
+);
+
+create index IDX_ROLLUP_HASH_HOST on rollup_host (hash);
+create index IDX_ROLLUP_PROOF_HOST on rollup_host (compression_block);
+create index IDX_ROLLUP_SEQ_HOST on rollup_host (start_seq, end_seq);
+
+create table if not exists batch_host
+(
+ sequence int primary key,
+ full_hash binary(32) NOT NULL,
+ hash binary(16) NOT NULL unique,
+ height int NOT NULL,
+ ext_batch mediumblob NOT NULL
+);
+create index IDX_BATCH_HEIGHT_HOST on batch_host (height);
+
+create table if not exists transactions_host
+(
+ hash binary(32) primary key,
+ b_sequence int REFERENCES batch_host
+);
+
+create table if not exists transaction_count
+(
+ id int NOT NULL primary key,
+ total int NOT NULL
+);
+
+insert into transaction_count (id, total)
+values (1, 0) on CONFLICT (id) DO NOTHING;
\ No newline at end of file
diff --git a/go/host/storage/init/sqlite/sqlite.go b/go/host/storage/init/sqlite/sqlite.go
new file mode 100644
index 0000000000..2aabe7f401
--- /dev/null
+++ b/go/host/storage/init/sqlite/sqlite.go
@@ -0,0 +1,70 @@
+package sqlite
+
+import (
+ "database/sql"
+ "embed"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/ten-protocol/go-ten/go/common"
+
+ _ "github.com/mattn/go-sqlite3" // this imports the sqlite driver to make the sql.Open() connection work
+)
+
+const (
+ tempDirName = "ten-persistence"
+ initFile = "host_sqlite_init.sql"
+)
+
+//go:embed *.sql
+var sqlFiles embed.FS
+
+// CreateTemporarySQLiteHostDB creates a sqlite host DB. If dbPath is empty, a random throwaway temp file is used;
+// otherwise dbPath is the filepath for the sqlite file, which allows tests that care about persistence between restarts.
+func CreateTemporarySQLiteHostDB(dbPath string, dbOptions string) (*sql.DB, error) {
+ if dbPath == "" {
+ tempPath, err := CreateTempDBFile("host.db")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temp sqlite DB file - %w", err)
+ }
+ dbPath = tempPath
+ }
+
+ db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?%s", dbPath, dbOptions))
+ if err != nil {
+ return nil, fmt.Errorf("couldn't open sqlite db - %w", err)
+ }
+
+ // Sqlite fails with table locks when there are multiple connections
+ db.SetMaxOpenConns(1)
+
+ err = initialiseDB(db, initFile)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't initialise db - %w", err)
+ }
+ return db, nil
+}
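+
+// In tests this is typically invoked with an in-memory database, e.g.:
+//
+//	db, err := CreateTemporarySQLiteHostDB("", "mode=memory")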
+
+func initialiseDB(db *sql.DB, initFile string) error {
+ sqlInitFile, err := sqlFiles.ReadFile(initFile)
+ if err != nil {
+ return err
+ }
+
+ _, err = db.Exec(string(sqlInitFile))
+ if err != nil {
+		return fmt.Errorf("failed to initialise sqlite db from %s - %w", initFile, err)
+ }
+ return nil
+}
+
+func CreateTempDBFile(dbname string) (string, error) {
+ tempDir := filepath.Join("/tmp", tempDirName, common.RandomStr(5))
+ err := os.MkdirAll(tempDir, os.ModePerm)
+ if err != nil {
+ return "", fmt.Errorf("failed to create sqlite temp dir - %w", err)
+ }
+ tempFile := filepath.Join(tempDir, dbname)
+ return tempFile, nil
+}
diff --git a/go/host/storage/interfaces.go b/go/host/storage/interfaces.go
new file mode 100644
index 0000000000..d40845d947
--- /dev/null
+++ b/go/host/storage/interfaces.go
@@ -0,0 +1,58 @@
+package storage
+
+import (
+ "io"
+ "math/big"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ten-protocol/go-ten/go/common"
+)
+
+type Storage interface {
+ BatchResolver
+ BlockResolver
+ io.Closer
+}
+
+type BatchResolver interface {
+ // AddBatch stores the batch
+ AddBatch(batch *common.ExtBatch) error
+ // FetchBatchBySeqNo returns the batch with the given seq number
+ FetchBatchBySeqNo(seqNum uint64) (*common.ExtBatch, error)
+ // FetchBatchHashByHeight returns the batch hash given the batch number
+ FetchBatchHashByHeight(number *big.Int) (*gethcommon.Hash, error)
+ // FetchBatchHeaderByHash returns the batch header given its hash
+ FetchBatchHeaderByHash(hash gethcommon.Hash) (*common.BatchHeader, error)
+ // FetchHeadBatchHeader returns the latest batch header
+ FetchHeadBatchHeader() (*common.BatchHeader, error)
+ // FetchPublicBatchByHash returns the public batch
+ FetchPublicBatchByHash(batchHash common.L2BatchHash) (*common.PublicBatch, error)
+ // FetchBatch returns the `ExtBatch` with the given hash
+ FetchBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error)
+ // FetchBatchByTx returns the `ExtBatch` with the given tx hash
+ FetchBatchByTx(txHash gethcommon.Hash) (*common.ExtBatch, error)
+ // FetchLatestBatch returns the head `BatchHeader`
+ FetchLatestBatch() (*common.BatchHeader, error)
+ // FetchBatchListing returns a paginated list of the public batch data
+ FetchBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error)
+	// FetchBatchListingDeprecated is a backwards-compatible API that returns batch data
+ FetchBatchListingDeprecated(pagination *common.QueryPagination) (*common.BatchListingResponseDeprecated, error)
+ // FetchBatchHeaderByHeight returns the `BatchHeader` with the given height
+ FetchBatchHeaderByHeight(height *big.Int) (*common.BatchHeader, error)
+ // FetchTotalTxCount returns the number of transactions in the DB
+ FetchTotalTxCount() (*big.Int, error)
+}
+
+type BlockResolver interface {
+	// AddBlock stores an L1 block header, together with the hash of the rollup it contains, in the host DB
+ AddBlock(b *types.Header, rollupHash common.L2RollupHash) error
+ // AddRollup stores a rollup in the host DB
+ AddRollup(rollup *common.ExtRollup, metadata *common.PublicRollupMetadata, block *common.L1Block) error
+ // FetchLatestRollupHeader returns the head `RollupHeader`
+ FetchLatestRollupHeader() (*common.RollupHeader, error)
+ // FetchRollupListing returns a paginated list of rollups
+ FetchRollupListing(pagination *common.QueryPagination) (*common.RollupListingResponse, error)
+ // FetchBlockListing returns a paginated list of blocks that include rollups
+ FetchBlockListing(pagination *common.QueryPagination) (*common.BlockListingResponse, error)
+}
diff --git a/go/host/storage/storage.go b/go/host/storage/storage.go
new file mode 100644
index 0000000000..9737c9164e
--- /dev/null
+++ b/go/host/storage/storage.go
@@ -0,0 +1,161 @@
+package storage
+
+import (
+ "fmt"
+ "io"
+ "math/big"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ gethlog "github.com/ethereum/go-ethereum/log"
+ "github.com/ten-protocol/go-ten/go/common"
+ "github.com/ten-protocol/go-ten/go/common/errutil"
+ "github.com/ten-protocol/go-ten/go/common/log"
+ "github.com/ten-protocol/go-ten/go/config"
+ "github.com/ten-protocol/go-ten/go/host/storage/hostdb"
+)
+
+type storageImpl struct {
+ db hostdb.HostDB
+ logger gethlog.Logger
+ io.Closer
+}
+
+func (s *storageImpl) AddBatch(batch *common.ExtBatch) error {
+ // Check if the Batch is already stored
+ _, err := hostdb.GetBatchHeader(s.db, batch.Hash())
+ if err == nil {
+ return errutil.ErrAlreadyExists
+ }
+
+ dbtx, err := s.db.NewDBTransaction()
+ if err != nil {
+ return err
+ }
+
+ if err := hostdb.AddBatch(dbtx, s.db.GetSQLStatement(), batch); err != nil {
+ return fmt.Errorf("could not add batch to host. Cause: %w", err)
+ }
+
+ if err := dbtx.Write(); err != nil {
+ return fmt.Errorf("could not commit batch tx. Cause: %w", err)
+ }
+ return nil
+}
+
+func (s *storageImpl) AddRollup(rollup *common.ExtRollup, metadata *common.PublicRollupMetadata, block *common.L1Block) error {
+ // Check if the Header is already stored
+ _, err := hostdb.GetRollupHeader(s.db, rollup.Header.Hash())
+ if err == nil {
+ return errutil.ErrAlreadyExists
+ }
+
+ dbtx, err := s.db.NewDBTransaction()
+ if err != nil {
+ return err
+ }
+
+ if err := hostdb.AddRollup(dbtx, s.db.GetSQLStatement(), rollup, metadata, block); err != nil {
+ return fmt.Errorf("could not add rollup to host. Cause: %w", err)
+ }
+
+ if err := dbtx.Write(); err != nil {
+		return fmt.Errorf("could not commit rollup tx. Cause: %w", err)
+ }
+ return nil
+}
+
+func (s *storageImpl) AddBlock(b *types.Header, rollupHash common.L2RollupHash) error {
+ dbtx, err := s.db.NewDBTransaction()
+ if err != nil {
+ return err
+ }
+
+ if err := hostdb.AddBlock(dbtx, s.db.GetSQLStatement(), b, rollupHash); err != nil {
+ return fmt.Errorf("could not add block to host. Cause: %w", err)
+ }
+
+ if err := dbtx.Write(); err != nil {
+		return fmt.Errorf("could not commit block tx. Cause: %w", err)
+ }
+ return nil
+}
+
+func (s *storageImpl) FetchBatchBySeqNo(seqNum uint64) (*common.ExtBatch, error) {
+ return hostdb.GetBatchBySequenceNumber(s.db, seqNum)
+}
+
+func (s *storageImpl) FetchBatchHashByHeight(number *big.Int) (*gethcommon.Hash, error) {
+ return hostdb.GetBatchHashByNumber(s.db, number)
+}
+
+func (s *storageImpl) FetchBatchHeaderByHash(hash gethcommon.Hash) (*common.BatchHeader, error) {
+ return hostdb.GetBatchHeader(s.db, hash)
+}
+
+func (s *storageImpl) FetchHeadBatchHeader() (*common.BatchHeader, error) {
+ return hostdb.GetHeadBatchHeader(s.db.GetSQLDB())
+}
+
+func (s *storageImpl) FetchPublicBatchByHash(batchHash common.L2BatchHash) (*common.PublicBatch, error) {
+ return hostdb.GetPublicBatch(s.db, batchHash)
+}
+
+func (s *storageImpl) FetchBatch(batchHash gethcommon.Hash) (*common.ExtBatch, error) {
+ return hostdb.GetBatchByHash(s.db, batchHash)
+}
+
+func (s *storageImpl) FetchBatchByTx(txHash gethcommon.Hash) (*common.ExtBatch, error) {
+ return hostdb.GetBatchByTx(s.db, txHash)
+}
+
+func (s *storageImpl) FetchLatestBatch() (*common.BatchHeader, error) {
+ return hostdb.GetLatestBatch(s.db.GetSQLDB())
+}
+
+func (s *storageImpl) FetchBatchHeaderByHeight(height *big.Int) (*common.BatchHeader, error) {
+ return hostdb.GetBatchByHeight(s.db, height)
+}
+
+func (s *storageImpl) FetchBatchListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
+ return hostdb.GetBatchListing(s.db, pagination)
+}
+
+func (s *storageImpl) FetchBatchListingDeprecated(pagination *common.QueryPagination) (*common.BatchListingResponseDeprecated, error) {
+ return hostdb.GetBatchListingDeprecated(s.db, pagination)
+}
+
+func (s *storageImpl) FetchLatestRollupHeader() (*common.RollupHeader, error) {
+ return hostdb.GetLatestRollup(s.db.GetSQLDB())
+}
+
+func (s *storageImpl) FetchRollupListing(pagination *common.QueryPagination) (*common.RollupListingResponse, error) {
+ return hostdb.GetRollupListing(s.db, pagination)
+}
+
+func (s *storageImpl) FetchBlockListing(pagination *common.QueryPagination) (*common.BlockListingResponse, error) {
+ return hostdb.GetBlockListing(s.db, pagination)
+}
+
+func (s *storageImpl) FetchTotalTxCount() (*big.Int, error) {
+ return hostdb.GetTotalTxCount(s.db.GetSQLDB())
+}
+
+func (s *storageImpl) Close() error {
+ return s.db.GetSQLDB().Close()
+}
+
+func NewHostStorageFromConfig(config *config.HostConfig, logger gethlog.Logger) Storage {
+ backingDB, err := CreateDBFromConfig(config, logger)
+ if err != nil {
+ logger.Crit("Failed to connect to backing database", log.ErrKey, err)
+ }
+ return NewStorage(backingDB, logger)
+}
+
+func NewStorage(backingDB hostdb.HostDB, logger gethlog.Logger) Storage {
+ return &storageImpl{
+ db: backingDB,
+ logger: logger,
+ }
+}
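+
+// A minimal wiring sketch (illustrative only; assumes a sqlite-backed host):
+//
+//	sqlDB, _ := sqlite.CreateTemporarySQLiteHostDB("", "mode=memory")
+//	hostDB, _ := hostdb.NewHostDB(sqlDB, hostdb.SQLiteSQLStatements())
+//	store := NewStorage(hostDB, logger)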
diff --git a/go/node/cmd/cli.go b/go/node/cmd/cli.go
index e2b9d737af..83fdf0e716 100644
--- a/go/node/cmd/cli.go
+++ b/go/node/cmd/cli.go
@@ -45,6 +45,7 @@ type NodeConfigCLI struct {
maxBatchInterval string // format like 500ms or 2s (any time parsable by time.ParseDuration())
rollupInterval string // format like 500ms or 2s (any time parsable by time.ParseDuration())
l1ChainID int
+ postgresDBHost string
}
// ParseConfigCLI returns a NodeConfigCLI based the cli params and defaults.
@@ -81,6 +82,7 @@ func ParseConfigCLI() *NodeConfigCLI {
maxBatchInterval := flag.String(maxBatchIntervalFlag, "1s", flagUsageMap[maxBatchIntervalFlag])
rollupInterval := flag.String(rollupIntervalFlag, "3s", flagUsageMap[rollupIntervalFlag])
l1ChainID := flag.Int(l1ChainIDFlag, 1337, flagUsageMap[l1ChainIDFlag])
+	postgresDBHost := flag.String(postgresDBHostFlag, "", flagUsageMap[postgresDBHostFlag])
flag.Parse()
cfg.nodeName = *nodeName
@@ -112,6 +114,7 @@ func ParseConfigCLI() *NodeConfigCLI {
cfg.maxBatchInterval = *maxBatchInterval
cfg.rollupInterval = *rollupInterval
cfg.l1ChainID = *l1ChainID
+ cfg.postgresDBHost = *postgresDBHost
cfg.nodeAction = flag.Arg(0)
if !validateNodeAction(cfg.nodeAction) {
diff --git a/go/node/cmd/cli_flags.go b/go/node/cmd/cli_flags.go
index 9e96bf16cf..2840fc0158 100644
--- a/go/node/cmd/cli_flags.go
+++ b/go/node/cmd/cli_flags.go
@@ -31,6 +31,7 @@ const (
maxBatchIntervalFlag = "max_batch_interval"
rollupIntervalFlag = "rollup_interval"
l1ChainIDFlag = "l1_chain_id"
+ postgresDBHostFlag = "postgres_db_host"
)
// Returns a map of the flag usages.
@@ -66,5 +67,6 @@ func getFlagUsageMap() map[string]string {
maxBatchIntervalFlag: "Max interval between batches, if greater than batchInterval then some empty batches will be skipped. Can be formatted like 500ms or 1s",
rollupIntervalFlag: "Duration between each rollup. Can be formatted like 500ms or 1s",
l1ChainIDFlag: "Chain ID of the L1 network",
+ postgresDBHostFlag: "Host connection details for Postgres DB",
}
}
diff --git a/go/node/cmd/main.go b/go/node/cmd/main.go
index 145222bd68..1061a0ccc5 100644
--- a/go/node/cmd/main.go
+++ b/go/node/cmd/main.go
@@ -37,6 +37,7 @@ func main() {
node.WithMaxBatchInterval(cliConfig.maxBatchInterval),
node.WithRollupInterval(cliConfig.rollupInterval),
node.WithL1ChainID(cliConfig.l1ChainID),
+ node.WithPostgresDBHost(cliConfig.postgresDBHost),
)
dockerNode := node.NewDockerNode(nodeCfg)
diff --git a/go/node/config.go b/go/node/config.go
index 3c406f402a..7d0398c655 100644
--- a/go/node/config.go
+++ b/go/node/config.go
@@ -44,6 +44,7 @@ type Config struct {
enclaveDebug bool
nodeName string
hostInMemDB bool
+ postgresDB string
debugNamespaceEnabled bool
profilerEnabled bool
coinbaseAddress string
@@ -126,6 +127,7 @@ func (c *Config) ToHostConfig() *config.HostInputConfig {
cfg.IsInboundP2PDisabled = c.isInboundP2PDisabled
cfg.L1BlockTime = c.l1BlockTime
cfg.L1ChainID = int64(c.l1ChainID)
+ cfg.PostgresDBHost = c.postgresDB
return cfg
}
@@ -341,3 +343,9 @@ func WithObscuroGenesis(g string) Option {
c.obscuroGenesis = g
}
}
+
+func WithPostgresDBHost(g string) Option {
+ return func(c *Config) {
+ c.postgresDB = g
+ }
+}
diff --git a/go/node/docker_node.go b/go/node/docker_node.go
index 4a797aaffc..8c2e39195b 100644
--- a/go/node/docker_node.go
+++ b/go/node/docker_node.go
@@ -3,9 +3,9 @@ package node
import (
"fmt"
- "github.com/ethereum/go-ethereum/log"
"github.com/sanity-io/litter"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ten-protocol/go-ten/go/common/docker"
)
@@ -124,7 +124,7 @@ func (d *DockerNode) startHost() error {
fmt.Sprintf("-l1ChainID=%d", d.cfg.l1ChainID),
}
if !d.cfg.hostInMemDB {
- cmd = append(cmd, "-levelDBPath", _hostDataDir)
+ cmd = append(cmd, "-postgresDBHost", d.cfg.postgresDB)
}
exposedPorts := []int{
diff --git a/go/obsclient/obsclient.go b/go/obsclient/obsclient.go
index b16021f49f..30a68f47b4 100644
--- a/go/obsclient/obsclient.go
+++ b/go/obsclient/obsclient.go
@@ -59,7 +59,7 @@ func (oc *ObsClient) BatchNumber() (uint64, error) {
// BatchByHash returns the batch with the given hash.
func (oc *ObsClient) BatchByHash(hash gethcommon.Hash) (*common.ExtBatch, error) {
var batch *common.ExtBatch
- err := oc.rpcClient.Call(&batch, rpc.GetFullBatchByHash, hash)
+ err := oc.rpcClient.Call(&batch, rpc.GetBatch, hash)
if err == nil && batch == nil {
err = ethereum.NotFound
}
@@ -112,14 +112,14 @@ func (oc *ObsClient) GetTotalContractCount() (int, error) {
// GetTotalTransactionCount returns the total count of executed transactions
func (oc *ObsClient) GetTotalTransactionCount() (int, error) {
var count int
- err := oc.rpcClient.Call(&count, rpc.GetTotalTransactionCount)
+ err := oc.rpcClient.Call(&count, rpc.GetTotalTxCount)
if err != nil {
return 0, err
}
return count, nil
}
-// GetLatestRollupHeader returns the header of the rollup at tip
+// GetLatestRollupHeader returns the header of the latest rollup
func (oc *ObsClient) GetLatestRollupHeader() (*common.RollupHeader, error) {
var header *common.RollupHeader
err := oc.rpcClient.Call(&header, rpc.GetLatestRollupHeader)
@@ -129,6 +129,16 @@ func (oc *ObsClient) GetLatestRollupHeader() (*common.RollupHeader, error) {
return header, nil
}
+// GetLatestBatch returns the header of the batch at the head of the chain
+func (oc *ObsClient) GetLatestBatch() (*common.BatchHeader, error) {
+ var header *common.BatchHeader
+ err := oc.rpcClient.Call(&header, rpc.GetLatestBatch)
+ if err != nil {
+ return nil, err
+ }
+ return header, nil
+}
+
// GetPublicTxListing returns a list of public transactions
func (oc *ObsClient) GetPublicTxListing(pagination *common.QueryPagination) (*common.TransactionListingResponse, error) {
var result common.TransactionListingResponse
@@ -142,6 +152,16 @@ func (oc *ObsClient) GetPublicTxListing(pagination *common.QueryPagination) (*co
// GetBatchesListing returns a list of batches
func (oc *ObsClient) GetBatchesListing(pagination *common.QueryPagination) (*common.BatchListingResponse, error) {
var result common.BatchListingResponse
+ err := oc.rpcClient.Call(&result, rpc.GetBatchListingNew, pagination)
+ if err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// GetBatchesListingDeprecated returns a list of batches in the deprecated response format
+func (oc *ObsClient) GetBatchesListingDeprecated(pagination *common.QueryPagination) (*common.BatchListingResponseDeprecated, error) {
+ var result common.BatchListingResponseDeprecated
err := oc.rpcClient.Call(&result, rpc.GetBatchListing, pagination)
if err != nil {
return nil, err
@@ -159,6 +179,16 @@ func (oc *ObsClient) GetBlockListing(pagination *common.QueryPagination) (*commo
return &result, nil
}
+// GetRollupListing returns a list of Rollups
+func (oc *ObsClient) GetRollupListing(pagination *common.QueryPagination) (*common.RollupListingResponse, error) {
+ var result common.RollupListingResponse
+ err := oc.rpcClient.Call(&result, rpc.GetRollupListing, pagination)
+ if err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
// GetConfig returns the network config for obscuro
func (oc *ObsClient) GetConfig() (*common.ObscuroNetworkInfo, error) {
var result common.ObscuroNetworkInfo
diff --git a/go/rpc/client.go b/go/rpc/client.go
index fbb2408702..d1e54964ce 100644
--- a/go/rpc/client.go
+++ b/go/rpc/client.go
@@ -26,27 +26,24 @@ const (
Health = "obscuro_health"
Config = "obscuro_config"
- GetBlockHeaderByHash = "tenscan_getBlockHeaderByHash"
- GetBatch = "tenscan_getBatch"
- GetBatchForTx = "tenscan_getBatchForTx"
- GetLatestTxs = "tenscan_getLatestTransactions"
- GetTotalTxs = "tenscan_getTotalTransactions"
- Attestation = "tenscan_attestation"
StopHost = "test_stopHost"
SubscribeNamespace = "eth"
SubscriptionTypeLogs = "logs"
SubscriptionTypeNewHeads = "newHeads"
- // GetL1RollupHeaderByHash = "scan_getL1RollupHeaderByHash"
- // GetActiveNodeCount = "scan_getActiveNodeCount"
-
+ GetBatchByTx = "scan_getBatchByTx"
GetLatestRollupHeader = "scan_getLatestRollupHeader"
- GetTotalTransactionCount = "scan_getTotalTransactionCount"
+ GetTotalTxCount = "scan_getTotalTransactionCount"
GetTotalContractCount = "scan_getTotalContractCount"
GetPublicTransactionData = "scan_getPublicTransactionData"
GetBatchListing = "scan_getBatchListing"
+ GetBatchListingNew = "scan_getBatchListingNew"
GetBlockListing = "scan_getBlockListing"
- GetFullBatchByHash = "scan_getBatchByHash"
+ GetRollupListing = "scan_getRollupListing"
+ GetBatch = "scan_getBatch"
+ GetLatestBatch = "scan_getLatestBatch"
+ GetPublicBatchByHash = "scan_getPublicBatchByHash"
+ GetBatchByHeight = "scan_getBatchByHeight"
)
// Client is used by client applications to interact with the Ten node
diff --git a/integration/manualtests/client_test.go b/integration/manualtests/client_test.go
index 5643cd8e90..73d067c51f 100644
--- a/integration/manualtests/client_test.go
+++ b/integration/manualtests/client_test.go
@@ -22,11 +22,11 @@ func TestClientGetRollup(t *testing.T) {
obsClient := obsclient.NewObsClient(client)
- rollupHeader, err := obsClient.BatchHeaderByNumber(big.NewInt(4392))
+ batchHeader, err := obsClient.BatchHeaderByNumber(big.NewInt(4392))
assert.Nil(t, err)
var rollup *common.ExtRollup
- err = client.Call(&rollup, rpc.GetBatch, rollupHeader.Hash())
+ err = client.Call(&rollup, rpc.GetBatch, batchHeader.Hash())
assert.Nil(t, err)
}
diff --git a/integration/networktest/actions/publicdata/tenscan_data.go b/integration/networktest/actions/publicdata/tenscan_data.go
index c4c9e1d7f0..98f8108a0e 100644
--- a/integration/networktest/actions/publicdata/tenscan_data.go
+++ b/integration/networktest/actions/publicdata/tenscan_data.go
@@ -32,7 +32,7 @@ func VerifyBatchesDataAction() networktest.Action {
if batchListing.Total <= 10 {
return fmt.Errorf("expected more than 10 batches, got %d", batchListing.Total)
}
- if batchListing.BatchesData[0].Number.Cmp(batchListing.BatchesData[1].Number) < 0 {
+ if batchListing.BatchesData[0].Height.Cmp(batchListing.BatchesData[1].Height) < 0 {
return fmt.Errorf("expected batches to be sorted by height descending")
}
diff --git a/integration/simulation/devnetwork/node.go b/integration/simulation/devnetwork/node.go
index 0c92b9a13b..3cffcfb53a 100644
--- a/integration/simulation/devnetwork/node.go
+++ b/integration/simulation/devnetwork/node.go
@@ -2,7 +2,6 @@ package devnetwork
import (
"fmt"
- "os"
"github.com/ten-protocol/go-ten/lib/gethfork/node"
@@ -57,7 +56,6 @@ type InMemNodeOperator struct {
enclaves []*enclavecontainer.EnclaveContainer
l1Wallet wallet.Wallet
enclaveDBFilepaths []string // 1 per enclave
- hostDBFilepath string
}
func (n *InMemNodeOperator) StopHost() error {
@@ -144,13 +142,14 @@ func (n *InMemNodeOperator) createHostContainer() *hostcontainer.HostContainer {
L1ChainID: integration.EthereumChainID,
ObscuroChainID: integration.TenChainID,
L1StartHash: n.l1Data.TenStartBlock,
- UseInMemoryDB: false,
- LevelDBPath: n.hostDBFilepath,
- DebugNamespaceEnabled: true,
- BatchInterval: n.config.BatchInterval,
- RollupInterval: n.config.RollupInterval,
- L1BlockTime: n.config.L1BlockTime,
- MaxRollupSize: 1024 * 64,
+ SequencerID: n.config.SequencerID,
+ // Can provide the postgres db host if testing against a local DB instance
+ UseInMemoryDB: true,
+ DebugNamespaceEnabled: true,
+ BatchInterval: n.config.BatchInterval,
+ RollupInterval: n.config.RollupInterval,
+ L1BlockTime: n.config.L1BlockTime,
+ MaxRollupSize: 1024 * 64,
}
hostLogger := testlog.Logger().New(log.NodeIDKey, n.l1Wallet.Address(), log.CmpKey, log.HostCmp)
@@ -278,10 +277,6 @@ func NewInMemNodeOperator(operatorIdx int, config *TenConfig, nodeType common.No
}
sqliteDBPaths[i] = sqliteDBPath
}
- levelDBPath, err := os.MkdirTemp("", "levelDB_*")
- if err != nil {
- panic("failed to create temp levelDBPath")
- }
l1Nonce, err := l1Client.Nonce(l1Wallet.Address())
if err != nil {
@@ -298,6 +293,5 @@ func NewInMemNodeOperator(operatorIdx int, config *TenConfig, nodeType common.No
l1Wallet: l1Wallet,
logger: logger,
enclaveDBFilepaths: sqliteDBPaths,
- hostDBFilepath: levelDBPath,
}
}
diff --git a/integration/simulation/network/network_utils.go b/integration/simulation/network/network_utils.go
index 12962c8be6..9b6c3f1aa3 100644
--- a/integration/simulation/network/network_utils.go
+++ b/integration/simulation/network/network_utils.go
@@ -76,6 +76,7 @@ func createInMemObscuroNode(
BatchInterval: batchInterval,
IsInboundP2PDisabled: incomingP2PDisabled,
L1BlockTime: l1BlockTime,
+ UseInMemoryDB: true,
}
enclaveConfig := &config.EnclaveConfig{
diff --git a/integration/simulation/network/socket.go b/integration/simulation/network/socket.go
index 71d9fa2cc9..7831bfeab6 100644
--- a/integration/simulation/network/socket.go
+++ b/integration/simulation/network/socket.go
@@ -160,7 +160,8 @@ func (n *networkOfSocketNodes) createConnections(simParams *params.SimParams) er
// create a connection to the newly created nodes - panic if no connection is made after some time
startTime := time.Now()
for connected := false; !connected; time.Sleep(500 * time.Millisecond) {
- client, err = rpc.NewNetworkClient(fmt.Sprintf("ws://127.0.0.1:%d", simParams.StartPort+integration.DefaultHostRPCWSOffset+i))
+ port := simParams.StartPort + integration.DefaultHostRPCWSOffset + i
+ client, err = rpc.NewNetworkClient(fmt.Sprintf("ws://127.0.0.1:%d", port))
connected = err == nil // The client cannot be created until the node has started.
if time.Now().After(startTime.Add(2 * time.Minute)) {
return fmt.Errorf("failed to create a connect to node after 2 minute - %w", err)
diff --git a/integration/simulation/p2p/in_mem_obscuro_client.go b/integration/simulation/p2p/in_mem_obscuro_client.go
index 02236be3d0..6db8332e1c 100644
--- a/integration/simulation/p2p/in_mem_obscuro_client.go
+++ b/integration/simulation/p2p/in_mem_obscuro_client.go
@@ -33,7 +33,7 @@ type inMemObscuroClient struct {
obscuroAPI *clientapi.ObscuroAPI
ethAPI *clientapi.EthereumAPI
filterAPI *clientapi.FilterAPI
- tenScanAPI *clientapi.TenScanAPI
+ tenScanAPI *clientapi.ScanAPI
testAPI *clientapi.TestAPI
enclavePublicKey *ecies.PublicKey
}
@@ -51,7 +51,7 @@ func NewInMemObscuroClient(hostContainer *container.HostContainer) rpc.Client {
obscuroAPI: clientapi.NewObscuroAPI(hostContainer.Host()),
ethAPI: clientapi.NewEthereumAPI(hostContainer.Host(), logger),
filterAPI: clientapi.NewFilterAPI(hostContainer.Host(), logger),
- tenScanAPI: clientapi.NewTenScanAPI(hostContainer.Host()),
+ tenScanAPI: clientapi.NewScanAPI(hostContainer.Host(), logger),
testAPI: clientapi.NewTestAPI(hostContainer),
enclavePublicKey: enclPubKey,
}
@@ -94,18 +94,24 @@ func (c *inMemObscuroClient) Call(result interface{}, method string, args ...int
case rpc.Health:
return c.health(result)
- case rpc.GetTotalTxs:
+ case rpc.GetTotalTxCount:
return c.getTotalTransactions(result)
- case rpc.GetLatestTxs:
- return c.getLatestTransactions(result, args)
-
- case rpc.GetBatchForTx:
- return c.getBatchForTx(result, args)
+ case rpc.GetBatchByTx:
+ return c.getBatchByTx(result, args)
case rpc.GetBatch:
return c.getBatch(result, args)
+ case rpc.GetBatchListing:
+ return c.getBatchListingDeprecated(result, args)
+
+ case rpc.GetRollupListing:
+ return c.getRollupListing(result, args)
+
+ case rpc.GetPublicTransactionData:
+ return c.getPublicTransactionData(result, args)
+
default:
return fmt.Errorf("RPC method %s is unknown", method)
}
@@ -277,66 +283,114 @@ func (c *inMemObscuroClient) health(result interface{}) error {
}
func (c *inMemObscuroClient) getTotalTransactions(result interface{}) error {
- totalTxs, err := c.tenScanAPI.GetTotalTransactions()
+ totalTxs, err := c.tenScanAPI.GetTotalTransactionCount()
if err != nil {
- return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetTotalTxs, err)
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetTotalTxCount, err)
}
*result.(**big.Int) = totalTxs
return nil
}
-func (c *inMemObscuroClient) getLatestTransactions(result interface{}, args []interface{}) error {
+func (c *inMemObscuroClient) getBatchByTx(result interface{}, args []interface{}) error {
if len(args) != 1 {
- return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetLatestTxs, len(args))
+ return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetBatchByTx, len(args))
}
- numTxs, ok := args[0].(int)
+ txHash, ok := args[0].(gethcommon.Hash)
if !ok {
- return fmt.Errorf("first arg to %s is of type %T, expected type int", rpc.GetLatestTxs, args[0])
+		return fmt.Errorf("first arg to %s is of type %T, expected type gethcommon.Hash", rpc.GetBatchByTx, args[0])
}
- latestTxs, err := c.tenScanAPI.GetLatestTransactions(numTxs)
+ batch, err := c.tenScanAPI.GetBatchByTx(txHash)
if err != nil {
- return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetLatestTxs, err)
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetBatchByTx, err)
}
- *result.(*[]gethcommon.Hash) = latestTxs
+ *result.(**common.ExtBatch) = batch
return nil
}
-func (c *inMemObscuroClient) getBatchForTx(result interface{}, args []interface{}) error {
+func (c *inMemObscuroClient) getBatch(result interface{}, args []interface{}) error {
if len(args) != 1 {
- return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetBatchForTx, len(args))
+ return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetBatch, len(args))
}
- txHash, ok := args[0].(gethcommon.Hash)
+ batchHash, ok := args[0].(gethcommon.Hash)
if !ok {
- return fmt.Errorf("first arg to %s is of type %T, expected type int", rpc.GetBatchForTx, args[0])
+		return fmt.Errorf("first arg to %s is of type %T, expected type gethcommon.Hash", rpc.GetBatch, args[0])
}
- batch, err := c.tenScanAPI.GetBatchForTx(txHash)
+ batch, err := c.tenScanAPI.GetBatch(batchHash)
if err != nil {
- return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetBatchForTx, err)
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetBatch, err)
}
*result.(**common.ExtBatch) = batch
return nil
}
-func (c *inMemObscuroClient) getBatch(result interface{}, args []interface{}) error {
+func (c *inMemObscuroClient) getBatchListingDeprecated(result interface{}, args []interface{}) error {
if len(args) != 1 {
- return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetBatch, len(args))
+ return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetBatchListing, len(args))
}
- batchHash, ok := args[0].(gethcommon.Hash)
+ pagination, ok := args[0].(*common.QueryPagination)
if !ok {
- return fmt.Errorf("first arg to %s is of type %T, expected type int", rpc.GetBatch, args[0])
+		return fmt.Errorf("first arg to %s is of type %T, expected type *common.QueryPagination", rpc.GetBatchListing, args[0])
}
- batch, err := c.tenScanAPI.GetBatch(batchHash)
+ batches, err := c.tenScanAPI.GetBatchListing(pagination)
if err != nil {
- return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetBatch, err)
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetBatchListing, err)
}
- *result.(**common.ExtBatch) = batch
+ res, ok := result.(*common.BatchListingResponseDeprecated)
+ if !ok {
+ return fmt.Errorf("result is of type %T, expected *common.BatchListingResponseDeprecated", result)
+ }
+ *res = *batches
+ return nil
+}
+
+func (c *inMemObscuroClient) getRollupListing(result interface{}, args []interface{}) error {
+ if len(args) != 1 {
+ return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetRollupListing, len(args))
+ }
+ pagination, ok := args[0].(*common.QueryPagination)
+ if !ok {
+		return fmt.Errorf("first arg to %s is of type %T, expected type *common.QueryPagination", rpc.GetRollupListing, args[0])
+ }
+
+ rollups, err := c.tenScanAPI.GetRollupListing(pagination)
+ if err != nil {
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetRollupListing, err)
+ }
+
+ res, ok := result.(*common.RollupListingResponse)
+ if !ok {
+ return fmt.Errorf("result is of type %T, expected *common.BatchListingResponseDeprecated", result)
+ }
+ *res = *rollups
+ return nil
+}
+
+func (c *inMemObscuroClient) getPublicTransactionData(result interface{}, args []interface{}) error {
+ if len(args) != 1 {
+ return fmt.Errorf("expected 1 arg to %s, got %d", rpc.GetPublicTransactionData, len(args))
+ }
+ pagination, ok := args[0].(*common.QueryPagination)
+ if !ok {
+ return fmt.Errorf("first arg to %s is of type %T, expected type int", rpc.GetPublicTransactionData, args[0])
+ }
+
+ txs, err := c.tenScanAPI.GetPublicTransactionData(pagination)
+ if err != nil {
+ return fmt.Errorf("`%s` call failed. Cause: %w", rpc.GetPublicTransactionData, err)
+ }
+
+ res, ok := result.(*common.TransactionListingResponse)
+ if !ok {
+ return fmt.Errorf("result is of type %T, expected *common.BatchListingResponseDeprecated", result)
+ }
+ *res = *txs
return nil
}
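
The in-memory client dispatches these Tenscan RPCs by method name through the generic rpc.Client Call interface. A minimal caller sketch under that assumption (the helper name and error handling are illustrative, not part of the change):

// getFirstRollupPage shows how a caller drives the in-mem client through the
// generic Call interface; only the RPC name, pagination type and response
// shape come from the diff above.
func getFirstRollupPage(client rpc.Client) (*common.RollupListingResponse, error) {
	var rollups common.RollupListingResponse
	pagination := common.QueryPagination{Offset: 0, Size: 10}
	if err := client.Call(&rollups, rpc.GetRollupListing, &pagination); err != nil {
		return nil, err
	}
	return &rollups, nil
}
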
diff --git a/integration/simulation/validate_chain.go b/integration/simulation/validate_chain.go
index 8858cad630..cd406edb8a 100644
--- a/integration/simulation/validate_chain.go
+++ b/integration/simulation/validate_chain.go
@@ -720,8 +720,11 @@ func assertNoDupeLogs(t *testing.T, logs []*types.Log) {
func checkTenscan(t *testing.T, s *Simulation) {
for idx, client := range s.RPCHandles.RPCClients {
checkTotalTransactions(t, client, idx)
- latestTxHashes := checkLatestTxs(t, client, idx)
- for _, txHash := range latestTxHashes {
+ checkForLatestBatches(t, client, idx)
+ checkForLatestRollups(t, client, idx)
+
+ txHashes := getLatestTransactions(t, client, idx)
+ for _, txHash := range txHashes {
checkBatchFromTxs(t, client, txHash, idx)
}
}
@@ -730,7 +733,7 @@ func checkTenscan(t *testing.T, s *Simulation) {
// Checks that the node has stored sufficient transactions.
func checkTotalTransactions(t *testing.T, client rpc.Client, nodeIdx int) {
var totalTxs *big.Int
- err := client.Call(&totalTxs, rpc.GetTotalTxs)
+ err := client.Call(&totalTxs, rpc.GetTotalTxCount)
if err != nil {
t.Errorf("node %d: could not retrieve total transactions. Cause: %s", nodeIdx, err)
}
@@ -739,23 +742,52 @@ func checkTotalTransactions(t *testing.T, client rpc.Client, nodeIdx int) {
}
}
-// Checks that we can retrieve the latest transactions for the node.
-func checkLatestTxs(t *testing.T, client rpc.Client, nodeIdx int) []gethcommon.Hash {
- var latestTxHashes []gethcommon.Hash
- err := client.Call(&latestTxHashes, rpc.GetLatestTxs, txThreshold)
+// Checks that we can retrieve the latest batches.
+func checkForLatestBatches(t *testing.T, client rpc.Client, nodeIdx int) {
+ var latestBatches common.BatchListingResponseDeprecated
+ pagination := common.QueryPagination{Offset: uint64(0), Size: uint(5)}
+ err := client.Call(&latestBatches, rpc.GetBatchListing, &pagination)
+ if err != nil {
+ t.Errorf("node %d: could not retrieve latest batches. Cause: %s", nodeIdx, err)
+ }
+ if len(latestBatches.BatchesData) != 5 {
+ t.Errorf("node %d: expected at least %d batches, but only received %d", nodeIdx, 5, len(latestBatches.BatchesData))
+ }
+}
+
+// Checks that we can retrieve the latest rollups.
+func checkForLatestRollups(t *testing.T, client rpc.Client, nodeIdx int) {
+ var latestRollups common.RollupListingResponse
+ pagination := common.QueryPagination{Offset: uint64(0), Size: uint(5)}
+ err := client.Call(&latestRollups, rpc.GetRollupListing, &pagination)
+ if err != nil {
+ t.Errorf("node %d: could not retrieve latest transactions. Cause: %s", nodeIdx, err)
+ }
+ if len(latestRollups.RollupsData) != 5 {
+ t.Errorf("node %d: expected at least %d transactions, but only received %d", nodeIdx, 5, len(latestRollups.RollupsData))
+ }
+}
+
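+// Fetches the latest public transactions and returns their hashes.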
+func getLatestTransactions(t *testing.T, client rpc.Client, nodeIdx int) []gethcommon.Hash {
+ var transactionResponse common.TransactionListingResponse
+ var txHashes []gethcommon.Hash
+ pagination := common.QueryPagination{Offset: uint64(0), Size: uint(5)}
+ err := client.Call(&transactionResponse, rpc.GetPublicTransactionData, &pagination)
if err != nil {
t.Errorf("node %d: could not retrieve latest transactions. Cause: %s", nodeIdx, err)
}
- if len(latestTxHashes) != txThreshold {
- t.Errorf("node %d: expected at least %d transactions, but only received %d", nodeIdx, txThreshold, len(latestTxHashes))
+
+ for _, transaction := range transactionResponse.TransactionsData {
+ txHashes = append(txHashes, transaction.TransactionHash)
}
- return latestTxHashes
+
+ return txHashes
}
// Retrieves the batch using the transaction hash, and validates it.
func checkBatchFromTxs(t *testing.T, client rpc.Client, txHash gethcommon.Hash, nodeIdx int) {
var batchByTx *common.ExtBatch
- err := client.Call(&batchByTx, rpc.GetBatchForTx, txHash)
+ err := client.Call(&batchByTx, rpc.GetBatchByTx, txHash)
if err != nil {
t.Errorf("node %d: could not retrieve batch for transaction. Cause: %s", nodeIdx, err)
return
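
All three listing RPCs exercised above share common.QueryPagination, so paging past the first results follows one pattern. A hedged sketch (page size and stop condition are illustrative):

// walkBatches pages through the deprecated batch listing until a short page
// signals the end. Hypothetical helper; only the RPC name, pagination type and
// response shape come from the changes above.
func walkBatches(client rpc.Client) error {
	const pageSize = 5
	for offset := uint64(0); ; offset += pageSize {
		var page common.BatchListingResponseDeprecated
		pagination := common.QueryPagination{Offset: offset, Size: uint(pageSize)}
		if err := client.Call(&page, rpc.GetBatchListing, &pagination); err != nil {
			return err
		}
		if len(page.BatchesData) < pageSize {
			return nil // short page: no more batches
		}
	}
}
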
diff --git a/integration/tenscan/tenscan_test.go b/integration/tenscan/tenscan_test.go
index 5a9030403c..4024f55699 100644
--- a/integration/tenscan/tenscan_test.go
+++ b/integration/tenscan/tenscan_test.go
@@ -128,6 +128,24 @@ func TestTenscan(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, statusCode)
+ type batchlistingDeprecated struct {
+ Result common.BatchListingResponseDeprecated `json:"result"`
+ }
+
+ batchlistingObjDeprecated := batchlistingDeprecated{}
+ err = json.Unmarshal(body, &batchlistingObjDeprecated)
+ assert.NoError(t, err)
+ assert.LessOrEqual(t, 9, len(batchlistingObjDeprecated.Result.BatchesData))
+ assert.LessOrEqual(t, uint64(9), batchlistingObjDeprecated.Result.Total)
+ // check results are descending order (latest first)
+ assert.LessOrEqual(t, batchlistingObjDeprecated.Result.BatchesData[1].Number.Cmp(batchlistingObjDeprecated.Result.BatchesData[0].Number), 0)
+ // check "hash" field is included in json response
+ assert.Contains(t, string(body), "\"hash\"")
+
+ statusCode, body, err = fasthttp.Get(nil, fmt.Sprintf("%s/items/new/batches/?offset=0&size=10", serverAddress))
+ assert.NoError(t, err)
+ assert.Equal(t, 200, statusCode)
+
type batchlisting struct {
Result common.BatchListingResponse `json:"result"`
}
@@ -138,7 +156,7 @@ func TestTenscan(t *testing.T) {
assert.LessOrEqual(t, 9, len(batchlistingObj.Result.BatchesData))
assert.LessOrEqual(t, uint64(9), batchlistingObj.Result.Total)
// check results are descending order (latest first)
- assert.LessOrEqual(t, batchlistingObj.Result.BatchesData[1].Number.Cmp(batchlistingObj.Result.BatchesData[0].Number), 0)
+ assert.LessOrEqual(t, batchlistingObj.Result.BatchesData[1].Height.Cmp(batchlistingObj.Result.BatchesData[0].Height), 0)
// check "hash" field is included in json response
assert.Contains(t, string(body), "\"hash\"")
@@ -156,7 +174,7 @@ func TestTenscan(t *testing.T) {
// assert.LessOrEqual(t, 9, len(blocklistingObj.Result.BlocksData))
// assert.LessOrEqual(t, uint64(9), blocklistingObj.Result.Total)
- statusCode, body, err = fasthttp.Get(nil, fmt.Sprintf("%s/items/batch/%s", serverAddress, batchlistingObj.Result.BatchesData[0].Hash()))
+ statusCode, body, err = fasthttp.Get(nil, fmt.Sprintf("%s/items/batch/%s", serverAddress, batchlistingObj.Result.BatchesData[0].Header.Hash()))
assert.NoError(t, err)
assert.Equal(t, 200, statusCode)
@@ -167,7 +185,7 @@ func TestTenscan(t *testing.T) {
batchObj := batchFetch{}
err = json.Unmarshal(body, &batchObj)
assert.NoError(t, err)
- assert.Equal(t, batchlistingObj.Result.BatchesData[0].Hash(), batchObj.Item.Hash())
+ assert.Equal(t, batchlistingObj.Result.BatchesData[0].Header.Hash(), batchObj.Item.Header.Hash())
statusCode, body, err = fasthttp.Get(nil, fmt.Sprintf("%s/info/obscuro/", serverAddress))
assert.NoError(t, err)
@@ -182,7 +200,6 @@ func TestTenscan(t *testing.T) {
assert.NoError(t, err)
assert.NotEqual(t, configFetchObj.Item.SequencerID, gethcommon.Address{})
- // Gracefully shutdown
err = tenScanContainer.Stop()
assert.NoError(t, err)
}
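
Outside the test, the new /items/new/batches/ route can be exercised the same way the test does it. A minimal sketch, assuming only the endpoint shape and response wrapper used above (the helper name and address format are illustrative):

// fetchNewBatchListing hits the header-based batch listing endpoint added above.
// serverAddress is assumed to look like "http://localhost:<port>".
func fetchNewBatchListing(serverAddress string) (*common.BatchListingResponse, error) {
	_, body, err := fasthttp.Get(nil, fmt.Sprintf("%s/items/new/batches/?offset=0&size=10", serverAddress))
	if err != nil {
		return nil, err
	}
	var listing struct {
		Result common.BatchListingResponse `json:"result"`
	}
	if err := json.Unmarshal(body, &listing); err != nil {
		return nil, err
	}
	return &listing.Result, nil
}
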
diff --git a/tools/tenscan/backend/obscuroscan_backend.go b/tools/tenscan/backend/obscuroscan_backend.go
index 527a4669b3..5f4d9bd127 100644
--- a/tools/tenscan/backend/obscuroscan_backend.go
+++ b/tools/tenscan/backend/obscuroscan_backend.go
@@ -27,7 +27,7 @@ func NewBackend(obsClient *obsclient.ObsClient) *Backend {
}
func (b *Backend) GetLatestBatch() (*common.BatchHeader, error) {
- return b.obsClient.BatchHeaderByNumber(nil)
+ return b.obsClient.GetLatestBatch()
}
func (b *Backend) GetTenNodeHealthStatus() (bool, error) {
@@ -81,6 +81,13 @@ func (b *Backend) GetBatchesListing(offset uint64, size uint64) (*common.BatchLi
})
}
+func (b *Backend) GetBatchesListingDeprecated(offset uint64, size uint64) (*common.BatchListingResponseDeprecated, error) {
+ return b.obsClient.GetBatchesListingDeprecated(&common.QueryPagination{
+ Offset: offset,
+ Size: uint(size),
+ })
+}
+
func (b *Backend) GetBlockListing(offset uint64, size uint64) (*common.BlockListingResponse, error) {
return b.obsClient.GetBlockListing(&common.QueryPagination{
Offset: offset,
@@ -88,6 +95,13 @@ func (b *Backend) GetBlockListing(offset uint64, size uint64) (*common.BlockList
})
}
+func (b *Backend) GetRollupListing(offset uint64, size uint64) (*common.RollupListingResponse, error) {
+ return b.obsClient.GetRollupListing(&common.QueryPagination{
+ Offset: offset,
+ Size: uint(size),
+ })
+}
+
func (b *Backend) DecryptTxBlob(payload string) ([]*common.L2Tx, error) {
encryptedTxBytes, err := base64.StdEncoding.DecodeString(payload)
if err != nil {
diff --git a/tools/tenscan/backend/webserver/webserver_routes_items.go b/tools/tenscan/backend/webserver/webserver_routes_items.go
index f0de58bfbc..de58191bb3 100644
--- a/tools/tenscan/backend/webserver/webserver_routes_items.go
+++ b/tools/tenscan/backend/webserver/webserver_routes_items.go
@@ -11,11 +11,13 @@ import (
func routeItems(r *gin.Engine, server *WebServer) {
r.GET("/items/batch/latest/", server.getLatestBatch)
- r.GET("/items/rollup/latest/", server.getLatestRollupHeader)
r.GET("/items/batch/:hash", server.getBatch)
+ r.GET("/items/rollup/latest/", server.getLatestRollupHeader)
+ r.GET("/items/rollups/", server.getRollupListing) // New
+ r.GET("/items/batches/", server.getBatchListingDeprecated)
+ r.GET("/items/new/batches/", server.getBatchListingNew)
+ r.GET("/items/blocks/", server.getBlockListing) // Deprecated
r.GET("/items/transactions/", server.getPublicTransactions)
- r.GET("/items/batches/", server.getBatchListing)
- r.GET("/items/blocks/", server.getBlockListing)
r.GET("/info/obscuro/", server.getConfig)
r.POST("/info/health/", server.getHealthStatus)
}
@@ -38,13 +40,13 @@ func (w *WebServer) getLatestBatch(c *gin.Context) {
}
func (w *WebServer) getLatestRollupHeader(c *gin.Context) {
- block, err := w.backend.GetLatestRollupHeader()
+ rollup, err := w.backend.GetLatestRollupHeader()
if err != nil {
errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
return
}
- c.JSON(http.StatusOK, gin.H{"item": block})
+ c.JSON(http.StatusOK, gin.H{"item": rollup})
}
func (w *WebServer) getBatch(c *gin.Context) {
@@ -108,7 +110,7 @@ func (w *WebServer) getPublicTransactions(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"result": publicTxs})
}
-func (w *WebServer) getBatchListing(c *gin.Context) {
+func (w *WebServer) getBatchListingNew(c *gin.Context) {
offsetStr := c.DefaultQuery("offset", "0")
sizeStr := c.DefaultQuery("size", "10")
@@ -133,6 +135,56 @@ func (w *WebServer) getBatchListing(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"result": batchesListing})
}
+func (w *WebServer) getBatchListingDeprecated(c *gin.Context) {
+ offsetStr := c.DefaultQuery("offset", "0")
+ sizeStr := c.DefaultQuery("size", "10")
+
+ offset, err := strconv.ParseUint(offsetStr, 10, 32)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ size, err := strconv.ParseUint(sizeStr, 10, 64)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ batchesListing, err := w.backend.GetBatchesListingDeprecated(offset, size)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{"result": batchesListing})
+}
+
+func (w *WebServer) getRollupListing(c *gin.Context) {
+ offsetStr := c.DefaultQuery("offset", "0")
+ sizeStr := c.DefaultQuery("size", "10")
+
+ offset, err := strconv.ParseUint(offsetStr, 10, 32)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ size, err := strconv.ParseUint(sizeStr, 10, 64)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ rollupListing, err := w.backend.GetRollupListing(offset, size)
+ if err != nil {
+ errorHandler(c, fmt.Errorf("unable to execute request %w", err), w.logger)
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{"result": rollupListing})
+}
+
func (w *WebServer) getBlockListing(c *gin.Context) {
offsetStr := c.DefaultQuery("offset", "0")
sizeStr := c.DefaultQuery("size", "10")
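
getBatchListingDeprecated, getRollupListing and getBlockListing all repeat the same offset/size parsing. If that were consolidated, a shared helper might look like this (a sketch only, not part of the PR; the name is illustrative):

// parsePagination extracts the offset/size query parameters shared by the
// listing handlers, defaulting to the first page of ten items.
func parsePagination(c *gin.Context) (offset uint64, size uint64, err error) {
	offset, err = strconv.ParseUint(c.DefaultQuery("offset", "0"), 10, 32)
	if err != nil {
		return 0, 0, err
	}
	size, err = strconv.ParseUint(c.DefaultQuery("size", "10"), 10, 64)
	return offset, size, err
}
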
diff --git a/tools/tenscan/frontend/api/rollups.ts b/tools/tenscan/frontend/api/rollups.ts
index f26ef3eb93..375f0e0ecf 100644
--- a/tools/tenscan/frontend/api/rollups.ts
+++ b/tools/tenscan/frontend/api/rollups.ts
@@ -8,7 +8,7 @@ export const fetchRollups = async (
): Promise> => {
return await httpRequest>({
method: "get",
- url: pathToUrl(apiRoutes.getRollups),
+ url: pathToUrl(apiRoutes.getLatestRollup),
searchParams: payload,
});
};
diff --git a/tools/tenscan/frontend/src/routes/index.ts b/tools/tenscan/frontend/src/routes/index.ts
index 045b35989d..b0516e895a 100644
--- a/tools/tenscan/frontend/src/routes/index.ts
+++ b/tools/tenscan/frontend/src/routes/index.ts
@@ -21,7 +21,7 @@ export const apiRoutes = {
"https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd",
// **** ROLLUPS ****
- getRollups: "/items/rollup/latest/",
+ getLatestRollup: "/items/rollup/latest/",
decryptEncryptedRollup: "/actions/decryptTxBlob/",
// **** INFO ****
diff --git a/tools/walletextension/Dockerfile b/tools/walletextension/Dockerfile
index 6fae9c88bc..b399a471dd 100644
--- a/tools/walletextension/Dockerfile
+++ b/tools/walletextension/Dockerfile
@@ -55,10 +55,22 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
# Lightweight final build stage. Includes bare minimum to start wallet extension
FROM alpine:3.18
+# Install NGINX
+RUN apk update && apk add nginx
+
# copy over the gateway executable
COPY --from=build-wallet /home/obscuro/go-obscuro/tools/walletextension/bin /home/obscuro/go-obscuro/tools/walletextension/bin
# copy over the .sql migration files
COPY --from=build-wallet /home/obscuro/go-obscuro/tools/walletextension/storage/database /home/obscuro/go-obscuro/tools/walletextension/storage/database
-WORKDIR /home/obscuro/go-obscuro/tools/walletextension/bin
+# copy over the NGINX configuration file
+COPY --from=build-wallet /home/obscuro/go-obscuro/tools/walletextension/nginx.conf /etc/nginx/nginx.conf
+
+
+# copy over the entrypoint script
+COPY --from=build-wallet /home/obscuro/go-obscuro/tools/walletextension/entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
+
diff --git a/tools/walletextension/entrypoint.sh b/tools/walletextension/entrypoint.sh
new file mode 100644
index 0000000000..f470d7de86
--- /dev/null
+++ b/tools/walletextension/entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Start NGINX in the background
+nginx &
+
+# Start wallet_extension_linux in the background with the parameters passed to the script,
+# so that `wait -n` below can react to whichever process (NGINX or the wallet extension) exits first
+/home/obscuro/go-obscuro/tools/walletextension/bin/wallet_extension_linux "$@" &
+
+# Wait for any process to exit
+wait -n
+
+# Exit with the status of the process that exited first
+exit $?
diff --git a/tools/walletextension/httpapi/routes.go b/tools/walletextension/httpapi/routes.go
index 3ddbd3e073..883d96d9ed 100644
--- a/tools/walletextension/httpapi/routes.go
+++ b/tools/walletextension/httpapi/routes.go
@@ -387,8 +387,14 @@ func getMessageRequestHandler(walletExt *rpcapi.Services, conn UserConn) {
return
}
- // create the response structure
- type JSONResponse struct {
+ // create the response structure for EIP712 message where the message is a JSON object
+ type JSONResponseEIP712 struct {
+ Message json.RawMessage `json:"message"`
+ Type string `json:"type"`
+ }
+
+ // create the response structure for personal sign message where the message is a string
+ type JSONResponsePersonal struct {
Message string `json:"message"`
Type string `json:"type"`
}
@@ -396,16 +402,52 @@ func getMessageRequestHandler(walletExt *rpcapi.Services, conn UserConn) {
// get string representation of the message format
messageFormat := viewingkey.GetBestFormat(formatsSlice)
messageFormatString := viewingkey.GetSignatureTypeString(messageFormat)
+ responseBytes := []byte{}
+ if messageFormat == viewingkey.PersonalSign {
+ response := JSONResponsePersonal{
+ Message: message,
+ Type: messageFormatString,
+ }
- response := JSONResponse{
- Message: message,
- Type: messageFormatString,
- }
+ responseBytes, err = json.Marshal(response)
+ if err != nil {
+ handleError(conn, walletExt.Logger(), fmt.Errorf("error marshaling JSON response: %w", err))
+ return
+ }
+ } else if messageFormat == viewingkey.EIP712Signature {
+ var messageMap map[string]interface{}
+ err = json.Unmarshal([]byte(message), &messageMap)
+ if err != nil {
+ handleError(conn, walletExt.Logger(), fmt.Errorf("error unmarshaling JSON: %w", err))
+ return
+ }
- responseBytes, err := json.Marshal(response)
- if err != nil {
- walletExt.Logger().Error("error marshaling JSON response", log.ErrKey, err)
- return
+ if domainMap, ok := messageMap["domain"].(map[string]interface{}); ok {
+ delete(domainMap, "salt")
+ delete(domainMap, "verifyingContract")
+ }
+
+ if typesMap, ok := messageMap["types"].(map[string]interface{}); ok {
+ delete(typesMap, "EIP712Domain")
+ }
+
+ // Marshal the modified map back to JSON
+ modifiedMessage, err := json.Marshal(messageMap)
+ if err != nil {
+ handleError(conn, walletExt.Logger(), fmt.Errorf("error marshaling modified JSON: %w", err))
+ return
+ }
+
+ response := JSONResponseEIP712{
+ Message: modifiedMessage,
+ Type: messageFormatString,
+ }
+
+ responseBytes, err = json.Marshal(response)
+ if err != nil {
+ handleError(conn, walletExt.Logger(), fmt.Errorf("error marshaling JSON response: %w", err))
+ return
+ }
}
err = conn.WriteResponse(responseBytes)
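
The EIP712 branch above prunes the decoded message in place before re-marshaling. Extracted as a standalone helper, the pruning step would read roughly as follows (a sketch; the function name is hypothetical):

// stripEIP712Envelope removes the domain salt/verifyingContract entries and
// the EIP712Domain type definition, mirroring the handler logic above.
func stripEIP712Envelope(raw []byte) (json.RawMessage, error) {
	var msg map[string]interface{}
	if err := json.Unmarshal(raw, &msg); err != nil {
		return nil, err
	}
	if domain, ok := msg["domain"].(map[string]interface{}); ok {
		delete(domain, "salt")
		delete(domain, "verifyingContract")
	}
	if types, ok := msg["types"].(map[string]interface{}); ok {
		delete(types, "EIP712Domain")
	}
	return json.Marshal(msg)
}
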
diff --git a/tools/walletextension/nginx.conf b/tools/walletextension/nginx.conf
new file mode 100644
index 0000000000..b0a1e266b6
--- /dev/null
+++ b/tools/walletextension/nginx.conf
@@ -0,0 +1,27 @@
+events {
+ worker_connections 4096;
+}
+
+http {
+ server {
+ listen 80;
+
+ location = / {
+ proxy_pass http://localhost:8080/static/; # Proxies only the root URL to the static page
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+
+ location / {
+ proxy_pass http://localhost:8080; # Pass all other requests to the app
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+ }
+}
\ No newline at end of file
diff --git a/tools/walletextension/storage/database/mariadb/003_add_signature_type.sql b/tools/walletextension/storage/database/mariadb/003_add_signature_type.sql
index f99247c7a6..f04ffb6f23 100644
--- a/tools/walletextension/storage/database/mariadb/003_add_signature_type.sql
+++ b/tools/walletextension/storage/database/mariadb/003_add_signature_type.sql
@@ -1,2 +1 @@
-ALTER TABLE ogdb.accounts
-ADD COLUMN signature_type INT DEFAULT 0;
\ No newline at end of file
+ALTER TABLE ogdb.accounts ADD COLUMN IF NOT EXISTS signature_type INT DEFAULT 0;
\ No newline at end of file
diff --git a/tools/walletextension/storage/database/mariadb/mariadb.go b/tools/walletextension/storage/database/mariadb/mariadb.go
index 651c958745..a377970ba2 100644
--- a/tools/walletextension/storage/database/mariadb/mariadb.go
+++ b/tools/walletextension/storage/database/mariadb/mariadb.go
@@ -7,6 +7,8 @@ import (
"path/filepath"
"runtime"
+ "github.com/ten-protocol/go-ten/go/common/storage"
+
"github.com/ten-protocol/go-ten/go/common/viewingkey"
"github.com/ethereum/go-ethereum/crypto"
@@ -14,7 +16,6 @@ import (
_ "github.com/go-sql-driver/mysql" // Importing MariaDB driver
"github.com/ten-protocol/go-ten/go/common/errutil"
"github.com/ten-protocol/go-ten/tools/walletextension/common"
- "github.com/ten-protocol/go-ten/tools/walletextension/storage/database"
)
type MariaDB struct {
@@ -35,8 +36,7 @@ func NewMariaDB(dbURL string) (*MariaDB, error) {
}
migrationsDir := filepath.Dir(filename)
- // apply migrations
- if err = database.ApplyMigrations(db, migrationsDir); err != nil {
+ if err = storage.ApplyMigrations(db, migrationsDir); err != nil {
return nil, err
}
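
For context, the pattern NewMariaDB relies on (resolving the .sql migration directory relative to the Go source file, then applying the migrations at startup) looks like this in isolation. A sketch assuming only the storage.ApplyMigrations(db, dir) signature and mysql driver import shown above; the wrapper name is hypothetical:

// openWithMigrations resolves this source file's directory and applies the
// .sql migrations found there before handing back the connection.
func openWithMigrations(dbURL string) (*sql.DB, error) {
	db, err := sql.Open("mysql", dbURL)
	if err != nil {
		return nil, err
	}
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		return nil, fmt.Errorf("could not resolve migrations directory")
	}
	if err := storage.ApplyMigrations(db, filepath.Dir(filename)); err != nil {
		return nil, err
	}
	return db, nil
}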