Merge remote-tracking branch 'origin/main' into pedro/gaslimit_gascap_flags
otherview committed Dec 18, 2023
2 parents 6ef0a52 + b528fcb commit cde3747
Showing 154 changed files with 11,377 additions and 1,853 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/manual-deploy-obscuro-gateway.yml
@@ -52,7 +52,7 @@ jobs:

- name: Build and Push Docker Image
run: |
DOCKER_BUILDKIT=1 docker build -t ${{ vars.DOCKER_BUILD_TAG_GATEWAY }} -f ./tools/walletextension/Dockerfile .
DOCKER_BUILDKIT=1 docker build --build-arg TESTNET_TYPE=${{ github.event.inputs.testnet_type }} -t ${{ vars.DOCKER_BUILD_TAG_GATEWAY }} -f ./tools/walletextension/Dockerfile .
docker push ${{ vars.DOCKER_BUILD_TAG_GATEWAY }}
# This will fail some deletions due to resource dependencies ( ie. you must first delete the vm before deleting the disk)
8 changes: 4 additions & 4 deletions .github/workflows/manual-deploy-obscuro-scan-3.yml
@@ -1,8 +1,8 @@
# Deploys Ten Scan on Azure for Testnet
# Builds the Ten Scan image, pushes the image to dockerhub and starts the Ten Scan on Azure
# Deploys Tenscan on Azure for Testnet
# Builds the Tenscan image, pushes the image to dockerhub and starts the Tenscan on Azure

name: "[M] Deploy Ten Scan 3 Testnet"
run-name: "[M] Deploy Ten Scan Testnet ( ${{ github.event.inputs.testnet_type }} )"
name: "[M] Deploy Tenscan 3 Testnet"
run-name: "[M] Deploy Tenscan Testnet ( ${{ github.event.inputs.testnet_type }} )"
on:
workflow_dispatch:
inputs:
2 changes: 1 addition & 1 deletion .github/workflows/manual-deploy-testnet-l2.yml
@@ -67,7 +67,7 @@ jobs:

- name: 'Build and push obscuro node images'
run: |
DOCKER_BUILDKIT=1 docker build -t ${{ vars.DOCKER_BUILD_TAG_ENCLAVE }} -f dockerfiles/enclave.Dockerfile .
DOCKER_BUILDKIT=1 docker build -t ${{ vars.DOCKER_BUILD_TAG_ENCLAVE }} --build-arg TESTMODE=true -f dockerfiles/enclave.Dockerfile .
docker push ${{ vars.DOCKER_BUILD_TAG_ENCLAVE }}
DOCKER_BUILDKIT=1 docker build -t ${{ vars.DOCKER_BUILD_TAG_HOST }} -f dockerfiles/host.Dockerfile .
docker push ${{ vars.DOCKER_BUILD_TAG_HOST }}
2 changes: 1 addition & 1 deletion .github/workflows/manual-upgrade-testnet-l2.yml
@@ -86,7 +86,7 @@ jobs:

- name: 'Build and push obscuro node images'
run: |
DOCKER_BUILDKIT=1 docker build -t ${{ vars.L2_ENCLAVE_DOCKER_BUILD_TAG }} -f dockerfiles/enclave.Dockerfile .
DOCKER_BUILDKIT=1 docker build -t ${{ vars.L2_ENCLAVE_DOCKER_BUILD_TAG }} --build-arg TESTMODE=true -f dockerfiles/enclave.Dockerfile .
docker push ${{ vars.L2_ENCLAVE_DOCKER_BUILD_TAG }}
DOCKER_BUILDKIT=1 docker build -t ${{ vars.L2_HOST_DOCKER_BUILD_TAG }} -f dockerfiles/host.Dockerfile .
docker push ${{ vars.L2_HOST_DOCKER_BUILD_TAG }}
3 changes: 3 additions & 0 deletions .gitignore
@@ -56,6 +56,9 @@ docs/_site
# db files
tools/walletextension/main/.obscuro

# static files generated by npm run build
tools/walletextension/api/static

# contracts

# Logs
2 changes: 1 addition & 1 deletion dockerfiles/enclave.debug.Dockerfile
@@ -4,7 +4,7 @@
# build-enclave = copies over the source and builds the enclave using a go compiler cache
# final = using the base system copies over only the enclave executable and creates the final image without source and dependencies.

FROM golang:1.20-alpine as system
FROM golang:1.20-alpine3.18 as system

# install build utils
RUN apk add build-base
4 changes: 2 additions & 2 deletions dockerfiles/host.Dockerfile
@@ -4,7 +4,7 @@
# build-host = copies over the source code and builds the binaries using a compiler cache
# final = copies over only the executables in an alpine image that doesn't have any additional load.

FROM golang:1.20-alpine as system
FROM golang:1.20-alpine3.18 as system
# set the base libs to build / run
RUN apk add build-base bash
ENV CGO_ENABLED=1
@@ -31,7 +31,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
go build

# Trigger another build stage to remove unnecessary files.
FROM alpine:3.17
FROM alpine:3.18

# Copy over just the binary from the previous build stage into this one.
COPY --from=build-host \
2 changes: 2 additions & 0 deletions go/common/headers.go
@@ -47,6 +47,7 @@ type BatchHeader struct {
}

type batchHeaderEncoding struct {
Hash common.Hash `json:"hash"`
ParentHash L2BatchHash `json:"parentHash"`
Root common.Hash `json:"stateRoot"`
TxHash common.Hash `json:"transactionsRoot"`
@@ -72,6 +73,7 @@ type batchHeaderEncoding struct {
// MarshalJSON custom marshals the BatchHeader into a json
func (b *BatchHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(batchHeaderEncoding{
b.Hash(),
b.ParentHash,
b.Root,
b.TxHash,
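
The practical effect of the new field: the batch hash is now computed inside MarshalJSON and emitted under the "hash" key, so JSON consumers no longer have to re-derive it from the other header fields. A minimal sketch of the same pattern with stand-in types (an illustration with assumed names, not the Ten code — the real BatchHeader carries many more fields):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// header is a stand-in for BatchHeader, trimmed to two fields.
type header struct {
	ParentHash common.Hash
	Number     uint64
}

// Hash derives the header's identity from its RLP encoding.
func (h *header) Hash() common.Hash {
	enc, _ := rlp.EncodeToBytes(h)
	return crypto.Keccak256Hash(enc)
}

// MarshalJSON mirrors the batchHeaderEncoding approach: the computed hash is
// serialised alongside the raw fields.
func (h *header) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Hash       common.Hash `json:"hash"`
		ParentHash common.Hash `json:"parentHash"`
		Number     uint64      `json:"number"`
	}{h.Hash(), h.ParentHash, h.Number})
}

func main() {
	out, _ := json.Marshal(&header{Number: 7})
	fmt.Println(string(out)) // the payload now includes a non-empty "hash" field
}
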
2 changes: 1 addition & 1 deletion go/enclave/components/batch_executor.go
@@ -217,7 +217,7 @@ func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext, fail
if failForEmptyBatch &&
len(txReceipts) == 0 &&
len(ccReceipts) == 0 &&
len(transactionsToProcess) == 0 &&
len(transactionsToProcess)-len(excludedTxs) == 0 &&
len(crossChainTransactions) == 0 &&
len(messages) == 0 &&
len(transfers) == 0 {
9 changes: 7 additions & 2 deletions go/enclave/enclave.go
@@ -192,7 +192,7 @@ func NewEnclave(
sharedSecretProcessor := components.NewSharedSecretProcessor(mgmtContractLib, attestationProvider, storage, logger)

blockchain := ethchainadapter.NewEthChainAdapter(big.NewInt(config.ObscuroChainID), registry, storage, logger)
mempool, err := txpool.NewTxPool(blockchain, config.MinGasPrice)
mempool, err := txpool.NewTxPool(blockchain, config.MinGasPrice, logger)
if err != nil {
logger.Crit("unable to init eth tx pool", log.ErrKey, err)
}
@@ -966,8 +966,13 @@ func (e *enclaveImpl) Stop() common.SystemError {
e.registry.UnsubscribeFromBatches()
}

err := e.service.Close()
if err != nil {
e.logger.Error("Could not stop node service", log.ErrKey, err)
}

time.Sleep(time.Second)
err := e.storage.Close()
err = e.storage.Close()
if err != nil {
e.logger.Error("Could not stop db", log.ErrKey, err)
return err
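
A hedged reading of the new shutdown order (an inference, not stated in the commit): the node service gains a Close method elsewhere in this change set and owns the mempool, so it is stopped before the storage layer to avoid the pool touching an already-closed database. A generic sketch of that ordering with stand-in types:

package main

import (
	"io"
	"log"
	"time"
)

// stopAll is a hypothetical helper: close the service first, pause briefly as the
// diff does, then close storage; errors are logged so the later cleanup still runs.
func stopAll(service, storage io.Closer) error {
	if err := service.Close(); err != nil {
		log.Printf("could not stop node service: %v", err)
	}
	time.Sleep(time.Second)
	return storage.Close()
}

func main() {
	_ = stopAll(io.NopCloser(nil), io.NopCloser(nil))
}
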
51 changes: 29 additions & 22 deletions go/enclave/evm/ethchainadapter/eth_chainadapter.go
@@ -1,7 +1,6 @@
package ethchainadapter

import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/common"
@@ -83,13 +82,31 @@ func (e *EthChainAdapter) SubscribeChainHeadEvent(ch chan<- gethcore.ChainHeadEv

// GetBlock retrieves a specific block, used during pool resets.
func (e *EthChainAdapter) GetBlock(_ common.Hash, number uint64) *gethtypes.Block {
nbatch, err := e.storage.FetchBatchByHeight(number)
var batch *core.Batch

// to avoid a costly select to the db, check whether the batches requested are the last ones which are cached
headBatch, err := e.storage.FetchBatchBySeqNo(e.batchRegistry.HeadBatchSeq().Uint64())
if err != nil {
e.logger.Warn("unable to get batch by height", "number", number, log.ErrKey, err)
e.logger.Error("unable to get head batch", log.ErrKey, err)
return nil
}
if headBatch.Number().Uint64() == number {
batch = headBatch
} else if headBatch.Number().Uint64()-1 == number {
batch, err = e.storage.FetchBatch(headBatch.Header.ParentHash)
if err != nil {
e.logger.Error("unable to get parent of head batch", log.ErrKey, err, log.BatchHashKey, headBatch.Header.ParentHash)
return nil
}
} else {
batch, err = e.storage.FetchBatchByHeight(number)
if err != nil {
e.logger.Error("unable to get batch by height", log.BatchHeightKey, number, log.ErrKey, err)
return nil
}
}

nfromBatch, err := gethencoding.CreateEthBlockFromBatch(nbatch)
nfromBatch, err := gethencoding.CreateEthBlockFromBatch(batch)
if err != nil {
e.logger.Error("unable to convert batch to eth block", log.ErrKey, err)
return nil
@@ -104,17 +121,7 @@ func (e *EthChainAdapter) StateAt(root common.Hash) (*state.StateDB, error) {
return nil, nil //nolint:nilnil
}

currentBatchSeqNo := e.batchRegistry.HeadBatchSeq()
if currentBatchSeqNo == nil {
return nil, fmt.Errorf("not ready yet")
}
currentBatch, err := e.storage.FetchBatchBySeqNo(currentBatchSeqNo.Uint64())
if err != nil {
e.logger.Warn("unable to get batch by height", "currentBatchSeqNo", currentBatchSeqNo, log.ErrKey, err)
return nil, nil //nolint:nilnil
}

return e.storage.CreateStateDB(currentBatch.Hash())
return state.New(root, e.storage.StateDB(), nil)
}

func (e *EthChainAdapter) IngestNewBlock(batch *core.Batch) error {
@@ -136,13 +143,13 @@ func NewLegacyPoolConfig() legacypool.Config {
NoLocals: false,
Journal: "",
Rejournal: 0,
PriceLimit: 0,
PriceBump: 0,
AccountSlots: 100,
GlobalSlots: 10000000,
AccountQueue: 100,
GlobalQueue: 10000000,
Lifetime: 0,
PriceLimit: legacypool.DefaultConfig.PriceLimit,
PriceBump: legacypool.DefaultConfig.PriceBump,
AccountSlots: legacypool.DefaultConfig.AccountSlots,
GlobalSlots: legacypool.DefaultConfig.GlobalSlots,
AccountQueue: legacypool.DefaultConfig.AccountQueue,
GlobalQueue: legacypool.DefaultConfig.GlobalQueue,
Lifetime: legacypool.DefaultConfig.Lifetime,
}
}

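
For the StateAt rewrite, the relevant geth call shape: state.New opens a StateDB at an arbitrary root on top of a state.Database, which is what the new Storage.StateDB() accessor (below) exposes. A self-contained toy using an in-memory database and the empty root, purely to illustrate the API under assumed geth signatures of this era — not Ten code:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
)

func main() {
	// Backing state database; the chain adapter gets the real one from storage.StateDB().
	db := state.NewDatabase(rawdb.NewMemoryDatabase())

	// A zero root is treated as the empty trie, so this succeeds on a fresh database.
	// The adapter instead passes whatever root the tx pool asked for.
	sdb, err := state.New(common.Hash{}, db, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(sdb.GetBalance(common.Address{})) // 0 for an untouched account
}
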
2 changes: 2 additions & 0 deletions go/enclave/nodetype/interfaces.go
@@ -20,6 +20,8 @@ type NodeType interface {

// OnL1Block - performed after the block was processed
OnL1Block(block types.Block, result *components.BlockIngestionType) error

Close() error
}

type Sequencer interface {
4 changes: 4 additions & 0 deletions go/enclave/nodetype/sequencer.go
@@ -448,3 +448,7 @@ func (s *sequencer) OnL1Block(_ types.Block, _ *components.BlockIngestionType) e
// nothing to do
return nil
}

func (s *sequencer) Close() error {
return s.mempool.Close()
}
4 changes: 4 additions & 0 deletions go/enclave/nodetype/validator.go
@@ -153,3 +153,7 @@ func (val *obsValidator) handleGenesis(batch *core.Batch) error {
func (val *obsValidator) OnL1Block(_ types.Block, _ *components.BlockIngestionType) error {
return val.ExecuteStoredBatches()
}

func (val *obsValidator) Close() error {
return nil
}
3 changes: 3 additions & 0 deletions go/enclave/storage/interfaces.go
@@ -142,6 +142,9 @@ type Storage interface {

// TrieDB - return the underlying trie database
TrieDB() *trie.Database

// StateDB - return the underlying state database
StateDB() state.Database
}

type ScanStorage interface {
4 changes: 4 additions & 0 deletions go/enclave/storage/storage.go
@@ -108,6 +108,10 @@ func (s *storageImpl) TrieDB() *trie.Database {
return s.stateDB.TrieDB()
}

func (s *storageImpl) StateDB() state.Database {
return s.stateDB
}

func (s *storageImpl) Close() error {
return s.db.GetSQLDB().Close()
}
16 changes: 15 additions & 1 deletion go/enclave/txpool/txpool.go
@@ -5,6 +5,9 @@ import (
"math/big"
"strings"

gethlog "github.com/ethereum/go-ethereum/log"
"github.com/ten-protocol/go-ten/go/common/log"

gethcommon "github.com/ethereum/go-ethereum/common"
gethtxpool "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
@@ -20,10 +23,11 @@ type TxPool struct {
blockchain *ethchainadapter.EthChainAdapter
gasTip *big.Int
running bool
logger gethlog.Logger
}

// NewTxPool returns a new instance of the tx pool
func NewTxPool(blockchain *ethchainadapter.EthChainAdapter, gasTip *big.Int) (*TxPool, error) {
func NewTxPool(blockchain *ethchainadapter.EthChainAdapter, gasTip *big.Int, logger gethlog.Logger) (*TxPool, error) {
txPoolConfig := ethchainadapter.NewLegacyPoolConfig()
legacyPool := legacypool.New(txPoolConfig, blockchain)

@@ -32,6 +36,7 @@ func NewTxPool(blockchain *ethchainadapter.EthChainAdapter, gasTip *big.Int) (*T
txPoolConfig: txPoolConfig,
legacyPool: legacyPool,
gasTip: gasTip,
logger: logger,
}, nil
}

@@ -75,3 +80,12 @@ func (t *TxPool) Add(transaction *common.L2Tx) error {
func (t *TxPool) Running() bool {
return t.running
}

func (t *TxPool) Close() error {
defer func() {
if err := recover(); err != nil {
t.logger.Error("Could not close legacy pool", log.ErrKey, err)
}
}()
return t.legacyPool.Close()
}
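
The recover guard in Close is worth noting: it presumably protects against the underlying legacy pool panicking when Close is called on a pool that was never started, or is closed twice during shutdown, turning that into a logged error instead of crashing the enclave. A generic sketch of the pattern, not tied to the Ten types:

package main

import "fmt"

// safeClose is a hypothetical wrapper: run a close function that may panic,
// convert the panic into a logged message, and otherwise return its error.
func safeClose(name string, closeFn func() error) (err error) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("could not close %s: %v\n", name, r)
		}
	}()
	return closeFn()
}

func main() {
	_ = safeClose("pool", func() error { return nil })                       // normal path
	_ = safeClose("pool", func() error { panic("close of closed channel") }) // panic is contained
}
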
4 changes: 4 additions & 0 deletions go/enclave/txpool/txpool_mock_test.go
@@ -340,3 +340,7 @@ func (m *mockStorage) TrieDB() *trie.Database {
// TODO implement me
panic("implement me")
}

func (m *mockStorage) StateDB() state.Database {
return m.stateDB
}
2 changes: 1 addition & 1 deletion go/enclave/txpool/txpool_test.go
@@ -57,7 +57,7 @@ func TestTxPool_AddTransaction_Pending(t *testing.T) {
err = blockchain.IngestNewBlock(genesisBatch)
require.NoError(t, err)

txPool, err := NewTxPool(blockchain, big.NewInt(1))
txPool, err := NewTxPool(blockchain, big.NewInt(1), testlog.Logger())
require.NoError(t, err)

// Start the TxPool
8 changes: 8 additions & 0 deletions integration/faucet/faucet_test.go
@@ -41,6 +41,8 @@ const (
)

func TestFaucet(t *testing.T) {
t.Skip("Skipping because it is too flaky")

startPort := integration.StartPortFaucetUnitTest
createObscuroNetwork(t, startPort)
// This sleep is required to ensure the initial rollup exists, and thus contract deployer can check its balance.
@@ -60,6 +62,12 @@ func TestFaucet(t *testing.T) {
assert.NoError(t, err)

err = faucetContainer.Start()
defer func(faucetContainer *container.FaucetContainer) {
err := faucetContainer.Stop()
if err != nil {
fmt.Printf("Could not stop faucet %s", err.Error())
}
}(faucetContainer)
assert.NoError(t, err)

initialFaucetBal, err := getFaucetBalance(faucetConfig.ServerPort)
41 changes: 41 additions & 0 deletions integration/networktest/actions/publicdata/tenscan_data.go
@@ -0,0 +1,41 @@
package publicdata

import (
"context"
"fmt"

"github.com/ten-protocol/go-ten/go/common"
"github.com/ten-protocol/go-ten/go/obsclient"
"github.com/ten-protocol/go-ten/integration/networktest"
"github.com/ten-protocol/go-ten/integration/networktest/actions"
)

// VerifyBatchesDataAction tests the batches data RPC endpoint
func VerifyBatchesDataAction() networktest.Action {
return actions.VerifyOnlyAction(func(ctx context.Context, network networktest.NetworkConnector) error {
client, err := obsclient.Dial(network.ValidatorRPCAddress(0))
if err != nil {
return err
}

pagination := common.QueryPagination{
Offset: 0,
Size: 20,
}
batchListing, err := client.GetBatchesListing(&pagination)
if err != nil {
return err
}
if len(batchListing.BatchesData) != 20 {
return fmt.Errorf("expected 20 batches, got %d", len(batchListing.BatchesData))
}
if batchListing.Total <= 10 {
return fmt.Errorf("expected more than 10 batches, got %d", batchListing.Total)
}
if batchListing.BatchesData[0].Number.Cmp(batchListing.BatchesData[1].Number) < 0 {
return fmt.Errorf("expected batches to be sorted by height descending")
}

return nil
})
}