diff --git a/.github/workflows/build-gateway-lib.yml b/.github/workflows/build-gateway-lib.yml index 5583b23641..7c936df9b6 100644 --- a/.github/workflows/build-gateway-lib.yml +++ b/.github/workflows/build-gateway-lib.yml @@ -15,7 +15,7 @@ jobs: steps: # Check out the repository - name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Set up Node.js - name: Set up Node.js diff --git a/.github/workflows/build-pr.yml b/.github/workflows/build-pr.yml index f377b6bc78..1bc3e8ea24 100644 --- a/.github/workflows/build-pr.yml +++ b/.github/workflows/build-pr.yml @@ -14,9 +14,9 @@ jobs: build: runs-on: self-hosted steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: 1.21.8 @@ -57,7 +57,7 @@ jobs: run: go test --failfast -v ./... -count=1 -timeout 5m - name: Store simulation logs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: failure() with: name: ci-logs diff --git a/.github/workflows/deploy-gateway-lib.yml b/.github/workflows/deploy-gateway-lib.yml index f1d8b39c24..60132c9584 100644 --- a/.github/workflows/deploy-gateway-lib.yml +++ b/.github/workflows/deploy-gateway-lib.yml @@ -15,7 +15,7 @@ jobs: steps: # Check out the repository - name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Set up Node.js - name: Set up Node.js diff --git a/.github/workflows/hardhat-pr-check.yml b/.github/workflows/hardhat-pr-check.yml index 758a59c883..ab6f85a364 100644 --- a/.github/workflows/hardhat-pr-check.yml +++ b/.github/workflows/hardhat-pr-check.yml @@ -12,7 +12,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@v3 with: node-version: 18 diff --git a/.github/workflows/manual-deploy-dexynth-gateway.yml b/.github/workflows/manual-deploy-dexynth-gateway.yml index 0b51245200..5271686154 100644 --- a/.github/workflows/manual-deploy-dexynth-gateway.yml +++ b/.github/workflows/manual-deploy-dexynth-gateway.yml @@ -25,7 +25,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Extract branch name' shell: bash diff --git a/.github/workflows/manual-deploy-obscuro-gateway-database.yml b/.github/workflows/manual-deploy-obscuro-gateway-database.yml index d99b18ec2e..b4eeec0f68 100644 --- a/.github/workflows/manual-deploy-obscuro-gateway-database.yml +++ b/.github/workflows/manual-deploy-obscuro-gateway-database.yml @@ -27,7 +27,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Extract branch name' shell: bash diff --git a/.github/workflows/manual-deploy-obscuro-gateway.yml b/.github/workflows/manual-deploy-obscuro-gateway.yml index 622ae25141..4d063d70a7 100644 --- a/.github/workflows/manual-deploy-obscuro-gateway.yml +++ b/.github/workflows/manual-deploy-obscuro-gateway.yml @@ -27,7 +27,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Extract branch name' shell: bash diff --git a/.github/workflows/manual-deploy-ten-gateway-frontend.yml b/.github/workflows/manual-deploy-ten-gateway-frontend.yml index 16545153ef..8ac6e17e70 100644 --- a/.github/workflows/manual-deploy-ten-gateway-frontend.yml +++ b/.github/workflows/manual-deploy-ten-gateway-frontend.yml @@ -37,7 +37,7 @@ jobs: echo "Selected Testnet Type: 
${{ matrix.testnet_type }}" echo "Gateway API URL: ${{ matrix.GATEWAY_API_URL }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Extract branch name' shell: bash diff --git a/.github/workflows/manual-deploy-ten-scan.yml b/.github/workflows/manual-deploy-ten-scan.yml index 02dd88839d..f6811fd749 100644 --- a/.github/workflows/manual-deploy-ten-scan.yml +++ b/.github/workflows/manual-deploy-ten-scan.yml @@ -27,7 +27,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Set up Docker" uses: docker/setup-buildx-action@v1 diff --git a/.github/workflows/manual-deploy-testnet-faucet.yml b/.github/workflows/manual-deploy-testnet-faucet.yml index 9550c78ef8..a6765a89b1 100644 --- a/.github/workflows/manual-deploy-testnet-faucet.yml +++ b/.github/workflows/manual-deploy-testnet-faucet.yml @@ -43,7 +43,7 @@ jobs: - run: echo "Workflow_dispatch inputs ${{ github.event.inputs.testnet_type }}" - run: echo "Workflow_call inputs ${{ inputs.testnet_type }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Set up Docker' uses: docker/setup-buildx-action@v1 diff --git a/.github/workflows/manual-deploy-testnet-l1.yml b/.github/workflows/manual-deploy-testnet-l1.yml index 1accfb5260..b92101dff0 100644 --- a/.github/workflows/manual-deploy-testnet-l1.yml +++ b/.github/workflows/manual-deploy-testnet-l1.yml @@ -39,7 +39,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Set up Docker' uses: docker/setup-buildx-action@v1 diff --git a/.github/workflows/manual-deploy-testnet-l2.yml b/.github/workflows/manual-deploy-testnet-l2.yml index ce7c72a9e0..e283acb891 100644 --- a/.github/workflows/manual-deploy-testnet-l2.yml +++ b/.github/workflows/manual-deploy-testnet-l2.yml @@ -47,9 +47,9 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: 1.21.8 @@ -124,7 +124,7 @@ jobs: done - name: 'Upload L1 deployer container logs' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: deploy-l1-artifacts path: | @@ -224,6 +224,9 @@ jobs: --command-id RunShellScript \ --scripts 'mkdir -p /home/obscuro \ && git clone --depth 1 -b ${{ env.BRANCH_NAME }} https://github.com/ten-protocol/go-ten.git /home/obscuro/go-obscuro \ + && cp /home/obscuro/go-obscuro/tools/edbconnect/edb-connect.sh /home/obscurouser/edb-connect.sh \ + && chown obscurouser:obscurouser /home/obscurouser/edb-connect.sh \ + && chmod u+x /home/obscurouser/edb-connect.sh \ && docker network create --driver bridge node_network || true \ && docker run -d --name datadog-agent \ --network node_network \ @@ -272,7 +275,7 @@ jobs: environment: name: ${{ github.event.inputs.testnet_type }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Wait until obscuro node is healthy" shell: bash @@ -289,7 +292,7 @@ jobs: environment: name: ${{ github.event.inputs.testnet_type }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Deploy L2 contracts' id: deployL2Contracts @@ -313,7 +316,7 @@ jobs: docker logs `docker ps -aqf "name=hh-l2-deployer"` > deploy-l2-contracts.out 2>&1 - name: 'Upload L2 deployer container logs' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: deploy-l2-artifacts path: | @@ -327,7 +330,7 @@ jobs: 
environment: name: ${{ github.event.inputs.testnet_type }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Login via Azure CLI' uses: azure/login@v1 diff --git a/.github/workflows/manual-deploy-testnet-validator.yml b/.github/workflows/manual-deploy-testnet-validator.yml index 936791f841..cda3c96da4 100644 --- a/.github/workflows/manual-deploy-testnet-validator.yml +++ b/.github/workflows/manual-deploy-testnet-validator.yml @@ -73,9 +73,9 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: 1.21.8 @@ -201,7 +201,7 @@ jobs: # environment: # name: ${{ github.event.inputs.testnet_type }} # steps: -# - uses: actions/checkout@v3 +# - uses: actions/checkout@v4 # # - name: 'Login via Azure CLI' # uses: azure/login@v1 @@ -230,7 +230,7 @@ jobs: environment: name: ${{ github.event.inputs.testnet_type }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Wait until obscuro node is healthy" shell: bash diff --git a/.github/workflows/manual-recover-network-funds.yml b/.github/workflows/manual-recover-network-funds.yml index 4d722a12da..7642d110d9 100644 --- a/.github/workflows/manual-recover-network-funds.yml +++ b/.github/workflows/manual-recover-network-funds.yml @@ -34,7 +34,7 @@ jobs: run: | echo "GitHub Variables = ${{ toJSON(vars) }}" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 'Login to Azure docker registry' uses: azure/docker-login@v1 @@ -65,7 +65,7 @@ jobs: docker logs `docker ps -aqf "name=recover-funds"` > recover-funds.out 2>&1 - name: 'Upload container logs on failure' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: failure() with: name: recover-funds diff --git a/.github/workflows/manual-upgrade-testnet-l2.yml b/.github/workflows/manual-upgrade-testnet-l2.yml index ecc8e05254..33395c9074 100644 --- a/.github/workflows/manual-upgrade-testnet-l2.yml +++ b/.github/workflows/manual-upgrade-testnet-l2.yml @@ -43,9 +43,9 @@ jobs: VM_BUILD_NUMBER: ${{ steps.outputVars.outputs.VM_BUILD_NUMBER }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: 1.21.8 @@ -188,7 +188,7 @@ jobs: environment: name: ${{ github.event.inputs.testnet_type }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Wait until obscuro node is healthy" shell: bash diff --git a/design/architecture/db_schema.png b/design/architecture/db_schema.png index d0e162451e..9f53639295 100644 Binary files a/design/architecture/db_schema.png and b/design/architecture/db_schema.png differ diff --git a/design/architecture/db_schema.puml b/design/architecture/db_schema.puml index 9266d65f93..86f20a273f 100644 --- a/design/architecture/db_schema.puml +++ b/design/architecture/db_schema.puml @@ -1,175 +1,112 @@ @startuml -' avoid problems with angled crows feet -skinparam linetype ortho - -package "Batch Execution" as BE #LightYellow{ - -note as N0 - This section is populated - when a batch is processed. -end note - -note as N1 - There can be multiple - sibling batches, but - they will always - share a body. -end note -note as N2 - The same transaction can be part - of multiple reorged sibling batches, - and generate different receipts and events. -end note - -note as N3 - Transactions can be bound - to a batch body only. 
-end note +!theme plain +top to bottom direction +skinparam linetype ortho +class attestation_key { + party: binary(20) + ky: binary(33) } - -package "Block Submission" as BS #LightBlue{ -note as N4 - This section is populated - when an L1 block is processed. - The logic will store the rollups - and the cross chain messages. - It will also mark "is_canonical" - when there are reorgs. -end note - -note as N6 -Add constraint on -block(is_canonical, height) -end note - +class batch { + converted_hash: binary(32) + hash: binary(32) + height: int + is_canonical: boolean + header: blob + l1_proof_hash: binary(32) + l1_proof: integer + is_executed: boolean + sequence: int } - -package "Config" as C #LightGreen{ -note as N5 - This table contains general - information like the shared secret, - the attestation, etc. -end note - +class block { + hash: binary(32) + is_canonical: boolean + header: blob + height: int + id: integer } - -package "Key Value" as KV { -note as N7 -This is where the stateDB lives. -Maybe some stuff needed by the embedded geth. -end note +class config { + val: mediumblob + ky: varchar(64) } - -entity "Batch" as BE.batch { - *batchHash : binary(32) - -- - sequence : int - height : int - is_canonical : boolean - source : P2P/ROLLUP - header : string - *l1Proof : binary(32) <> - *bodyHash : binary(32) <> - *parentHash : binary(32) <> +class contract { + address: binary(20) + owner: int + id: integer } - -entity "Batch Body" as BE.batch_body { - *bodyHash : binary(32) - -- - content : mediumblob +class event_log { + event_type: integer + topic1: integer + topic2: integer + topic3: integer + datablob: mediumblob + log_idx: integer + receipt: integer + id: integer } - -entity "Transaction" as BE.tx { - *txHash : binary(32) - -- - content : binary - senderAddress : binary(20) - nonce : int - idx : int - *bodyHash : binary(32) <> +class event_topic { + topic: binary(32) + rel_address: integer + id: integer } - -entity "Executed Transaction" as BE.exec_tx { - *execTxId : number <> - -- - createdContractAddress : binary(32) - receipt : mediumblob - *txHash : binary(32) <> - *batchHash : binary(32) <> +class event_type { + contract: integer + event_sig: binary(32) + lifecycle_event: boolean + id: integer } - -entity "Event" as BE.event { - -- - topic0 : binary(32) - topic1 : binary(32) - topic2 : binary(32) - topic3 : binary(32) - topic4 : binary(32) - datablob : mediumblob - logIdx : int - address : binary(32) - lifecycleEvent : boolean - relAddress1 : binary(20) - relAddress2 : binary(20) - relAddress3 : binary(20) - relAddress4 : binary(20) - *execTxId : number <> +class externally_owned_account { + address: binary(20) + id: integer } - -entity "Block" as BS.block { - *blockHash : binary(32) - -- - *parentHash : binary(32) <> - is_canonical : boolean - height : int - header : string +class keyvalue { + ky: varbinary(64) + val: mediumblob + id: integer } - -entity "L1 Message" as BS.l1_msg{ - __ - message : mediumblob - *blockHash : binary(32) <> +class l1_msg { + message: varbinary(1024) + block: integer + is_transfer: boolean + id: integer } - -entity "Rollup" as BS.rollup { - *rollupHash : binary(32) - -- - start_seq : int - end_seq : int - *blockHash : binary(32) <> +class receipt { + content: mediumblob + tx: integer + batch: integer + id: integer } - -entity "Config" as C.cfg{ - *key : string - -- - value : binary +class rollup { + hash: binary(32) + start_seq: int + end_seq: int + time_stamp: int + header: blob + compression_block: integer + id: integer } - -entity "KeyValue" as 
KV.kv{ - *key : binary - -- - value : binary +class tx { + hash: binary(32) + content: mediumblob + sender_address: int + idx: int + batch_height: int + id: integer } - - -batch_body ||..o{ tx : included in -batch_body ||..|{ batch -block ||..|{ batch : L1 proof -block ||..|{ rollup : Published in -exec_tx ||..|{ event -batch ||..|{ exec_tx -tx ||..|{ exec_tx -block ||..o{ l1_msg -block ||..|{ block -batch ||..|{ batch - - -batch_body .. N1 -exec_tx .. N2 -tx .. N3 -block .. N6 - -@enduml \ No newline at end of file +batch -[#595959,plain]-^ block : "l1_proof:id" +contract -[#595959,plain]-^ externally_owned_account : "owner:id" +event_log -[#595959,plain]-^ event_topic : "topic2:id" +event_log -[#595959,plain]-^ event_topic : "topic1:id" +event_log -[#595959,plain]-^ event_topic : "topic3:id" +event_log -[#595959,plain]-^ event_type : "event_type:id" +event_log -[#595959,plain]-^ receipt : "receipt:id" +event_topic -[#595959,plain]-^ externally_owned_account : "rel_address:id" +event_type -[#595959,plain]-^ contract : "contract:id" +l1_msg -[#595959,plain]-^ block : "block:id" +receipt -[#595959,plain]-^ batch : "batch:sequence" +receipt -[#595959,plain]-^ tx : "tx:id" +rollup -[#595959,plain]-^ block : "compression_block:id" +tx -[#595959,plain]-^ externally_owned_account : "sender_address:id" +@enduml diff --git a/go/common/headers.go b/go/common/headers.go index 8c06b9c05e..bea9c8ed91 100644 --- a/go/common/headers.go +++ b/go/common/headers.go @@ -47,6 +47,12 @@ type BatchHeader struct { CrossChainTree SerializedCrossChainTree `json:"crossChainTree"` // Those are the leafs of the merkle tree hashed for privacy. Necessary for clients to be able to build proofs as they have no access to all transactions in a batch or their receipts. } +// IsGenesis indicates whether the batch is the genesis batch. +// todo (#718) - Change this to a check against a hardcoded genesis hash. +func (b *BatchHeader) IsGenesis() bool { + return b.Number.Cmp(big.NewInt(int64(L2GenesisHeight))) == 0 +} + type batchHeaderEncoding struct { Hash common.Hash `json:"hash"` ParentHash L2BatchHash `json:"parentHash"` diff --git a/go/enclave/components/batch_executor.go b/go/enclave/components/batch_executor.go index ea5b47516f..16ffdc108c 100644 --- a/go/enclave/components/batch_executor.go +++ b/go/enclave/components/batch_executor.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math/big" "sort" "sync" @@ -144,7 +145,7 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE } // These variables will be used to create the new batch - parentBatch, err := executor.storage.FetchBatch(ctx, context.ParentPtr) + parentBatch, err := executor.storage.FetchBatchHeader(ctx, context.ParentPtr) if errors.Is(err, errutil.ErrNotFound) { executor.logger.Error(fmt.Sprintf("can't find parent batch %s. 
Seq %d", context.ParentPtr, context.SequencerNo)) return nil, errutil.ErrAncestorBatchNotFound @@ -154,9 +155,9 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE } parentBlock := block - if parentBatch.Header.L1Proof != block.Hash() { + if parentBatch.L1Proof != block.Hash() { var err error - parentBlock, err = executor.storage.FetchBlock(ctx, parentBatch.Header.L1Proof) + parentBlock, err = executor.storage.FetchBlock(ctx, parentBatch.L1Proof) if err != nil { executor.logger.Error(fmt.Sprintf("Could not retrieve a proof for batch %s", parentBatch.Hash()), log.ErrKey, err) return nil, err @@ -164,7 +165,7 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE } // Create a new batch based on the fromBlock of inclusion of the previous, including all new transactions - batch := core.DeterministicEmptyBatch(parentBatch.Header, block, context.AtTime, context.SequencerNo, context.BaseFee, context.Creator) + batch := core.DeterministicEmptyBatch(parentBatch, block, context.AtTime, context.SequencerNo, context.BaseFee, context.Creator) stateDB, err := executor.batchRegistry.GetBatchState(ctx, &batch.Header.ParentHash) if err != nil { @@ -194,14 +195,14 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE syntheticTransactions := append(xchainTxs, freeTransactions...) // fromTxIndex - Here we start from the 0 index. This will be the same for a validator. - successfulTxs, excludedTxs, txReceipts, err := executor.processTransactions(ctx, batch, 0, transactionsToProcess, stateDB, context.ChainConfig, false) + successfulTxs, excludedTxs, txReceipts, createdContracts, err := executor.processTransactions(ctx, batch, 0, transactionsToProcess, stateDB, context.ChainConfig, false) if err != nil { return nil, fmt.Errorf("could not process transactions. Cause: %w", err) } // fromTxIndex - Here we start from the len of the successful transactions; As long as we have the exact same successful transactions in a batch, // we will start from the same place. 
- ccSuccessfulTxs, _, ccReceipts, err := executor.processTransactions(ctx, batch, len(successfulTxs), syntheticTransactions, stateDB, context.ChainConfig, true) + ccSuccessfulTxs, _, ccReceipts, createdContractsSyn, err := executor.processTransactions(ctx, batch, len(successfulTxs), syntheticTransactions, stateDB, context.ChainConfig, true) if err != nil { return nil, err } @@ -244,10 +245,11 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE l.BlockHash = copyBatch.Hash() } } - + maps.Copy(createdContracts, createdContractsSyn) return &ComputedBatch{ - Batch: ©Batch, - Receipts: allReceipts, + Batch: ©Batch, + Receipts: allReceipts, + CreatedContracts: createdContracts, Commit: func(deleteEmptyObjects bool) (gethcommon.Hash, error) { executor.stateDBMutex.Lock() defer executor.stateDBMutex.Unlock() @@ -262,7 +264,7 @@ func (executor *batchExecutor) ComputeBatch(ctx context.Context, context *BatchE }, nil } -func (executor *batchExecutor) ExecuteBatch(ctx context.Context, batch *core.Batch) (types.Receipts, error) { +func (executor *batchExecutor) ExecuteBatch(ctx context.Context, batch *core.Batch) (types.Receipts, map[gethcommon.Hash][]*gethcommon.Address, error) { defer core.LogMethodDuration(executor.logger, measure.NewStopwatch(), "Executed batch", log.BatchHashKey, batch.Hash()) // Validators recompute the entire batch using the same batch context @@ -281,20 +283,20 @@ func (executor *batchExecutor) ExecuteBatch(ctx context.Context, batch *core.Bat BaseFee: batch.Header.BaseFee, }, false) // this execution is not used when first producing a batch, we never want to fail for empty batches if err != nil { - return nil, fmt.Errorf("failed computing batch %s. Cause: %w", batch.Hash(), err) + return nil, nil, fmt.Errorf("failed computing batch %s. Cause: %w", batch.Hash(), err) } if cb.Batch.Hash() != batch.Hash() { // todo @stefan - generate a validator challenge here and return it - executor.logger.Error(fmt.Sprintf("Error validating batch. Calculated: %+v Incoming: %+v\n", cb.Batch.Header, batch.Header)) - return nil, fmt.Errorf("batch is in invalid state. Incoming hash: %s Computed hash: %s", batch.Hash(), cb.Batch.Hash()) + executor.logger.Error(fmt.Sprintf("Error validating batch. Calculated: %+v Incoming: %+v", cb.Batch.Header, batch.Header)) + return nil, nil, fmt.Errorf("batch is in invalid state. Incoming hash: %s Computed hash: %s", batch.Hash(), cb.Batch.Hash()) } if _, err := cb.Commit(true); err != nil { - return nil, fmt.Errorf("cannot commit stateDB for incoming valid batch %s. Cause: %w", batch.Hash(), err) + return nil, nil, fmt.Errorf("cannot commit stateDB for incoming valid batch %s. 
Cause: %w", batch.Hash(), err) } - return cb.Receipts, nil + return cb.Receipts, cb.CreatedContracts, nil } func (executor *batchExecutor) CreateGenesisState( @@ -434,11 +436,12 @@ func (executor *batchExecutor) processTransactions( stateDB *state.StateDB, cc *params.ChainConfig, noBaseFee bool, -) ([]*common.L2Tx, []*common.L2Tx, []*types.Receipt, error) { +) ([]*common.L2Tx, []*common.L2Tx, []*types.Receipt, map[gethcommon.Hash][]*gethcommon.Address, error) { var executedTransactions []*common.L2Tx var excludedTransactions []*common.L2Tx var txReceipts []*types.Receipt - txResults := evm.ExecuteTransactions( + createdContracts := make(map[gethcommon.Hash][]*gethcommon.Address) + txResults, err := evm.ExecuteTransactions( ctx, txs, stateDB, @@ -452,24 +455,27 @@ func (executor *batchExecutor) processTransactions( executor.batchGasLimit, executor.logger, ) + if err != nil { + return nil, nil, nil, nil, err + } for _, tx := range txs { result, f := txResults[tx.Tx.Hash()] if !f { - return nil, nil, nil, fmt.Errorf("there should be an entry for each transaction") + return nil, nil, nil, nil, fmt.Errorf("there should be an entry for each transaction") } - rec, foundReceipt := result.(*types.Receipt) - if foundReceipt { + if result.Receipt != nil { executedTransactions = append(executedTransactions, tx.Tx) - txReceipts = append(txReceipts, rec) + txReceipts = append(txReceipts, result.Receipt) + createdContracts[tx.Tx.Hash()] = result.CreatedContracts } else { - // Exclude all errors + // Exclude failed transactions excludedTransactions = append(excludedTransactions, tx.Tx) - executor.logger.Debug("Excluding transaction from batch", log.TxKey, tx.Tx.Hash(), log.BatchHashKey, batch.Hash(), "cause", result) + executor.logger.Debug("Excluding transaction from batch", log.TxKey, tx.Tx.Hash(), log.BatchHashKey, batch.Hash(), "cause", result.Err) } } sort.Sort(sortByTxIndex(txReceipts)) - return executedTransactions, excludedTransactions, txReceipts, nil + return executedTransactions, excludedTransactions, txReceipts, createdContracts, nil } type sortByTxIndex []*types.Receipt diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index c76b967f95..1d8d01d90b 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/ten-protocol/go-ten/go/common/measure" + "github.com/ten-protocol/go-ten/go/common" "github.com/ethereum/go-ethereum/core/types" @@ -18,7 +20,6 @@ import ( "github.com/ten-protocol/go-ten/go/common/async" "github.com/ten-protocol/go-ten/go/common/errutil" "github.com/ten-protocol/go-ten/go/common/log" - "github.com/ten-protocol/go-ten/go/common/measure" "github.com/ten-protocol/go-ten/go/enclave/core" "github.com/ten-protocol/go-ten/go/enclave/limiters" gethrpc "github.com/ten-protocol/go-ten/lib/gethfork/rpc" @@ -37,7 +38,7 @@ type batchRegistry struct { func NewBatchRegistry(storage storage.Storage, logger gethlog.Logger) BatchRegistry { var headBatchSeq *big.Int - headBatch, err := storage.FetchHeadBatch(context.Background()) + headBatch, err := storage.FetchHeadBatchHeader(context.Background()) if err != nil { if errors.Is(err, errutil.ErrNotFound) { headBatchSeq = nil @@ -46,7 +47,7 @@ func NewBatchRegistry(storage storage.Storage, logger gethlog.Logger) BatchRegis return nil } } else { - headBatchSeq = headBatch.SeqNo() + headBatchSeq = headBatch.SequencerOrderNo } return &batchRegistry{ @@ -77,22 +78,30 @@ func (br *batchRegistry) 
UnsubscribeFromBatches() { func (br *batchRegistry) OnL1Reorg(_ *BlockIngestionType) { // refresh the cached head batch from the database because there was an L1 reorg - headBatch, err := br.storage.FetchHeadBatch(context.Background()) + headBatch, err := br.storage.FetchHeadBatchHeader(context.Background()) if err != nil { br.logger.Error("Could not fetch head batch", log.ErrKey, err) return } - br.headBatchSeq = headBatch.SeqNo() + br.headBatchSeq = headBatch.SequencerOrderNo } -func (br *batchRegistry) OnBatchExecuted(batch *core.Batch, receipts types.Receipts) { +func (br *batchRegistry) OnBatchExecuted(batchHeader *common.BatchHeader, receipts types.Receipts) { + defer core.LogMethodDuration(br.logger, measure.NewStopwatch(), "OnBatchExecuted", log.BatchHashKey, batchHeader.Hash()) br.callbackMutex.RLock() defer br.callbackMutex.RUnlock() - defer core.LogMethodDuration(br.logger, measure.NewStopwatch(), "Sending batch and events", log.BatchHashKey, batch.Hash()) - - br.headBatchSeq = batch.SeqNo() + txs, err := br.storage.FetchBatchTransactionsBySeq(context.Background(), batchHeader.SequencerOrderNo.Uint64()) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + // this function is called after a batch was successfully executed. This is a catastrophic failure + br.logger.Crit("should not happen. cannot get transactions. ", log.ErrKey, err) + } + br.headBatchSeq = batchHeader.SequencerOrderNo if br.batchesCallback != nil { + batch := &core.Batch{ + Header: batchHeader, + Transactions: txs, + } br.batchesCallback(batch, receipts) } @@ -105,13 +114,13 @@ func (br *batchRegistry) HasGenesisBatch() (bool, error) { func (br *batchRegistry) BatchesAfter(ctx context.Context, batchSeqNo uint64, upToL1Height uint64, rollupLimiter limiters.RollupLimiter) ([]*core.Batch, []*types.Block, error) { // sanity check - headBatch, err := br.storage.FetchBatchBySeqNo(ctx, br.HeadBatchSeq().Uint64()) + headBatch, err := br.storage.FetchBatchHeaderBySeqNo(ctx, br.HeadBatchSeq().Uint64()) if err != nil { return nil, nil, err } - if headBatch.SeqNo().Uint64() < batchSeqNo { - return nil, nil, fmt.Errorf("head batch height %d is in the past compared to requested batch %d", headBatch.SeqNo().Uint64(), batchSeqNo) + if headBatch.SequencerOrderNo.Uint64() < batchSeqNo { + return nil, nil, fmt.Errorf("head batch height %d is in the past compared to requested batch %d", headBatch.SequencerOrderNo.Uint64(), batchSeqNo) } resultBatches := make([]*core.Batch, 0) @@ -119,7 +128,7 @@ func (br *batchRegistry) BatchesAfter(ctx context.Context, batchSeqNo uint64, up currentBatchSeq := batchSeqNo var currentBlock *types.Block - for currentBatchSeq <= headBatch.SeqNo().Uint64() { + for currentBatchSeq <= headBatch.SequencerOrderNo.Uint64() { batch, err := br.storage.FetchBatchBySeqNo(ctx, currentBatchSeq) if err != nil { return nil, nil, fmt.Errorf("could not retrieve batch by sequence number %d. 
Cause: %w", currentBatchSeq, err) @@ -168,11 +177,7 @@ func (br *batchRegistry) BatchesAfter(ctx context.Context, batchSeqNo uint64, up } func (br *batchRegistry) GetBatchState(ctx context.Context, hash *common.L2BatchHash) (*state.StateDB, error) { - batch, err := br.storage.FetchBatch(ctx, *hash) - if err != nil { - return nil, err - } - return getBatchState(ctx, br.storage, batch) + return getBatchState(ctx, br.storage, *hash) } func (br *batchRegistry) GetBatchStateAtHeight(ctx context.Context, blockNumber *gethrpc.BlockNumber) (*state.StateDB, error) { @@ -182,17 +187,17 @@ func (br *batchRegistry) GetBatchStateAtHeight(ctx context.Context, blockNumber return nil, err } - return getBatchState(ctx, br.storage, batch) + return getBatchState(ctx, br.storage, batch.Hash()) } -func getBatchState(ctx context.Context, storage storage.Storage, batch *core.Batch) (*state.StateDB, error) { - blockchainState, err := storage.CreateStateDB(ctx, batch.Hash()) +func getBatchState(ctx context.Context, storage storage.Storage, batchHash common.L2BatchHash) (*state.StateDB, error) { + blockchainState, err := storage.CreateStateDB(ctx, batchHash) if err != nil { return nil, fmt.Errorf("could not create stateDB. Cause: %w", err) } if blockchainState == nil { - return nil, fmt.Errorf("unable to fetch chain state for batch %s", batch.Hash().Hex()) + return nil, fmt.Errorf("unable to fetch chain state for batch %s", batchHash.Hex()) } return blockchainState, err diff --git a/go/enclave/components/interfaces.go b/go/enclave/components/interfaces.go index cb74f0bf4b..cad76c7292 100644 --- a/go/enclave/components/interfaces.go +++ b/go/enclave/components/interfaces.go @@ -65,7 +65,9 @@ type BatchExecutionContext struct { type ComputedBatch struct { Batch *core.Batch Receipts types.Receipts - Commit func(bool) (gethcommon.Hash, error) + // while executing the batch, we collect the newly created contracts mapped by the transaction that created them + CreatedContracts map[gethcommon.Hash][]*gethcommon.Address + Commit func(bool) (gethcommon.Hash, error) } type BatchExecutor interface { @@ -76,7 +78,7 @@ type BatchExecutor interface { ComputeBatch(ctx context.Context, batchContext *BatchExecutionContext, failForEmptyBatch bool) (*ComputedBatch, error) // ExecuteBatch - executes the transactions and xchain messages, returns the receipts, and updates the stateDB - ExecuteBatch(context.Context, *core.Batch) (types.Receipts, error) + ExecuteBatch(context.Context, *core.Batch) (types.Receipts, map[gethcommon.Hash][]*gethcommon.Address, error) // CreateGenesisState - will create and commit the genesis state in the stateDB for the given block hash, // and uint64 timestamp representing the time now. 
In this genesis state is where one can @@ -102,7 +104,7 @@ type BatchRegistry interface { SubscribeForExecutedBatches(func(*core.Batch, types.Receipts)) UnsubscribeFromBatches() - OnBatchExecuted(batch *core.Batch, receipts types.Receipts) + OnBatchExecuted(batch *common.BatchHeader, receipts types.Receipts) OnL1Reorg(*BlockIngestionType) // HasGenesisBatch - returns if genesis batch is available yet or not, or error in case diff --git a/go/enclave/components/rollup_compression.go b/go/enclave/components/rollup_compression.go index ca4536e0ca..6b36d188da 100644 --- a/go/enclave/components/rollup_compression.go +++ b/go/enclave/components/rollup_compression.go @@ -175,8 +175,8 @@ func (rc *RollupCompression) createRollupHeader(ctx context.Context, rollup *cor } reorgMap := make(map[uint64]bool) for _, batch := range reorgedBatches { - rc.logger.Info("Reorg batch", log.BatchSeqNoKey, batch.SeqNo().Uint64()) - reorgMap[batch.SeqNo().Uint64()] = true + rc.logger.Info("Reorg batch", log.BatchSeqNoKey, batch.SequencerOrderNo.Uint64()) + reorgMap[batch.SequencerOrderNo.Uint64()] = true } for i, batch := range batches { @@ -404,7 +404,7 @@ func (rc *RollupCompression) executeAndSaveIncompleteBatches(ctx context.Context parentHash := calldataRollupHeader.FirstCanonParentHash if calldataRollupHeader.FirstBatchSequence.Uint64() != common.L2GenesisSeqNo { - _, err := rc.storage.FetchBatch(ctx, parentHash) + _, err := rc.storage.FetchBatchHeader(ctx, parentHash) if err != nil { rc.logger.Error("Could not find batch mentioned in the rollup. This should not happen.", log.ErrKey, err) return err @@ -470,11 +470,11 @@ func (rc *RollupCompression) executeAndSaveIncompleteBatches(ctx context.Context if err != nil { return err } - err = rc.storage.StoreExecutedBatch(ctx, genBatch, nil) + err = rc.storage.StoreExecutedBatch(ctx, genBatch.Header, nil, nil) if err != nil { return err } - rc.batchRegistry.OnBatchExecuted(genBatch, nil) + rc.batchRegistry.OnBatchExecuted(genBatch.Header, nil) rc.logger.Info("Stored genesis", log.BatchHashKey, genBatch.Hash()) parentHash = genBatch.Hash() @@ -514,11 +514,11 @@ func (rc *RollupCompression) executeAndSaveIncompleteBatches(ctx context.Context if err != nil { return err } - err = rc.storage.StoreExecutedBatch(ctx, computedBatch.Batch, computedBatch.Receipts) + err = rc.storage.StoreExecutedBatch(ctx, computedBatch.Batch.Header, computedBatch.Receipts, computedBatch.CreatedContracts) if err != nil { return err } - rc.batchRegistry.OnBatchExecuted(computedBatch.Batch, nil) + rc.batchRegistry.OnBatchExecuted(computedBatch.Batch.Header, nil) parentHash = computedBatch.Batch.Hash() } diff --git a/go/enclave/core/batch.go b/go/enclave/core/batch.go index bc97ff22e1..cdc15cf9e4 100644 --- a/go/enclave/core/batch.go +++ b/go/enclave/core/batch.go @@ -49,12 +49,6 @@ func (b *Batch) NumberU64() uint64 { return b.Header.Number.Uint64() } func (b *Batch) Number() *big.Int { return new(big.Int).Set(b.Header.Number) } func (b *Batch) SeqNo() *big.Int { return new(big.Int).Set(b.Header.SequencerOrderNo) } -// IsGenesis indicates whether the batch is the genesis batch. -// todo (#718) - Change this to a check against a hardcoded genesis hash. 
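// Illustrative sketch (not part of the repository change): the IsGenesis helper removed below
// was re-added on common.BatchHeader in go/common/headers.go earlier in this diff, so callers
// holding only a header can run the genesis check without loading the batch body. A standalone
// mirror of that check; the genesis height of 0 is an assumption for illustration and stands in
// for common.L2GenesisHeight.

package main

import (
	"fmt"
	"math/big"
)

const l2GenesisHeight = 0 // assumed value, stands in for common.L2GenesisHeight

type batchHeader struct {
	Number *big.Int
}

// isGenesis mirrors BatchHeader.IsGenesis: the genesis batch is the one whose
// height equals the L2 genesis height.
func (b *batchHeader) isGenesis() bool {
	return b.Number.Cmp(big.NewInt(l2GenesisHeight)) == 0
}

func main() {
	fmt.Println((&batchHeader{Number: big.NewInt(0)}).isGenesis())  // true
	fmt.Println((&batchHeader{Number: big.NewInt(42)}).isGenesis()) // false
}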
-func (b *Batch) IsGenesis() bool { - return b.Header.Number.Cmp(big.NewInt(int64(common.L2GenesisHeight))) == 0 -} - func (b *Batch) ToExtBatch(transactionBlobCrypto crypto.DataEncryptionService, compression compression.DataCompressionService) (*common.ExtBatch, error) { txHashes := make([]gethcommon.Hash, len(b.Transactions)) for idx, tx := range b.Transactions { diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index 7db4324e61..a12e7a7028 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -510,7 +510,7 @@ func (e *enclaveImpl) SubmitBatch(ctx context.Context, extBatch *common.ExtBatch e.logger.Info("Received new p2p batch", log.BatchHeightKey, extBatch.Header.Number, log.BatchHashKey, extBatch.Hash(), "l1", extBatch.Header.L1Proof) seqNo := extBatch.Header.SequencerOrderNo.Uint64() if seqNo > common.L2GenesisSeqNo+1 { - _, err := e.storage.FetchBatchBySeqNo(ctx, seqNo-1) + _, err := e.storage.FetchBatchHeaderBySeqNo(ctx, seqNo-1) if err != nil { return responses.ToInternalError(fmt.Errorf("could not find previous batch with seq: %d", seqNo-1)) } @@ -961,7 +961,7 @@ func replayBatchesToValidState(ctx context.Context, storage storage.Storage, reg } // calculate the stateDB after this batch and store it in the cache - _, err := batchExecutor.ExecuteBatch(ctx, batch) + _, _, err := batchExecutor.ExecuteBatch(ctx, batch) if err != nil { return err } diff --git a/go/enclave/evm/chain_context.go b/go/enclave/evm/chain_context.go index 75e4b942e6..a69ef9b89e 100644 --- a/go/enclave/evm/chain_context.go +++ b/go/enclave/evm/chain_context.go @@ -42,7 +42,7 @@ func (occ *ObscuroChainContext) GetHeader(hash common.Hash, _ uint64) *types.Hea ctx, cancelCtx := context.WithTimeout(context.Background(), occ.config.RPCTimeout) defer cancelCtx() - batch, err := occ.storage.FetchBatch(ctx, hash) + batch, err := occ.storage.FetchBatchHeader(ctx, hash) if err != nil { if errors.Is(err, errutil.ErrNotFound) { return nil @@ -50,7 +50,7 @@ func (occ *ObscuroChainContext) GetHeader(hash common.Hash, _ uint64) *types.Hea occ.logger.Crit("Could not retrieve rollup", log.ErrKey, err) } - h, err := occ.gethEncodingService.CreateEthHeaderForBatch(ctx, batch.Header) + h, err := occ.gethEncodingService.CreateEthHeaderForBatch(ctx, batch) if err != nil { occ.logger.Crit("Could not convert to eth header", log.ErrKey, err) return nil diff --git a/go/enclave/evm/ethchainadapter/eth_chainadapter.go b/go/enclave/evm/ethchainadapter/eth_chainadapter.go index c64529be46..1dbb927724 100644 --- a/go/enclave/evm/ethchainadapter/eth_chainadapter.go +++ b/go/enclave/evm/ethchainadapter/eth_chainadapter.go @@ -59,12 +59,12 @@ func (e *EthChainAdapter) CurrentBlock() *gethtypes.Header { ctx, cancelCtx := context.WithTimeout(context.Background(), e.config.RPCTimeout) defer cancelCtx() - currentBatch, err := e.storage.FetchBatchBySeqNo(ctx, currentBatchSeqNo.Uint64()) + currentBatch, err := e.storage.FetchBatchHeaderBySeqNo(ctx, currentBatchSeqNo.Uint64()) if err != nil { e.logger.Warn("unable to retrieve batch seq no", "currentBatchSeqNo", currentBatchSeqNo, log.ErrKey, err) return nil } - batch, err := e.gethEncoding.CreateEthHeaderForBatch(ctx, currentBatch.Header) + batch, err := e.gethEncoding.CreateEthHeaderForBatch(ctx, currentBatch) if err != nil { e.logger.Warn("unable to convert batch to eth header ", "currentBatchSeqNo", currentBatchSeqNo, log.ErrKey, err) return nil diff --git a/go/enclave/evm/evm_facade.go b/go/enclave/evm/evm_facade.go index 059b6f851e..e3ecd3fd39 100644 --- 
a/go/enclave/evm/evm_facade.go +++ b/go/enclave/evm/evm_facade.go @@ -38,6 +38,12 @@ import ( var ErrGasNotEnoughForL1 = errors.New("gas limit too low to pay for execution and l1 fees") +type TxExecResult struct { + Receipt *types.Receipt + CreatedContracts []*gethcommon.Address + Err error +} + // ExecuteTransactions // header - the header of the rollup where this transaction will be included // fromTxIndex - for the receipts and events, the evm needs to know for each transaction the order in which it was executed in the block. @@ -54,17 +60,17 @@ func ExecuteTransactions( noBaseFee bool, batchGasLimit uint64, logger gethlog.Logger, -) map[common.TxHash]interface{} { // todo - return error +) (map[common.TxHash]*TxExecResult, error) { chain, vmCfg := initParams(storage, gethEncodingService, config, noBaseFee, logger) gp := gethcore.GasPool(batchGasLimit) zero := uint64(0) usedGas := &zero - result := map[common.TxHash]interface{}{} + result := map[common.TxHash]*TxExecResult{} ethHeader, err := gethEncodingService.CreateEthHeaderForBatch(ctx, header) if err != nil { - logger.Crit("Could not convert to eth header", log.ErrKey, err) - return nil + logger.Error("Could not convert to eth header", log.ErrKey, err) + return nil, err } hash := header.Hash() @@ -77,7 +83,9 @@ func ExecuteTransactions( // this should not open up any attack vectors on the randomness. tCountRollback := 0 for i, t := range txs { - r, err := executeTransaction( + txResult := &TxExecResult{} + result[t.Tx.Hash()] = txResult + r, createdContracts, err := executeTransaction( s, chainConfig, chain, @@ -92,7 +100,7 @@ func ExecuteTransactions( ) if err != nil { tCountRollback++ - result[t.Tx.Hash()] = err + txResult.Err = err // only log tx execution errors if they are unexpected logFailedTx := logger.Info if errors.Is(err, gethcore.ErrNonceTooHigh) || errors.Is(err, gethcore.ErrNonceTooLow) || errors.Is(err, gethcore.ErrFeeCapTooLow) || errors.Is(err, ErrGasNotEnoughForL1) { @@ -101,11 +109,13 @@ func ExecuteTransactions( logFailedTx("Failed to execute tx:", log.TxKey, t.Tx.Hash(), log.CtrErrKey, err) continue } - result[t.Tx.Hash()] = r + logReceipt(r, logger) + txResult.Receipt = r + txResult.CreatedContracts = createdContracts } s.Finalise(true) - return result + return result, nil } const ( @@ -127,16 +137,29 @@ func executeTransaction( tCount int, batchHash common.L2BatchHash, batchHeight uint64, -) (*types.Receipt, error) { +) (*types.Receipt, []*gethcommon.Address, error) { + var createdContracts []*gethcommon.Address rules := cc.Rules(big.NewInt(0), true, 0) from, err := types.Sender(types.LatestSigner(cc), t.Tx) if err != nil { - return nil, err + return nil, nil, err } s.Prepare(rules, from, gethcommon.Address{}, t.Tx.To(), nil, nil) snap := s.Snapshot() s.SetTxContext(t.Tx.Hash(), tCount) + s.SetLogger(&tracing.Hooks{ + // called when the code of a contract changes. + OnCodeChange: func(addr gethcommon.Address, prevCodeHash gethcommon.Hash, prevCode []byte, codeHash gethcommon.Hash, code []byte) { + // only proceed for new deployments. 
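// Illustrative sketch (not part of the repository change): the hook added in this hunk records
// an address only when the account had no previous code, i.e. a fresh deployment. The same
// tracing.Hooks callback exercised in isolation; the addresses and bytecode are made up, and
// wiring the hook into a StateDB via SetLogger is shown in the surrounding diff.

package main

import (
	"fmt"

	gethcommon "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/tracing"
)

func main() {
	var createdContracts []*gethcommon.Address
	hooks := &tracing.Hooks{
		OnCodeChange: func(addr gethcommon.Address, _ gethcommon.Hash, prevCode []byte, _ gethcommon.Hash, _ []byte) {
			if len(prevCode) > 0 {
				// the account already had code, so this is not a new deployment
				return
			}
			createdContracts = append(createdContracts, &addr)
		},
	}

	fresh := gethcommon.HexToAddress("0xaa")
	upgraded := gethcommon.HexToAddress("0xbb")

	// a deployment: no previous code, so the address is recorded
	hooks.OnCodeChange(fresh, gethcommon.Hash{}, nil, gethcommon.Hash{}, []byte{0x60, 0x00})
	// a code change on an account that already had code: skipped
	hooks.OnCodeChange(upgraded, gethcommon.Hash{}, []byte{0x60, 0x01}, gethcommon.Hash{}, []byte{0x60, 0x02})

	fmt.Println("created contracts:", len(createdContracts)) // 1
}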
+ if len(prevCode) > 0 { + return + } + createdContracts = append(createdContracts, &addr) + }, + }) + defer s.SetLogger(nil) + before := header.MixDigest // calculate a random value per transaction header.MixDigest = crypto.CalculateTxRnd(before.Bytes(), tCount) @@ -226,10 +249,10 @@ func executeTransaction( header.MixDigest = before if err != nil { s.RevertToSnapshot(snap) - return receipt, err + return receipt, nil, err } - return receipt, nil + return receipt, createdContracts, nil } func logReceipt(r *types.Receipt, logger gethlog.Logger) { diff --git a/go/enclave/l2chain/interfaces.go b/go/enclave/l2chain/interfaces.go index 6c29fd25a3..83eb0fee96 100644 --- a/go/enclave/l2chain/interfaces.go +++ b/go/enclave/l2chain/interfaces.go @@ -21,7 +21,7 @@ type ObscuroChain interface { // For Contracts - the address of the deployer. // Note - this might be subject to change if we implement a more flexible mechanism // todo - support BlockNumberOrHash - AccountOwner(ctx context.Context, address gethcommon.Address, blockNumber *gethrpc.BlockNumber) (*gethcommon.Address, error) + AccountOwner(ctx context.Context, address gethcommon.Address) (*gethcommon.Address, error) // GetBalanceAtBlock - will return the balance of a specific address at the specific given block number (batch number). GetBalanceAtBlock(ctx context.Context, accountAddr gethcommon.Address, blockNumber *gethrpc.BlockNumber) (*hexutil.Big, error) diff --git a/go/enclave/l2chain/l2_chain.go b/go/enclave/l2chain/l2_chain.go index b0c34c4c32..3b3cdb76a8 100644 --- a/go/enclave/l2chain/l2_chain.go +++ b/go/enclave/l2chain/l2_chain.go @@ -6,6 +6,8 @@ import ( "fmt" "math/big" + "github.com/ten-protocol/go-ten/go/common/errutil" + "github.com/ten-protocol/go-ten/go/config" "github.com/ten-protocol/go-ten/go/enclave/storage" @@ -64,32 +66,17 @@ func NewChain( } } -func (oc *obscuroChain) AccountOwner(ctx context.Context, address gethcommon.Address, blockNumber *gethrpc.BlockNumber) (*gethcommon.Address, error) { - // check if account is a contract - isContract, err := oc.isAccountContractAtBlock(ctx, address, blockNumber) - if err != nil { - return nil, err - } - if !isContract { - return &address, nil - } - - // If the address is a contract, find the signer of the deploy transaction - txHash, err := oc.storage.GetContractCreationTx(ctx, address) +func (oc *obscuroChain) AccountOwner(ctx context.Context, address gethcommon.Address) (*gethcommon.Address, error) { + // check if the account is a contract and return the owner + owner, err := oc.storage.ReadContractOwner(ctx, address) if err != nil { - return nil, err - } - transaction, _, _, _, err := oc.storage.GetTransaction(ctx, *txHash) //nolint:dogsled - if err != nil { - return nil, err - } - signer := types.NewLondonSigner(oc.chainConfig.ChainID) - - sender, err := signer.Sender(transaction) - if err != nil { - return nil, err + // it is not a contract, so it's an EOA + if errors.Is(err, errutil.ErrNotFound) { + return &address, nil + } + return nil, fmt.Errorf("could not read account owner. 
cause: %w", err) } - return &sender, nil + return owner, nil } func (oc *obscuroChain) GetBalanceAtBlock(ctx context.Context, accountAddr gethcommon.Address, blockNumber *gethrpc.BlockNumber) (*hexutil.Big, error) { @@ -212,13 +199,3 @@ func (oc *obscuroChain) GetChainStateAtTransaction(ctx context.Context, batch *c } return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for batch %#x", txIndex, batch.Hash()) } - -// Returns whether the account is a contract -func (oc *obscuroChain) isAccountContractAtBlock(ctx context.Context, accountAddr gethcommon.Address, blockNumber *gethrpc.BlockNumber) (bool, error) { - chainState, err := oc.Registry.GetBatchStateAtHeight(ctx, blockNumber) - if err != nil { - return false, fmt.Errorf("unable to get blockchain state - %w", err) - } - - return len(chainState.GetCode(accountAddr)) > 0, nil -} diff --git a/go/enclave/nodetype/common.go b/go/enclave/nodetype/common.go index a99dc0f690..a0c083ac4d 100644 --- a/go/enclave/nodetype/common.go +++ b/go/enclave/nodetype/common.go @@ -20,8 +20,8 @@ func ExportCrossChainData(ctx context.Context, storage storage.Storage, fromSeqN return nil, errutil.ErrCrossChainBundleNoBatches } - blockHash := canonicalBatches[len(canonicalBatches)-1].Header.L1Proof - batchHash := canonicalBatches[len(canonicalBatches)-1].Header.Hash() + blockHash := canonicalBatches[len(canonicalBatches)-1].L1Proof + batchHash := canonicalBatches[len(canonicalBatches)-1].Hash() block, err := storage.FetchBlock(ctx, blockHash) if err != nil { @@ -30,8 +30,8 @@ func ExportCrossChainData(ctx context.Context, storage storage.Storage, fromSeqN crossChainHashes := make([][]byte, 0) for _, batch := range canonicalBatches { - if batch.Header.CrossChainRoot != gethcommon.BigToHash(gethcommon.Big0) { - crossChainHashes = append(crossChainHashes, batch.Header.CrossChainRoot.Bytes()) + if batch.CrossChainRoot != gethcommon.BigToHash(gethcommon.Big0) { + crossChainHashes = append(crossChainHashes, batch.CrossChainRoot.Bytes()) } } diff --git a/go/enclave/nodetype/sequencer.go b/go/enclave/nodetype/sequencer.go index ff7ed22dc4..e992e0e64b 100644 --- a/go/enclave/nodetype/sequencer.go +++ b/go/enclave/nodetype/sequencer.go @@ -150,7 +150,7 @@ func (s *sequencer) createGenesisBatch(ctx context.Context, block *common.L1Bloc return fmt.Errorf("failed signing created batch. Cause: %w", err) } - if err := s.StoreExecutedBatch(ctx, batch, nil); err != nil { + if err := s.StoreExecutedBatch(ctx, batch, nil, nil); err != nil { return fmt.Errorf("1. failed storing batch. Cause: %w", err) } @@ -205,7 +205,7 @@ func (s *sequencer) createNewHeadBatch(ctx context.Context, l1HeadBlock *common. if headBatchSeq == nil { headBatchSeq = big.NewInt(int64(common.L2GenesisSeqNo)) } - headBatch, err := s.storage.FetchBatchBySeqNo(ctx, headBatchSeq.Uint64()) + headBatch, err := s.storage.FetchBatchHeaderBySeqNo(ctx, headBatchSeq.Uint64()) if err != nil { return err } @@ -220,7 +220,7 @@ func (s *sequencer) createNewHeadBatch(ctx context.Context, l1HeadBlock *common. } // sanity check that the headBatch.Header.L1Proof is an ancestor of the l1HeadBlock - b, err := s.storage.FetchBlock(ctx, headBatch.Header.L1Proof) + b, err := s.storage.FetchBlock(ctx, headBatch.L1Proof) if err != nil { return err } @@ -301,7 +301,7 @@ func (s *sequencer) produceBatch( return nil, fmt.Errorf("failed signing created batch. 
Cause: %w", err) } - if err := s.StoreExecutedBatch(ctx, cb.Batch, cb.Receipts); err != nil { + if err := s.StoreExecutedBatch(ctx, cb.Batch, cb.Receipts, cb.CreatedContracts); err != nil { return nil, fmt.Errorf("2. failed storing batch. Cause: %w", err) } @@ -319,7 +319,7 @@ func (s *sequencer) produceBatch( // StoreExecutedBatch - stores an executed batch in one go. This can be done for the sequencer because it is guaranteed // that all dependencies are in place for the execution to be successful. -func (s *sequencer) StoreExecutedBatch(ctx context.Context, batch *core.Batch, receipts types.Receipts) error { +func (s *sequencer) StoreExecutedBatch(ctx context.Context, batch *core.Batch, receipts types.Receipts, newContracts map[gethcommon.Hash][]*gethcommon.Address) error { defer core.LogMethodDuration(s.logger, measure.NewStopwatch(), "Registry StoreBatch() exit", log.BatchHashKey, batch.Hash()) // Check if this batch is already stored. @@ -337,11 +337,11 @@ func (s *sequencer) StoreExecutedBatch(ctx context.Context, batch *core.Batch, r return fmt.Errorf("failed to store batch. Cause: %w", err) } - if err := s.storage.StoreExecutedBatch(ctx, batch, receipts); err != nil { + if err := s.storage.StoreExecutedBatch(ctx, batch.Header, receipts, newContracts); err != nil { return fmt.Errorf("failed to store batch. Cause: %w", err) } - s.batchRegistry.OnBatchExecuted(batch, receipts) + s.batchRegistry.OnBatchExecuted(batch.Header, receipts) return nil } @@ -373,8 +373,8 @@ func (s *sequencer) CreateRollup(ctx context.Context, lastBatchNo uint64) (*comm } func (s *sequencer) duplicateBatches(ctx context.Context, l1Head *types.Block, nonCanonicalL1Path []common.L1BlockHash, canonicalL1Path []common.L1BlockHash) error { - batchesToDuplicate := make([]*core.Batch, 0) - batchesToExclude := make(map[uint64]*core.Batch, 0) + batchesToDuplicate := make([]*common.BatchHeader, 0) + batchesToExclude := make(map[uint64]*common.BatchHeader, 0) // read the batches attached to these blocks for _, l1BlockHash := range nonCanonicalL1Path { @@ -399,7 +399,7 @@ func (s *sequencer) duplicateBatches(ctx context.Context, l1Head *types.Block, n return fmt.Errorf("could not FetchBatchesByBlock %s. 
Cause %w", l1BlockHash, err) } for _, batch := range batches { - batchesToExclude[batch.NumberU64()] = batch + batchesToExclude[batch.Number.Uint64()] = batch } } @@ -409,20 +409,20 @@ func (s *sequencer) duplicateBatches(ctx context.Context, l1Head *types.Block, n // sort by height sort.Slice(batchesToDuplicate, func(i, j int) bool { - return batchesToDuplicate[i].Number().Cmp(batchesToDuplicate[j].Number()) == -1 + return batchesToDuplicate[i].Number.Cmp(batchesToDuplicate[j].Number) == -1 }) - currentHead := batchesToDuplicate[0].Header.ParentHash + currentHead := batchesToDuplicate[0].ParentHash // find all batches for that path for i, orphanBatch := range batchesToDuplicate { // sanity check that all these batches are consecutive - if i > 0 && batchesToDuplicate[i].Header.ParentHash != batchesToDuplicate[i-1].Hash() { + if i > 0 && batchesToDuplicate[i].ParentHash != batchesToDuplicate[i-1].Hash() { s.logger.Crit("the batches that must be duplicated are invalid") } - if batchesToExclude[orphanBatch.NumberU64()] != nil { - s.logger.Info("Not duplicating batch because there is already a canonical batch on that height", log.BatchSeqNoKey, orphanBatch.SeqNo()) - currentHead = batchesToExclude[orphanBatch.NumberU64()].Hash() + if batchesToExclude[orphanBatch.Number.Uint64()] != nil { + s.logger.Info("Not duplicating batch because there is already a canonical batch on that height", log.BatchSeqNoKey, orphanBatch.SequencerOrderNo) + currentHead = batchesToExclude[orphanBatch.Number.Uint64()].Hash() continue } sequencerNo, err := s.storage.FetchCurrentSequencerNo(ctx) @@ -430,8 +430,12 @@ func (s *sequencer) duplicateBatches(ctx context.Context, l1Head *types.Block, n return fmt.Errorf("could not fetch sequencer no. Cause %w", err) } sequencerNo = sequencerNo.Add(sequencerNo, big.NewInt(1)) + transactions, err := s.storage.FetchBatchTransactionsBySeq(ctx, orphanBatch.SequencerOrderNo.Uint64()) + if err != nil { + return fmt.Errorf("could not fetch transactions to duplicate. Cause %w", err) + } // create the duplicate and store/broadcast it, recreate batch even if it was empty - cb, err := s.produceBatch(ctx, sequencerNo, l1Head.Hash(), currentHead, orphanBatch.Transactions, orphanBatch.Header.Time, false) + cb, err := s.produceBatch(ctx, sequencerNo, l1Head.Hash(), currentHead, transactions, orphanBatch.Time, false) if err != nil { return fmt.Errorf("could not produce batch. Cause %w", err) } diff --git a/go/enclave/nodetype/validator.go b/go/enclave/nodetype/validator.go index 1ccd9ce81b..2227ad02de 100644 --- a/go/enclave/nodetype/validator.go +++ b/go/enclave/nodetype/validator.go @@ -105,62 +105,72 @@ func (val *obsValidator) ExecuteStoredBatches(ctx context.Context) error { startMempool(val.batchRegistry, val.mempool) - for _, batch := range batches { - if batch.IsGenesis() { - if err = val.handleGenesis(ctx, batch); err != nil { + for _, batchHeader := range batches { + if batchHeader.IsGenesis() { + if err = val.handleGenesis(ctx, batchHeader); err != nil { return err } } - val.logger.Trace("Executing stored batch", log.BatchSeqNoKey, batch.SeqNo()) + val.logger.Trace("Executing stored batchHeader", log.BatchSeqNoKey, batchHeader.SequencerOrderNo) - // check batch execution prerequisites - canExecute, err := val.executionPrerequisites(ctx, batch) + // check batchHeader execution prerequisites + canExecute, err := val.executionPrerequisites(ctx, batchHeader) if err != nil { - return fmt.Errorf("could not determine the execution prerequisites for batch %s. 
Cause: %w", batch.Hash(), err) + return fmt.Errorf("could not determine the execution prerequisites for batchHeader %s. Cause: %w", batchHeader.Hash(), err) } - val.logger.Trace("Can execute stored batch", log.BatchSeqNoKey, batch.SeqNo(), "can", canExecute) + val.logger.Trace("Can execute stored batchHeader", log.BatchSeqNoKey, batchHeader.SequencerOrderNo, "can", canExecute) if canExecute { - receipts, err := val.batchExecutor.ExecuteBatch(ctx, batch) + txs, err := val.storage.FetchBatchTransactionsBySeq(ctx, batchHeader.SequencerOrderNo.Uint64()) if err != nil { - return fmt.Errorf("could not execute batch %s. Cause: %w", batch.Hash(), err) + return fmt.Errorf("could not get txs for batch %s. Cause: %w", batchHeader.Hash(), err) } - err = val.storage.StoreExecutedBatch(ctx, batch, receipts) + + batch := &core.Batch{ + Header: batchHeader, + Transactions: txs, + } + + receipts, contracts, err := val.batchExecutor.ExecuteBatch(ctx, batch) + if err != nil { + return fmt.Errorf("could not execute batchHeader %s. Cause: %w", batchHeader.Hash(), err) + } + err = val.storage.StoreExecutedBatch(ctx, batchHeader, receipts, contracts) if err != nil { - return fmt.Errorf("could not store executed batch %s. Cause: %w", batch.Hash(), err) + return fmt.Errorf("could not store executed batchHeader %s. Cause: %w", batchHeader.Hash(), err) } err = val.mempool.Chain.IngestNewBlock(batch) if err != nil { - return fmt.Errorf("failed to feed batch into the virtual eth chain- %w", err) + return fmt.Errorf("failed to feed batchHeader into the virtual eth chain- %w", err) } - val.batchRegistry.OnBatchExecuted(batch, receipts) + val.batchRegistry.OnBatchExecuted(batchHeader, receipts) } } return nil } -func (val *obsValidator) executionPrerequisites(ctx context.Context, batch *core.Batch) (bool, error) { +func (val *obsValidator) executionPrerequisites(ctx context.Context, batch *common.BatchHeader) (bool, error) { // 1.l1 block exists - block, err := val.storage.FetchBlock(ctx, batch.Header.L1Proof) + block, err := val.storage.FetchBlock(ctx, batch.L1Proof) if err != nil && errors.Is(err, errutil.ErrNotFound) { - val.logger.Warn("Error fetching block", log.BlockHashKey, batch.Header.L1Proof, log.ErrKey, err) + val.logger.Warn("Error fetching block", log.BlockHashKey, batch.L1Proof, log.ErrKey, err) return false, err } - val.logger.Trace("l1 block exists", log.BatchSeqNoKey, batch.SeqNo()) + val.logger.Trace("l1 block exists", log.BatchSeqNoKey, batch.SequencerOrderNo) // 2. 
parent was executed - parentExecuted, err := val.storage.BatchWasExecuted(ctx, batch.Header.ParentHash) + parentExecuted, err := val.storage.BatchWasExecuted(ctx, batch.ParentHash) if err != nil { - val.logger.Info("Error reading execution status of batch", log.BatchHashKey, batch.Header.ParentHash, log.ErrKey, err) + val.logger.Info("Error reading execution status of batch", log.BatchHashKey, batch.ParentHash, log.ErrKey, err) return false, err } - val.logger.Trace("parentExecuted", log.BatchSeqNoKey, batch.SeqNo(), "val", parentExecuted) + val.logger.Trace("parentExecuted", log.BatchSeqNoKey, batch.SequencerOrderNo, "val", parentExecuted) return block != nil && parentExecuted, nil } -func (val *obsValidator) handleGenesis(ctx context.Context, batch *core.Batch) error { - genBatch, _, err := val.batchExecutor.CreateGenesisState(ctx, batch.Header.L1Proof, batch.Header.Time, batch.Header.Coinbase, batch.Header.BaseFee) +func (val *obsValidator) handleGenesis(ctx context.Context, batch *common.BatchHeader) error { + genBatch, _, err := val.batchExecutor.CreateGenesisState(ctx, batch.L1Proof, batch.Time, batch.Coinbase, batch.BaseFee) if err != nil { return err } @@ -169,7 +179,7 @@ func (val *obsValidator) handleGenesis(ctx context.Context, batch *core.Batch) e return fmt.Errorf("received invalid genesis batch") } - err = val.storage.StoreExecutedBatch(ctx, genBatch, nil) + err = val.storage.StoreExecutedBatch(ctx, genBatch.Header, nil, nil) if err != nil { return err } diff --git a/go/enclave/rpc/EstimateGas.go b/go/enclave/rpc/EstimateGas.go index 0c583da43e..f09a3d4b96 100644 --- a/go/enclave/rpc/EstimateGas.go +++ b/go/enclave/rpc/EstimateGas.go @@ -75,7 +75,7 @@ func EstimateGasExecute(builder *CallBuilder[CallParamsWithBlock, hexutil.Uint64 } headBatchSeq := rpc.registry.HeadBatchSeq() - batch, err := rpc.storage.FetchBatchBySeqNo(builder.ctx, headBatchSeq.Uint64()) + batch, err := rpc.storage.FetchBatchHeaderBySeqNo(builder.ctx, headBatchSeq.Uint64()) if err != nil { return err } @@ -83,7 +83,7 @@ func EstimateGasExecute(builder *CallBuilder[CallParamsWithBlock, hexutil.Uint64 // We divide the total estimated l1 cost by the l2 fee per gas in order to convert // the expected cost into l2 gas based on current pricing. // todo @siliev - add overhead when the base fee becomes dynamic. - publishingGas := big.NewInt(0).Div(l1Cost, batch.Header.BaseFee) + publishingGas := big.NewInt(0).Div(l1Cost, batch.BaseFee) // The one additional gas captures the modulo leftover in some edge cases // where BaseFee is bigger than the l1cost. diff --git a/go/enclave/rpc/GetBalance.go b/go/enclave/rpc/GetBalance.go index 4fae78d2ab..18446a1d2a 100644 --- a/go/enclave/rpc/GetBalance.go +++ b/go/enclave/rpc/GetBalance.go @@ -43,9 +43,9 @@ func GetBalanceValidate(reqParams []any, builder *CallBuilder[BalanceReq, hexuti } func GetBalanceExecute(builder *CallBuilder[BalanceReq, hexutil.Big], rpc *EncryptionManager) error { - acctOwner, err := rpc.chain.AccountOwner(builder.ctx, *builder.Param.Addr, builder.Param.Block.BlockNumber) + acctOwner, err := rpc.chain.AccountOwner(builder.ctx, *builder.Param.Addr) if err != nil { - return err + return fmt.Errorf("cannot determine account owner. 
Cause: %w", err) } // authorise the call diff --git a/go/enclave/rpc/GetLogs.go b/go/enclave/rpc/GetLogs.go index 3d938883d0..8aac4ee974 100644 --- a/go/enclave/rpc/GetLogs.go +++ b/go/enclave/rpc/GetLogs.go @@ -51,12 +51,12 @@ func GetLogsExecute(builder *CallBuilder[filters.FilterCriteria, []*types.Log], from := filter.FromBlock if from != nil && from.Int64() < 0 { - batch, err := rpc.storage.FetchBatchBySeqNo(builder.ctx, rpc.registry.HeadBatchSeq().Uint64()) + batch, err := rpc.storage.FetchBatchHeaderBySeqNo(builder.ctx, rpc.registry.HeadBatchSeq().Uint64()) if err != nil { // system error return fmt.Errorf("could not retrieve head batch. Cause: %w", err) } - from = batch.Number() + from = batch.Number } // Set from to the height of the block hash diff --git a/go/enclave/rpc/GetTransactionCount.go b/go/enclave/rpc/GetTransactionCount.go index 99989b32af..5282f733fe 100644 --- a/go/enclave/rpc/GetTransactionCount.go +++ b/go/enclave/rpc/GetTransactionCount.go @@ -52,7 +52,7 @@ func GetTransactionCountExecute(builder *CallBuilder[uint64, string], rpc *Encry } var nonce uint64 - l2Head, err := rpc.storage.FetchBatchBySeqNo(builder.ctx, *builder.Param) + l2Head, err := rpc.storage.FetchBatchHeaderBySeqNo(builder.ctx, *builder.Param) if err == nil { // todo - we should return an error when head state is not available, but for current test situations with race // conditions we allow it to return zero while head state is uninitialized diff --git a/go/enclave/storage/enclavedb/batch.go b/go/enclave/storage/enclavedb/batch.go index 6ed0cd035a..005cb69f73 100644 --- a/go/enclave/storage/enclavedb/batch.go +++ b/go/enclave/storage/enclavedb/batch.go @@ -19,53 +19,21 @@ import ( ) const ( - selectBatch = "select b.header, bb.content from batch b join batch_body bb on b.body=bb.id" - - queryReceipts = "select exec_tx.receipt, tx.content, batch.hash, batch.height from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch " + queryReceipts = "select receipt.content, tx.content, batch.hash, batch.height from receipt join tx on tx.id=receipt.tx join batch on batch.sequence=receipt.batch " ) -// WriteBatchAndTransactions - persists the batch and the transactions -func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Batch, convertedHash gethcommon.Hash, blockId int64) error { - // todo - optimize for reorgs - batchBodyID := batch.SeqNo().Uint64() - - body, err := rlp.EncodeToBytes(batch.Transactions) - if err != nil { - return fmt.Errorf("could not encode L2 transactions. Cause: %w", err) - } +func WriteBatchHeader(ctx context.Context, dbtx *sql.Tx, batch *core.Batch, convertedHash gethcommon.Hash, blockId int64, isCanonical bool) error { header, err := rlp.EncodeToBytes(batch.Header) if err != nil { return fmt.Errorf("could not encode batch header. Cause: %w", err) } - - _, err = dbtx.ExecContext(ctx, "replace into batch_body values (?,?)", batchBodyID, body) - if err != nil { - return err - } - - isL1ProofCanonical, err := IsCanonicalBlock(ctx, dbtx, &batch.Header.L1Proof) - if err != nil { - return err - } - parentIsCanon, err := IsCanonicalBatch(ctx, dbtx, &batch.Header.ParentHash) - if err != nil { - return err - } - parentIsCanon = parentIsCanon || batch.SeqNo().Uint64() <= common.L2GenesisSeqNo+2 - - // sanity check that the parent is canonical - if isL1ProofCanonical && !parentIsCanon { - panic(fmt.Errorf("invalid chaining. Batch %s is canonical. 
Parent %s is not", batch.Hash(), batch.Header.ParentHash)) - } - args := []any{ batch.Header.SequencerOrderNo.Uint64(), // sequence convertedHash, // converted_hash batch.Hash(), // hash batch.Header.Number.Uint64(), // height - isL1ProofCanonical, // is_canonical + isCanonical, // is_canonical header, // header blob - batchBodyID, // reference to the batch body batch.Header.L1Proof.Bytes(), // l1 proof hash } if blockId == 0 { @@ -74,14 +42,36 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba args = append(args, blockId) } args = append(args, false) // executed - _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?,?)", args...) + _, err = dbtx.ExecContext(ctx, "insert into batch values (?,?,?,?,?,?,?,?,?)", args...) + return err +} + +func UpdateCanonicalBatch(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash) error { + args := make([]any, 0) + args = append(args, isCanonical) + for _, blockHash := range blocks { + args = append(args, blockHash.Bytes()) + } + + updateBatches := "update batch set is_canonical=? where " + repeat(" l1_proof_hash=? ", "OR", len(blocks)) + _, err := dbtx.ExecContext(ctx, updateBatches, args...) + return err +} + +func ExistsBatchAtHeight(ctx context.Context, dbTx *sql.Tx, height *big.Int) (bool, error) { + var exists bool + err := dbTx.QueryRowContext(ctx, "select exists(select 1 from batch where height=?)", height.Uint64()).Scan(&exists) if err != nil { - return err + return false, err } + return exists, nil +} - // creates a big insert statement for all transactions +// WriteTransactions - persists the batch and the transactions +func WriteTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Batch, senders []*uint64) error { + // creates a batch insert statement for all entries if len(batch.Transactions) > 0 { - insert := "replace into tx (hash, content, sender_address, nonce, idx, body) values " + repeat("(?,?,?,?,?,?)", ",", len(batch.Transactions)) + insert := "insert into tx (hash, content, sender_address, idx, batch_height) values " + repeat("(?,?,?,?,?)", ",", len(batch.Transactions)) args := make([]any, 0) for i, transaction := range batch.Transactions { @@ -90,28 +80,21 @@ func WriteBatchAndTransactions(ctx context.Context, dbtx *sql.Tx, batch *core.Ba return fmt.Errorf("failed to encode block receipts. Cause: %w", err) } - from, err := types.Sender(types.LatestSignerForChainID(transaction.ChainId()), transaction) - if err != nil { - return fmt.Errorf("unable to convert tx to message - %w", err) - } - - args = append(args, transaction.Hash()) // tx_hash - args = append(args, txBytes) // content - args = append(args, from.Bytes()) // sender_address - args = append(args, transaction.Nonce()) // nonce - args = append(args, i) // idx - args = append(args, batchBodyID) // the batch body which contained it + args = append(args, transaction.Hash()) // tx_hash + args = append(args, txBytes) // content + args = append(args, senders[i]) // sender_address + args = append(args, i) // idx + args = append(args, batch.Header.Number.Uint64()) // the batch height which contained it } - _, err = dbtx.ExecContext(ctx, insert, args...) + _, err := dbtx.ExecContext(ctx, insert, args...) 
if err != nil { return err } } - return nil } -func IsCanonicalBatch(ctx context.Context, dbtx *sql.Tx, hash *gethcommon.Hash) (bool, error) { +func IsCanonicalBatchHash(ctx context.Context, dbtx *sql.Tx, hash *gethcommon.Hash) (bool, error) { var isCanon bool err := dbtx.QueryRowContext(ctx, "select is_canonical from batch where hash=? ", hash.Bytes()).Scan(&isCanon) if err != nil { @@ -135,75 +118,64 @@ func IsCanonicalBatchSeq(ctx context.Context, db *sql.DB, seqNo uint64) (bool, e return isCanon, err } -// WriteBatchExecution - save receipts -func WriteBatchExecution(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int, receipts []*types.Receipt) error { +func MarkBatchExecuted(ctx context.Context, dbtx *sql.Tx, seqNo *big.Int) error { _, err := dbtx.ExecContext(ctx, "update batch set is_executed=true where sequence=?", seqNo.Uint64()) - if err != nil { - return err - } - - args := make([]any, 0) - for _, receipt := range receipts { - // Convert the receipt into their storage form and serialize them - storageReceipt := (*types.ReceiptForStorage)(receipt) - receiptBytes, err := rlp.EncodeToBytes(storageReceipt) - if err != nil { - return fmt.Errorf("failed to encode block receipts. Cause: %w", err) - } + return err +} - // ignore the error because synthetic transactions will not be inserted - txId, _ := GetTxId(ctx, dbtx, storageReceipt.TxHash) - args = append(args, receipt.ContractAddress.Bytes()) // created_contract_address - args = append(args, receiptBytes) // the serialised receipt - if txId == 0 { - args = append(args, nil) // tx id - } else { - args = append(args, txId) // tx id - } - args = append(args, seqNo.Uint64()) // batch_seq +func WriteReceipt(ctx context.Context, dbtx *sql.Tx, batchSeqNo uint64, txId *uint64, receipt []byte) (uint64, error) { + insert := "insert into receipt (content, tx, batch) values " + "(?,?,?)" + res, err := dbtx.ExecContext(ctx, insert, receipt, txId, batchSeqNo) + if err != nil { + return 0, err } - if len(args) > 0 { - insert := "insert into exec_tx (created_contract_address, receipt, tx, batch) values " + repeat("(?,?,?,?)", ",", len(receipts)) - _, err = dbtx.ExecContext(ctx, insert, args...) - if err != nil { - return err - } + id, err := res.LastInsertId() + if err != nil { + return 0, err } - return nil + return uint64(id), nil } -func GetTxId(ctx context.Context, dbtx *sql.Tx, txHash gethcommon.Hash) (int64, error) { - var txId int64 - err := dbtx.QueryRowContext(ctx, "select id from tx where hash=? ", txHash.Bytes()).Scan(&txId) - return txId, err +func ReadTransactionIdAndSender(ctx context.Context, dbtx *sql.Tx, txHash gethcommon.Hash) (*uint64, *uint64, error) { + var txId uint64 + var senderId uint64 + err := dbtx.QueryRowContext(ctx, "select id,sender_address from tx where hash=? ", txHash.Bytes()).Scan(&txId, &senderId) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return nil, nil, errutil.ErrNotFound + } + return nil, nil, err + } + return &txId, &senderId, err } -func ReadBatchBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*core.Batch, error) { - return fetchBatch(ctx, db, " where sequence=?", seqNo) +func ReadBatchHeaderBySeqNo(ctx context.Context, db *sql.DB, seqNo uint64) (*common.BatchHeader, error) { + return fetchBatchHeader(ctx, db, " where sequence=?", seqNo) } -func ReadBatchByHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (*core.Batch, error) { - return fetchBatch(ctx, db, " where b.hash=? 
", hash.Bytes()) +func ReadBatchHeaderByHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash) (*common.BatchHeader, error) { + return fetchBatchHeader(ctx, db, " where b.hash=? ", hash.Bytes()) } -func ReadCanonicalBatchByHeight(ctx context.Context, db *sql.DB, height uint64) (*core.Batch, error) { - return fetchBatch(ctx, db, " where b.height=? and is_canonical=true", height) +func ReadCanonicalBatchHeaderByHeight(ctx context.Context, db *sql.DB, height uint64) (*common.BatchHeader, error) { + return fetchBatchHeader(ctx, db, " where b.height=? and is_canonical=true", height) } -func ReadNonCanonicalBatches(ctx context.Context, db *sql.DB, startAtSeq uint64, endSeq uint64) ([]*core.Batch, error) { +func ReadNonCanonicalBatches(ctx context.Context, db *sql.DB, startAtSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) { return fetchBatches(ctx, db, " where b.sequence>=? and b.sequence <=? and b.is_canonical=false order by b.sequence", startAtSeq, endSeq) } -func ReadCanonicalBatches(ctx context.Context, db *sql.DB, startAtSeq uint64, endSeq uint64) ([]*core.Batch, error) { +func ReadCanonicalBatches(ctx context.Context, db *sql.DB, startAtSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) { return fetchBatches(ctx, db, " where b.sequence>=? and b.sequence <=? and b.is_canonical=true order by b.sequence", startAtSeq, endSeq) } // todo - is there a better way to write this query? -func ReadCurrentHeadBatch(ctx context.Context, db *sql.DB) (*core.Batch, error) { - return fetchBatch(ctx, db, " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true)") +func ReadCurrentHeadBatchHeader(ctx context.Context, db *sql.DB) (*common.BatchHeader, error) { + return fetchBatchHeader(ctx, db, " where b.is_canonical=true and b.is_executed=true and b.height=(select max(b1.height) from batch b1 where b1.is_canonical=true and b1.is_executed=true)") } -func ReadBatchesByBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) ([]*core.Batch, error) { +func ReadBatchesByBlock(ctx context.Context, db *sql.DB, hash common.L1BlockHash) ([]*common.BatchHeader, error) { return fetchBatches(ctx, db, " where l1_proof_hash=? order by b.sequence", hash.Bytes()) } @@ -224,15 +196,14 @@ func ReadCurrentSequencerNo(ctx context.Context, db *sql.DB) (*big.Int, error) { return big.NewInt(seq.Int64), nil } -func fetchBatch(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*core.Batch, error) { +func fetchBatchHeader(ctx context.Context, db *sql.DB, whereQuery string, args ...any) (*common.BatchHeader, error) { var header string - var body []byte - query := selectBatch + " " + whereQuery + query := "select b.header from batch b " + whereQuery var err error if len(args) > 0 { - err = db.QueryRowContext(ctx, query, args...).Scan(&header, &body) + err = db.QueryRowContext(ctx, query, args...).Scan(&header) } else { - err = db.QueryRowContext(ctx, query).Scan(&header, &body) + err = db.QueryRowContext(ctx, query).Scan(&header) } if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -245,23 +216,14 @@ func fetchBatch(ctx context.Context, db *sql.DB, whereQuery string, args ...any) if err := rlp.DecodeBytes([]byte(header), h); err != nil { return nil, fmt.Errorf("could not decode batch header. Cause: %w", err) } - txs := new([]*common.L2Tx) - if err := rlp.DecodeBytes(body, txs); err != nil { - return nil, fmt.Errorf("could not decode L2 transactions %v. 
Cause: %w", body, err) - } - - b := core.Batch{ - Header: h, - Transactions: *txs, - } - return &b, nil + return h, nil } -func fetchBatches(ctx context.Context, db *sql.DB, whereQuery string, args ...any) ([]*core.Batch, error) { - result := make([]*core.Batch, 0) +func fetchBatches(ctx context.Context, db *sql.DB, whereQuery string, args ...any) ([]*common.BatchHeader, error) { + result := make([]*common.BatchHeader, 0) - rows, err := db.QueryContext(ctx, selectBatch+" "+whereQuery, args...) + rows, err := db.QueryContext(ctx, "select b.header from batch b "+whereQuery, args...) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -275,8 +237,7 @@ func fetchBatches(ctx context.Context, db *sql.DB, whereQuery string, args ...an } for rows.Next() { var header string - var body []byte - err := rows.Scan(&header, &body) + err := rows.Scan(&header) if err != nil { return nil, err } @@ -284,16 +245,8 @@ func fetchBatches(ctx context.Context, db *sql.DB, whereQuery string, args ...an if err := rlp.DecodeBytes([]byte(header), h); err != nil { return nil, fmt.Errorf("could not decode batch header. Cause: %w", err) } - txs := new([]*common.L2Tx) - if err := rlp.DecodeBytes(body, txs); err != nil { - return nil, fmt.Errorf("could not decode L2 transactions %v. Cause: %w", body, err) - } - result = append(result, - &core.Batch{ - Header: h, - Transactions: *txs, - }) + result = append(result, h) } return result, nil } @@ -347,20 +300,8 @@ func selectReceipts(ctx context.Context, db *sql.DB, config *params.ChainConfig, return allReceipts, nil } -// ReadReceiptsByBatchHash retrieves all the transaction receipts belonging to a block, including -// its corresponding metadata fields. If it is unable to populate these metadata -// fields then nil is returned. -// -// The current implementation populates these metadata fields by reading the receipts' -// corresponding block body, so if the block body is not found it will return nil even -// if the receipt itself is stored. -func ReadReceiptsByBatchHash(ctx context.Context, db *sql.DB, hash common.L2BatchHash, config *params.ChainConfig) (types.Receipts, error) { - return selectReceipts(ctx, db, config, "where batch.hash=? ", hash.Bytes()) -} - func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config *params.ChainConfig) (*types.Receipt, error) { - // todo - canonical? - row := db.QueryRowContext(ctx, queryReceipts+" where tx.hash=? ", txHash.Bytes()) + row := db.QueryRowContext(ctx, queryReceipts+" where batch.is_canonical=true AND tx.hash=? 
", txHash.Bytes()) // receipt, tx, batch, height var receiptData []byte var txData []byte @@ -397,7 +338,7 @@ func ReadReceipt(ctx context.Context, db *sql.DB, txHash common.L2TxHash, config func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) { row := db.QueryRowContext(ctx, - "select tx.content, batch.hash, batch.height, tx.idx from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch where batch.is_canonical=true and tx.hash=?", + "select tx.content, batch.hash, batch.height, tx.idx from receipt join tx on tx.id=receipt.tx join batch on batch.sequence=receipt.batch where batch.is_canonical=true and tx.hash=?", txHash.Bytes()) // tx, batch, height, idx @@ -422,11 +363,10 @@ func ReadTransaction(ctx context.Context, db *sql.DB, txHash gethcommon.Hash) (* return tx, batch, height, idx, nil } -func GetContractCreationTx(ctx context.Context, db *sql.DB, address gethcommon.Address) (*gethcommon.Hash, error) { - row := db.QueryRowContext(ctx, "select tx.hash from exec_tx join tx on tx.id=exec_tx.tx where created_contract_address=? ", address.Bytes()) +func ReadBatchTransactions(ctx context.Context, db *sql.DB, height uint64) ([]*common.L2Tx, error) { + var txs []*common.L2Tx - var txHashBytes []byte - err := row.Scan(&txHashBytes) + rows, err := db.QueryContext(ctx, "select content from tx where batch_height=? order by idx", height) if err != nil { if errors.Is(err, sql.ErrNoRows) { // make sure the error is converted to obscuro-wide not found error @@ -434,13 +374,29 @@ func GetContractCreationTx(ctx context.Context, db *sql.DB, address gethcommon.A } return nil, err } - txHash := gethcommon.Hash{} - txHash.SetBytes(txHashBytes) - return &txHash, nil + defer rows.Close() + for rows.Next() { + // receipt, tx, batch, height + var txContent []byte + err := rows.Scan(&txContent) + if err != nil { + return nil, err + } + tx := new(common.L2Tx) + if err := rlp.DecodeBytes(txContent, tx); err != nil { + return nil, fmt.Errorf("could not decode L2 transaction. Cause: %w", err) + } + txs = append(txs, tx) + } + if rows.Err() != nil { + return nil, rows.Err() + } + + return txs, nil } func ReadContractCreationCount(ctx context.Context, db *sql.DB) (*big.Int, error) { - row := db.QueryRowContext(ctx, "select count( distinct created_contract_address) from exec_tx ") + row := db.QueryRowContext(ctx, "select count(id) from contract") var count int64 err := row.Scan(&count) @@ -451,7 +407,7 @@ func ReadContractCreationCount(ctx context.Context, db *sql.DB) (*big.Int, error return big.NewInt(count), nil } -func ReadUnexecutedBatches(ctx context.Context, db *sql.DB, from *big.Int) ([]*core.Batch, error) { +func ReadUnexecutedBatches(ctx context.Context, db *sql.DB, from *big.Int) ([]*common.BatchHeader, error) { return fetchBatches(ctx, db, "where is_executed=false and is_canonical=true and sequence >= ? 
order by b.sequence", from.Uint64()) } @@ -476,7 +432,7 @@ func GetTransactionsPerAddress(ctx context.Context, db *sql.DB, config *params.C } func CountTransactionsPerAddress(ctx context.Context, db *sql.DB, address *gethcommon.Address) (uint64, error) { - row := db.QueryRowContext(ctx, "select count(1) from exec_tx join tx on tx.id=exec_tx.tx join batch on batch.sequence=exec_tx.batch "+" where tx.sender_address = ?", address.Bytes()) + row := db.QueryRowContext(ctx, "select count(1) from receipt join tx on tx.id=receipt.tx join batch on batch.sequence=receipt.batch "+" where tx.sender_address = ?", address.Bytes()) var count uint64 err := row.Scan(&count) diff --git a/go/enclave/storage/enclavedb/block.go b/go/enclave/storage/enclavedb/block.go index 3ded5d150d..2e9cd6e35e 100644 --- a/go/enclave/storage/enclavedb/block.go +++ b/go/enclave/storage/enclavedb/block.go @@ -10,8 +10,6 @@ import ( gethcommon "github.com/ethereum/go-ethereum/common" - gethlog "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" "github.com/ten-protocol/go-ten/go/common" @@ -33,28 +31,16 @@ func WriteBlock(ctx context.Context, dbtx *sql.Tx, b *types.Header) error { return err } -func UpdateCanonicalValue(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash, _ gethlog.Logger) error { - currentBlocks := repeat(" hash=? ", "OR", len(blocks)) - +func UpdateCanonicalBlock(ctx context.Context, dbtx *sql.Tx, isCanonical bool, blocks []common.L1BlockHash) error { args := make([]any, 0) args = append(args, isCanonical) for _, blockHash := range blocks { args = append(args, blockHash.Bytes()) } - updateBlocks := "update block set is_canonical=? where " + currentBlocks + updateBlocks := "update block set is_canonical=? where " + repeat(" hash=? ", "OR", len(blocks)) _, err := dbtx.ExecContext(ctx, updateBlocks, args...) - if err != nil { - return err - } - - updateBatches := "update batch set is_canonical=? where l1_proof in (select id from block where " + currentBlocks + ")" - _, err = dbtx.ExecContext(ctx, updateBatches, args...) 
- if err != nil { - return err - } - - return nil + return err } func IsCanonicalBlock(ctx context.Context, dbtx *sql.Tx, hash *gethcommon.Hash) (bool, error) { diff --git a/go/enclave/storage/enclavedb/events.go b/go/enclave/storage/enclavedb/events.go index 67a0a90ca7..dc53b3994d 100644 --- a/go/enclave/storage/enclavedb/events.go +++ b/go/enclave/storage/enclavedb/events.go @@ -3,137 +3,90 @@ package enclavedb import ( "context" "database/sql" + "errors" "fmt" "math/big" + "strconv" - "github.com/ten-protocol/go-ten/go/enclave/core" + "github.com/ten-protocol/go-ten/go/common/errutil" gethcommon "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ten-protocol/go-ten/go/common" "github.com/ten-protocol/go-ten/go/common/tracers" ) const ( - baseEventsJoin = "from events e join exec_tx extx on e.tx=extx.tx and e.batch=extx.batch join tx on extx.tx=tx.id join batch b on extx.batch=b.sequence where b.is_canonical=true " + baseEventsJoin = "from event_log e " + + "join receipt extx on e.receipt=extx.id" + + " join tx on extx.tx=tx.id " + + " join batch b on extx.batch=b.sequence " + + "join event_type et on e.event_type=et.id " + + " join contract c on et.contract=c.id " + + "left join event_topic t1 on e.topic1=t1.id " + + " left join externally_owned_account eoa1 on t1.rel_address=eoa1.id " + + "left join event_topic t2 on e.topic2=t2.id " + + " left join externally_owned_account eoa2 on t2.rel_address=eoa2.id " + + "left join event_topic t3 on e.topic3=t3.id" + + " left join externally_owned_account eoa3 on t3.rel_address=eoa3.id " + + "where b.is_canonical=true " ) -func StoreEventLogs(ctx context.Context, dbtx *sql.Tx, receipts []*types.Receipt, batch *core.Batch, stateDB *state.StateDB) error { - var args []any - totalLogs := 0 - for _, receipt := range receipts { - for _, l := range receipt.Logs { - logArgs, err := logDBValues(ctx, dbtx, l, stateDB) - if err != nil { - return err - } - args = append(args, logArgs...) - txId, _ := GetTxId(ctx, dbtx, l.TxHash) - if txId == 0 { - args = append(args, nil) - } else { - args = append(args, txId) - } - args = append(args, batch.SeqNo().Uint64()) - totalLogs++ - } +func WriteEventType(ctx context.Context, dbTX *sql.Tx, contractID *uint64, eventSignature gethcommon.Hash, isLifecycle bool) (uint64, error) { + res, err := dbTX.ExecContext(ctx, "insert into event_type (contract, event_sig, lifecycle_event) values (?, ?, ?)", contractID, eventSignature.Bytes(), isLifecycle) + if err != nil { + return 0, err } - if totalLogs > 0 { - query := "insert into events (topic0,topic1,topic2,topic3,topic4,datablob,log_idx,address,lifecycle_event,rel_address1,rel_address2,rel_address3,rel_address4,tx,batch) values " + - repeat("(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ",", totalLogs) - _, err := dbtx.ExecContext(ctx, query, args...) 
- if err != nil { - return err - } + id, err := res.LastInsertId() + if err != nil { + return 0, err } - return nil + return uint64(id), nil } -// This method stores a log entry together with relevancy metadata -// Each types.Log has 5 indexable topics, where the first one is the event signature hash -// The other 4 topics are set by the programmer -// According to the data relevancy rules, an event is relevant to accounts referenced directly in topics -// If the event is not referring any user address, it is considered a "lifecycle event", and is relevant to everyone -func logDBValues(ctx context.Context, db *sql.Tx, l *types.Log, stateDB *state.StateDB) ([]any, error) { - // The topics are stored in an array with a maximum of 5 entries, but usually less - var t0, t1, t2, t3, t4 []byte - - // these are the addresses to which this event might be relevant to. - var addr1, addr2, addr3, addr4 *gethcommon.Address - var a1, a2, a3, a4 []byte - - // start with true, and as soon as a user address is discovered, it becomes false - isLifecycle := true - - // internal variable - var isUserAccount bool - - n := len(l.Topics) - if n > 0 { - t0 = l.Topics[0].Bytes() - } - var err error - // for every indexed topic, check whether it is an end user account - // if yes, then mark it as relevant for that account - if n > 1 { - t1 = l.Topics[1].Bytes() - isUserAccount, addr1, err = isEndUserAccount(ctx, db, l.Topics[1], stateDB) - if err != nil { - return nil, err - } - isLifecycle = isLifecycle && !isUserAccount - if addr1 != nil { - a1 = addr1.Bytes() - } +func ReadEventType(ctx context.Context, dbTX *sql.Tx, contractId uint64, eventSignature gethcommon.Hash) (uint64, bool, error) { + var id uint64 + var isLifecycle bool + err := dbTX.QueryRowContext(ctx, "select id, lifecycle_event from event_type where contract=? and event_sig=?", contractId, eventSignature.Bytes()).Scan(&id, &isLifecycle) + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return 0, false, errutil.ErrNotFound } - if n > 2 { - t2 = l.Topics[2].Bytes() - isUserAccount, addr2, err = isEndUserAccount(ctx, db, l.Topics[2], stateDB) - if err != nil { - return nil, err - } - isLifecycle = isLifecycle && !isUserAccount - if addr2 != nil { - a2 = addr2.Bytes() - } - } - if n > 3 { - t3 = l.Topics[3].Bytes() - isUserAccount, addr3, err = isEndUserAccount(ctx, db, l.Topics[3], stateDB) - if err != nil { - return nil, err - } - isLifecycle = isLifecycle && !isUserAccount - if addr3 != nil { - a3 = addr3.Bytes() - } + return id, isLifecycle, err +} + +func WriteEventTopic(ctx context.Context, dbTX *sql.Tx, topic *gethcommon.Hash, addressId *uint64) (uint64, error) { + res, err := dbTX.ExecContext(ctx, "insert into event_topic (topic, rel_address) values (?, ?)", topic.Bytes(), addressId) + if err != nil { + return 0, err } - if n > 4 { - t4 = l.Topics[4].Bytes() - isUserAccount, addr4, err = isEndUserAccount(ctx, db, l.Topics[4], stateDB) - if err != nil { - return nil, err - } - isLifecycle = isLifecycle && !isUserAccount - if addr4 != nil { - a4 = addr4.Bytes() - } + id, err := res.LastInsertId() + if err != nil { + return 0, err } + return uint64(id), nil +} + +func UpdateEventTopicLifecycle(ctx context.Context, dbTx *sql.Tx, etId uint64, isLifecycle bool) error { + _, err := dbTx.ExecContext(ctx, "update event_topic set lifecycle_event=? 
where id=?", isLifecycle, etId) + return err +} - // normalise the data field to nil to avoid duplicates - data := l.Data - if len(data) == 0 { - data = nil +func ReadEventTopic(ctx context.Context, dbTX *sql.Tx, topic []byte) (uint64, *uint64, error) { + var id uint64 + var address *uint64 + err := dbTX.QueryRowContext(ctx, "select id, rel_address from event_topic where topic=? ", topic).Scan(&id, &address) + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return 0, nil, errutil.ErrNotFound } + return id, address, err +} - return []any{ - t0, t1, t2, t3, t4, - data, l.Index, - l.Address.Bytes(), - isLifecycle, - a1, a2, a3, a4, - }, nil +func WriteEventLog(ctx context.Context, dbTX *sql.Tx, eventTypeId uint64, userTopics []*uint64, data []byte, logIdx uint, execTx uint64) error { + _, err := dbTX.ExecContext(ctx, "insert into event_log (event_type, topic1, topic2, topic3, datablob, log_idx, receipt) values (?,?,?,?,?,?,?)", + eventTypeId, userTopics[0], userTopics[1], userTopics[2], data, logIdx, execTx) + return err } func FilterLogs( @@ -163,25 +116,25 @@ func FilterLogs( } if len(addresses) > 0 { - cond := repeat("(address=?)", " OR ", len(addresses)) - query += " AND (" + cond + ")" + query += " AND c.address in (" + repeat("?", ",", len(addresses)) + ")" for _, address := range addresses { queryParams = append(queryParams, address.Bytes()) } } - if len(topics) > 5 { + if len(topics) > 4 { return nil, fmt.Errorf("invalid filter. Too many topics") } - if len(topics) > 0 { - for i, sub := range topics { - // empty rule set == wildcard - if len(sub) > 0 { - topicColumn := fmt.Sprintf("topic%d", i) - cond := repeat(fmt.Sprintf("(%s=? )", topicColumn), " OR ", len(sub)) - query += " AND (" + cond + ")" - for _, topic := range sub { - queryParams = append(queryParams, topic.Bytes()) - } + + for i := 0; i < len(topics); i++ { + if len(topics[i]) > 0 { + valuesIn := "IN (" + repeat("?", ",", len(topics[i])) + ")" + if i == 0 { + query += " AND et.event_sig " + valuesIn + } else { + query += " AND t" + strconv.Itoa(i) + ".topic " + valuesIn + } + for _, hash := range topics[i] { + queryParams = append(queryParams, hash.Bytes()) } } } @@ -192,7 +145,7 @@ func FilterLogs( func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tracers.DebugLogs, error) { var queryParams []any - query := "select rel_address1, rel_address2, rel_address3, rel_address4, lifecycle_event, topic0, topic1, topic2, topic3, topic4, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, address " + + query := "select eoa1.address, eoa2.address, eoa3.address, et.lifecycle_event, et.event_sig, t1.topic, t2.topic, t3.topic, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, c.address " + baseEventsJoin + " AND tx.hash = ? 
" @@ -214,15 +167,14 @@ func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tra LifecycleEvent: false, } - var t0, t1, t2, t3, t4 sql.NullString - var relAddress1, relAddress2, relAddress3, relAddress4 []byte + var t0, t1, t2, t3 sql.NullString + var relAddress1, relAddress2, relAddress3 []byte err = rows.Scan( &relAddress1, &relAddress2, &relAddress3, - &relAddress4, &l.LifecycleEvent, - &t0, &t1, &t2, &t3, &t4, + &t0, &t1, &t2, &t3, &l.Data, &l.BlockHash, &l.BlockNumber, @@ -235,7 +187,7 @@ func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tra return nil, fmt.Errorf("could not load log entry from db: %w", err) } - for _, topic := range []sql.NullString{t0, t1, t2, t3, t4} { + for _, topic := range []sql.NullString{t0, t1, t2, t3} { if topic.Valid { l.Topics = append(l.Topics, stringToHash(topic)) } @@ -244,7 +196,6 @@ func DebugGetLogs(ctx context.Context, db *sql.DB, txHash common.TxHash) ([]*tra l.RelAddress1 = bytesToAddress(relAddress1) l.RelAddress2 = bytesToAddress(relAddress2) l.RelAddress3 = bytesToAddress(relAddress3) - l.RelAddress4 = bytesToAddress(relAddress4) result = append(result, &l) } @@ -264,60 +215,21 @@ func bytesToAddress(b []byte) *gethcommon.Address { return nil } -// Of the log's topics, returns those that are (potentially) user addresses. A topic is considered a user address if: -// - It has at least 12 leading zero bytes (since addresses are 20 bytes long, while hashes are 32) and at most 22 leading zero bytes -// - It does not have associated code (meaning it's a smart-contract address) -// - It has a non-zero nonce (to prevent accidental or malicious creation of the address matching a given topic, -// forcing its events to become permanently private (this is not implemented for now) -// -// todo - find a more efficient way -func isEndUserAccount(ctx context.Context, db *sql.Tx, topic gethcommon.Hash, stateDB *state.StateDB) (bool, *gethcommon.Address, error) { - potentialAddr := common.ExtractPotentialAddress(topic) - if potentialAddr == nil { - return false, nil, nil - } - addrBytes := potentialAddr.Bytes() - // Check the database if there are already entries for this address - var count int - query := "select count(*) from events where (rel_address1=?) OR (rel_address2=?) OR (rel_address3=? ) OR (rel_address4=? )" - err := db.QueryRowContext(ctx, query, addrBytes, addrBytes, addrBytes, addrBytes).Scan(&count) - if err != nil { - // exit here - return false, nil, err - } - - if count > 0 { - return true, potentialAddr, nil - } - - // TODO A user address must have a non-zero nonce. This prevents accidental or malicious sending of funds to an - // address matching a topic, forcing its events to become permanently private. - // if db.GetNonce(potentialAddr) != 0 - - // If the address has code, it's a smart contract address instead. 
- if stateDB.GetCode(*potentialAddr) == nil { - return true, potentialAddr, nil - } - - return false, nil, nil -} - // utility function that knows how to load relevant logs from the database // todo always pass in the actual batch hashes because of reorgs, or make sure to clean up log entries from discarded batches func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Address, whereCondition string, whereParams []any) ([]*types.Log, error) { - if requestingAccount == nil { + if requestingAccount == nil { // todo - only restrict to lifecycle events if requesting==nil return nil, fmt.Errorf("logs can only be requested for an account") } result := make([]*types.Log, 0) - query := "select topic0, topic1, topic2, topic3, topic4, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, address" + " " + baseEventsJoin + query := "select et.event_sig, t1.topic, t2.topic, t3.topic, datablob, b.hash, b.height, tx.hash, tx.idx, log_idx, c.address" + " " + baseEventsJoin var queryParams []any // Add relevancy rules // An event is considered relevant to all account owners whose addresses are used as topics in the event. // In case there are no account addresses in an event's topics, then the event is considered relevant to everyone (known as a "lifecycle event"). - query += " AND (lifecycle_event OR (rel_address1=? OR rel_address2=? OR rel_address3=? OR rel_address4=?)) " - queryParams = append(queryParams, requestingAccount.Bytes()) + query += " AND (et.lifecycle_event=true OR eoa1.address=? OR eoa2.address=? OR eoa3.address=?) " queryParams = append(queryParams, requestingAccount.Bytes()) queryParams = append(queryParams, requestingAccount.Bytes()) queryParams = append(queryParams, requestingAccount.Bytes()) @@ -335,15 +247,15 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add for rows.Next() { l := types.Log{ - Topics: []gethcommon.Hash{}, + Topics: make([]gethcommon.Hash, 0), } - var t0, t1, t2, t3, t4 []byte - err = rows.Scan(&t0, &t1, &t2, &t3, &t4, &l.Data, &l.BlockHash, &l.BlockNumber, &l.TxHash, &l.TxIndex, &l.Index, &l.Address) + var t0, t1, t2, t3 []byte + err = rows.Scan(&t0, &t1, &t2, &t3, &l.Data, &l.BlockHash, &l.BlockNumber, &l.TxHash, &l.TxIndex, &l.Index, &l.Address) if err != nil { return nil, fmt.Errorf("could not load log entry from db: %w", err) } - for _, topic := range [][]byte{t0, t1, t2, t3, t4} { + for _, topic := range [][]byte{t0, t1, t2, t3} { if len(topic) > 0 { l.Topics = append(l.Topics, byteArrayToHash(topic)) } @@ -359,6 +271,81 @@ func loadLogs(ctx context.Context, db *sql.DB, requestingAccount *gethcommon.Add return result, nil } +func WriteEoa(ctx context.Context, dbTX *sql.Tx, sender gethcommon.Address) (uint64, error) { + insert := "insert into externally_owned_account (address) values (?)" + res, err := dbTX.ExecContext(ctx, insert, sender.Bytes()) + if err != nil { + return 0, err + } + id, err := res.LastInsertId() + if err != nil { + return 0, err + } + return uint64(id), nil +} + +func ReadEoa(ctx context.Context, dbTx *sql.Tx, addr gethcommon.Address) (uint64, error) { + row := dbTx.QueryRowContext(ctx, "select id from externally_owned_account where address = ?", addr.Bytes()) + + var id uint64 + err := row.Scan(&id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return 0, errutil.ErrNotFound + } + return 0, err + } + + return id, nil +} + +func WriteContractAddress(ctx context.Context, dbTX *sql.Tx, contractAddress 
*gethcommon.Address, eoaId uint64) (*uint64, error) { + insert := "insert into contract (address, owner) values (?,?)" + res, err := dbTX.ExecContext(ctx, insert, contractAddress.Bytes(), eoaId) + if err != nil { + return nil, err + } + id, err := res.LastInsertId() + if err != nil { + return nil, err + } + v := uint64(id) + return &v, nil +} + +func ReadContractAddress(ctx context.Context, dbTx *sql.Tx, addr gethcommon.Address) (*uint64, error) { + row := dbTx.QueryRowContext(ctx, "select id from contract where address = ?", addr.Bytes()) + + var id uint64 + err := row.Scan(&id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return nil, errutil.ErrNotFound + } + return nil, err + } + + return &id, nil +} + +func ReadContractOwner(ctx context.Context, db *sql.DB, address gethcommon.Address) (*gethcommon.Address, error) { + row := db.QueryRowContext(ctx, "select eoa.address from contract c join externally_owned_account eoa on c.owner=eoa.id where c.address = ?", address.Bytes()) + + var eoaAddress gethcommon.Address + err := row.Scan(&eoaAddress) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // make sure the error is converted to obscuro-wide not found error + return nil, errutil.ErrNotFound + } + return nil, err + } + + return &eoaAddress, nil +} + func stringToHash(ns sql.NullString) gethcommon.Hash { value, err := ns.Value() if err != nil { diff --git a/go/enclave/storage/init/edgelessdb/001_init.sql b/go/enclave/storage/init/edgelessdb/001_init.sql index 878b2452c2..48046641b0 100644 --- a/go/enclave/storage/init/edgelessdb/001_init.sql +++ b/go/enclave/storage/init/edgelessdb/001_init.sql @@ -7,7 +7,6 @@ create table if not exists obsdb.keyvalue ky varbinary(64) NOT NULL, val mediumblob NOT NULL, primary key (id), - UNIQUE (ky), INDEX USING HASH (ky) ); GRANT ALL ON obsdb.keyvalue TO obscuro; @@ -39,7 +38,7 @@ create table if not exists obsdb.block height int NOT NULL, primary key (id), INDEX (height), - INDEX USING HASH (hash(8)) + INDEX USING HASH (hash) ); GRANT ALL ON obsdb.block TO obscuro; @@ -64,19 +63,11 @@ create table if not exists obsdb.rollup header blob NOT NULL, compression_block INTEGER NOT NULL, INDEX (compression_block), - INDEX USING HASH (hash(8)), + INDEX USING HASH (hash), primary key (id) ); GRANT ALL ON obsdb.rollup TO obscuro; -create table if not exists obsdb.batch_body -( - id INTEGER, - content mediumblob NOT NULL, - primary key (id) -); -GRANT ALL ON obsdb.batch_body TO obscuro; - create table if not exists obsdb.batch ( sequence INTEGER, @@ -85,14 +76,12 @@ create table if not exists obsdb.batch height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, - body int NOT NULL, l1_proof_hash binary(32) NOT NULL, l1_proof INTEGER, is_executed boolean NOT NULL, primary key (sequence), - INDEX USING HASH (hash(8)), - INDEX USING HASH (l1_proof_hash(8)), - INDEX (body), + INDEX USING HASH (hash), + INDEX USING HASH (l1_proof_hash), INDEX (l1_proof), INDEX (height) ); @@ -103,58 +92,80 @@ create table if not exists obsdb.tx id INTEGER AUTO_INCREMENT, hash binary(32) NOT NULL, content mediumblob NOT NULL, - sender_address binary(20) NOT NULL, - nonce int NOT NULL, + sender_address int NOT NULL, idx int NOT NULL, - body int NOT NULL, - INDEX USING HASH (hash(8)), - INDEX USING HASH (sender_address), + batch_height int NOT NULL, + INDEX USING HASH (hash), + INDEX (sender_address), + INDEX (batch_height, idx), primary key (id) ); GRANT ALL ON obsdb.tx TO obscuro; 
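-- For reference, a sketch of the read path these tables support (mirroring the queryReceipts
-- join defined in go/enclave/storage/enclavedb/batch.go in this change); the query below is
-- illustrative only and not part of the schema migration:
--   select receipt.content, tx.content, batch.hash, batch.height
--   from receipt
--     join tx on tx.id = receipt.tx
--     join batch on batch.sequence = receipt.batch
--   where batch.is_canonical = true and tx.hash = ?;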
-create table if not exists obsdb.exec_tx +create table if not exists obsdb.receipt ( id INTEGER AUTO_INCREMENT, - created_contract_address binary(20), - receipt mediumblob, + content mediumblob, tx int, batch int NOT NULL, INDEX (batch), - INDEX (tx, created_contract_address(4)), + INDEX (tx), primary key (id) ); -GRANT ALL ON obsdb.exec_tx TO obscuro; +GRANT ALL ON obsdb.receipt TO obscuro; -create table if not exists obsdb.events +create table if not exists obsdb.contract +( + id INTEGER AUTO_INCREMENT, + address binary(20) NOT NULL, + owner int NOT NULL, + primary key (id), + INDEX USING HASH (address) +); +GRANT ALL ON obsdb.contract TO obscuro; + +create table if not exists obsdb.externally_owned_account +( + id INTEGER AUTO_INCREMENT, + address binary(20) NOT NULL, + primary key (id), + INDEX USING HASH (address) +); +GRANT ALL ON obsdb.externally_owned_account TO obscuro; + +create table if not exists obsdb.event_type ( id INTEGER AUTO_INCREMENT, - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(20) NOT NULL, + contract int NOT NULL, + event_sig binary(32) NOT NULL, lifecycle_event boolean NOT NULL, - rel_address1 binary(20), - rel_address2 binary(20), - rel_address3 binary(20), - rel_address4 binary(20), - tx int NOT NULL, - batch int NOT NULL, primary key (id), - INDEX (tx, batch), - INDEX USING HASH (address(8)), - INDEX USING HASH (rel_address1(8)), - INDEX USING HASH (rel_address2(8)), - INDEX USING HASH (rel_address3(8)), - INDEX USING HASH (rel_address4(8)), - INDEX USING HASH (topic0(8)), - INDEX USING HASH (topic1(8)), - INDEX USING HASH (topic2(8)), - INDEX USING HASH (topic3(8)), - INDEX USING HASH (topic4(8)) + INDEX USING HASH (contract, event_sig) +); +GRANT ALL ON obsdb.event_type TO obscuro; + +create table if not exists obsdb.event_topic +( + id INTEGER AUTO_INCREMENT, + topic binary(32) NOT NULL, + rel_address INTEGER, + primary key (id), + INDEX USING HASH (topic), + INDEX (rel_address) +); +GRANT ALL ON obsdb.event_topic TO obscuro; + +create table if not exists obsdb.event_log +( + id INTEGER AUTO_INCREMENT, + event_type INTEGER NOT NULL, + topic1 INTEGER, + topic2 INTEGER, + topic3 INTEGER, + datablob mediumblob, + log_idx INTEGER NOT NULL, + receipt INTEGER NOT NULL, + primary key (id), + INDEX (receipt, event_type, topic1, topic2, topic3) ); -GRANT ALL ON obsdb.events TO obscuro; \ No newline at end of file +GRANT ALL ON obsdb.event_log TO obscuro; \ No newline at end of file diff --git a/go/enclave/storage/init/edgelessdb/edgelessdb.go b/go/enclave/storage/init/edgelessdb/edgelessdb.go index 06d08e40b9..d9bcab2e7f 100644 --- a/go/enclave/storage/init/edgelessdb/edgelessdb.go +++ b/go/enclave/storage/init/edgelessdb/edgelessdb.go @@ -41,15 +41,15 @@ import ( ) /* - The Obscuro Enclave (OE) needs a way to persist data into a trusted database. Trusted not to reveal that data to anyone but that particular enclave. + The Ten Enclave (TE) needs a way to persist data into a trusted database. Trusted not to reveal that data to anyone but that particular enclave. - To achieve this, the OE must first perform Remote Attestation (RA), which gives it confidence that it is connected to + To achieve this, the TE must first perform Remote Attestation (RA), which gives it confidence that it is connected to a trusted version of software running on trusted hardware. 
The result of this process is a Certificate which can be used to set up a trusted TLS connection into the database. - The next step is to configure the database schema and users in such a way that the OE knows that the db engine will + The next step is to configure the database schema and users in such a way that the TE knows that the db engine will only allow itself access to it. This is achieved by creating a "Manifest" file that contains the SQL init code and a - DBClient Certificate that is known only to the OE. + DBClient Certificate that is known only to the TE. This "DBClient" Cert is used by the database to authenticate that it is communicating to the entity that has initialised that schema. @@ -130,6 +130,7 @@ type Credentials struct { UserKeyPEM string // db user private key, generated in our enclave } +// Connector (re-)establishes a connection to the Edgeless DB for the Ten enclave func Connector(edbCfg *Config, config config.EnclaveConfig, logger gethlog.Logger) (enclavedb.EnclaveDB, error) { // rather than fail immediately if EdgelessDB is not available yet we wait up for `edgelessDBStartTimeout` for it to be available err := waitForEdgelessDBToStart(edbCfg.Host, logger) @@ -143,12 +144,12 @@ func Connector(edbCfg *Config, config config.EnclaveConfig, logger gethlog.Logge return nil, err } - tlsCfg, err := createTLSCfg(edbCredentials) + tlsCfg, err := CreateTLSCfg(edbCredentials) if err != nil { return nil, err } - sqlDB, err := connectToEdgelessDB(edbCfg.Host, tlsCfg, logger) + sqlDB, err := ConnectToEdgelessDB(edbCfg.Host, tlsCfg, logger) if err != nil { return nil, err } @@ -183,7 +184,7 @@ func waitForEdgelessDBToStart(edbHost string, logger gethlog.Logger) error { func getHandshakeCredentials(enclaveConfig config.EnclaveConfig, edbCfg *Config, logger gethlog.Logger) (*Credentials, error) { // if we have previously performed the handshake we can retrieve the creds from disk and proceed - edbCreds, found, err := loadCredentialsFromFile() + edbCreds, found, err := LoadCredentialsFromFile() if err != nil { return nil, err } @@ -198,8 +199,8 @@ func getHandshakeCredentials(enclaveConfig config.EnclaveConfig, edbCfg *Config, return edbCreds, nil } -// loadCredentialsFromFile returns (credentials object, found flag, error), if file not found it will return nil error but found=false -func loadCredentialsFromFile() (*Credentials, bool, error) { +// LoadCredentialsFromFile returns (credentials object, found flag, error), if file not found it will return nil error but found=false +func LoadCredentialsFromFile() (*Credentials, bool, error) { b, err := egoutils.ReadAndUnseal(edbCredentialsFilepath) if err != nil { if os.IsNotExist(err) { @@ -227,8 +228,8 @@ func performHandshake(enclaveConfig config.EnclaveConfig, edbCfg *Config, logger // the RA will ensure that we are connecting to a database that will not leak any data. // The RA will return a Certificate which we'll use for the TLS mutual authentication when we connect to the database. // The trust path is as follows: - // 1. The Obscuro Enclave performs RA on the database enclave, and the RA object contains a certificate which only the database enclave controls. - // 2. Connecting to the database via mutually authenticated TLS using the above certificate, will give the Obscuro enclave confidence that it is only giving data away to some code and hardware it trusts. + // 1. The Ten Enclave performs RA on the database enclave, and the RA object contains a certificate which only the database enclave controls. + // 2. 
Connecting to the database via mutually authenticated TLS using the above certificate, will give the Ten enclave confidence that it is only giving data away to some code and hardware it trusts. edbPEM, err := performEDBRemoteAttestation(enclaveConfig, edbCfg.Host, defaultEDBConstraints, logger) if err != nil { return nil, err @@ -304,7 +305,7 @@ func createManifestFormat(content string) (result []string) { return } -func createTLSCfg(creds *Credentials) (*tls.Config, error) { +func CreateTLSCfg(creds *Credentials) (*tls.Config, error) { caCertPool := x509.NewCertPool() if ok := caCertPool.AppendCertsFromPEM([]byte(creds.EDBCACertPEM)); !ok { @@ -458,7 +459,7 @@ func verifyEdgelessDB(edbHost string, m *manifest, httpClient *http.Client, logg return nil } -func connectToEdgelessDB(edbHost string, tlsCfg *tls.Config, logger gethlog.Logger) (*sql.DB, error) { +func ConnectToEdgelessDB(edbHost string, tlsCfg *tls.Config, logger gethlog.Logger) (*sql.DB, error) { err := mysql.RegisterTLSConfig("custom", tlsCfg) if err != nil { return nil, fmt.Errorf("failed to register tls config for mysql connection - %w", err) diff --git a/go/enclave/storage/init/sqlite/001_init.sql b/go/enclave/storage/init/sqlite/001_init.sql index 9a0ea2e06b..1085d1a45e 100644 --- a/go/enclave/storage/init/sqlite/001_init.sql +++ b/go/enclave/storage/init/sqlite/001_init.sql @@ -57,12 +57,6 @@ create table if not exists rollup create index ROLLUP_COMPRESSION_BLOCK_IDX on rollup (compression_block); create index ROLLUP_COMPRESSION_HASH_IDX on rollup (hash); -create table if not exists batch_body -( - id int NOT NULL primary key, - content mediumblob NOT NULL -); - create table if not exists batch ( sequence int primary key, @@ -71,7 +65,6 @@ create table if not exists batch height int NOT NULL, is_canonical boolean NOT NULL, header blob NOT NULL, - body int NOT NULL REFERENCES batch_body, l1_proof_hash binary(32) NOT NULL, l1_proof INTEGER, -- normally this would be a FK, but there is a weird edge case where an L2 node might not have the block used to create this batch is_executed boolean NOT NULL @@ -80,7 +73,6 @@ create table if not exists batch ); create index IDX_BATCH_HASH on batch (hash); create index IDX_BATCH_BLOCK on batch (l1_proof_hash); -create index IDX_BATCH_BODY on batch (body); create index IDX_BATCH_L1 on batch (l1_proof); create index IDX_BATCH_HEIGHT on batch (height); @@ -89,54 +81,102 @@ create table if not exists tx id INTEGER PRIMARY KEY AUTOINCREMENT, hash binary(32) NOT NULL, content mediumblob NOT NULL, - sender_address binary(20) NOT NULL, - nonce int NOT NULL, + sender_address int NOT NULL REFERENCES externally_owned_account, idx int NOT NULL, - body int NOT NULL REFERENCES batch_body + batch_height int NOT NULL ); create index IDX_TX_HASH on tx (hash); create index IDX_TX_SENDER_ADDRESS on tx (sender_address); +create index IDX_TX_BATCH_HEIGHT on tx (batch_height, idx); -create table if not exists exec_tx +create table if not exists receipt ( id INTEGER PRIMARY KEY AUTOINCREMENT, - created_contract_address binary(20), - receipt mediumblob, + content mediumblob, -- commenting out the fk until synthetic transactions are also stored tx INTEGER, batch INTEGER NOT NULL REFERENCES batch ); -create index IDX_EX_TX_BATCH on exec_tx (batch); -create index IDX_EX_TX_CCA on exec_tx (tx, created_contract_address); +create index IDX_EX_TX_BATCH on receipt (batch); +create index IDX_EX_TX_CCA on receipt (tx); + +create table if not exists contract +( + id INTEGER PRIMARY KEY AUTOINCREMENT, + address binary(20) 
NOT NULL, +-- denormalised for ease of access during balance checks + owner int NOT NULL REFERENCES externally_owned_account +); +create index IDX_CONTRACT_AD on contract (address, owner); --- todo denormalize. Extract contract and user table and point topic0 and rel_addreses to it -create table if not exists events +create table if not exists externally_owned_account +( + id INTEGER PRIMARY KEY AUTOINCREMENT, + address binary(20) NOT NULL +); +create index IDX_EOA on externally_owned_account (address); + +-- not very large. An entry for every event_type +create table if not exists event_type ( id INTEGER PRIMARY KEY AUTOINCREMENT, - topic0 binary(32) NOT NULL, - topic1 binary(32), - topic2 binary(32), - topic3 binary(32), - topic4 binary(32), - datablob mediumblob, - log_idx int NOT NULL, - address binary(20) NOT NULL, - lifecycle_event boolean NOT NULL, - rel_address1 binary(20), - rel_address2 binary(20), - rel_address3 binary(20), - rel_address4 binary(20), - tx INTEGER NOT NULL references tx, - batch INTEGER NOT NULL REFERENCES batch + contract INTEGER NOT NULL references contract, + event_sig binary(32) NOT NULL, -- no need to index because there are only a few events for an address + lifecycle_event boolean NOT NULL -- set based on the first event, and then updated to false if it turns out it is true ); -create index IDX_BATCH_TX on events (tx, batch); -create index IDX_AD on events (address); -create index IDX_RAD1 on events (rel_address1); -create index IDX_RAD2 on events (rel_address2); -create index IDX_RAD3 on events (rel_address3); -create index IDX_RAD4 on events (rel_address4); -create index IDX_T0 on events (topic0); -create index IDX_T1 on events (topic1); -create index IDX_T2 on events (topic2); -create index IDX_T3 on events (topic3); -create index IDX_T4 on events (topic4); +create index IDX_EV_CONTRACT on event_type (contract, event_sig); + +-- very large table with user values +create table if not exists event_topic +( + id INTEGER PRIMARY KEY AUTOINCREMENT, + topic binary(32) NOT NULL, + rel_address INTEGER references externally_owned_account +-- pos INTEGER NOT NULL -- todo +); +-- create index IDX_TOP on event_topic (topic, pos); +create index IDX_TOP on event_topic (topic); + +create table if not exists event_log +( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_type INTEGER NOT NULL references event_type, + topic1 INTEGER references event_topic, + topic2 INTEGER references event_topic, + topic3 INTEGER references event_topic, + datablob mediumblob, + log_idx INTEGER NOT NULL, + receipt INTEGER NOT NULL references receipt +); +-- create index IDX_BATCH_TX on event_log (receipt); +create index IDX_EV on event_log (receipt, event_type, topic1, topic2, topic3); + +-- requester - address +-- receipt - range of batch heights or a single batch +-- address []list of contract addresses +-- topic0 - event sig []list +-- topic1 []list +-- topic2 []list +-- topic3 []list + + +-- select * from event_log +-- join receipt on receipt +-- join batch on receipt.batch -- to get the batch height range +-- join event_type ec on event_type +-- join contract c on +-- left join event_topic t1 on topic1 +-- left join externally_owned_account eoa1 on t1.rel_address +-- left join event_topic t2 on topic2 +-- left join externally_owned_account eoa2 on t2.rel_address +-- left join event_topic t3 on topic3 +-- left join externally_owned_account eoa3 on t3.rel_address +-- where +-- receipt. +-- c.address in [address..] AND +-- ec.event_sig in [topic0..] AND +-- t1.topic in [topic1..] 
AND +-- t2.topic in [topic2..] AND +-- t3.topic in [topic3..] AND +-- b.height in [] and b.is_canonical=true +-- (ec.lifecycle_event OR eoa1.address=requester OR eoa2.address=requester OR eoa3.address=requester) diff --git a/go/enclave/storage/interfaces.go b/go/enclave/storage/interfaces.go index 88d2340be4..01b7f81586 100644 --- a/go/enclave/storage/interfaces.go +++ b/go/enclave/storage/interfaces.go @@ -42,24 +42,26 @@ type BatchResolver interface { FetchBatch(ctx context.Context, hash common.L2BatchHash) (*core.Batch, error) // FetchBatchHeader returns the batch header with the given hash. FetchBatchHeader(ctx context.Context, hash common.L2BatchHash) (*common.BatchHeader, error) + FetchBatchTransactionsBySeq(ctx context.Context, seqNo uint64) ([]*common.L2Tx, error) // FetchBatchByHeight returns the batch on the canonical chain with the given height. FetchBatchByHeight(ctx context.Context, height uint64) (*core.Batch, error) // FetchBatchBySeqNo returns the batch with the given seq number. FetchBatchBySeqNo(ctx context.Context, seqNum uint64) (*core.Batch, error) - // FetchHeadBatch returns the current head batch of the canonical chain. - FetchHeadBatch(ctx context.Context) (*core.Batch, error) + // FetchBatchHeaderBySeqNo returns the batch header with the given seq number. + FetchBatchHeaderBySeqNo(ctx context.Context, seqNum uint64) (*common.BatchHeader, error) + FetchHeadBatchHeader(ctx context.Context) (*common.BatchHeader, error) // FetchCurrentSequencerNo returns the sequencer number FetchCurrentSequencerNo(ctx context.Context) (*big.Int, error) // FetchBatchesByBlock returns all batches with the block hash as the L1 proof - FetchBatchesByBlock(ctx context.Context, hash common.L1BlockHash) ([]*core.Batch, error) + FetchBatchesByBlock(ctx context.Context, hash common.L1BlockHash) ([]*common.BatchHeader, error) // FetchNonCanonicalBatchesBetween - returns all reorged batches between the sequences - FetchNonCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*core.Batch, error) + FetchNonCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) // FetchCanonicalBatchesBetween - returns all canon batches between the sequences - FetchCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*core.Batch, error) + FetchCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) // IsBatchCanonical - true if the batch is canonical IsBatchCanonical(ctx context.Context, seq uint64) (bool, error) // FetchCanonicalUnexecutedBatches - return the list of the unexecuted batches that are canonical - FetchCanonicalUnexecutedBatches(context.Context, *big.Int) ([]*core.Batch, error) + FetchCanonicalUnexecutedBatches(context.Context, *big.Int) ([]*common.BatchHeader, error) FetchConvertedHash(ctx context.Context, hash common.L2BatchHash) (gethcommon.Hash, error) @@ -69,7 +71,7 @@ type BatchResolver interface { // StoreBatch stores an un-executed batch. 
StoreBatch(ctx context.Context, batch *core.Batch, convertedHash gethcommon.Hash) error // StoreExecutedBatch - store the batch after it was executed - StoreExecutedBatch(ctx context.Context, batch *core.Batch, receipts []*types.Receipt) error + StoreExecutedBatch(ctx context.Context, batch *common.BatchHeader, receipts []*types.Receipt, contracts map[gethcommon.Hash][]*gethcommon.Address) error // StoreRollup StoreRollup(ctx context.Context, rollup *common.ExtRollup, header *common.CalldataRollupHeader) error @@ -96,10 +98,6 @@ type TransactionStorage interface { GetTransaction(ctx context.Context, txHash common.L2TxHash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) // GetTransactionReceipt - returns the receipt of a tx by tx hash GetTransactionReceipt(ctx context.Context, txHash common.L2TxHash) (*types.Receipt, error) - // GetReceiptsByBatchHash retrieves the receipts for all transactions in a given rollup. - GetReceiptsByBatchHash(ctx context.Context, hash common.L2BatchHash) (types.Receipts, error) - // GetContractCreationTx returns the hash of the tx that created a contract - GetContractCreationTx(ctx context.Context, address gethcommon.Address) (*gethcommon.Hash, error) } type AttestationStorage interface { @@ -151,6 +149,11 @@ type Storage interface { // StateDB - return the underlying state database StateDB() state.Database + + ReadEOA(ctx context.Context, addr gethcommon.Address) (*uint64, error) + + ReadContractAddress(ctx context.Context, addr gethcommon.Address) (*uint64, error) + ReadContractOwner(ctx context.Context, address gethcommon.Address) (*gethcommon.Address, error) } type ScanStorage interface { diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index c804424271..662eeb08ea 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -10,6 +10,8 @@ import ( "math/big" "time" + "github.com/ten-protocol/go-ten/go/common/errutil" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/triedb/hashdb" @@ -47,6 +49,12 @@ const ( masterSeedCfg = "MASTER_SEED" ) +type eventType struct { + id uint64 + isLifecycle bool +} + +// todo - this file needs splitting up based on concerns type storageImpl struct { db enclavedb.EnclaveDB @@ -55,7 +63,7 @@ type storageImpl struct { blockCache *cache.Cache[*types.Block] // stores batches using the sequence number as key - batchCacheBySeqNo *cache.Cache[*core.Batch] + batchCacheBySeqNo *cache.Cache[*common.BatchHeader] // mapping between the hash and the sequence number // note: to fetch a batch by hash will require 2 cache hits @@ -65,6 +73,16 @@ type storageImpl struct { // note: to fetch a batch by height will require 2 cache hits seqCacheByHeight *cache.Cache[*big.Int] + // batch hash - geth converted hash + convertedHashCache *cache.Cache[*gethcommon.Hash] + + // from address ( either eoa or contract) to the id of the db entry + eoaCache *cache.Cache[*uint64] + contractAddressCache *cache.Cache[*uint64] + + // from contract_address||event_sig to the event_type (id, isLifecycle) object + eventTypeCache *cache.Cache[*eventType] + cachedSharedSecret *crypto.SharedEnclaveSecret stateCache state.Database @@ -114,14 +132,18 @@ func NewStorage(backingDB enclavedb.EnclaveDB, chainConfig *params.ChainConfig, } ristrettoStore := ristretto_store.NewRistretto(ristrettoCache) return &storageImpl{ - db: backingDB, - stateCache: stateDB, - chainConfig: chainConfig, - blockCache: cache.New[*types.Block](ristrettoStore), - batchCacheBySeqNo: 
cache.New[*core.Batch](ristrettoStore), - seqCacheByHash: cache.New[*big.Int](ristrettoStore), - seqCacheByHeight: cache.New[*big.Int](ristrettoStore), - logger: logger, + db: backingDB, + stateCache: stateDB, + chainConfig: chainConfig, + blockCache: cache.New[*types.Block](ristrettoStore), + batchCacheBySeqNo: cache.New[*common.BatchHeader](ristrettoStore), + seqCacheByHash: cache.New[*big.Int](ristrettoStore), + seqCacheByHeight: cache.New[*big.Int](ristrettoStore), + convertedHashCache: cache.New[*gethcommon.Hash](ristrettoStore), + eoaCache: cache.New[*uint64](ristrettoStore), + contractAddressCache: cache.New[*uint64](ristrettoStore), + eventTypeCache: cache.New[*eventType](ristrettoStore), + logger: logger, } } @@ -137,9 +159,13 @@ func (s *storageImpl) Close() error { return s.db.GetSQLDB().Close() } -func (s *storageImpl) FetchHeadBatch(ctx context.Context) (*core.Batch, error) { - defer s.logDuration("FetchHeadBatch", measure.NewStopwatch()) - return enclavedb.ReadCurrentHeadBatch(ctx, s.db.GetSQLDB()) +func (s *storageImpl) FetchHeadBatchHeader(ctx context.Context) (*common.BatchHeader, error) { + defer s.logDuration("FetchHeadBatchHeader", measure.NewStopwatch()) + b, err := enclavedb.ReadCurrentHeadBatchHeader(ctx, s.db.GetSQLDB()) + if err != nil { + return nil, err + } + return b, nil } func (s *storageImpl) FetchCurrentSequencerNo(ctx context.Context) (*big.Int, error) { @@ -149,46 +175,72 @@ func (s *storageImpl) FetchCurrentSequencerNo(ctx context.Context) (*big.Int, er func (s *storageImpl) FetchBatch(ctx context.Context, hash common.L2BatchHash) (*core.Batch, error) { defer s.logDuration("FetchBatch", measure.NewStopwatch()) + seqNo, err := s.fetchSeqNoByHash(ctx, hash) + if err != nil { + return nil, err + } + return s.FetchBatchBySeqNo(ctx, seqNo.Uint64()) +} + +func (s *storageImpl) fetchSeqNoByHash(ctx context.Context, hash common.L2BatchHash) (*big.Int, error) { seqNo, err := common.GetCachedValue(ctx, s.seqCacheByHash, s.logger, hash, func(v any) (*big.Int, error) { - batch, err := enclavedb.ReadBatchByHash(ctx, s.db.GetSQLDB(), v.(common.L2BatchHash)) + batch, err := enclavedb.ReadBatchHeaderByHash(ctx, s.db.GetSQLDB(), v.(common.L2BatchHash)) if err != nil { return nil, err } - return batch.SeqNo(), nil + return batch.SequencerOrderNo, nil }) - if err != nil { - return nil, err - } - return s.FetchBatchBySeqNo(ctx, seqNo.Uint64()) + return seqNo, err } func (s *storageImpl) FetchConvertedHash(ctx context.Context, hash common.L2BatchHash) (gethcommon.Hash, error) { defer s.logDuration("FetchConvertedHash", measure.NewStopwatch()) - batch, err := s.FetchBatch(ctx, hash) + batch, err := s.FetchBatchHeader(ctx, hash) + if err != nil { + return gethcommon.Hash{}, err + } + + convertedHash, err := common.GetCachedValue(ctx, s.convertedHashCache, s.logger, hash, func(v any) (*gethcommon.Hash, error) { + ch, err := enclavedb.FetchConvertedBatchHash(ctx, s.db.GetSQLDB(), batch.SequencerOrderNo.Uint64()) + if err != nil { + return nil, err + } + return &ch, nil + }) if err != nil { return gethcommon.Hash{}, err } - return enclavedb.FetchConvertedBatchHash(ctx, s.db.GetSQLDB(), batch.Header.SequencerOrderNo.Uint64()) + return *convertedHash, nil } func (s *storageImpl) FetchBatchHeader(ctx context.Context, hash common.L2BatchHash) (*common.BatchHeader, error) { defer s.logDuration("FetchBatchHeader", measure.NewStopwatch()) - b, err := s.FetchBatch(ctx, hash) + seqNo, err := s.fetchSeqNoByHash(ctx, hash) + if err != nil { + return nil, err + } + + return 
s.FetchBatchHeaderBySeqNo(ctx, seqNo.Uint64()) +} + +func (s *storageImpl) FetchBatchTransactionsBySeq(ctx context.Context, seqNo uint64) ([]*common.L2Tx, error) { + defer s.logDuration("FetchBatchTransactionsBySeq", measure.NewStopwatch()) + batch, err := s.FetchBatchHeaderBySeqNo(ctx, seqNo) if err != nil { return nil, err } - return b.Header, nil + return enclavedb.ReadBatchTransactions(ctx, s.db.GetSQLDB(), batch.Number.Uint64()) } func (s *storageImpl) FetchBatchByHeight(ctx context.Context, height uint64) (*core.Batch, error) { defer s.logDuration("FetchBatchByHeight", measure.NewStopwatch()) // the key is (height+1), because for some reason it doesn't like a key of 0 seqNo, err := common.GetCachedValue(ctx, s.seqCacheByHeight, s.logger, height+1, func(h any) (*big.Int, error) { - batch, err := enclavedb.ReadCanonicalBatchByHeight(ctx, s.db.GetSQLDB(), height) + batch, err := enclavedb.ReadCanonicalBatchHeaderByHeight(ctx, s.db.GetSQLDB(), height) if err != nil { return nil, err } - return batch.SeqNo(), nil + return batch.SequencerOrderNo, nil }) if err != nil { return nil, err @@ -196,12 +248,12 @@ func (s *storageImpl) FetchBatchByHeight(ctx context.Context, height uint64) (*c return s.FetchBatchBySeqNo(ctx, seqNo.Uint64()) } -func (s *storageImpl) FetchNonCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*core.Batch, error) { +func (s *storageImpl) FetchNonCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) { defer s.logDuration("FetchNonCanonicalBatchesBetween", measure.NewStopwatch()) return enclavedb.ReadNonCanonicalBatches(ctx, s.db.GetSQLDB(), startSeq, endSeq) } -func (s *storageImpl) FetchCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*core.Batch, error) { +func (s *storageImpl) FetchCanonicalBatchesBetween(ctx context.Context, startSeq uint64, endSeq uint64) ([]*common.BatchHeader, error) { defer s.logDuration("FetchCanonicalBatchesBetween", measure.NewStopwatch()) return enclavedb.ReadCanonicalBatches(ctx, s.db.GetSQLDB(), startSeq, endSeq) } @@ -240,11 +292,19 @@ func (s *storageImpl) StoreBlock(ctx context.Context, block *types.Block, chainF if chainFork != nil && chainFork.IsFork() { s.logger.Info(fmt.Sprintf("Update Fork. 
%s", chainFork)) - err := enclavedb.UpdateCanonicalValue(ctx, dbTx, false, chainFork.NonCanonicalPath, s.logger) + err := enclavedb.UpdateCanonicalBlock(ctx, dbTx, false, chainFork.NonCanonicalPath) if err != nil { return err } - err = enclavedb.UpdateCanonicalValue(ctx, dbTx, true, chainFork.CanonicalPath, s.logger) + err = enclavedb.UpdateCanonicalBlock(ctx, dbTx, true, chainFork.CanonicalPath) + if err != nil { + return err + } + err = enclavedb.UpdateCanonicalBatch(ctx, dbTx, false, chainFork.NonCanonicalPath) + if err != nil { + return err + } + err = enclavedb.UpdateCanonicalBatch(ctx, dbTx, true, chainFork.CanonicalPath) if err != nil { return err } @@ -384,14 +444,14 @@ func (s *storageImpl) HealthCheck(ctx context.Context) (bool, error) { func (s *storageImpl) CreateStateDB(ctx context.Context, batchHash common.L2BatchHash) (*state.StateDB, error) { defer s.logDuration("CreateStateDB", measure.NewStopwatch()) - batch, err := s.FetchBatch(ctx, batchHash) + batch, err := s.FetchBatchHeader(ctx, batchHash) if err != nil { return nil, err } - statedb, err := state.New(batch.Header.Root, s.stateCache, nil) + statedb, err := state.New(batch.Root, s.stateCache, nil) if err != nil { - return nil, fmt.Errorf("could not create state DB for %s. Cause: %w", batch.Header.Root, err) + return nil, fmt.Errorf("could not create state DB for batch: %d. Cause: %w", batch.SequencerOrderNo, err) } return statedb, nil } @@ -405,22 +465,11 @@ func (s *storageImpl) EmptyStateDB() (*state.StateDB, error) { return statedb, nil } -// GetReceiptsByBatchHash retrieves the receipts for all transactions in a given batch. -func (s *storageImpl) GetReceiptsByBatchHash(ctx context.Context, hash gethcommon.Hash) (types.Receipts, error) { - defer s.logDuration("GetReceiptsByBatchHash", measure.NewStopwatch()) - return enclavedb.ReadReceiptsByBatchHash(ctx, s.db.GetSQLDB(), hash, s.chainConfig) -} - func (s *storageImpl) GetTransaction(ctx context.Context, txHash gethcommon.Hash) (*types.Transaction, common.L2BatchHash, uint64, uint64, error) { defer s.logDuration("GetTransaction", measure.NewStopwatch()) return enclavedb.ReadTransaction(ctx, s.db.GetSQLDB(), txHash) } -func (s *storageImpl) GetContractCreationTx(ctx context.Context, address gethcommon.Address) (*gethcommon.Hash, error) { - defer s.logDuration("GetContractCreationTx", measure.NewStopwatch()) - return enclavedb.GetContractCreationTx(ctx, s.db.GetSQLDB(), address) -} - func (s *storageImpl) GetTransactionReceipt(ctx context.Context, txHash gethcommon.Hash) (*types.Receipt, error) { defer s.logDuration("GetTransactionReceipt", measure.NewStopwatch()) return enclavedb.ReadReceipt(ctx, s.db.GetSQLDB(), txHash, s.chainConfig) @@ -461,16 +510,30 @@ func (s *storageImpl) StoreAttestedKey(ctx context.Context, aggregator gethcommo func (s *storageImpl) FetchBatchBySeqNo(ctx context.Context, seqNum uint64) (*core.Batch, error) { defer s.logDuration("FetchBatchBySeqNo", measure.NewStopwatch()) - b, err := common.GetCachedValue(ctx, s.batchCacheBySeqNo, s.logger, seqNum, func(seq any) (*core.Batch, error) { - return enclavedb.ReadBatchBySeqNo(ctx, s.db.GetSQLDB(), seqNum) + h, err := common.GetCachedValue(ctx, s.batchCacheBySeqNo, s.logger, seqNum, func(seq any) (*common.BatchHeader, error) { + return enclavedb.ReadBatchHeaderBySeqNo(ctx, s.db.GetSQLDB(), seqNum) }) - if err == nil && b == nil { - return nil, fmt.Errorf("not found") + if err != nil { + return nil, err + } + txs, err := s.FetchBatchTransactionsBySeq(ctx, seqNum) + if err != nil { + return nil, err 
} - return b, err + return &core.Batch{ + Header: h, + Transactions: txs, + }, err +} + +func (s *storageImpl) FetchBatchHeaderBySeqNo(ctx context.Context, seqNum uint64) (*common.BatchHeader, error) { + defer s.logDuration("FetchBatchHeaderBySeqNo", measure.NewStopwatch()) + return common.GetCachedValue(ctx, s.batchCacheBySeqNo, s.logger, seqNum, func(seq any) (*common.BatchHeader, error) { + return enclavedb.ReadBatchHeaderBySeqNo(ctx, s.db.GetSQLDB(), seqNum) + }) } -func (s *storageImpl) FetchBatchesByBlock(ctx context.Context, block common.L1BlockHash) ([]*core.Batch, error) { +func (s *storageImpl) FetchBatchesByBlock(ctx context.Context, block common.L1BlockHash) ([]*common.BatchHeader, error) { defer s.logDuration("FetchBatchesByBlock", measure.NewStopwatch()) return enclavedb.ReadBatchesByBlock(ctx, s.db.GetSQLDB(), block) } @@ -503,15 +566,48 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert } s.logger.Trace("write batch", log.BatchHashKey, batch.Hash(), "l1Proof", batch.Header.L1Proof, log.BatchSeqNoKey, batch.SeqNo(), "block_id", blockId) - if err := enclavedb.WriteBatchAndTransactions(ctx, dbTx, batch, convertedHash, blockId); err != nil { - return fmt.Errorf("could not write batch. Cause: %w", err) + // the batch is canonical only if the l1 proof is canonical + isL1ProofCanonical, err := enclavedb.IsCanonicalBlock(ctx, dbTx, &batch.Header.L1Proof) + if err != nil { + return err + } + + // sanity check: a batch can't be canonical if its parent is not + parentIsCanon, err := enclavedb.IsCanonicalBatchHash(ctx, dbTx, &batch.Header.ParentHash) + if err != nil { + return err + } + parentIsCanon = parentIsCanon || batch.SeqNo().Uint64() <= common.L2GenesisSeqNo+2 + if isL1ProofCanonical && !parentIsCanon { + s.logger.Crit("invalid chaining. Batch is canonical. Parent is not", log.BatchHashKey, batch.Hash(), "parentHash", batch.Header.ParentHash) + } + + existsHeight, err := enclavedb.ExistsBatchAtHeight(ctx, dbTx, batch.Header.Number) + if err != nil { + return fmt.Errorf("could not read ExistsBatchAtHeight. Cause: %w", err) + } + + if err := enclavedb.WriteBatchHeader(ctx, dbTx, batch, convertedHash, blockId, isL1ProofCanonical); err != nil { + return fmt.Errorf("could not write batch header. Cause: %w", err) + } + + // only insert transactions if this is the first time a batch of this height is created + if !existsHeight { + senders, err := s.handleTxSenders(ctx, batch, dbTx) + if err != nil { + return err + } + + if err := enclavedb.WriteTransactions(ctx, dbTx, batch, senders); err != nil { + return fmt.Errorf("could not write transactions. 
Cause: %w", err) + } } if err := dbTx.Commit(); err != nil { return fmt.Errorf("could not commit batch %w", err) } - common.CacheValue(ctx, s.batchCacheBySeqNo, s.logger, batch.SeqNo().Uint64(), batch) + common.CacheValue(ctx, s.batchCacheBySeqNo, s.logger, batch.SeqNo().Uint64(), batch.Header) common.CacheValue(ctx, s.seqCacheByHash, s.logger, batch.Hash(), batch.SeqNo()) // note: the key is (height+1), because for some reason it doesn't like a key of 0 // should always contain the canonical batch because the cache is overwritten by each new batch after a reorg @@ -519,7 +615,24 @@ func (s *storageImpl) StoreBatch(ctx context.Context, batch *core.Batch, convert return nil } -func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, receipts []*types.Receipt) error { +func (s *storageImpl) handleTxSenders(ctx context.Context, batch *core.Batch, dbTx *sql.Tx) ([]*uint64, error) { + senders := make([]*uint64, len(batch.Transactions)) + // insert the tx signers as externally owned accounts + for i, tx := range batch.Transactions { + sender, err := types.Sender(types.LatestSignerForChainID(tx.ChainId()), tx) + if err != nil { + return nil, fmt.Errorf("could not read tx sender. Cause: %w", err) + } + eoaID, err := s.readOrWriteEOA(ctx, dbTx, sender) + if err != nil { + return nil, fmt.Errorf("could not insert EOA. cause: %w", err) + } + senders[i] = eoaID + } + return senders, nil +} + +func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *common.BatchHeader, receipts []*types.Receipt, newContracts map[gethcommon.Hash][]*gethcommon.Address) error { defer s.logDuration("StoreExecutedBatch", measure.NewStopwatch()) executed, err := enclavedb.BatchWasExecuted(ctx, s.db.GetSQLDB(), batch.Hash()) if err != nil { @@ -530,35 +643,212 @@ func (s *storageImpl) StoreExecutedBatch(ctx context.Context, batch *core.Batch, return nil } + s.logger.Trace("storing executed batch", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SequencerOrderNo, "receipts", len(receipts)) + dbTx, err := s.db.NewDBTransaction(ctx) if err != nil { return fmt.Errorf("could not create DB transaction - %w", err) } defer dbTx.Rollback() - if err := enclavedb.WriteBatchExecution(ctx, dbTx, batch.SeqNo(), receipts); err != nil { - return fmt.Errorf("could not write transaction receipts. Cause: %w", err) + + if err := enclavedb.MarkBatchExecuted(ctx, dbTx, batch.SequencerOrderNo); err != nil { + return fmt.Errorf("could not set the executed flag. Cause: %w", err) } - s.logger.Trace("store executed batch", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo(), "receipts", len(receipts)) - if batch.Number().Uint64() > common.L2GenesisSeqNo { - stateDB, err := s.CreateStateDB(ctx, batch.Header.ParentHash) + + for _, receipt := range receipts { + err = s.storeReceiptAndEventLogs(ctx, dbTx, batch, receipt, newContracts[receipt.TxHash]) if err != nil { - return fmt.Errorf("could not create state DB to filter logs. Cause: %w", err) + return fmt.Errorf("could not store receipt. 
Cause: %w", err) } + } + if err = dbTx.Commit(); err != nil { + return fmt.Errorf("could not commit batch %w", err) + } + + return nil +} - err = enclavedb.StoreEventLogs(ctx, dbTx, receipts, batch, stateDB) +// todo - move this to a separate service +func (s *storageImpl) storeReceiptAndEventLogs(ctx context.Context, dbTX *sql.Tx, batch *common.BatchHeader, receipt *types.Receipt, createdContracts []*gethcommon.Address) error { + txId, senderId, err := enclavedb.ReadTransactionIdAndSender(ctx, dbTX, receipt.TxHash) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return fmt.Errorf("could not get transaction id. Cause: %w", err) + } + + for _, createdContract := range createdContracts { + _, err = enclavedb.WriteContractAddress(ctx, dbTX, createdContract, *senderId) if err != nil { - return fmt.Errorf("could not save logs %w", err) + return fmt.Errorf("could not write contract address. cause %w", err) } } - if err = dbTx.Commit(); err != nil { - return fmt.Errorf("could not commit batch %w", err) + // Convert the receipt into its storage form and serialize + // this removes information that can be recreated + // todo - in a future iteration, this can be slimmed down further because we already store the logs separately + storageReceipt := (*types.ReceiptForStorage)(receipt) + receiptBytes, err := rlp.EncodeToBytes(storageReceipt) + if err != nil { + return fmt.Errorf("failed to encode block receipts. Cause: %w", err) } + execTxId, err := enclavedb.WriteReceipt(ctx, dbTX, batch.SequencerOrderNo.Uint64(), txId, receiptBytes) + if err != nil { + return fmt.Errorf("could not write receipt. Cause: %w", err) + } + + for _, l := range receipt.Logs { + err := s.storeEventLog(ctx, dbTX, execTxId, l) + if err != nil { + return fmt.Errorf("could not store log entry %v. Cause: %w", l, err) + } + } return nil } +func (s *storageImpl) storeEventLog(ctx context.Context, dbTX *sql.Tx, execTxId uint64, l *types.Log) error { + topicIds, isLifecycle, err := s.handleUserTopics(ctx, dbTX, l) + if err != nil { + return err + } + + eventTypeId, err := s.handleEventType(ctx, dbTX, l, isLifecycle) + if err != nil { + return err + } + + // normalize data + data := l.Data + if len(data) == 0 { + data = nil + } + err = enclavedb.WriteEventLog(ctx, dbTX, eventTypeId, topicIds, data, l.Index, execTxId) + if err != nil { + return fmt.Errorf("could not write event log. Cause: %w", err) + } + + return nil +} + +func (s *storageImpl) handleEventType(ctx context.Context, dbTX *sql.Tx, l *types.Log, isLifecycle bool) (uint64, error) { + et, err := s.readEventType(ctx, dbTX, l.Address, l.Topics[0]) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return 0, fmt.Errorf("could not read event type. Cause: %w", err) + } + if err == nil { + // in case we determined the current emitted event is not lifecycle, we must update the eventType + if !isLifecycle && et.isLifecycle { + err := enclavedb.UpdateEventTopicLifecycle(ctx, dbTX, et.id, isLifecycle) + if err != nil { + return 0, fmt.Errorf("could not update the event type. cause: %w", err) + } + } + return et.id, nil + } + + // the first time an event of this type is emitted we must store it + contractAddId, err := s.readContractAddress(ctx, dbTX, l.Address) + if err != nil { + // the contract was already stored when it was created + return 0, fmt.Errorf("could not read contract address. %s. 
Cause: %w", l.Address, err) + } + return enclavedb.WriteEventType(ctx, dbTX, contractAddId, l.Topics[0], isLifecycle) +} + +func (s *storageImpl) handleUserTopics(ctx context.Context, dbTX *sql.Tx, l *types.Log) ([]*uint64, bool, error) { + topicIds := make([]*uint64, 3) + // iterate the topics containing user values + // reuse them if already inserted + // if not, discover if there is a relevant externally owned address + isLifecycle := true + for i := 1; i < len(l.Topics); i++ { + topic := l.Topics[i] + // first check if there is an entry already for this topic + eventTopicId, relAddressId, err := s.findEventTopic(ctx, dbTX, topic.Bytes()) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return nil, false, fmt.Errorf("could not read the event topic. Cause: %w", err) + } + if errors.Is(err, errutil.ErrNotFound) { + // check whether the topic is an EOA + relAddressId, err = s.findRelevantAddress(ctx, dbTX, topic) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return nil, false, fmt.Errorf("could not read relevant address. Cause %w", err) + } + eventTopicId, err = enclavedb.WriteEventTopic(ctx, dbTX, &topic, relAddressId) + if err != nil { + return nil, false, fmt.Errorf("could not write event topic. Cause: %w", err) + } + } + + if relAddressId != nil { + isLifecycle = false + } + topicIds[i-1] = &eventTopicId + } + return topicIds, isLifecycle, nil +} + +// Of the log's topics, returns those that are (potentially) user addresses. A topic is considered a user address if: +// - It has at least 12 leading zero bytes (since addresses are 20 bytes long, while hashes are 32) and at most 22 leading zero bytes +// - It is not a smart contract address +func (s *storageImpl) findRelevantAddress(ctx context.Context, dbTX *sql.Tx, topic gethcommon.Hash) (*uint64, error) { + potentialAddr := common.ExtractPotentialAddress(topic) + if potentialAddr == nil { + return nil, errutil.ErrNotFound + } + + // first check whether there is already an entry in the EOA table + eoaID, err := s.readEOA(ctx, dbTX, *potentialAddr) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return nil, err + } + if err == nil { + return eoaID, nil + } + + // if the address is a contract then it's clearly not an EOA + _, err = s.readContractAddress(ctx, dbTX, *potentialAddr) + if err != nil && !errors.Is(err, errutil.ErrNotFound) { + return nil, err + } + if err == nil { + return nil, errutil.ErrNotFound + } + + // when we reach this point, the value looks like an address, but we haven't yet seen it + // for the first iteration, we'll just assume it's an EOA + // we can make this smarter by passing in more information about the event + id, err := enclavedb.WriteEoa(ctx, dbTX, *potentialAddr) + if err != nil { + return nil, err + } + + return &id, nil +} + +func (s *storageImpl) readEventType(ctx context.Context, dbTX *sql.Tx, contractAddress gethcommon.Address, eventSignature gethcommon.Hash) (*eventType, error) { + defer s.logDuration("readEventType", measure.NewStopwatch()) + + key := make([]byte, 0) + key = append(key, contractAddress.Bytes()...) + key = append(key, eventSignature.Bytes()...) 
+ return common.GetCachedValue(ctx, s.eventTypeCache, s.logger, key, func(v any) (*eventType, error) { + contractAddrId, err := enclavedb.ReadContractAddress(ctx, dbTX, contractAddress) + if err != nil { + return nil, err + } + id, isLifecycle, err := enclavedb.ReadEventType(ctx, dbTX, *contractAddrId, eventSignature) + if err != nil { + return nil, err + } + return &eventType{ + id: id, + isLifecycle: isLifecycle, + }, nil + }) +} + func (s *storageImpl) StoreValueTransfers(ctx context.Context, blockHash common.L1BlockHash, transfers common.ValueTransferEvents) error { + defer s.logDuration("StoreValueTransfers", measure.NewStopwatch()) dbtx, err := s.db.NewDBTransaction(ctx) if err != nil { return fmt.Errorf("could not create DB transaction - %w", err) @@ -599,6 +889,7 @@ func (s *storageImpl) GetL1Messages(ctx context.Context, blockHash common.L1Bloc } func (s *storageImpl) GetL1Transfers(ctx context.Context, blockHash common.L1BlockHash) (common.ValueTransferEvents, error) { + defer s.logDuration("GetL1Transfers", measure.NewStopwatch()) return enclavedb.FetchL1Messages[common.ValueTransferEvent](ctx, s.db.GetSQLDB(), blockHash, true) } @@ -665,10 +956,12 @@ func (s *storageImpl) StoreRollup(ctx context.Context, rollup *common.ExtRollup, } func (s *storageImpl) FetchReorgedRollup(ctx context.Context, reorgedBlocks []common.L1BlockHash) (*common.L2BatchHash, error) { + defer s.logDuration("FetchReorgedRollup", measure.NewStopwatch()) return enclavedb.FetchReorgedRollup(ctx, s.db.GetSQLDB(), reorgedBlocks) } func (s *storageImpl) FetchRollupMetadata(ctx context.Context, hash common.L2RollupHash) (*common.PublicRollupMetadata, error) { + defer s.logDuration("FetchRollupMetadata", measure.NewStopwatch()) return enclavedb.FetchRollupMetadata(ctx, s.db.GetSQLDB(), hash) } @@ -694,7 +987,7 @@ func (s *storageImpl) GetContractCount(ctx context.Context) (*big.Int, error) { return enclavedb.ReadContractCreationCount(ctx, s.db.GetSQLDB()) } -func (s *storageImpl) FetchCanonicalUnexecutedBatches(ctx context.Context, from *big.Int) ([]*core.Batch, error) { +func (s *storageImpl) FetchCanonicalUnexecutedBatches(ctx context.Context, from *big.Int) ([]*common.BatchHeader, error) { defer s.logDuration("FetchCanonicalUnexecutedBatches", measure.NewStopwatch()) return enclavedb.ReadUnexecutedBatches(ctx, s.db.GetSQLDB(), from) } @@ -714,6 +1007,69 @@ func (s *storageImpl) CountTransactionsPerAddress(ctx context.Context, address * return enclavedb.CountTransactionsPerAddress(ctx, s.db.GetSQLDB(), address) } +func (s *storageImpl) ReadEOA(ctx context.Context, addr gethcommon.Address) (*uint64, error) { + dbtx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return nil, err + } + defer dbtx.Rollback() + return s.readEOA(ctx, dbtx, addr) +} + +func (s *storageImpl) readEOA(ctx context.Context, dbTX *sql.Tx, addr gethcommon.Address) (*uint64, error) { + defer s.logDuration("readEOA", measure.NewStopwatch()) + return common.GetCachedValue(ctx, s.eoaCache, s.logger, addr, func(v any) (*uint64, error) { + id, err := enclavedb.ReadEoa(ctx, dbTX, addr) + if err != nil { + return nil, err + } + return &id, nil + }) +} + +func (s *storageImpl) readOrWriteEOA(ctx context.Context, dbTX *sql.Tx, addr gethcommon.Address) (*uint64, error) { + defer s.logDuration("readOrWriteEOA", measure.NewStopwatch()) + return common.GetCachedValue(ctx, s.eoaCache, s.logger, addr, func(v any) (*uint64, error) { + id, err := enclavedb.ReadEoa(ctx, dbTX, addr) + if err != nil { + if errors.Is(err, errutil.ErrNotFound) { + wid, err 
:= enclavedb.WriteEoa(ctx, dbTX, addr) + if err != nil { + return nil, fmt.Errorf("could not write the eoa. Cause: %w", err) + } + return &wid, nil + } + return nil, fmt.Errorf("could not read eoa. cause: %w", err) + } + return &id, nil + }) +} + +func (s *storageImpl) ReadContractAddress(ctx context.Context, addr gethcommon.Address) (*uint64, error) { + dbtx, err := s.db.NewDBTransaction(ctx) + if err != nil { + return nil, err + } + defer dbtx.Commit() + return s.readContractAddress(ctx, dbtx, addr) +} + +func (s *storageImpl) ReadContractOwner(ctx context.Context, address gethcommon.Address) (*gethcommon.Address, error) { + return enclavedb.ReadContractOwner(ctx, s.db.GetSQLDB(), address) +} + +func (s *storageImpl) readContractAddress(ctx context.Context, dbTX *sql.Tx, addr gethcommon.Address) (*uint64, error) { + defer s.logDuration("readContractAddress", measure.NewStopwatch()) + return common.GetCachedValue(ctx, s.contractAddressCache, s.logger, addr, func(v any) (*uint64, error) { + return enclavedb.ReadContractAddress(ctx, dbTX, addr) + }) +} + +func (s *storageImpl) findEventTopic(ctx context.Context, dbTX *sql.Tx, topic []byte) (uint64, *uint64, error) { + defer s.logDuration("findEventTopic", measure.NewStopwatch()) + return enclavedb.ReadEventTopic(ctx, dbTX, topic) +} + func (s *storageImpl) logDuration(method string, stopWatch *measure.Stopwatch) { core.LogMethodDuration(s.logger, stopWatch, fmt.Sprintf("Storage::%s completed", method)) } diff --git a/go/wallet/wallet.go b/go/wallet/wallet.go index 01922ca91b..6c6209cd29 100644 --- a/go/wallet/wallet.go +++ b/go/wallet/wallet.go @@ -69,7 +69,7 @@ func NewInMemoryWalletFromConfig(pkStr string, l1ChainID int64, logger gethlog.L // SignTransaction returns a signed transaction func (m *inMemoryWallet) SignTransaction(tx types.TxData) (*types.Transaction, error) { - return types.SignNewTx(m.prvKey, types.NewLondonSigner(m.chainID), tx) + return types.SignNewTx(m.prvKey, types.NewCancunSigner(m.chainID), tx) } func (m *inMemoryWallet) SignTransactionForChainID(tx types.TxData, chainID *big.Int) (*types.Transaction, error) { diff --git a/integration/common/testlog/testlog.go b/integration/common/testlog/testlog.go index 67493fa90d..9d12e2ab36 100644 --- a/integration/common/testlog/testlog.go +++ b/integration/common/testlog/testlog.go @@ -6,10 +6,10 @@ import ( "os" "time" - "github.com/ten-protocol/go-ten/lib/gethfork/debug" - "github.com/ten-protocol/go-ten/go/common/log" + "github.com/ten-protocol/go-ten/lib/gethfork/debug" + gethlog "github.com/ethereum/go-ethereum/log" ) @@ -56,3 +56,12 @@ func Setup(cfg *Cfg) *os.File { testlog = gethlog.New(log.CmpKey, log.TestLogCmp) return f } + +// SetupSysOut will direct the test logs to stdout +func SetupSysOut() { + err := debug.Setup("terminal", "", false, 10000000, 0, 0, false, false, slog.LevelDebug, "") + if err != nil { + panic(err) + } + testlog = gethlog.New(log.CmpKey, log.TestLogCmp) +} diff --git a/integration/networktest/tests/helpful/accs_and_contracts_test.go b/integration/networktest/tests/helpful/accs_and_contracts_test.go index e96271ee05..502f84e381 100644 --- a/integration/networktest/tests/helpful/accs_and_contracts_test.go +++ b/integration/networktest/tests/helpful/accs_and_contracts_test.go @@ -102,7 +102,6 @@ func TestTransferL1Funds(t *testing.T) { } return nil }, retry.NewTimeoutStrategy(70*time.Second, 20*time.Second)) - if err != nil { panic(err) } diff --git a/integration/tenscan/tenscan_test.go b/integration/tenscan/tenscan_test.go index
3a75a19522..9c0ce273c0 100644 --- a/integration/tenscan/tenscan_test.go +++ b/integration/tenscan/tenscan_test.go @@ -82,7 +82,7 @@ func TestTenscan(t *testing.T) { statusCode, body, err := fasthttp.Get(nil, fmt.Sprintf("%s/count/contracts/", serverAddress)) assert.NoError(t, err) assert.Equal(t, 200, statusCode) - assert.Equal(t, "{\"count\":2}", string(body)) + assert.Equal(t, "{\"count\":1}", string(body)) statusCode, body, err = fasthttp.Get(nil, fmt.Sprintf("%s/count/transactions/", serverAddress)) assert.NoError(t, err) diff --git a/tools/edbconnect/Dockerfile b/tools/edbconnect/Dockerfile new file mode 100644 index 0000000000..d8428035c0 --- /dev/null +++ b/tools/edbconnect/Dockerfile @@ -0,0 +1,41 @@ +# Build Stages: +# build-base = downloads modules and prepares the directory for compilation. Based on the ego-dev image +# build-enclave = copies over the actual source code of the project and builds it using a compiler cache +# deploy = copies over only the enclave executable without the source +# in a lightweight base image specialized for deployment and prepares the /data/ folder. + +FROM ghcr.io/edgelesssys/ego-dev:v1.5.0 AS build-base + +# setup container data structure +RUN mkdir -p /home/ten/go-ten + +# Ensures container layer caching when dependencies are not changed +WORKDIR /home/ten/go-ten +COPY go.mod . +COPY go.sum . +RUN ego-go mod download + +# Trigger new build stage for compiling the enclave +FROM build-base as build-enclave +COPY . . + +WORKDIR /home/ten/go-ten/tools/edbconnect/main + +# Build the enclave using the cross image build cache. +RUN --mount=type=cache,target=/root/.cache/go-build \ + ego-go build + +# New build stage for signing the compiled enclave executable +FROM build-enclave as sign-built-enclave +# Sign the enclave executable +RUN ego sign edb-enclave.json + + +# Trigger a new build stage and use the smaller ego version: +FROM ghcr.io/edgelesssys/ego-deploy:v1.5.0 + +# Copy only the signed enclave binary into the deploy image +COPY --from=sign-built-enclave \ + /home/ten/go-ten/tools/edbconnect/main /home/ten/go-ten/tools/edbconnect/main + +WORKDIR /home/ten/go-ten/tools/edbconnect/main diff --git a/tools/edbconnect/edb-connect.sh b/tools/edbconnect/edb-connect.sh new file mode 100644 index 0000000000..37d14c740a --- /dev/null +++ b/tools/edbconnect/edb-connect.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Variables +IMAGE_NAME="testnetobscuronet.azurecr.io/obscuronet/edbconnect:latest" +CONTAINER_BASE_NAME="edb-connect" +UNIQUE_ID=$(date +%s%3N) # Using milliseconds for uniqueness +CONTAINER_NAME="${CONTAINER_BASE_NAME}-${UNIQUE_ID}" +VOLUME_NAME="obscuronode-enclave-volume" +NETWORK_NAME="node_network" +SGX_ENCLAVE_DEVICE="/dev/sgx_enclave" +SGX_PROVISION_DEVICE="/dev/sgx_provision" +COMMAND="ego run /home/ten/go-ten/tools/edbconnect/main/main" + +# Function to destroy exited containers matching the base name +destroy_exited_containers() { + exited_containers=$(sudo docker ps -a -q -f name=${CONTAINER_BASE_NAME} -f status=exited) + if [ "$exited_containers" ]; then + echo "Removing exited containers matching ${CONTAINER_BASE_NAME}..." + sudo docker rm $exited_containers || true + else + echo "No exited containers to remove." + fi +} + +# Destroy exited containers that match the base name +destroy_exited_containers + +# Pull the latest image from Azure Docker repository +echo "Pulling the latest Docker image..." +sudo docker pull $IMAGE_NAME + +# Run the container with the specified command +echo "Running the new container with name ${CONTAINER_NAME}..."
+sudo docker run --name $CONTAINER_NAME \ + --network $NETWORK_NAME \ + -v $VOLUME_NAME:/enclavedata \ + --device $SGX_ENCLAVE_DEVICE:$SGX_ENCLAVE_DEVICE:rwm \ + --device $SGX_PROVISION_DEVICE:$SGX_PROVISION_DEVICE:rwm \ + -it $IMAGE_NAME $COMMAND + +# After the REPL exits, destroy the container +echo "Destroying the container ${CONTAINER_NAME} after command exits..." +sudo docker rm $CONTAINER_NAME || true \ No newline at end of file diff --git a/tools/edbconnect/main/edb-enclave.json b/tools/edbconnect/main/edb-enclave.json new file mode 100644 index 0000000000..66ac4e2284 --- /dev/null +++ b/tools/edbconnect/main/edb-enclave.json @@ -0,0 +1,19 @@ +{ + "exe": "main", + "key": "testnet.pem", + "debug": true, + "heapSize": 1024, + "executableHeap": true, + "productID": 1, + "securityVersion": 1, + "mounts": [ + { + "source": "/enclavedata", + "target": "/data", + "type": "hostfs", + "readOnly": false + } + ], + "env": [ + ] +} \ No newline at end of file diff --git a/tools/edbconnect/main/main.go b/tools/edbconnect/main/main.go new file mode 100644 index 0000000000..42b2c265b8 --- /dev/null +++ b/tools/edbconnect/main/main.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "database/sql" + "fmt" + "os" + "strings" + + "github.com/ten-protocol/go-ten/go/enclave/storage/init/edgelessdb" + "github.com/ten-protocol/go-ten/integration/common/testlog" +) + +func main() { + fmt.Println("Retrieving Edgeless DB credentials...") + creds, found, err := edgelessdb.LoadCredentialsFromFile() + if err != nil { + fmt.Println("Error loading credentials from file:", err) + panic(err) + } + if !found { + panic("No existing EDB credentials found.") + } + fmt.Println("Found existing EDB credentials. Creating TLS config...") + cfg, err := edgelessdb.CreateTLSCfg(creds) + if err != nil { + fmt.Println("Error creating TLS config from credentials:", err) + panic(err) + } + fmt.Println("TLS config created. 
Connecting to Edgeless DB...") + testlog.SetupSysOut() + db, err := edgelessdb.ConnectToEdgelessDB("obscuronode-edgelessdb", cfg, testlog.Logger()) + if err != nil { + fmt.Println("Error connecting to Edgeless DB:", err) + panic(err) + } + fmt.Println("Connected to Edgeless DB.") + + startREPL(db) + + err = db.Close() + if err != nil { + fmt.Println("Error closing Edgeless DB connection:", err) + panic(err) + } +} + +// Starts a loop that reads user input and runs queries against the Edgeless DB until user types "exit" +func startREPL(db *sql.DB) { + for { + fmt.Println("\nEnter a query to run against the Edgeless DB (or type 'exit' to quit):") + reader := bufio.NewReader(os.Stdin) + fmt.Print(">>> ") // Display the prompt + + query, err := reader.ReadString('\n') + if err != nil { + fmt.Println("Error reading user input:", err) + continue + } + // Trim the newline character and surrounding whitespace + query = strings.TrimSpace(query) + + // line break for readability + fmt.Println() + + if query == "" { + continue + } + + if query == "exit" { + break + } + + // Determine the type of query, so we can show appropriate output + queryType := strings.ToUpper(strings.Split(query, " ")[0]) + switch queryType { + case "SELECT", "SHOW", "DESCRIBE", "DESC", "EXPLAIN": + // output rows + runQuery(db, query) + default: + // output number of rows affected + runExec(db, query) + } + } + fmt.Println("Exiting...") +} + +func runQuery(db *sql.DB, query string) { + rows, err := db.Query(query) + if err != nil { + fmt.Println("Error executing query:", err) + return + } + defer rows.Close() + + cols, err := rows.Columns() + if err != nil { + fmt.Println("Error fetching columns:", err) + return + } + + // Print column headers + for _, colName := range cols { + fmt.Printf("%s\t", colName) + } + fmt.Println() + + // Prepare a slice to hold the values + values := make([]interface{}, len(cols)) + valuePtrs := make([]interface{}, len(cols)) + for rows.Next() { + for i := range values { + valuePtrs[i] = &values[i] + } + + err = rows.Scan(valuePtrs...) 
+ if err != nil { + fmt.Println("Error scanning row:", err) + return + } + + // Print the row values + for _, val := range values { + // Handle NULL values and convert byte slices to strings + switch v := val.(type) { + case nil: + fmt.Print("NULL\t") + case []byte: + if isPrintableString(v) { + fmt.Printf("%s\t", string(v)) + } else { + fmt.Printf("%x\t", v) // Print binary data as hexadecimal + } + default: + fmt.Printf("%v\t", v) + } + } + fmt.Println() + } + + if err = rows.Err(); err != nil { + fmt.Println("Error during row iteration:", err) + } +} + +func runExec(db *sql.DB, query string) { + result, err := db.Exec(query) + if err != nil { + fmt.Println("Error executing query against Edgeless DB:", err) + return + } + rowsAffected, err := result.RowsAffected() + if err != nil { + fmt.Println("Error getting number of rows affected:", err) + return + } + fmt.Println("Number of rows affected:", rowsAffected) +} + +// isPrintableString checks if a byte slice contains only printable characters +func isPrintableString(data []byte) bool { + for _, b := range data { + if b < 32 || b > 126 { + return false + } + } + return true +} diff --git a/tools/edbconnect/main/testnet.pem b/tools/edbconnect/main/testnet.pem new file mode 100644 index 0000000000..832ae56944 --- /dev/null +++ b/tools/edbconnect/main/testnet.pem @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG4QIBAAKCAYEAmJTM/Ik4i3JzKGvNc8gNWBKlh77oKte2raJFpsTDTLEFN105 +dPgyb+29kaxh200IgzE4PyBaCMCyG8KyuIbwNlfPSF+bDsy1L+U43IkKpr1JdzOb +O/RtP1X8iwTSu8wPinP1hEPraJvv0LSDbchW+QGNnXclrPwgnQm31erOCO1qAmxV +YIR55h+5xREOOut9MbovaSveUGrMcoxuy2t079a9nDbPsYRnt1lrXSZ8wBOLwJxZ +QkD3AK1667PozDcab7JD+grzg4FhwepJ7F3SsIlK8VaN+4C7eZRvEnxXOEteNu7w +I5dqziN3d7qdVwPGiRQFmVF89u96B3YcVxJKQDVsIktatHwkvTWYhIZllf0d/P01 +8voV16JTmzpJpClBnuOA8YNUSaxzkWTYQWy+nLcUEXAF75JjavvVC1rHRtybp8pL +ZgBrh85qOUQErgFhYX4aK9w48XfCcI8RLLBDFZNIXWX4+p9rAdb6Itei/qVdBA05 +KQ2X5nmiX7gu3xwTAgEDAoIBgGW4iKhbewehohryiPfas5AMblp/RXHlJHPBg8SD +LN3LWM+Te6NQIZ/z07Zy6+eIsFd2JX9q5rCAdr0sdyWvSs7lNNrqZ18zI3VDez2w +scR+MPoiZ31Nnio5UwdYjH0ytQb3+QLX8kW9SosjAkkwOftWXmj6GR39axNbz+Px +3rCeRqxIOOsC++6/0S4LXtHyU3Z8H5tylDWciExdnzJHo0qPKRLPNSEC78+Q8j4Z +qIANB9W9kNbV+gBzp0fNRd16EJhHsR04XClI14TDUX3x+uLsDtowJMgy7bE43k17 +MJ8oghThz8iQhGxEsQO7QXMrknUQ3BkHpiT/kKgw403IDoGCsugyTh/xkecVZPUF +m1TZ4TqzHRlJMdn2G2jxuSPzEAf5dJGIXkzOp0Hzy+/tEI8VTnuzg6RtRWCpxt52 +RJkHItOtSCG+cXfe8ALEv04eIs2/y/8xHwp8gYrSlWOGA1ILL6Gj5oGxBFcA03xr +n6dFL0GSSLWbomq4DNwANJrCSwKBwQDF0l5DROmUqt3rQAtXn4VhyMN9cpjC9yeJ +KxelRkR8bR5K70ooAFKADVVJbc2g000Tv8ldUNcDaECVw2V2H11T7e8mR2+HwZRL +JHt33vyUciS5z6ZB3vSlcgop/3o+TcAxEntBFGKRuircfI/ItW8Dr6e2GJsHVdx0 +1ZohVyEHAAdGgmwOPAVSgWWX3hIC2XwHbDDFmZSz//GIjeIO7lknScfT0bC6iqky +rqrEWcJfbWruYlqIMDHLycKjDRZKq0sCgcEAxXRcCvF/sOlAj4VEV9NU+l5xLJu+ +DD0vZpQJ+P1JzSF8zKzuTr5Rq68YqLPtiW8dxbryFnUsvAfgdWlh7EbXKgNwn7h1 +/NA1l3EFnR8AAkQnayDkCy1Waz8gU9A5r+7pYdrW1iJkRLxN0fqWkNO2wmd2ocol +cZie5SeQnFI/WlHgI8PzJSa8AX6cnT7TtfqxJXI3Z3j1rb0Ol8VPCHjk8zi5Fx5u +fYs7TKcSI9xxJFArM09xkHPyepvMcqrJrE1ZAoHBAIPhlCzYm7hx6UeAB4+/rkEw +glOhuyykxQYcum4u2FLzaYdKMXAANwAI44ZJM8CM3g0qhj415KzwKw6CQ6QU6OKe +n27aSlqBDYdtp6U/Uw2hbdE1GYE/TcOhXBv/ptQz1XYMUiti7GEmxz2oX9sjn1fK +b867EgTj6E3jvBY6FgSqr4RW8rQoA4xWQ7qUDAHmUq+dddkRDc1VS7BelrSe5hox +L+KLyycHG3cfHILmgZTznJ7sPFrKy90xLGyzZDHHhwKBwQCDougHS6p18NW1A4Ll +N438PvYdvSldfh+ZuAal/jEza6iIc0mJ1DZydLsbIp5bn2kufKFkTh3Sr+r48Ovy +2eTGrPW/0E6ois5k9gO+FKqsLW+cwJgHc47yKhWNNXvKn0ZBPI85bELYfYk2pw8L +N88sRPnBMW5LuxSYxQsS4X+Ri+rCgqIYxH1WVGho1I0j/HYY9s+aUKPJKLRlLjSw 
+UJiiJdC6FEmpB3zdxLbCkvYYNXIiNPZgTUxRvTL3HIZy3jsCgb8ZLq3nA5jZtEli +g86vAnCLGjHyscz0oBqlpxPyAJF1y6ldm/ySdiGav4mai3BqNKQnUKwbxnPlMYbO +NR52ofDfCaiLElJChr+E0VPyu/cBLBmLYpZKwSo5XkRQENHWpqQoxhPM3a866sL/ +PSo2mdwX67OgJHGr+Gyfq6K6rNJeOGNkm1C/y8tw932GFnnMt6IxgWsJOxrV6o8R ++/k0iY10nu2ZvaDrmM6irlTKxNTbgUXqR/CgxIuMbcfZpo4UxQ== +-----END RSA PRIVATE KEY----- diff --git a/tools/tenscan/frontend/pages/_app.tsx b/tools/tenscan/frontend/pages/_app.tsx index e8c8d12077..d2115793f1 100644 --- a/tools/tenscan/frontend/pages/_app.tsx +++ b/tools/tenscan/frontend/pages/_app.tsx @@ -69,9 +69,25 @@ export default function App({ Component, pageProps }: AppProps) { ogTwitterImage={siteMetadata.siteLogo} ogType={"website"} > - - - + + + + + =18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.0", - "@next/swc-darwin-x64": "14.1.0", - "@next/swc-linux-arm64-gnu": "14.1.0", - "@next/swc-linux-arm64-musl": "14.1.0", - "@next/swc-linux-x64-gnu": "14.1.0", - "@next/swc-linux-x64-musl": "14.1.0", - "@next/swc-win32-arm64-msvc": "14.1.0", - "@next/swc-win32-ia32-msvc": "14.1.0", - "@next/swc-win32-x64-msvc": "14.1.0" + "@next/swc-darwin-arm64": "14.2.3", + "@next/swc-darwin-x64": "14.2.3", + "@next/swc-linux-arm64-gnu": "14.2.3", + "@next/swc-linux-arm64-musl": "14.2.3", + "@next/swc-linux-x64-gnu": "14.2.3", + "@next/swc-linux-x64-musl": "14.2.3", + "@next/swc-win32-arm64-msvc": "14.2.3", + "@next/swc-win32-ia32-msvc": "14.2.3", + "@next/swc-win32-x64-msvc": "14.2.3" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5017,6 +5024,9 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true + }, "sass": { "optional": true } @@ -7361,9 +7371,9 @@ } }, "@next/env": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.0.tgz", - "integrity": "sha512-Py8zIo+02ht82brwwhTg36iogzFqGLPXlRGKQw5s+qP/kMNc4MAyDeEwBKDijk6zTIbegEgu8Qy7C1LboslQAw==" + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.3.tgz", + "integrity": "sha512-W7fd7IbkfmeeY2gXrzJYDx8D2lWKbVoTIj1o1ScPHNzvp30s1AuoEFSdr39bC5sjxJaxTtq3OTCZboNp0lNWHA==" }, "@next/eslint-plugin-next": { "version": "14.0.3", @@ -7375,57 +7385,57 @@ } }, "@next/swc-darwin-arm64": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.0.tgz", - "integrity": "sha512-nUDn7TOGcIeyQni6lZHfzNoo9S0euXnu0jhsbMOmMJUBfgsnESdjN97kM7cBqQxZa8L/bM9om/S5/1dzCrW6wQ==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.3.tgz", + "integrity": "sha512-3pEYo/RaGqPP0YzwnlmPN2puaF2WMLM3apt5jLW2fFdXD9+pqcoTzRk+iZsf8ta7+quAe4Q6Ms0nR0SFGFdS1A==", "optional": true }, "@next/swc-darwin-x64": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.0.tgz", - "integrity": "sha512-1jgudN5haWxiAl3O1ljUS2GfupPmcftu2RYJqZiMJmmbBT5M1XDffjUtRUzP4W3cBHsrvkfOFdQ71hAreNQP6g==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.3.tgz", + "integrity": "sha512-6adp7waE6P1TYFSXpY366xwsOnEXM+y1kgRpjSRVI2CBDOcbRjsJ67Z6EgKIqWIue52d2q/Mx8g9MszARj8IEA==", "optional": true }, "@next/swc-linux-arm64-gnu": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.0.tgz", - "integrity": 
"sha512-RHo7Tcj+jllXUbK7xk2NyIDod3YcCPDZxj1WLIYxd709BQ7WuRYl3OWUNG+WUfqeQBds6kvZYlc42NJJTNi4tQ==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.3.tgz", + "integrity": "sha512-cuzCE/1G0ZSnTAHJPUT1rPgQx1w5tzSX7POXSLaS7w2nIUJUD+e25QoXD/hMfxbsT9rslEXugWypJMILBj/QsA==", "optional": true }, "@next/swc-linux-arm64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.0.tgz", - "integrity": "sha512-v6kP8sHYxjO8RwHmWMJSq7VZP2nYCkRVQ0qolh2l6xroe9QjbgV8siTbduED4u0hlk0+tjS6/Tuy4n5XCp+l6g==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.3.tgz", + "integrity": "sha512-0D4/oMM2Y9Ta3nGuCcQN8jjJjmDPYpHX9OJzqk42NZGJocU2MqhBq5tWkJrUQOQY9N+In9xOdymzapM09GeiZw==", "optional": true }, "@next/swc-linux-x64-gnu": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.0.tgz", - "integrity": "sha512-zJ2pnoFYB1F4vmEVlb/eSe+VH679zT1VdXlZKX+pE66grOgjmKJHKacf82g/sWE4MQ4Rk2FMBCRnX+l6/TVYzQ==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.3.tgz", + "integrity": "sha512-ENPiNnBNDInBLyUU5ii8PMQh+4XLr4pG51tOp6aJ9xqFQ2iRI6IH0Ds2yJkAzNV1CfyagcyzPfROMViS2wOZ9w==", "optional": true }, "@next/swc-linux-x64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.0.tgz", - "integrity": "sha512-rbaIYFt2X9YZBSbH/CwGAjbBG2/MrACCVu2X0+kSykHzHnYH5FjHxwXLkcoJ10cX0aWCEynpu+rP76x0914atg==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.3.tgz", + "integrity": "sha512-BTAbq0LnCbF5MtoM7I/9UeUu/8ZBY0i8SFjUMCbPDOLv+un67e2JgyN4pmgfXBwy/I+RHu8q+k+MCkDN6P9ViQ==", "optional": true }, "@next/swc-win32-arm64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.0.tgz", - "integrity": "sha512-o1N5TsYc8f/HpGt39OUQpQ9AKIGApd3QLueu7hXk//2xq5Z9OxmV6sQfNp8C7qYmiOlHYODOGqNNa0e9jvchGQ==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.3.tgz", + "integrity": "sha512-AEHIw/dhAMLNFJFJIJIyOFDzrzI5bAjI9J26gbO5xhAKHYTZ9Or04BesFPXiAYXDNdrwTP2dQceYA4dL1geu8A==", "optional": true }, "@next/swc-win32-ia32-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.0.tgz", - "integrity": "sha512-XXIuB1DBRCFwNO6EEzCTMHT5pauwaSj4SWs7CYnME57eaReAKBXCnkUE80p/pAZcewm7hs+vGvNqDPacEXHVkw==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.3.tgz", + "integrity": "sha512-vga40n1q6aYb0CLrM+eEmisfKCR45ixQYXuBXxOOmmoV8sYST9k7E3US32FsY+CkkF7NtzdcebiFT4CHuMSyZw==", "optional": true }, "@next/swc-win32-x64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.0.tgz", - "integrity": "sha512-9WEbVRRAqJ3YFVqEZIxUqkiO8l1nool1LmNxygr5HWF8AcSYsEpneUDhmjUVJEzO2A04+oPtZdombzzPPkTtgg==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.3.tgz", + "integrity": "sha512-Q1/zm43RWynxrO7lW4ehciQVj+5ePBhOK+/K2P7pLFX3JaJ/IZVC69SHidrmZSOkqz7ECIOhhy7XhAFG4JYyHA==", "optional": true }, 
"@nodelib/fs.scandir": { @@ -7861,11 +7871,17 @@ "integrity": "sha512-RbhOOTCNoCrbfkRyoXODZp75MlpiHMgbE5MEBZAnnnLyQNgrigEj4p0lzsMDyc1zVsJDLrivB58tgg3emX0eEA==", "dev": true }, + "@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" + }, "@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "requires": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, @@ -10111,21 +10127,21 @@ "dev": true }, "next": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.0.tgz", - "integrity": "sha512-wlzrsbfeSU48YQBjZhDzOwhWhGsy+uQycR8bHAOt1LY1bn3zZEcDyHQOEoN3aWzQ8LHCAJ1nqrWCc9XF2+O45Q==", - "requires": { - "@next/env": "14.1.0", - "@next/swc-darwin-arm64": "14.1.0", - "@next/swc-darwin-x64": "14.1.0", - "@next/swc-linux-arm64-gnu": "14.1.0", - "@next/swc-linux-arm64-musl": "14.1.0", - "@next/swc-linux-x64-gnu": "14.1.0", - "@next/swc-linux-x64-musl": "14.1.0", - "@next/swc-win32-arm64-msvc": "14.1.0", - "@next/swc-win32-ia32-msvc": "14.1.0", - "@next/swc-win32-x64-msvc": "14.1.0", - "@swc/helpers": "0.5.2", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.3.tgz", + "integrity": "sha512-dowFkFTR8v79NPJO4QsBUtxv0g9BrS/phluVpMAt2ku7H+cbcBJlopXjkWlwxrk/xGqMemr7JkGPGemPrLLX7A==", + "requires": { + "@next/env": "14.2.3", + "@next/swc-darwin-arm64": "14.2.3", + "@next/swc-darwin-x64": "14.2.3", + "@next/swc-linux-arm64-gnu": "14.2.3", + "@next/swc-linux-arm64-musl": "14.2.3", + "@next/swc-linux-x64-gnu": "14.2.3", + "@next/swc-linux-x64-musl": "14.2.3", + "@next/swc-win32-arm64-msvc": "14.2.3", + "@next/swc-win32-ia32-msvc": "14.2.3", + "@next/swc-win32-x64-msvc": "14.2.3", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "graceful-fs": "^4.2.11", diff --git a/tools/walletextension/frontend/public/favicon/android-chrome-192x192.png b/tools/walletextension/frontend/public/favicon/android-chrome-192x192.png new file mode 100644 index 0000000000..8920a45ebd Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/android-chrome-192x192.png differ diff --git a/tools/walletextension/frontend/public/favicon/android-chrome-512x512.png b/tools/walletextension/frontend/public/favicon/android-chrome-512x512.png new file mode 100644 index 0000000000..89efc27d0c Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/android-chrome-512x512.png differ diff --git a/tools/walletextension/frontend/public/favicon/apple-touch-icon.png b/tools/walletextension/frontend/public/favicon/apple-touch-icon.png new file mode 100644 index 0000000000..e12ff03f4f Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/apple-touch-icon.png differ diff --git a/tools/walletextension/frontend/public/favicon/favicon-16x16.png b/tools/walletextension/frontend/public/favicon/favicon-16x16.png new file mode 100644 index 0000000000..747aa430ef Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/favicon-16x16.png differ diff 
--git a/tools/walletextension/frontend/public/favicon/favicon-32x32.png b/tools/walletextension/frontend/public/favicon/favicon-32x32.png new file mode 100644 index 0000000000..0f695ac8a2 Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/favicon-32x32.png differ diff --git a/tools/walletextension/frontend/public/favicon/favicon.ico b/tools/walletextension/frontend/public/favicon/favicon.ico new file mode 100644 index 0000000000..b87a7f2011 Binary files /dev/null and b/tools/walletextension/frontend/public/favicon/favicon.ico differ diff --git a/tools/walletextension/frontend/public/favicon/site.webmanifest b/tools/walletextension/frontend/public/favicon/site.webmanifest new file mode 100644 index 0000000000..ef69d666d6 --- /dev/null +++ b/tools/walletextension/frontend/public/favicon/site.webmanifest @@ -0,0 +1,19 @@ +{ + "name": "Ten Gateway", + "short_name": "Ten Gateway", + "icons": [ + { + "src": "/android-chrome-192x192.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "/android-chrome-512x512.png", + "sizes": "512x512", + "type": "image/png" + } + ], + "theme_color": "#db766b", + "background_color": "#db766b", + "display": "standalone" +} diff --git a/tools/walletextension/frontend/src/components/head-seo.tsx b/tools/walletextension/frontend/src/components/head-seo.tsx index 26a475374a..e353162253 100644 --- a/tools/walletextension/frontend/src/components/head-seo.tsx +++ b/tools/walletextension/frontend/src/components/head-seo.tsx @@ -24,11 +24,8 @@ const HeadSeo = ({ // @ts-ignore signature="_vd3udx2g2hfn9zclob5cat43b94q7fyk" > - {/* SECURITY: to prevent the page from being loaded in an iFrame */} - {/* to indicate the browser shouldn't interpret the response as something other than the specified content type */} - {/* The Content-Security-Policy header is used to prevent a wide range of attacks, including Cross-Site Scripting (XSS) and other cross-site injections. */} {/* twitter metadata */} diff --git a/tools/walletextension/frontend/src/components/layouts/header.tsx b/tools/walletextension/frontend/src/components/layouts/header.tsx index 3a2e59b40a..9a1946c88c 100644 --- a/tools/walletextension/frontend/src/components/layouts/header.tsx +++ b/tools/walletextension/frontend/src/components/layouts/header.tsx @@ -21,7 +21,7 @@ export default function Header() { className="cursor-pointer dark:hidden" /> Logo - - - + + + + +