diff --git a/espresso/client.go b/espresso/client.go new file mode 100644 index 0000000000..5659af0b92 --- /dev/null +++ b/espresso/client.go @@ -0,0 +1,148 @@ +package espresso + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "github.com/ethereum/go-ethereum/log" +) + +type Client struct { + baseUrl string + client *http.Client + log log.Logger +} + +func NewClient(log log.Logger, url string) *Client { + if !strings.HasSuffix(url, "/") { + url += "/" + } + return &Client{ + baseUrl: url, + client: http.DefaultClient, + log: log, + } +} + +func (c *Client) FetchHeadersForWindow(ctx context.Context, start uint64, end uint64) (WindowStart, error) { + var res WindowStart + if err := c.get(ctx, &res, "availability/headers/window/%d/%d", start, end); err != nil { + return WindowStart{}, err + } + return res, nil +} + +func (c *Client) FetchRemainingHeadersForWindow(ctx context.Context, from uint64, end uint64) (WindowMore, error) { + var res WindowMore + if err := c.get(ctx, &res, "availability/headers/window/from/%d/%d", from, end); err != nil { + return WindowMore{}, err + } + return res, nil +} + +func (c *Client) FetchHeader(ctx context.Context, blockHeight uint64) (Header, error) { + var res Header + if err := c.get(ctx, &res, "availability/header/%d", blockHeight); err != nil { + return Header{}, err + } + return res, nil +} + +func (c *Client) FetchTransactionsInBlock(ctx context.Context, block uint64, header *Header, namespace uint64) (TransactionsInBlock, error) { + var res NamespaceResponse + if err := c.get(ctx, &res, "availability/block/%d/namespace/%d", block, namespace); err != nil { + return TransactionsInBlock{}, err + } + return res.Validate(header, namespace) +} + +type NamespaceResponse struct { + Proof *json.RawMessage `json:"proof"` + Transactions *[]Transaction `json:"transactions"` +} + +// Validate a NamespaceResponse and extract the transactions. +// NMT proof validation is currently stubbed out. 
+func (res *NamespaceResponse) Validate(header *Header, namespace uint64) (TransactionsInBlock, error) { + if res.Proof == nil { + return TransactionsInBlock{}, fmt.Errorf("field proof of type NamespaceResponse is required") + } + if res.Transactions == nil { + return TransactionsInBlock{}, fmt.Errorf("field transactions of type NamespaceResponse is required") + } + + // Check that these transactions are only and all of the transactions from `namespace` in the + // block with `header`. + // TODO this is a hack. We should use the proof from the response (`proof := NmtProof{}`). + // However, due to a simplification in the Espresso NMT implementation, where left and right + // boundary transactions not belonging to this namespace are included in the proof in their + // entirety, this proof can be quite large, even if this rollup has no large transactions in its + // own namespace. In production, we have run into issues where huge transactions from other + // rollups cause this proof to be so large, that the resulting PayloadAttributes exceeds the + // maximum size allowed for an HTTP request by OP geth. Since NMT proof validation is currently + // mocked anyways, we can subvert this issue in the short term without making the rollup any + // less secure than it already is simply by using an empty proof. + proof := NmtProof{} + if err := proof.Validate(header.TransactionsRoot, *res.Transactions); err != nil { + return TransactionsInBlock{}, err + } + + // Extract the transactions. + var txs []Bytes + for i, tx := range *res.Transactions { + if tx.Vm != namespace { + return TransactionsInBlock{}, fmt.Errorf("transaction %d has wrong namespace (%d, expected %d)", i, tx.Vm, namespace) + } + txs = append(txs, tx.Payload) + } + + return TransactionsInBlock{ + Transactions: txs, + Proof: proof, + }, nil +} + +func (c *Client) get(ctx context.Context, out any, format string, args ...any) error { + url := c.baseUrl + fmt.Sprintf(format, args...) 
+ + c.log.Debug("get", "url", url) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + c.log.Error("failed to build request", "err", err, "url", url) + return err + } + res, err := c.client.Do(req) + if err != nil { + c.log.Error("error in request", "err", err, "url", url) + return err + } + defer res.Body.Close() + + if res.StatusCode != 200 { + // Try to get the response body to include in the error message, as it may have useful + // information about why the request failed. If this call fails, the body will be `nil`, + // which is fine to include in the log, so we can ignore errors. + body, _ := io.ReadAll(res.Body) + c.log.Error("request failed", "url", url, "status", res.StatusCode, "response", string(body)) + return fmt.Errorf("request failed with status %d", res.StatusCode) + } + + // Read the response body into memory before we unmarshal it, rather than passing the io.Reader + // to the json decoder, so that we still have the body and can inspect it if unmarshalling + // failed. 
+ body, err := io.ReadAll(res.Body) + if err != nil { + c.log.Error("failed to read response body", "err", err, "url", url) + return err + } + if err := json.Unmarshal(body, out); err != nil { + c.log.Error("failed to parse body as json", "err", err, "url", url, "response", string(body)) + return err + } + c.log.Debug("request completed successfully", "url", url, "res", res, "body", string(body), "out", out) + return nil +} diff --git a/espresso/commit.go b/espresso/commit.go new file mode 100644 index 0000000000..833692f42f --- /dev/null +++ b/espresso/commit.go @@ -0,0 +1,182 @@ +package espresso + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "unicode/utf8" + + "github.com/ethereum/go-ethereum/crypto" +) + +type Commitment [32]byte + +func CommitmentFromUint256(n *U256) (Commitment, error) { + var bytes [32]byte + + bigEndian := n.Bytes() + if len(bigEndian) > 32 { + return Commitment{}, fmt.Errorf("integer out of range for U256 (%d)", n) + } + + // `n` might have fewer than 32 bytes, if the commitment starts with one or more zeros. Pad out + // to 32 bytes exactly, adding zeros at the beginning to be consistent with big-endian byte + // order. + if len(bigEndian) < 32 { + zeros := make([]byte, 32-len(bigEndian)) + bigEndian = append(zeros, bigEndian...) + } + + for i, b := range bigEndian { + // Bytes() returns the bytes in big endian order, but HotShot encodes commitments as + // U256 in little endian order, so we populate the bytes in reverse order. + bytes[31-i] = b + } + return bytes, nil +} + +func (c Commitment) Uint256() *U256 { + var bigEndian [32]byte + for i, b := range c { + // HotShot interprets the commitment as a little-endian integer. `SetBytes` takes the bytes + // in big-endian order, so we populate the bytes in reverse order. 
+ bigEndian[31-i] = b + } + return NewU256().SetBytes(bigEndian) +} + +func (c Commitment) Equals(other Commitment) bool { + return bytes.Equal(c[:], other[:]) +} + +type RawCommitmentBuilder struct { + hasher crypto.KeccakState +} + +func NewRawCommitmentBuilder(name string) *RawCommitmentBuilder { + b := new(RawCommitmentBuilder) + b.hasher = crypto.NewKeccakState() + return b.ConstantString(name) +} + +// Append a constant string to the running hash. +// +// WARNING: The string `s` must be a constant. This function does not encode the length of `s` in +// the hash, which can lead to domain collisions when different strings with different lengths are +// used depending on the input object. +func (b *RawCommitmentBuilder) ConstantString(s string) *RawCommitmentBuilder { + // The commitment scheme is only designed to work with UTF-8 strings. In the reference + // implementation, written in Rust, all strings are UTF-8, but in Go we have to check. + if !utf8.Valid([]byte(s)) { + panic(fmt.Sprintf("ConstantString must only be called with valid UTF-8 strings: %v", s)) + } + + if _, err := io.WriteString(b.hasher, s); err != nil { + panic(fmt.Sprintf("KeccakState Writer is not supposed to fail, but it did: %v", err)) + } + + // To denote the end of the string and act as a domain separator, include a byte sequence which + // can never appear in a valid UTF-8 string. + invalidUtf8 := []byte{0xC0, 0x7F} + return b.FixedSizeBytes(invalidUtf8) +} + +// Include a named field of another committable type. +func (b *RawCommitmentBuilder) Field(f string, c Commitment) *RawCommitmentBuilder { + return b.ConstantString(f).FixedSizeBytes(c[:]) +} + +func (b *RawCommitmentBuilder) OptionalField(f string, c *Commitment) *RawCommitmentBuilder { + b.ConstantString(f) + + // Encode a 0 or 1 to separate the nil domain from the non-nil domain. 
+ if c == nil { + b.Uint64(0) + } else { + b.Uint64(1) + b.FixedSizeBytes((*c)[:]) + } + + return b +} + +// Include a named field of type `uint256` in the hash. +func (b *RawCommitmentBuilder) Uint256Field(f string, n *U256) *RawCommitmentBuilder { + return b.ConstantString(f).Uint256(n) +} + +// Include a value of type `uint256` in the hash. +func (b *RawCommitmentBuilder) Uint256(n *U256) *RawCommitmentBuilder { + bytes := make([]byte, 32) + n.FillBytes(bytes) + + // `FillBytes` uses big endian byte ordering, but the Espresso commitment scheme uses little + // endian, so we need to reverse the bytes. + for i, j := 0, len(bytes)-1; i < j; i, j = i+1, j-1 { + bytes[i], bytes[j] = bytes[j], bytes[i] + } + + return b.FixedSizeBytes(bytes) +} + +// Include a named field of type `uint64` in the hash. +func (b *RawCommitmentBuilder) Uint64Field(f string, n uint64) *RawCommitmentBuilder { + return b.ConstantString(f).Uint64(n) +} + +// Include a value of type `uint64` in the hash. +func (b *RawCommitmentBuilder) Uint64(n uint64) *RawCommitmentBuilder { + bytes := make([]byte, 8) + binary.LittleEndian.PutUint64(bytes, n) + return b.FixedSizeBytes(bytes) +} + +// Include a named field of fixed length in the hash. +// +// WARNING: Go's type system cannot express the requirement that `bytes` is a fixed size array of +// any size. The best we can do is take a dynamically sized slice. However, this function uses a +// fixed-size encoding; namely, it does not encode the length of `bytes` in the hash, which can lead +// to domain collisions when this function is called with a slice which can have different lengths +// depending on the input object. +// +// The caller must ensure that this function is only used with slices whose length is statically +// determined by the type being committed to. 
+func (b *RawCommitmentBuilder) FixedSizeField(f string, bytes Bytes) *RawCommitmentBuilder { + return b.ConstantString(f).FixedSizeBytes(bytes) +} + +// Append a fixed size byte array to the running hash. +// +// WARNING: Go's type system cannot express the requirement that `bytes` is a fixed size array of +// any size. The best we can do is take a dynamically sized slice. However, this function uses a +// fixed-size encoding; namely, it does not encode the length of `bytes` in the hash, which can lead +// to domain collisions when this function is called with a slice which can have different lengths +// depending on the input object. +// +// The caller must ensure that this function is only used with slices whose length is statically +// determined by the type being committed to. +func (b *RawCommitmentBuilder) FixedSizeBytes(bytes Bytes) *RawCommitmentBuilder { + b.hasher.Write(bytes) + return b +} + +// Include a named field of dynamic length in the hash. +func (b *RawCommitmentBuilder) VarSizeField(f string, bytes Bytes) *RawCommitmentBuilder { + return b.ConstantString(f).VarSizeBytes(bytes) +} + +// Include a byte array whose length can be dynamic to the running hash. +func (b *RawCommitmentBuilder) VarSizeBytes(bytes Bytes) *RawCommitmentBuilder { + // First commit to the length, to prevent length extension and domain collision attacks. + b.Uint64(uint64(len(bytes))) + b.hasher.Write(bytes) + return b +} + +func (b *RawCommitmentBuilder) Finalize() Commitment { + var comm Commitment + bytes := b.hasher.Sum(nil) + copy(comm[:], bytes) + return comm +} diff --git a/espresso/hotshot/hotshot.go b/espresso/hotshot/hotshot.go new file mode 100644 index 0000000000..938467e028 --- /dev/null +++ b/espresso/hotshot/hotshot.go @@ -0,0 +1,669 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package hotshot + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// BN254G1Point is an auto generated low-level Go binding around an user-defined struct. +type BN254G1Point struct { + X *big.Int + Y *big.Int +} + +// BN254G2Point is an auto generated low-level Go binding around an user-defined struct. +type BN254G2Point struct { + X0 *big.Int + X1 *big.Int + Y0 *big.Int + Y1 *big.Int +} + +// HotShotQC is an auto generated low-level Go binding around an user-defined struct. +type HotShotQC struct { + Height *big.Int + BlockCommitment *big.Int + Pad1 *big.Int + Pad2 *big.Int +} + +// HotshotMetaData contains all meta data concerning the Hotshot contract. 
+var HotshotMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"BLSSigVerificationFailed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"expectedBlockNumber\",\"type\":\"uint256\"}],\"name\":\"IncorrectBlockNumber\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"InvalidQC\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoKeySelected\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotEnoughStake\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numBlocks\",\"type\":\"uint256\"}],\"name\":\"TooManyBlocks\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"firstBlockNumber\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"numBlocks\",\"type\":\"uint256\"}],\"name\":\"NewBlocks\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"x0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"x1\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y1\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structBN254.G2Point\",\"name\":\"stakingKey\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"name\":\"NewStakingKey\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_BLOCKS\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"x0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"x1\",\"type\":\"uint2
56\"},{\"internalType\":\"uint256\",\"name\":\"y0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y1\",\"type\":\"uint256\"}],\"internalType\":\"structBN254.G2Point\",\"name\":\"stakingKey\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"addNewStakingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"blockHeight\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"}],\"name\":\"commitments\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"commitment\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"name\":\"getStakingKey\",\"outputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"x0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"x1\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y0\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y1\",\"type\":\"uint256\"}],\"internalType\":\"structBN254.G2Point\",\"name\":\"\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"height\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockCommitment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pad1\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"pad2\",\"type\":\"uint256\"}],\"internalType\":\"structHotShot.QC[]\",\"name\":\"qcs\",\"type\":\"tuple[]\"}],\"name\":\"newBlocks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"messa
ge\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"y\",\"type\":\"uint256\"}],\"internalType\":\"structBN254.G1Point\",\"name\":\"sig\",\"type\":\"tuple\"},{\"internalType\":\"bool[]\",\"name\":\"bitmap\",\"type\":\"bool[]\"},{\"internalType\":\"uint256\",\"name\":\"minStakeThreshold\",\"type\":\"uint256\"}],\"name\":\"verifyAggSig\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +// HotshotABI is the input ABI used to generate the binding from. +// Deprecated: Use HotshotMetaData.ABI instead. +var HotshotABI = HotshotMetaData.ABI + +// Hotshot is an auto generated Go binding around an Ethereum contract. +type Hotshot struct { + HotshotCaller // Read-only binding to the contract + HotshotTransactor // Write-only binding to the contract + HotshotFilterer // Log filterer for contract events +} + +// HotshotCaller is an auto generated read-only Go binding around an Ethereum contract. +type HotshotCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HotshotTransactor is an auto generated write-only Go binding around an Ethereum contract. +type HotshotTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HotshotFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type HotshotFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// HotshotSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type HotshotSession struct { + Contract *Hotshot // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// HotshotCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type HotshotCallerSession struct { + Contract *HotshotCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// HotshotTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type HotshotTransactorSession struct { + Contract *HotshotTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// HotshotRaw is an auto generated low-level Go binding around an Ethereum contract. +type HotshotRaw struct { + Contract *Hotshot // Generic contract binding to access the raw methods on +} + +// HotshotCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type HotshotCallerRaw struct { + Contract *HotshotCaller // Generic read-only contract binding to access the raw methods on +} + +// HotshotTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type HotshotTransactorRaw struct { + Contract *HotshotTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewHotshot creates a new instance of Hotshot, bound to a specific deployed contract. 
+func NewHotshot(address common.Address, backend bind.ContractBackend) (*Hotshot, error) { + contract, err := bindHotshot(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Hotshot{HotshotCaller: HotshotCaller{contract: contract}, HotshotTransactor: HotshotTransactor{contract: contract}, HotshotFilterer: HotshotFilterer{contract: contract}}, nil +} + +// NewHotshotCaller creates a new read-only instance of Hotshot, bound to a specific deployed contract. +func NewHotshotCaller(address common.Address, caller bind.ContractCaller) (*HotshotCaller, error) { + contract, err := bindHotshot(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &HotshotCaller{contract: contract}, nil +} + +// NewHotshotTransactor creates a new write-only instance of Hotshot, bound to a specific deployed contract. +func NewHotshotTransactor(address common.Address, transactor bind.ContractTransactor) (*HotshotTransactor, error) { + contract, err := bindHotshot(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &HotshotTransactor{contract: contract}, nil +} + +// NewHotshotFilterer creates a new log filterer instance of Hotshot, bound to a specific deployed contract. +func NewHotshotFilterer(address common.Address, filterer bind.ContractFilterer) (*HotshotFilterer, error) { + contract, err := bindHotshot(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &HotshotFilterer{contract: contract}, nil +} + +// bindHotshot binds a generic wrapper to an already deployed contract. 
+func bindHotshot(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(HotshotABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Hotshot *HotshotRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Hotshot.Contract.HotshotCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Hotshot *HotshotRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Hotshot.Contract.HotshotTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Hotshot *HotshotRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Hotshot.Contract.HotshotTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Hotshot *HotshotCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Hotshot.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Hotshot *HotshotTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Hotshot.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Hotshot *HotshotTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Hotshot.Contract.contract.Transact(opts, method, params...) +} + +// MAXBLOCKS is a free data retrieval call binding the contract method 0x26833dcc. +// +// Solidity: function MAX_BLOCKS() view returns(uint256) +func (_Hotshot *HotshotCaller) MAXBLOCKS(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Hotshot.contract.Call(opts, &out, "MAX_BLOCKS") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// MAXBLOCKS is a free data retrieval call binding the contract method 0x26833dcc. +// +// Solidity: function MAX_BLOCKS() view returns(uint256) +func (_Hotshot *HotshotSession) MAXBLOCKS() (*big.Int, error) { + return _Hotshot.Contract.MAXBLOCKS(&_Hotshot.CallOpts) +} + +// MAXBLOCKS is a free data retrieval call binding the contract method 0x26833dcc. +// +// Solidity: function MAX_BLOCKS() view returns(uint256) +func (_Hotshot *HotshotCallerSession) MAXBLOCKS() (*big.Int, error) { + return _Hotshot.Contract.MAXBLOCKS(&_Hotshot.CallOpts) +} + +// BlockHeight is a free data retrieval call binding the contract method 0xf44ff712. 
+// +// Solidity: function blockHeight() view returns(uint256) +func (_Hotshot *HotshotCaller) BlockHeight(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Hotshot.contract.Call(opts, &out, "blockHeight") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// BlockHeight is a free data retrieval call binding the contract method 0xf44ff712. +// +// Solidity: function blockHeight() view returns(uint256) +func (_Hotshot *HotshotSession) BlockHeight() (*big.Int, error) { + return _Hotshot.Contract.BlockHeight(&_Hotshot.CallOpts) +} + +// BlockHeight is a free data retrieval call binding the contract method 0xf44ff712. +// +// Solidity: function blockHeight() view returns(uint256) +func (_Hotshot *HotshotCallerSession) BlockHeight() (*big.Int, error) { + return _Hotshot.Contract.BlockHeight(&_Hotshot.CallOpts) +} + +// Commitments is a free data retrieval call binding the contract method 0x49ce8997. +// +// Solidity: function commitments(uint256 blockHeight) view returns(uint256 commitment) +func (_Hotshot *HotshotCaller) Commitments(opts *bind.CallOpts, blockHeight *big.Int) (*big.Int, error) { + var out []interface{} + err := _Hotshot.contract.Call(opts, &out, "commitments", blockHeight) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Commitments is a free data retrieval call binding the contract method 0x49ce8997. +// +// Solidity: function commitments(uint256 blockHeight) view returns(uint256 commitment) +func (_Hotshot *HotshotSession) Commitments(blockHeight *big.Int) (*big.Int, error) { + return _Hotshot.Contract.Commitments(&_Hotshot.CallOpts, blockHeight) +} + +// Commitments is a free data retrieval call binding the contract method 0x49ce8997. 
+// +// Solidity: function commitments(uint256 blockHeight) view returns(uint256 commitment) +func (_Hotshot *HotshotCallerSession) Commitments(blockHeight *big.Int) (*big.Int, error) { + return _Hotshot.Contract.Commitments(&_Hotshot.CallOpts, blockHeight) +} + +// GetStakingKey is a free data retrieval call binding the contract method 0x67a21e70. +// +// Solidity: function getStakingKey(uint256 index) view returns((uint256,uint256,uint256,uint256), uint256) +func (_Hotshot *HotshotCaller) GetStakingKey(opts *bind.CallOpts, index *big.Int) (BN254G2Point, *big.Int, error) { + var out []interface{} + err := _Hotshot.contract.Call(opts, &out, "getStakingKey", index) + + if err != nil { + return *new(BN254G2Point), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(BN254G2Point)).(*BN254G2Point) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +// GetStakingKey is a free data retrieval call binding the contract method 0x67a21e70. +// +// Solidity: function getStakingKey(uint256 index) view returns((uint256,uint256,uint256,uint256), uint256) +func (_Hotshot *HotshotSession) GetStakingKey(index *big.Int) (BN254G2Point, *big.Int, error) { + return _Hotshot.Contract.GetStakingKey(&_Hotshot.CallOpts, index) +} + +// GetStakingKey is a free data retrieval call binding the contract method 0x67a21e70. +// +// Solidity: function getStakingKey(uint256 index) view returns((uint256,uint256,uint256,uint256), uint256) +func (_Hotshot *HotshotCallerSession) GetStakingKey(index *big.Int) (BN254G2Point, *big.Int, error) { + return _Hotshot.Contract.GetStakingKey(&_Hotshot.CallOpts, index) +} + +// VerifyAggSig is a free data retrieval call binding the contract method 0x0340961e. 
+// +// Solidity: function verifyAggSig(bytes message, (uint256,uint256) sig, bool[] bitmap, uint256 minStakeThreshold) view returns() +func (_Hotshot *HotshotCaller) VerifyAggSig(opts *bind.CallOpts, message []byte, sig BN254G1Point, bitmap []bool, minStakeThreshold *big.Int) error { + var out []interface{} + err := _Hotshot.contract.Call(opts, &out, "verifyAggSig", message, sig, bitmap, minStakeThreshold) + + if err != nil { + return err + } + + return err + +} + +// VerifyAggSig is a free data retrieval call binding the contract method 0x0340961e. +// +// Solidity: function verifyAggSig(bytes message, (uint256,uint256) sig, bool[] bitmap, uint256 minStakeThreshold) view returns() +func (_Hotshot *HotshotSession) VerifyAggSig(message []byte, sig BN254G1Point, bitmap []bool, minStakeThreshold *big.Int) error { + return _Hotshot.Contract.VerifyAggSig(&_Hotshot.CallOpts, message, sig, bitmap, minStakeThreshold) +} + +// VerifyAggSig is a free data retrieval call binding the contract method 0x0340961e. +// +// Solidity: function verifyAggSig(bytes message, (uint256,uint256) sig, bool[] bitmap, uint256 minStakeThreshold) view returns() +func (_Hotshot *HotshotCallerSession) VerifyAggSig(message []byte, sig BN254G1Point, bitmap []bool, minStakeThreshold *big.Int) error { + return _Hotshot.Contract.VerifyAggSig(&_Hotshot.CallOpts, message, sig, bitmap, minStakeThreshold) +} + +// AddNewStakingKey is a paid mutator transaction binding the contract method 0xf1f45d99. +// +// Solidity: function addNewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount) returns() +func (_Hotshot *HotshotTransactor) AddNewStakingKey(opts *bind.TransactOpts, stakingKey BN254G2Point, amount *big.Int) (*types.Transaction, error) { + return _Hotshot.contract.Transact(opts, "addNewStakingKey", stakingKey, amount) +} + +// AddNewStakingKey is a paid mutator transaction binding the contract method 0xf1f45d99. 
+// +// Solidity: function addNewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount) returns() +func (_Hotshot *HotshotSession) AddNewStakingKey(stakingKey BN254G2Point, amount *big.Int) (*types.Transaction, error) { + return _Hotshot.Contract.AddNewStakingKey(&_Hotshot.TransactOpts, stakingKey, amount) +} + +// AddNewStakingKey is a paid mutator transaction binding the contract method 0xf1f45d99. +// +// Solidity: function addNewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount) returns() +func (_Hotshot *HotshotTransactorSession) AddNewStakingKey(stakingKey BN254G2Point, amount *big.Int) (*types.Transaction, error) { + return _Hotshot.Contract.AddNewStakingKey(&_Hotshot.TransactOpts, stakingKey, amount) +} + +// NewBlocks is a paid mutator transaction binding the contract method 0x0a321cff. +// +// Solidity: function newBlocks((uint256,uint256,uint256,uint256)[] qcs) returns() +func (_Hotshot *HotshotTransactor) NewBlocks(opts *bind.TransactOpts, qcs []HotShotQC) (*types.Transaction, error) { + return _Hotshot.contract.Transact(opts, "newBlocks", qcs) +} + +// NewBlocks is a paid mutator transaction binding the contract method 0x0a321cff. +// +// Solidity: function newBlocks((uint256,uint256,uint256,uint256)[] qcs) returns() +func (_Hotshot *HotshotSession) NewBlocks(qcs []HotShotQC) (*types.Transaction, error) { + return _Hotshot.Contract.NewBlocks(&_Hotshot.TransactOpts, qcs) +} + +// NewBlocks is a paid mutator transaction binding the contract method 0x0a321cff. +// +// Solidity: function newBlocks((uint256,uint256,uint256,uint256)[] qcs) returns() +func (_Hotshot *HotshotTransactorSession) NewBlocks(qcs []HotShotQC) (*types.Transaction, error) { + return _Hotshot.Contract.NewBlocks(&_Hotshot.TransactOpts, qcs) +} + +// HotshotNewBlocksIterator is returned from FilterNewBlocks and is used to iterate over the raw logs and unpacked data for NewBlocks events raised by the Hotshot contract. 
+type HotshotNewBlocksIterator struct { + Event *HotshotNewBlocks // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *HotshotNewBlocksIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(HotshotNewBlocks) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(HotshotNewBlocks) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *HotshotNewBlocksIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *HotshotNewBlocksIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// HotshotNewBlocks represents a NewBlocks event raised by the Hotshot contract. +type HotshotNewBlocks struct { + FirstBlockNumber *big.Int + NumBlocks *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterNewBlocks is a free log retrieval operation binding the contract event 0x8203a21e4f95f72e5081d5e0929b1a8c52141e123f9a14e1e74b0260fa5f52f1. +// +// Solidity: event NewBlocks(uint256 firstBlockNumber, uint256 numBlocks) +func (_Hotshot *HotshotFilterer) FilterNewBlocks(opts *bind.FilterOpts) (*HotshotNewBlocksIterator, error) { + + logs, sub, err := _Hotshot.contract.FilterLogs(opts, "NewBlocks") + if err != nil { + return nil, err + } + return &HotshotNewBlocksIterator{contract: _Hotshot.contract, event: "NewBlocks", logs: logs, sub: sub}, nil +} + +// WatchNewBlocks is a free log subscription operation binding the contract event 0x8203a21e4f95f72e5081d5e0929b1a8c52141e123f9a14e1e74b0260fa5f52f1. 
+// +// Solidity: event NewBlocks(uint256 firstBlockNumber, uint256 numBlocks) +func (_Hotshot *HotshotFilterer) WatchNewBlocks(opts *bind.WatchOpts, sink chan<- *HotshotNewBlocks) (event.Subscription, error) { + + logs, sub, err := _Hotshot.contract.WatchLogs(opts, "NewBlocks") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(HotshotNewBlocks) + if err := _Hotshot.contract.UnpackLog(event, "NewBlocks", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseNewBlocks is a log parse operation binding the contract event 0x8203a21e4f95f72e5081d5e0929b1a8c52141e123f9a14e1e74b0260fa5f52f1. +// +// Solidity: event NewBlocks(uint256 firstBlockNumber, uint256 numBlocks) +func (_Hotshot *HotshotFilterer) ParseNewBlocks(log types.Log) (*HotshotNewBlocks, error) { + event := new(HotshotNewBlocks) + if err := _Hotshot.contract.UnpackLog(event, "NewBlocks", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// HotshotNewStakingKeyIterator is returned from FilterNewStakingKey and is used to iterate over the raw logs and unpacked data for NewStakingKey events raised by the Hotshot contract. 
+type HotshotNewStakingKeyIterator struct { + Event *HotshotNewStakingKey // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *HotshotNewStakingKeyIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(HotshotNewStakingKey) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(HotshotNewStakingKey) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *HotshotNewStakingKeyIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *HotshotNewStakingKeyIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// HotshotNewStakingKey represents a NewStakingKey event raised by the Hotshot contract. +type HotshotNewStakingKey struct { + StakingKey BN254G2Point + Amount *big.Int + Index *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterNewStakingKey is a free log retrieval operation binding the contract event 0xd72fe1ac57d3e6d51c922ae4d811cc50aa3ad7026283aea637494a073252565a. +// +// Solidity: event NewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount, uint256 index) +func (_Hotshot *HotshotFilterer) FilterNewStakingKey(opts *bind.FilterOpts) (*HotshotNewStakingKeyIterator, error) { + + logs, sub, err := _Hotshot.contract.FilterLogs(opts, "NewStakingKey") + if err != nil { + return nil, err + } + return &HotshotNewStakingKeyIterator{contract: _Hotshot.contract, event: "NewStakingKey", logs: logs, sub: sub}, nil +} + +// WatchNewStakingKey is a free log subscription operation binding the contract event 0xd72fe1ac57d3e6d51c922ae4d811cc50aa3ad7026283aea637494a073252565a. 
+// +// Solidity: event NewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount, uint256 index) +func (_Hotshot *HotshotFilterer) WatchNewStakingKey(opts *bind.WatchOpts, sink chan<- *HotshotNewStakingKey) (event.Subscription, error) { + + logs, sub, err := _Hotshot.contract.WatchLogs(opts, "NewStakingKey") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(HotshotNewStakingKey) + if err := _Hotshot.contract.UnpackLog(event, "NewStakingKey", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseNewStakingKey is a log parse operation binding the contract event 0xd72fe1ac57d3e6d51c922ae4d811cc50aa3ad7026283aea637494a073252565a. +// +// Solidity: event NewStakingKey((uint256,uint256,uint256,uint256) stakingKey, uint256 amount, uint256 index) +func (_Hotshot *HotshotFilterer) ParseNewStakingKey(log types.Log) (*HotshotNewStakingKey, error) { + event := new(HotshotNewStakingKey) + if err := _Hotshot.contract.UnpackLog(event, "NewStakingKey", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/espresso/nmt.go b/espresso/nmt.go new file mode 100644 index 0000000000..841d3722e8 --- /dev/null +++ b/espresso/nmt.go @@ -0,0 +1,11 @@ +package espresso + +// This function mocks batch transaction validation against a set of HotShot NMT roots. +// It pretends to verify that the set of transactions (txns) in a batch correspond to a set of n NMT proofs +// (p1, ... pn) against trusted NMT roots r1,...rn. 
+//
+// In other words, the function validates that txns = {...p1.txns, ..., ...pn.txns}, that all the transactions
+// come from the given namespace, and that p1, ..., pn are all valid NMT proofs with respect to r1, ..., rn.
+//
+// NOTE(review): this is currently a stub — it unconditionally returns nil and performs NO validation
+// of the proofs, roots, namespace, or transactions. Callers get no integrity guarantee from this
+// function until the real NMT verification is ported; do not treat a nil return as evidence the
+// batch is well-formed.
+func ValidateBatchTransactions(namespace uint64, roots []*NmtRoot, proofs []*NmtProof, transactions []Bytes) error {
+	return nil
+}
diff --git a/espresso/query.go b/espresso/query.go
new file mode 100644
index 0000000000..05c7e040c7
--- /dev/null
+++ b/espresso/query.go
@@ -0,0 +1,129 @@
+package espresso
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+)
+
+// Interface to the Espresso Sequencer query service.
+type QueryService interface {
+	// Get all the available headers whose timestamps fall in the window [start, end).
+	FetchHeadersForWindow(ctx context.Context, start uint64, end uint64) (WindowStart, error)
+	// Get all the available headers starting with the block numbered `from` whose timestamps are
+	// less than `end`. This can be used to continue fetching headers in a time window if not all
+	// headers in the window were available when `FetchHeadersForWindow` was called.
+	FetchRemainingHeadersForWindow(ctx context.Context, from uint64, end uint64) (WindowMore, error)
+	// Get the transactions belonging to the given namespace in the block numbered `block` with the
+	// given header, along with a proof that these are all such transactions.
+	FetchTransactionsInBlock(ctx context.Context, block uint64, header *Header, namespace uint64) (TransactionsInBlock, error)
+}
+
+// Response to `FetchHeadersForWindow`.
+type WindowStart struct {
+	// The block number of the first block in the window, unless the window is empty, in which case
+	// this is the block number of `Next`.
+	From uint64 `json:"from"`
+	// The available block headers in the requested window.
+	Window []Header `json:"window"`
+	// The header of the last block before the start of the window. 
This proves that the query + // service did not omit any blocks from the beginning of the window. This will be `nil` if + // `From` is 0. + Prev *Header `json:"prev"` + // The first block after the end of the window. This proves that the query service did not omit + // any blocks from the end of the window. This will be `nil` if the full window is not available + // yet, in which case `FetchRemainingHeadersForWindow` should be called to retrieve the rest of + // the window. + Next *Header `json:"next"` +} + +func (w *WindowStart) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. + type Dec struct { + From *uint64 `json:"from"` + Window *[]Header `json:"window"` + Prev *Header `json:"prev"` + Next *Header `json:"next"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.From == nil { + return fmt.Errorf("Field from of type WindowStart is required") + } + w.From = *dec.From + + if dec.Window == nil { + return fmt.Errorf("Field window of type WindowStart is required") + } + w.Window = *dec.Window + + w.Prev = dec.Prev + w.Next = dec.Next + return nil +} + +// Response to `FetchRemainingHeadersForWindow`. +type WindowMore struct { + // The additional blocks within the window which are available, if any. + Window []Header `json:"window"` + // The first block after the end of the window, if the full window is available. + Next *Header `json:"next"` +} + +func (w *WindowMore) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. 
+ type Dec struct { + Window *[]Header `json:"window"` + Next *Header `json:"next"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Window == nil { + return fmt.Errorf("Field window of type WindowMore is required") + } + w.Window = *dec.Window + + w.Next = dec.Next + return nil +} + +// Response to `FetchTransactionsInBlock` +type TransactionsInBlock struct { + // The transactions. + Transactions []Bytes `json:"transactions"` + // A proof that these are all the transactions in the block with the requested namespace. + Proof NmtProof `json:"proof"` +} + +func (t *TransactionsInBlock) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. + type Dec struct { + Transactions *[]Bytes `json:"transactions"` + Proof *NmtProof `json:"proof"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Transactions == nil { + return fmt.Errorf("Field transactions of type TransactionsInBlock is required") + } + t.Transactions = *dec.Transactions + + if dec.Proof == nil { + return fmt.Errorf("Field proof of type TransactionsInBlock is required") + } + t.Proof = *dec.Proof + + return nil +} diff --git a/espresso/types.go b/espresso/types.go new file mode 100644 index 0000000000..a25d558eee --- /dev/null +++ b/espresso/types.go @@ -0,0 +1,275 @@ +package espresso + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +type Header struct { + TransactionsRoot NmtRoot `json:"transactions_root"` + + Metadata `json:"metadata"` +} + +func (h *Header) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. 
+ type Dec struct { + TransactionsRoot *NmtRoot `json:"transactions_root"` + Metadata *Metadata `json:"metadata"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.TransactionsRoot == nil { + return fmt.Errorf("Field transactions_root of type Header is required") + } + h.TransactionsRoot = *dec.TransactionsRoot + + if dec.Metadata == nil { + return fmt.Errorf("Field metadata of type Header is required") + } + h.Metadata = *dec.Metadata + + return nil +} + +func (self *Header) Commit() Commitment { + var l1FinalizedComm *Commitment + if self.L1Finalized != nil { + comm := self.L1Finalized.Commit() + l1FinalizedComm = &comm + } + + return NewRawCommitmentBuilder("BLOCK"). + Uint64Field("timestamp", self.Timestamp). + Uint64Field("l1_head", self.L1Head). + OptionalField("l1_finalized", l1FinalizedComm). + Field("transactions_root", self.TransactionsRoot.Commit()). + Finalize() +} + +type Metadata struct { + Timestamp uint64 `json:"timestamp"` + L1Head uint64 `json:"l1_head"` + L1Finalized *L1BlockInfo `json:"l1_finalized" rlp:"nil"` +} + +func (m *Metadata) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. 
+ type Dec struct { + Timestamp *uint64 `json:"timestamp"` + L1Head *uint64 `json:"l1_head"` + L1Finalized *L1BlockInfo `json:"l1_finalized" rlp:"nil"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Timestamp == nil { + return fmt.Errorf("Field timestamp of type Metadata is required") + } + m.Timestamp = *dec.Timestamp + + if dec.L1Head == nil { + return fmt.Errorf("Field l1_head of type Metadata is required") + } + m.L1Head = *dec.L1Head + + m.L1Finalized = dec.L1Finalized + return nil +} + +type L1BlockInfo struct { + Number uint64 `json:"number"` + Timestamp U256 `json:"timestamp"` + Hash common.Hash `json:"hash"` +} + +func (i *L1BlockInfo) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. + type Dec struct { + Number *uint64 `json:"number"` + Timestamp *U256 `json:"timestamp"` + Hash *common.Hash `json:"hash"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Number == nil { + return fmt.Errorf("Field number of type L1BlockInfo is required") + } + i.Number = *dec.Number + + if dec.Timestamp == nil { + return fmt.Errorf("Field timestamp of type L1BlockInfo is required") + } + i.Timestamp = *dec.Timestamp + + if dec.Hash == nil { + return fmt.Errorf("Field hash of type L1BlockInfo is required") + } + i.Hash = *dec.Hash + + return nil +} + +func (self *L1BlockInfo) Commit() Commitment { + return NewRawCommitmentBuilder("L1BLOCK"). + Uint64Field("number", self.Number). + Uint256Field("timestamp", &self.Timestamp). + FixedSizeField("hash", self.Hash[:]). + Finalize() +} + +type NmtRoot struct { + Root Bytes `json:"root"` +} + +func (r *NmtRoot) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. 
+ type Dec struct { + Root *Bytes `json:"root"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Root == nil { + return fmt.Errorf("Field root of type NmtRoot is required") + } + r.Root = *dec.Root + + return nil +} + +func (self *NmtRoot) Commit() Commitment { + return NewRawCommitmentBuilder("NMTROOT"). + VarSizeField("root", self.Root). + Finalize() +} + +type Transaction struct { + Vm uint64 `json:"vm"` + Payload Bytes `json:"payload"` +} + +func (t *Transaction) UnmarshalJSON(b []byte) error { + // Parse using pointers so we can distinguish between missing and default fields. + type Dec struct { + Vm *uint64 `json:"vm"` + Payload *Bytes `json:"payload"` + } + + var dec Dec + if err := json.Unmarshal(b, &dec); err != nil { + return err + } + + if dec.Vm == nil { + return fmt.Errorf("Field vm of type Transaction is required") + } + t.Vm = *dec.Vm + + if dec.Payload == nil { + return fmt.Errorf("Field payload of type Transaction is required") + } + t.Payload = *dec.Payload + + return nil +} + +type BatchMerkleProof = Bytes +type NmtProof = Bytes + +func (*NmtProof) Validate(root NmtRoot, transactions []Transaction) error { + // TODO since porting the Rust NMT to Go is a big task, this validation is stubbed out for now, + // and always succeeds. Essentially, we trust the sequencer until this is fixed. + // https://github.com/EspressoSystems/op-espresso-integration/issues/17 + return nil +} + +// A bytes type which serializes to JSON as an array, rather than a base64 string. This ensures +// compatibility with the Espresso APIs. +type Bytes []byte + +func (b Bytes) MarshalJSON() ([]byte, error) { + // Convert to `int` array, which serializes the way we want. + ints := make([]int, len(b)) + for i := range b { + ints[i] = int(b[i]) + } + + return json.Marshal(ints) +} + +func (b *Bytes) UnmarshalJSON(in []byte) error { + // Parse as `int` array, which deserializes the way we want. 
+ var ints []int + if err := json.Unmarshal(in, &ints); err != nil { + return err + } + + // Convert back to `byte` array. + *b = make([]byte, len(ints)) + for i := range ints { + if ints[i] < 0 || 255 < ints[i] { + return fmt.Errorf("byte out of range: %d", ints[i]) + } + (*b)[i] = byte(ints[i]) + } + + return nil +} + +// A BigInt type which serializes to JSON a a hex string. This ensures compatibility with the +// Espresso APIs. +type U256 struct { + big.Int +} + +func NewU256() *U256 { + return new(U256) +} + +func (i *U256) SetBigInt(n *big.Int) *U256 { + i.Int.Set(n) + return i +} + +func (i *U256) SetUint64(n uint64) *U256 { + i.Int.SetUint64(n) + return i +} + +func (i *U256) SetBytes(buf [32]byte) *U256 { + i.Int.SetBytes(buf[:]) + return i +} + +func (i U256) MarshalJSON() ([]byte, error) { + return json.Marshal(fmt.Sprintf("0x%s", i.Text(16))) +} + +func (i *U256) UnmarshalJSON(in []byte) error { + var s string + if err := json.Unmarshal(in, &s); err != nil { + return err + } + if _, err := fmt.Sscanf(s, "0x%x", &i.Int); err != nil { + return err + } + return nil +} diff --git a/espresso/types_test.go b/espresso/types_test.go new file mode 100644 index 0000000000..de8556c67a --- /dev/null +++ b/espresso/types_test.go @@ -0,0 +1,222 @@ +package espresso + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + + "github.com/stretchr/testify/require" +) + +func removeWhitespace(s string) string { + // Split the string on whitespace then concatenate the segments + return strings.Join(strings.Fields(s), "") +} + +// Reference data taken from the reference sequencer implementation +// (https://github.com/EspressoSystems/espresso-sequencer/blob/main/data) + +var ReferenceNmtRoot NmtRoot = NmtRoot{ + Root: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, +} + +var ReferenceL1BLockInfo L1BlockInfo = L1BlockInfo{ + 
Number: 123, + Timestamp: *NewU256().SetUint64(0x456), + Hash: common.Hash{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, +} + +var ReferenceHeader Header = Header{ + TransactionsRoot: ReferenceNmtRoot, + Metadata: Metadata{ + Timestamp: 789, + L1Head: 124, + L1Finalized: &ReferenceL1BLockInfo, + }, +} + +func TestEspressoTypesNmtRootJson(t *testing.T) { + data := []byte(removeWhitespace(`{ + "root": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + }`)) + + // Check encoding. + encoded, err := json.Marshal(ReferenceNmtRoot) + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + require.Equal(t, encoded, data) + + // Check decoding + var decoded NmtRoot + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("Failed to unmarshal JSON: %v", err) + } + require.Equal(t, decoded, ReferenceNmtRoot) + + CheckJsonRequiredFields[NmtRoot](t, data, "root") +} + +func TestEspressoTypesL1BLockInfoJson(t *testing.T) { + data := []byte(removeWhitespace(`{ + "number": 123, + "timestamp": "0x456", + "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + }`)) + + // Check encoding. 
+ encoded, err := json.Marshal(ReferenceL1BLockInfo) + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + require.Equal(t, encoded, data) + + // Check decoding + var decoded L1BlockInfo + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("Failed to unmarshal JSON: %v", err) + } + require.Equal(t, decoded, ReferenceL1BLockInfo) + + CheckJsonRequiredFields[L1BlockInfo](t, data, "number", "timestamp", "hash") +} + +func TestEspressoTypesHeaderJson(t *testing.T) { + data := []byte(removeWhitespace(`{ + "transactions_root": { + "root": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + }, + "metadata": { + "timestamp": 789, + "l1_head": 124, + "l1_finalized": { + "number": 123, + "timestamp": "0x456", + "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + } + } + }`)) + + // Check encoding. + encoded, err := json.Marshal(ReferenceHeader) + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + require.Equal(t, encoded, data) + + // Check decoding + var decoded Header + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("Failed to unmarshal JSON: %v", err) + } + require.Equal(t, decoded, ReferenceHeader) + + CheckJsonRequiredFields[Header](t, data, "transactions_root", "metadata") +} + +func TestEspressoMetadataJson(t *testing.T) { + data := []byte(removeWhitespace(`{ + "timestamp": 789, + "l1_head": 124, + "l1_finalized": { + "number": 123, + "timestamp": "0x456", + "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + } + }`)) + m := ReferenceHeader.Metadata + + // Check encoding. 
+ encoded, err := json.Marshal(m) + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + require.Equal(t, encoded, data) + + // Check decoding + var decoded Metadata + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("Failed to unmarshal JSON: %v", err) + } + require.Equal(t, decoded, m) + + CheckJsonRequiredFields[Metadata](t, data, "timestamp", "l1_head") +} + +func TestEspressoTransactionJson(t *testing.T) { + data := []byte(removeWhitespace(`{ + "vm": 0, + "payload": [1,2,3,4,5] + }`)) + tx := Transaction{ + Vm: 0, + Payload: []byte{1, 2, 3, 4, 5}, + } + + // Check encoding. + encoded, err := json.Marshal(tx) + if err != nil { + t.Fatalf("Failed to marshal JSON: %v", err) + } + require.Equal(t, encoded, data) + + // Check decoding + var decoded Transaction + if err := json.Unmarshal(data, &decoded); err != nil { + t.Fatalf("Failed to unmarshal JSON: %v", err) + } + require.Equal(t, decoded, tx) + + CheckJsonRequiredFields[Transaction](t, data, "vm", "payload") +} + +// Commitment tests ported from the reference sequencer implementation +// (https://github.com/EspressoSystems/espresso-sequencer/blob/main/sequencer/src/block.rs) + +func TestEspressoTypesNmtRootCommit(t *testing.T) { + require.Equal(t, ReferenceNmtRoot.Commit(), Commitment{251, 80, 232, 195, 91, 2, 138, 18, 240, 231, 31, 172, 54, 204, 90, 42, 215, 42, 72, 187, 15, 28, 128, 67, 149, 117, 26, 114, 232, 57, 190, 10}) +} + +func TestEspressoTypesL1BlockInfoCommit(t *testing.T) { + require.Equal(t, ReferenceL1BLockInfo.Commit(), Commitment{224, 122, 115, 150, 226, 202, 216, 139, 51, 221, 23, 79, 54, 243, 107, 12, 12, 144, 113, 99, 133, 217, 207, 73, 120, 182, 115, 84, 210, 230, 126, 148}) +} + +func TestEspressoTypesHeaderCommit(t *testing.T) { + require.Equal(t, ReferenceHeader.Commit(), Commitment{26, 77, 186, 162, 251, 241, 135, 23, 132, 5, 196, 207, 131, 64, 207, 215, 141, 144, 146, 65, 158, 30, 169, 102, 251, 183, 101, 149, 168, 173, 161, 149}) +} + +func 
TestEspressoCommitmentFromU256TrailingZero(t *testing.T) { + comm := Commitment{209, 146, 197, 195, 145, 148, 17, 211, 52, 72, 28, 120, 88, 182, 204, 206, 77, 36, 56, 35, 3, 143, 77, 186, 69, 233, 104, 30, 90, 105, 48, 0} + roundTrip, err := CommitmentFromUint256(comm.Uint256()) + require.Nil(t, err) + require.Equal(t, comm, roundTrip) +} + +func CheckJsonRequiredFields[T any](t *testing.T, data []byte, fields ...string) { + // Parse the JSON object into a map so we can selectively delete fields. + var obj map[string]json.RawMessage + if err := json.Unmarshal(data, &obj); err != nil { + t.Fatalf("failed to unmarshal JSON: %v", err) + } + + for _, field := range fields { + data, err := json.Marshal(withoutKey(obj, field)) + require.Nil(t, err, "failed to marshal JSON") + + var dec T + err = json.Unmarshal(data, &dec) + require.NotNil(t, err, "unmarshalling without required field %s should fail", field) + } +} + +func withoutKey[K comparable, V any](m map[K]V, key K) map[K]V { + copied := make(map[K]V) + for k, v := range m { + if k != key { + copied[k] = v + } + } + return copied +} diff --git a/execution/gethexec/espresso_sequencer.go b/execution/gethexec/espresso_sequencer.go new file mode 100644 index 0000000000..82f573d8af --- /dev/null +++ b/execution/gethexec/espresso_sequencer.go @@ -0,0 +1,164 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package gethexec + +import ( + "context" + "encoding/json" + "time" + + "github.com/offchainlabs/nitro/espresso" + "github.com/offchainlabs/nitro/util/stopwaiter" + + "github.com/ethereum/go-ethereum/arbitrum_types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/arbosState" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbos/l1pricing" +) + +var ( + retryTime = time.Second * 5 +) + +type HotShotState struct { + client espresso.Client + nextSeqBlockNum uint64 +} + +func NewHotShotState(log log.Logger, url string) *HotShotState { + return &HotShotState{ + client: *espresso.NewClient(log, url), + // TODO: Load this from the inbox reader so that new sequencers don't read redundant blocks + // https://github.com/EspressoSystems/espresso-sequencer/issues/734 + nextSeqBlockNum: 0, + } +} + +func (s *HotShotState) advance() { + s.nextSeqBlockNum += 1 +} + +type EspressoSequencer struct { + stopwaiter.StopWaiter + + execEngine *ExecutionEngine + config SequencerConfigFetcher + hotShotState *HotShotState +} + +func NewEspressoSequencer(execEngine *ExecutionEngine, configFetcher SequencerConfigFetcher) (*EspressoSequencer, error) { + config := configFetcher() + if err := config.Validate(); err != nil { + return nil, err + } + return &EspressoSequencer{ + execEngine: execEngine, + config: configFetcher, + hotShotState: NewHotShotState(log.New(), config.HotShotUrl), + }, nil +} + +func (s *EspressoSequencer) makeSequencingHooks() *arbos.SequencingHooks { + return &arbos.SequencingHooks{ + PreTxFilter: s.preTxFilter, + PostTxFilter: s.postTxFilter, + DiscardInvalidTxsEarly: 
false, + TxErrors: []error{}, + ConditionalOptionsForTx: nil, + } +} + +func (s *EspressoSequencer) createBlock(ctx context.Context) (returnValue bool) { + nextSeqBlockNum := s.hotShotState.nextSeqBlockNum + log.Info("Attempting to sequence Espresso block", "block_num", nextSeqBlockNum) + header, err := s.hotShotState.client.FetchHeader(ctx, nextSeqBlockNum) + namespace := s.config().EspressoNamespace + if err != nil { + log.Warn("Unable to fetch header for block number, will retry", "block_num", nextSeqBlockNum) + return false + } + arbTxns, err := s.hotShotState.client.FetchTransactionsInBlock(ctx, nextSeqBlockNum, &header, namespace) + if err != nil { + log.Error("Error fetching transactions", "err", err) + return false + + } + var txes types.Transactions + for _, tx := range arbTxns.Transactions { + var out types.Transaction + if err := json.Unmarshal(tx, &out); err != nil { + log.Error("Failed to serialize") + return false + } + txes = append(txes, &out) + + } + + arbHeader := &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: l1pricing.BatchPosterAddress, + BlockNumber: header.L1Head, + Timestamp: header.Timestamp, + RequestId: nil, + L1BaseFee: nil, + // TODO: add justification https://github.com/EspressoSystems/espresso-sequencer/issues/733 + } + + hooks := s.makeSequencingHooks() + _, err = s.execEngine.SequenceTransactions(arbHeader, txes, hooks) + if err != nil { + log.Error("Sequencing error for block number", "block_num", nextSeqBlockNum, "err", err) + return false + } + + s.hotShotState.advance() + + return true + +} + +func (s *EspressoSequencer) Start(ctxIn context.Context) error { + s.StopWaiter.Start(ctxIn, s) + s.CallIteratively(func(ctx context.Context) time.Duration { + retryBlockTime := time.Now().Add(retryTime) + madeBlock := s.createBlock(ctx) + if madeBlock { + // Allow the sequencer to catch up to HotShot + return 0 + } + // If we didn't make a block, try again in a bit + return 
time.Until(retryBlockTime) + }) + + return nil +} + +// Required methods for the TransactionPublisher interface +func (s *EspressoSequencer) PublishTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { + return nil +} + +func (s *EspressoSequencer) CheckHealth(ctx context.Context) error { + return nil +} + +func (s *EspressoSequencer) Initialize(ctx context.Context) error { + return nil +} + +// ArbOS expects some preTxFilter, postTxFilter +func (s *EspressoSequencer) preTxFilter(_ *params.ChainConfig, _ *types.Header, _ *state.StateDB, _ *arbosState.ArbosState, _ *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error { + return nil +} + +func (s *EspressoSequencer) postTxFilter(_ *types.Header, _ *arbosState.ArbosState, _ *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error { + return nil +} diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 5a99d59c5a..02fb07bb1d 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -163,13 +163,21 @@ func CreateExecutionNode( } } - if config.Sequencer.Enable { + if config.Sequencer.Enable && !config.Sequencer.Espresso { seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer } sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher) if err != nil { return nil, err } txPublisher = sequencer + } else if config.Sequencer.Enable && config.Sequencer.Espresso { + seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer } + espressoSequencer, err := NewEspressoSequencer(execEngine, seqConfigFetcher) + if err != nil { + return nil, err + } + txPublisher = espressoSequencer + } else { if config.Forwarder.RedisUrl != "" { txPublisher = NewRedisTxForwarder(config.forwardingTarget, &config.Forwarder) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 
0f17beef13..7d04e99b4d 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -66,7 +66,10 @@ type SequencerConfig struct { MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` - Espresso bool `koanf:"espresso" reload:"hot"` + // Espresso specific flags + Espresso bool `koanf:"espresso"` + HotShotUrl string `koanf:"hotshot-url"` + EspressoNamespace uint64 `koanf:"espresso-namespace"` } func (c *SequencerConfig) Validate() error { @@ -117,7 +120,6 @@ var TestSequencerConfig = SequencerConfig{ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultSequencerConfig.Enable, "act and post to l1 as sequencer") - f.Bool(prefix+".espresso", DefaultSequencerConfig.Espresso, "if true, l2 transactions will be fetched from espresso sequencer") f.Duration(prefix+".max-block-speed", DefaultSequencerConfig.MaxBlockSpeed, "minimum delay between blocks (sets a maximum speed of block production)") f.Uint64(prefix+".max-revert-gas-reject", DefaultSequencerConfig.MaxRevertGasReject, "maximum gas executed in a revert for the sequencer to reject the transaction instead of posting it (anti-DOS)") f.Duration(prefix+".max-acceptable-timestamp-delta", DefaultSequencerConfig.MaxAcceptableTimestampDelta, "maximum acceptable time difference between the local time and the latest L1 block's timestamp") @@ -129,6 +131,9 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", 
DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") + f.Bool(prefix+".espresso", DefaultSequencerConfig.Espresso, "if true, transactions will be fetched from the espresso sequencer network") + f.String(prefix+".hotshot-url", DefaultSequencerConfig.HotShotUrl, "") + f.Uint64(prefix+".espresso-namespace", DefaultSequencerConfig.EspressoNamespace, "espresso namespace that corresponds the L2 chain") } type txQueueItem struct { @@ -713,18 +718,6 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { } func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { - usingEspresso := s.config().Espresso - if usingEspresso { - return s.createBlockEspresso(ctx) - } - return s.createBlockDefault(ctx) -} - -func (s *Sequencer) createBlockEspresso(ctx context.Context) (returnValue bool) { - return false -} - -func (s *Sequencer) createBlockDefault(ctx context.Context) (returnValue bool) { var queueItems []txQueueItem var totalBatchSize int diff --git a/go-ethereum b/go-ethereum index 2eaa15227a..d16fbb42f4 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 2eaa15227ac2391d136fceea1c901db50d2db8ae +Subproject commit d16fbb42f4208332129a7f139e4787621d4784bc diff --git a/go.mod b/go.mod index cdfae4df16..5f7e4a5af3 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -35,6 +36,7 @@ require ( github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.8.2 github.com/wealdtech/go-merkletree v1.0.0 
golang.org/x/term v0.6.0 golang.org/x/tools v0.7.0 @@ -74,7 +76,6 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -220,6 +221,7 @@ require ( github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -272,6 +274,7 @@ require ( google.golang.org/grpc v1.53.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect ) diff --git a/go.sum b/go.sum index db81b3a07e..938b5851c0 100644 --- a/go.sum +++ b/go.sum @@ -1566,6 +1566,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod 
h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=