From 66a159f0c92c0126d71419f602dff81a8a8a0af6 Mon Sep 17 00:00:00 2001
From: Diego
Date: Wed, 13 Dec 2023 14:23:55 -0500
Subject: [PATCH] Working block validation

- Runs Nitro with Celestia DA and block validation
- Also makes some fixes in git submodules
---
 .gitmodules                      |  1 +
 arbnode/node.go                  |  1 -
 arbstate/inbox.go                |  8 +---
 cmd/replay/main.go               | 67 +++++++++++++++++++++-----------
 das/celestia/celestia.go         | 20 +++-------
 das/celestia/tree/hash.go        |  7 +++-
 das/celestia/tree/merkle_tree.go |  2 +-
 das/celestia/tree/nmt.go         | 12 +++---
 das/celestia/tree/nmt_wrapper.go | 10 ++---
 9 files changed, 68 insertions(+), 60 deletions(-)

diff --git a/.gitmodules b/.gitmodules
index 957d87f995..6026c2bc75 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -20,3 +20,4 @@
 [submodule "nitro-testnode"]
 	path = nitro-testnode
 	url = https://github.com/celestiaorg/nitro-testnode.git
+	branch = celestia
diff --git a/arbnode/node.go b/arbnode/node.go
index 82c6d90567..c15fc527f2 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -769,7 +769,6 @@ func createNodeImpl(
 	} else if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee {
 		return nil, errors.New("a data availability service is required for this chain, but it was not configured")
 	} else if config.Celestia.Enable {
-		log.Info("Celestia AUTH token", "auth", config.Celestia.AuthToken)
 		celestiaService, err := celestia.NewCelestiaDA(config.Celestia)
 		if err != nil {
 			return nil, err
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 8b56a4bdc0..5629c73142 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -287,26 +287,21 @@ func RecoverPayloadFromCelestiaBatch(
 		return nil, err
 	}
 
-	log.Info("Attempting to fetch data for", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)
 	payload, squareData, err := celestiaReader.Read(ctx, blobPointer)
 	if err != nil {
 		log.Error("Failed to resolve blob pointer from celestia", "err", err)
 		return nil, err
 	}
-	log.Info("Succesfully fetched payload from Celestia", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)
+	//log.Info("Successfully fetched payload from Celestia", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)
 
 	// check what we actually need from eds, make a new struct that can be filled given the preimages
 	if sha256Preimages != nil {
-		log.Info("Recording Sha256 preimage for Celestia data")
 		if squareData == nil {
 			log.Error("squareData is nil, read from replay binary, but preimages are empty")
 			return nil, err
 		}
 
-		// Compute row roots for the rows that contain our data
-		log.Info("Computing NMT roots", "square_size", squareData.SquareSize, "blob_pointer_start", blobPointer.Start, "startRow", squareData.StartRow, "endRow", squareData.EndRow)
-
 		rowIndex := squareData.StartRow
 		squareSize := squareData.SquareSize
 		for _, row := range squareData.Rows {
@@ -339,7 +334,6 @@ func RecoverPayloadFromCelestiaBatch(
 			log.Error("Data Root do not match", "blobPointer data root", blobPointer.DataRoot, "calculated", dataRoot)
 			return nil, err
 		}
-		log.Info("Succesfully compute roots and populated preimage mapping", "original_dataRoot", blobPointer.DataRoot, "computed_dataRoot", dataRoot)
 	}
 
 	return payload, nil
diff --git a/cmd/replay/main.go b/cmd/replay/main.go
index 7f7fc42b6c..26ab9faa79 100644
--- a/cmd/replay/main.go
+++ b/cmd/replay/main.go
@@ -6,6 +6,7 @@ package main
 import (
 	"bytes"
 	"context"
+	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
@@ -123,8 +124,6 @@ type PreimageCelestiaReader struct {
 }
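 
+// Read reconstructs a blob from the preimages recorded when the batch was
+// posted: the data root's Merkle tree and the row NMTs are resolved through
+// the wavmio preimage oracle instead of a live Celestia node.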
 func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer celestia.BlobPointer) ([]byte, *celestia.SquareData, error) {
-	// write Merkle oracle
-	// write NMT oracle
 	oracle := func(hash common.Hash) ([]byte, error) {
 		return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash)
 	}
@@ -136,43 +135,65 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer c
 		return nil, nil, err
 	}
 
-	squareSize := uint64((len(leaves) / 2))
+	squareSize := uint64(len(leaves)) / 2
 	// split leaves in half to get row roots
 	rowRoots := leaves[:squareSize]
-	startRow := blobPointer.Start / squareSize
-	endRow := (blobPointer.Start + blobPointer.SharesLength) / squareSize
+	// We get the original data square size, which is (size_of_the_extended_square / 2)
+	odsSize := squareSize / 2
 
-	// get rows behind row root and shares for our blob
-	startIndex := blobPointer.Start - (squareSize * (startRow))
-	endIndex := (blobPointer.Start + blobPointer.SharesLength) - (squareSize * (startRow))
+	startRow := blobPointer.Start / odsSize
+	endRow := (blobPointer.Start + blobPointer.SharesLength) / odsSize
+
+	startIndex := (blobPointer.Start - (odsSize * (startRow)))
+
+	endIndex := (blobPointer.Start + blobPointer.SharesLength) - (odsSize * (endRow))
+
+	// get rows behind row root and shares for our blob
+	rows := [][][]byte{}
 	shares := [][]byte{}
 	for i := startRow; i <= endRow; i++ {
-		rowShares, err := tree.NmtContent(oracle, rowRoots[i])
+		row, err := tree.NmtContent(oracle, rowRoots[i])
 		if err != nil {
-			log.Warn("Error revealing contents behind row root", "row", i, "row root", rowRoots[i], "err", err)
 			return nil, nil, err
 		}
+		rows = append(rows, row)
 		if startRow == endRow {
-			shares = append(shares, rowShares[startIndex:endIndex]...)
+			shares = append(shares, row[startIndex:endIndex]...)
 		} else if i == startRow {
-			shares = append(shares, rowShares[startIndex:]...)
+			shares = append(shares, row[startIndex:odsSize]...)
 		} else if i == endRow {
-			shares = append(shares, rowShares[:endRow]...)
+			shares = append(shares, row[:endIndex]...)
 		} else {
-			shares = append(shares, rowShares[:]...)
+			shares = append(shares, row[:odsSize]...)
 		}
 	}
 
 	data := []byte{}
-	for _, share := range shares {
-		// 1 byte for the leaf prefix and then 29 bytes for the namespace ID
-		data = append(data, share[30:]...)
+	sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5])
+	for i, share := range shares {
+		// trim the extra namespace prepended by the NMT wrapper
+		share := share[29:]
+		if i == 0 {
+			data = append(data, share[tree.NamespaceSize+5:]...)
+			continue
+		}
+		data = append(data, share[tree.NamespaceSize+1:]...)
+	}
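+
+	// each leaf revealed above is (extra namespace || share), and a share is
+	// (29-byte namespace || info byte || [4-byte sequence length] || payload);
+	// the sequence length is only present in a blob's first share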
-	return data, nil, nil
+	// trim the zero-padding off the final share
+	data = data[:sequenceLength]
+	squareData := celestia.SquareData{
+		RowRoots:    rowRoots,
+		ColumnRoots: leaves[squareSize:],
+		Rows:        rows,
+		SquareSize:  squareSize,
+		StartRow:    startRow,
+		EndRow:      endRow,
+	}
+	return data, &squareData, nil
 }
 
 // To generate:
@@ -239,11 +260,11 @@ func main() {
 		dasReader = &PreimageCelestiaReader{}
 	}
 	backend := WavmInbox{}
-	var keysetValidationMode = arbstate.KeysetPanicIfInvalid
-	if backend.GetPositionWithinMessage() > 0 {
-		keysetValidationMode = arbstate.KeysetDontValidate
-	}
-	inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, nil, dasReader, keysetValidationMode)
+	// var keysetValidationMode = arbstate.KeysetPanicIfInvalid
+	// if backend.GetPositionWithinMessage() > 0 {
+	// 	keysetValidationMode = arbstate.KeysetDontValidate
+	// }
+	inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, nil, dasReader, arbstate.KeysetDontValidate)
 	ctx := context.Background()
 	message, err := inboxMultiplexer.Pop(ctx)
 	if err != nil {
diff --git a/das/celestia/celestia.go b/das/celestia/celestia.go
index 5d3768ae0c..97b556b2d8 100644
--- a/das/celestia/celestia.go
+++ b/das/celestia/celestia.go
@@ -74,12 +74,12 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
 		log.Warn("Error creating blob", "err", err)
 		return nil, false, err
 	}
+
 	commitment, err := blob.CreateCommitment(dataBlob)
 	if err != nil {
 		log.Warn("Error creating commitment", "err", err)
 		return nil, false, err
 	}
-	log.Info("Blob to be submitted: ", "blob", []*blob.Blob{dataBlob})
 	height, err := c.client.Blob.Submit(ctx, []*blob.Blob{dataBlob}, openrpc.DefaultSubmitOptions())
 	if err != nil {
 		log.Warn("Blob Submission error", "err", err)
 		return nil, false, err
 	}
@@ -91,24 +91,19 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
 	}
 
 	// how long do we have to wait to retrieve a proof?
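+	// fetch the share inclusion proof and verify it against the commitment so
+	// we only serialize a pointer to data that actually landed at this height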
-	log.Info("Retrieving Proof from Celestia", "height", height, "commitment", commitment)
+	//log.Info("Retrieving Proof from Celestia", "height", height, "commitment", commitment)
 	proofs, err := c.client.Blob.GetProof(ctx, height, c.namespace, commitment)
 	if err != nil {
 		log.Warn("Error retrieving proof", "err", err)
 		return nil, false, err
 	}
 
-	log.Info("Checking for inclusion", "height", height, "commitment", commitment)
 	included, err := c.client.Blob.Included(ctx, height, c.namespace, proofs, commitment)
 	if err != nil {
 		log.Warn("Error checking for inclusion", "err", err, "proof", proofs)
 		return nil, included, err
 	}
-	log.Info("Sucesfully posted data to Celestia", "height", height, "commitment", commitment)
-
-	log.Info("Retrieving data root for height ", "height", height)
-
 	header, err := c.client.Header.GetByHeight(ctx, height)
 	if err != nil {
 		log.Warn("Header retrieval error", "err", err)
@@ -162,7 +157,6 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
 	}
 	serializedBlobPointerData := buf.Bytes()
-	log.Info("Succesfully serialized Blob Pointer", "height", height, "commitment", commitment, "data root", header.DataHash)
 	log.Trace("celestia.CelestiaDA.Store", "serialized_blob_pointer", serializedBlobPointerData)
 
 	return serializedBlobPointerData, included, nil
@@ -179,8 +173,6 @@ type SquareData struct {
 }
 
 func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, *SquareData, error) {
-	log.Info("Requesting data from Celestia", "namespace", c.cfg.NamespaceId, "height", blobPointer.BlockHeight)
-
 	blob, err := c.client.Blob.Get(ctx, blobPointer.BlockHeight, c.namespace, blobPointer.TxCommitment)
 	if err != nil {
 		return nil, nil, err
@@ -197,9 +189,9 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte,
 	}
 
 	squareSize := uint64(eds.Width())
-	startRow := blobPointer.Start / squareSize
-	log.Info("End Row", "blobPointer.Start", blobPointer.Start, "shares length", blobPointer.SharesLength, "squareSize", squareSize)
-	endRow := (blobPointer.Start + blobPointer.SharesLength) / squareSize
+	odsSquareSize := squareSize / 2
+	startRow := blobPointer.Start / odsSquareSize
+	endRow := (blobPointer.Start + blobPointer.SharesLength) / odsSquareSize
 
 	rows := [][][]byte{}
 	for i := startRow; i <= endRow; i++ {
@@ -215,7 +207,5 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte,
 		EndRow:      endRow,
 	}
 
-	log.Info("Succesfully fetched data from Celestia", "namespace", c.cfg.NamespaceId, "height", blobPointer.BlockHeight, "commitment", blob.Commitment)
-
 	return blob.Data, &squareData, nil
 }
diff --git a/das/celestia/tree/hash.go b/das/celestia/tree/hash.go
index ef5e135f0e..528f1b5bf0 100644
--- a/das/celestia/tree/hash.go
+++ b/das/celestia/tree/hash.go
@@ -21,11 +21,16 @@ func emptyHash() []byte {
 func leafHash(record func(bytes32, []byte), leaf []byte) []byte {
 	preimage := append(leafPrefix, leaf...)
 	hash := tmhash.Sum(preimage)
+
 	record(common.BytesToHash(hash), preimage)
 	return hash
 }
 
 // returns tmhash(0x01 || left || right)
-func innerHash(left []byte, right []byte) []byte {
-	return tmhash.Sum(append(innerPrefix, append(left, right...)...))
+func innerHash(record func(bytes32, []byte), left []byte, right []byte) []byte {
+	preimage := append(innerPrefix, append(left, right...)...)
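+	// hash once and record the preimage so the replay oracle can reveal this
+	// inner node's children from its hash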
+	hash := tmhash.Sum(preimage)
+
+	record(common.BytesToHash(hash), preimage)
+	return hash
 }
diff --git a/das/celestia/tree/merkle_tree.go b/das/celestia/tree/merkle_tree.go
index 3a8c267f61..6d46ebeef0 100644
--- a/das/celestia/tree/merkle_tree.go
+++ b/das/celestia/tree/merkle_tree.go
@@ -22,7 +22,7 @@ func HashFromByteSlices(record func(bytes32, []byte), items [][]byte) []byte {
 		k := getSplitPoint(int64(len(items)))
 		left := HashFromByteSlices(record, items[:k])
 		right := HashFromByteSlices(record, items[k:])
-		return innerHash(left, right)
+		return innerHash(record, left, right)
 	}
 }
diff --git a/das/celestia/tree/nmt.go b/das/celestia/tree/nmt.go
index 5e364858b4..6fe1e04a66 100644
--- a/das/celestia/tree/nmt.go
+++ b/das/celestia/tree/nmt.go
@@ -1,7 +1,6 @@
 package tree
 
 import (
-	"bytes"
 	"errors"
 
 	"github.com/celestiaorg/rsmt2d"
@@ -40,10 +39,11 @@ func isComplete(shares [][]byte) bool {
 // note that a leaf has the format minNID || maxNID || hash, here hash is the hash of the left and right
 // (NodePrefix) || (leftMinNID || leftMaxNID || leftHash) || (rightMinNID || rightMaxNID || rightHash)
 func getNmtChildrenHashes(hash []byte) (leftChild, rightChild []byte) {
-	flagLen := NamespaceSize * 2
+	hash = hash[1:]
+	flagLen := int(NamespaceSize * 2)
 	sha256Len := 32
-	leftChild = hash[1 : flagLen+sha256Len]
-	rightChild = hash[flagLen+sha256Len+1:]
+	leftChild = hash[:flagLen+sha256Len]
+	rightChild = hash[flagLen+sha256Len:]
 	return leftChild, rightChild
 }
@@ -54,10 +54,8 @@ func NmtContent(oracle func(bytes32) ([]byte, error), rootHash []byte) ([][]byte
 		return nil, err
 	}
 
-	minNid := rootHash[:NamespaceSize]
-	maxNid := rootHash[NamespaceSize : NamespaceSize*2]
 	// check if the hash corresponds to a leaf
-	if bytes.Equal(minNid, maxNid) {
+	if preimage[0] == leafPrefix[0] {
 		// returns the data with the namespace ID prepended
 		return [][]byte{preimage[1:]}, nil
 	}
diff --git a/das/celestia/tree/nmt_wrapper.go b/das/celestia/tree/nmt_wrapper.go
index 2606994792..501c21acfe 100644
--- a/das/celestia/tree/nmt_wrapper.go
+++ b/das/celestia/tree/nmt_wrapper.go
@@ -13,9 +13,9 @@ import (
 // NMT Wrapper from celestia-app with support for populating a mapping of preimages
 const (
-	NamespaceSize       = 29
-	NamespaceIDSize     = 28
-	NamespaceVersionMax = math.MaxUint8
+	NamespaceSize       uint64 = 29
+	NamespaceIDSize            = 28
+	NamespaceVersionMax        = math.MaxUint8
 )
 
 // Fulfills the rsmt2d.Tree interface and rsmt2d.TreeConstructorFn function
@@ -122,10 +122,10 @@ func (w *ErasuredNamespacedMerkleTree) Push(data []byte) error {
 		return fmt.Errorf("pushed past predetermined square size: boundary at %d index at %d %d", 2*w.squareSize, w.axisIndex, w.shareIndex)
 	}
 	//
-	if len(data) < NamespaceSize {
+	if len(data) < int(NamespaceSize) {
 		return fmt.Errorf("data is too short to contain namespace ID")
 	}
-	nidAndData := make([]byte, NamespaceSize+len(data))
+	nidAndData := make([]byte, int(NamespaceSize)+len(data))
 	copy(nidAndData[NamespaceSize:], data)
 	// use the parity namespace if the cell is not in Q0 of the extended data square
 	if w.isQuadrantZero() {