Skip to content

Commit

Permalink
Working block validation
Browse files Browse the repository at this point in the history
- Runs Nitro with Celestia DA and block validation
- Also makes some fixes in git submodules
  • Loading branch information
Ferret-san committed Dec 13, 2023
1 parent f1a95c5 commit 66a159f
Show file tree
Hide file tree
Showing 9 changed files with 68 additions and 60 deletions.
1 change: 1 addition & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,4 @@
[submodule "nitro-testnode"]
path = nitro-testnode
url = https://github.com/celestiaorg/nitro-testnode.git
branch = celestia
1 change: 0 additions & 1 deletion arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -769,7 +769,6 @@ func createNodeImpl(
} else if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee {
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
} else if config.Celestia.Enable {
log.Info("Celestia AUTH token", "auth", config.Celestia.AuthToken)
celestiaService, err := celestia.NewCelestiaDA(config.Celestia)
if err != nil {
return nil, err
Expand Down
8 changes: 1 addition & 7 deletions arbstate/inbox.go
Original file line number Diff line number Diff line change
Expand Up @@ -287,26 +287,21 @@ func RecoverPayloadFromCelestiaBatch(
return nil, err
}

log.Info("Attempting to fetch data for", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)
payload, squareData, err := celestiaReader.Read(ctx, blobPointer)
if err != nil {
log.Error("Failed to resolve blob pointer from celestia", "err", err)
return nil, err
}

log.Info("Succesfully fetched payload from Celestia", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)
//log.Info("Successfully fetched payload from Celestia", "batchNum", batchNum, "celestiaHeight", blobPointer.BlockHeight)

// check what we actually need from eds, make a new struct that can be filled given the preimages
if sha256Preimages != nil {
log.Info("Recording Sha256 preimage for Celestia data")
if squareData == nil {
log.Error("squareData is nil, read from replay binary, but preimages are empty")
return nil, err
}

// Compute row roots for the rows that contain our data
log.Info("Computing NMT roots", "square_size", squareData.SquareSize, "blob_pointer_start", blobPointer.Start, "startRow", squareData.StartRow, "endRow", squareData.EndRow)

rowIndex := squareData.StartRow
squareSize := squareData.SquareSize
for _, row := range squareData.Rows {
Expand Down Expand Up @@ -339,7 +334,6 @@ func RecoverPayloadFromCelestiaBatch(
log.Error("Data Root do not match", "blobPointer data root", blobPointer.DataRoot, "calculated", dataRoot)
return nil, err
}
log.Info("Succesfully compute roots and populated preimage mapping", "original_dataRoot", blobPointer.DataRoot, "computed_dataRoot", dataRoot)
}

return payload, nil
Expand Down
67 changes: 44 additions & 23 deletions cmd/replay/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ package main
import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
Expand Down Expand Up @@ -123,8 +124,6 @@ type PreimageCelestiaReader struct {
}

func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer celestia.BlobPointer) ([]byte, *celestia.SquareData, error) {
// write Merkle oracle
// write NMT oracle
oracle := func(hash common.Hash) ([]byte, error) {
return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash)
}
Expand All @@ -136,43 +135,65 @@ func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer c
return nil, nil, err
}

squareSize := uint64((len(leaves) / 2))
squareSize := uint64(len(leaves)) / 2
// split leaves in half to get row roots
rowRoots := leaves[:squareSize]

startRow := blobPointer.Start / squareSize
endRow := (blobPointer.Start + blobPointer.SharesLength) / squareSize
// We get the original data square size, which is (size_of_the_extended_square / 2)
odsSize := squareSize / 2

// get rows behind row root and shares for our blob
startRow := blobPointer.Start / odsSize
endRow := (blobPointer.Start + blobPointer.SharesLength) / odsSize

startIndex := (blobPointer.Start - (odsSize * (startRow)))

endIndex := (blobPointer.Start + blobPointer.SharesLength) - (odsSize * (endRow))

startIndex := blobPointer.Start - (squareSize * (startRow))
endIndex := (blobPointer.Start + blobPointer.SharesLength) - (squareSize * (startRow))
// get rows behind row root and shares for our blob
rows := [][][]byte{}
shares := [][]byte{}
for i := startRow; i <= endRow; i++ {
rowShares, err := tree.NmtContent(oracle, rowRoots[i])
row, err := tree.NmtContent(oracle, rowRoots[i])
if err != nil {
log.Warn("Error revealing contents behind row root", "row", i, "row root", rowRoots[i], "err", err)
return nil, nil, err
}
rows = append(rows, row)

if startRow == endRow {
shares = append(shares, rowShares[startIndex:endIndex]...)
shares = append(shares, row[startIndex:endIndex]...)
} else if i == startRow {
shares = append(shares, rowShares[startIndex:]...)
shares = append(shares, row[startIndex:odsSize]...)
} else if i == endRow {
shares = append(shares, rowShares[:endRow]...)
shares = append(shares, row[:endIndex]...)
} else {
shares = append(shares, rowShares[:]...)
shares = append(shares, row[:odsSize]...)
}
}

data := []byte{}
for _, share := range shares {
// 1 byte for the leaf prefix and then 29 bytes for the namespace ID
data = append(data, share[30:]...)
sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5])
for i, share := range shares {
// trim extra namespace
share := share[29:]
if i == 0 {
data = append(data, share[tree.NamespaceSize+5:]...)
continue
}
data = append(data, share[tree.NamespaceSize+1:]...)

}

return data, nil, nil
// TODO return Square Data
data = data[:sequenceLength]
squareData := celestia.SquareData{
RowRoots: rowRoots,
ColumnRoots: leaves[squareSize:],
Rows: rows,
SquareSize: squareSize,
StartRow: startRow,
EndRow: endRow,
}
return data, &squareData, nil
}

// To generate:
Expand Down Expand Up @@ -239,11 +260,11 @@ func main() {
dasReader = &PreimageCelestiaReader{}
}
backend := WavmInbox{}
var keysetValidationMode = arbstate.KeysetPanicIfInvalid
if backend.GetPositionWithinMessage() > 0 {
keysetValidationMode = arbstate.KeysetDontValidate
}
inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, nil, dasReader, keysetValidationMode)
// var keysetValidationMode = arbstate.KeysetPanicIfInvalid
// if backend.GetPositionWithinMessage() > 0 {
// keysetValidationMode = arbstate.KeysetDontValidate
// }
inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, nil, dasReader, arbstate.KeysetDontValidate)
ctx := context.Background()
message, err := inboxMultiplexer.Pop(ctx)
if err != nil {
Expand Down
20 changes: 5 additions & 15 deletions das/celestia/celestia.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,12 +74,12 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
log.Warn("Error creating blob", "err", err)
return nil, false, err
}

commitment, err := blob.CreateCommitment(dataBlob)
if err != nil {
log.Warn("Error creating commitment", "err", err)
return nil, false, err
}
log.Info("Blob to be submitted: ", "blob", []*blob.Blob{dataBlob})
height, err := c.client.Blob.Submit(ctx, []*blob.Blob{dataBlob}, openrpc.DefaultSubmitOptions())
if err != nil {
log.Warn("Blob Submission error", "err", err)
Expand All @@ -91,24 +91,19 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
}

// how long do we have to wait to retrieve a proof?
log.Info("Retrieving Proof from Celestia", "height", height, "commitment", commitment)
//log.Info("Retrieving Proof from Celestia", "height", height, "commitment", commitment)
proofs, err := c.client.Blob.GetProof(ctx, height, c.namespace, commitment)
if err != nil {
log.Warn("Error retrieving proof", "err", err)
return nil, false, err
}

log.Info("Checking for inclusion", "height", height, "commitment", commitment)
included, err := c.client.Blob.Included(ctx, height, c.namespace, proofs, commitment)
if err != nil {
log.Warn("Error checking for inclusion", "err", err, "proof", proofs)
return nil, included, err
}

log.Info("Sucesfully posted data to Celestia", "height", height, "commitment", commitment)

log.Info("Retrieving data root for height ", "height", height)

header, err := c.client.Header.GetByHeight(ctx, height)
if err != nil {
log.Warn("Header retrieval error", "err", err)
Expand Down Expand Up @@ -162,7 +157,6 @@ func (c *CelestiaDA) Store(ctx context.Context, message []byte) ([]byte, bool, e
}

serializedBlobPointerData := buf.Bytes()
log.Info("Succesfully serialized Blob Pointer", "height", height, "commitment", commitment, "data root", header.DataHash)
log.Trace("celestia.CelestiaDA.Store", "serialized_blob_pointer", serializedBlobPointerData)
return serializedBlobPointerData, included, nil

Expand All @@ -179,8 +173,6 @@ type SquareData struct {
}

func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, *SquareData, error) {
log.Info("Requesting data from Celestia", "namespace", c.cfg.NamespaceId, "height", blobPointer.BlockHeight)

blob, err := c.client.Blob.Get(ctx, blobPointer.BlockHeight, c.namespace, blobPointer.TxCommitment)
if err != nil {
return nil, nil, err
Expand All @@ -197,9 +189,9 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte,
}

squareSize := uint64(eds.Width())
startRow := blobPointer.Start / squareSize
log.Info("End Row", "blobPointer.Start", blobPointer.Start, "shares length", blobPointer.SharesLength, "squareSize", squareSize)
endRow := (blobPointer.Start + blobPointer.SharesLength) / squareSize
odsSquareSize := squareSize / 2
startRow := blobPointer.Start / odsSquareSize
endRow := (blobPointer.Start + blobPointer.SharesLength) / odsSquareSize

rows := [][][]byte{}
for i := startRow; i <= endRow; i++ {
Expand All @@ -215,7 +207,5 @@ func (c *CelestiaDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte,
EndRow: endRow,
}

log.Info("Succesfully fetched data from Celestia", "namespace", c.cfg.NamespaceId, "height", blobPointer.BlockHeight, "commitment", blob.Commitment)

return blob.Data, &squareData, nil
}
7 changes: 6 additions & 1 deletion das/celestia/tree/hash.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,16 @@ func emptyHash() []byte {
func leafHash(record func(bytes32, []byte), leaf []byte) []byte {
	// Hash the prefixed leaf (tmhash over leafPrefix || leaf — leaf prefix
	// value is defined elsewhere in this package) and remember the
	// hash -> preimage mapping via record so a preimage oracle can later
	// reveal this node during replay.
	pre := append(leafPrefix, leaf...)
	digest := tmhash.Sum(pre)
	record(common.BytesToHash(digest), pre)
	return digest
}

// returns tmhash(0x01 || left || right)
func innerHash(left []byte, right []byte) []byte {
func innerHash(record func(bytes32, []byte), left []byte, right []byte) []byte {
preimage := append(innerPrefix, append(left, right...)...)
hash := tmhash.Sum(preimage)

record(common.BytesToHash(hash), preimage)
return tmhash.Sum(append(innerPrefix, append(left, right...)...))
}
2 changes: 1 addition & 1 deletion das/celestia/tree/merkle_tree.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ func HashFromByteSlices(record func(bytes32, []byte), items [][]byte) []byte {
k := getSplitPoint(int64(len(items)))
left := HashFromByteSlices(record, items[:k])
right := HashFromByteSlices(record, items[k:])
return innerHash(left, right)
return innerHash(record, left, right)
}
}

Expand Down
12 changes: 5 additions & 7 deletions das/celestia/tree/nmt.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package tree

import (
"bytes"
"errors"

"github.com/celestiaorg/rsmt2d"
Expand Down Expand Up @@ -40,10 +39,11 @@ func isComplete(shares [][]byte) bool {
// note that a leaf has the format minNID || maxNID || hash, here hash is the hash of the left and right
// (NodePrefix) || (leftMinNID || leftMaxNID || leftHash) || (rightMinNID || rightMaxNID || rightHash)
// getNmtChildrenHashes splits an NMT inner-node preimage into its two child
// hashes. The preimage layout is
// (NodePrefix) || (leftMinNID || leftMaxNID || leftHash) || (rightMinNID || rightMaxNID || rightHash),
// so after stripping the 1-byte node prefix each child occupies
// NamespaceSize*2 namespace bytes followed by a 32-byte sha256 digest.
// NOTE(review): this span contained conflicting duplicate declarations from
// an interleaved diff (two flagLen declarations, two slicing variants);
// resolved to the variant consistent with the hash[1:] prefix strip.
func getNmtChildrenHashes(hash []byte) (leftChild, rightChild []byte) {
	hash = hash[1:] // drop the 1-byte node prefix
	flagLen := int(NamespaceSize * 2)
	sha256Len := 32
	leftChild = hash[:flagLen+sha256Len]
	rightChild = hash[flagLen+sha256Len:]
	return leftChild, rightChild
}

Expand All @@ -54,10 +54,8 @@ func NmtContent(oracle func(bytes32) ([]byte, error), rootHash []byte) ([][]byte
return nil, err
}

minNid := rootHash[:NamespaceSize]
maxNid := rootHash[NamespaceSize : NamespaceSize*2]
// check if the hash corresponds to a leaf
if bytes.Equal(minNid, maxNid) {
if preimage[0] == leafPrefix[0] {
// returns the data with the namespace ID prepended
return [][]byte{preimage[1:]}, nil
}
Expand Down
10 changes: 5 additions & 5 deletions das/celestia/tree/nmt_wrapper.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ import (
// NMT Wrapper from celestia-app with support for populating a mapping of preimages

const (
NamespaceSize = 29
NamespaceIDSize = 28
NamespaceVersionMax = math.MaxUint8
NamespaceSize uint64 = 29
NamespaceIDSize = 28
NamespaceVersionMax = math.MaxUint8
)

// Fulfills the rsmt2d.Tree interface and rsmt2d.TreeConstructorFn function
Expand Down Expand Up @@ -122,10 +122,10 @@ func (w *ErasuredNamespacedMerkleTree) Push(data []byte) error {
return fmt.Errorf("pushed past predetermined square size: boundary at %d index at %d %d", 2*w.squareSize, w.axisIndex, w.shareIndex)
}
//
if len(data) < NamespaceSize {
if len(data) < int(NamespaceSize) {
return fmt.Errorf("data is too short to contain namespace ID")
}
nidAndData := make([]byte, NamespaceSize+len(data))
nidAndData := make([]byte, int(NamespaceSize)+len(data))
copy(nidAndData[NamespaceSize:], data)
// use the parity namespace if the cell is not in Q0 of the extended data square
if w.isQuadrantZero() {
Expand Down

0 comments on commit 66a159f

Please sign in to comment.