Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore!: break out BlobTx and IndexWrapper types into separate tx package #97

Merged
merged 1 commit into from
Jul 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,10 @@
Package | Description
----------|---------------------------------------------------------------------------------------------------------------------
inclusion | Package inclusion contains functions to generate the blob share commitment from a given blob.
proto | Package contains proto definitions and Go-generated code
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[question] does this proto package actually exist? It looks like the proto definitions for BlobTx and IndexWrapper are still inside proto/blob/v1

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

put another way: there is no Go package named proto

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, but there is generated code in the proto directory and that's what I was referring to. I'm happy to remove it

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought this list was just Go packages but I can see an argument for expanding this list to all directories and not just Go packages

share | Package share contains encoding and decoding logic from blobs to shares.
square | Package square implements the logic to construct the original data square based on a list of transactions.
tx | Package tx contains BlobTx and IndexWrapper types

## Installation

Expand Down
7 changes: 2 additions & 5 deletions builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"github.com/celestiaorg/go-square/v2/inclusion"
v1 "github.com/celestiaorg/go-square/v2/proto/blob/v1"
"github.com/celestiaorg/go-square/v2/share"
"github.com/celestiaorg/go-square/v2/tx"
"golang.org/x/exp/constraints"
"google.golang.org/protobuf/proto"
)
Expand Down Expand Up @@ -88,11 +89,7 @@ func (b *Builder) AppendTx(tx []byte) bool {
// AppendBlobTx attempts to allocate the blob transaction to the square. It returns false if there is not
// enough space in the square to fit the transaction.
func (b *Builder) AppendBlobTx(blobTx *share.BlobTx) bool {
iw := &v1.IndexWrapper{
Tx: blobTx.Tx,
TypeId: share.ProtoIndexWrapperTypeID,
ShareIndexes: worstCaseShareIndexes(len(blobTx.Blobs)),
}
iw := tx.NewIndexWrapper(blobTx.Tx, worstCaseShareIndexes(len(blobTx.Blobs))...)
size := proto.Size(iw)
pfbShareDiff := b.PfbCounter.Add(size)

Expand Down
5 changes: 3 additions & 2 deletions builder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/celestiaorg/go-square/v2"
"github.com/celestiaorg/go-square/v2/internal/test"
"github.com/celestiaorg/go-square/v2/share"
"github.com/celestiaorg/go-square/v2/tx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
Expand Down Expand Up @@ -303,8 +304,8 @@ func TestSquareBlobPostions(t *testing.T) {
require.NoError(t, err)
txs, err := share.ParseTxs(square)
require.NoError(t, err)
for j, tx := range txs {
wrappedPFB, isWrappedPFB := share.UnmarshalIndexWrapper(tx)
for j, rawTx := range txs {
wrappedPFB, isWrappedPFB := tx.UnmarshalIndexWrapper(rawTx)
assert.True(t, isWrappedPFB)
assert.Equal(t, tt.expectedIndexes[j], wrappedPFB.ShareIndexes, j)
}
Expand Down
13 changes: 12 additions & 1 deletion inclusion/commitment.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ type MerkleRootFn func([][]byte) []byte
// [data square layout rationale]: ../../specs/src/specs/data_square_layout.md
// [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules
func CreateCommitment(blob *sh.Blob, merkleRootFn MerkleRootFn, subtreeRootThreshold int) ([]byte, error) {
shares, err := sh.SplitBlobs(blob)
shares, err := splitBlobs(blob)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -106,3 +106,14 @@ func MerkleMountainRangeSizes(totalSize, maxTreeSize uint64) ([]uint64, error) {

return treeSizes, nil
}

// splitBlobs splits the provided blobs into shares.
// It returns an error as soon as any single blob fails to be written.
func splitBlobs(blobs ...*sh.Blob) ([]sh.Share, error) {
	writer := sh.NewSparseShareSplitter()
	for _, blob := range blobs {
		if err := writer.Write(blob); err != nil {
			return nil, err
		}
	}
	// Export finalizes the splitter and returns all accumulated shares.
	return writer.Export(), nil
}
34 changes: 20 additions & 14 deletions share/compact_shares_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func Test_processCompactShares(t *testing.T) {
t.Run(fmt.Sprintf("%s idendically sized", tc.name), func(t *testing.T) {
txs := generateRandomTxs(tc.txCount, tc.txSize)

shares, _, _, err := SplitTxs(txs)
shares, _, err := splitTxs(txs)
require.NoError(t, err)

parsedTxs, err := parseCompactShares(shares, SupportedShareVersions)
Expand All @@ -95,7 +95,7 @@ func Test_processCompactShares(t *testing.T) {
t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
txs := generateRandomlySizedTxs(tc.txCount, tc.txSize)

txShares, _, _, err := SplitTxs(txs)
txShares, _, err := splitTxs(txs)
require.NoError(t, err)
parsedTxs, err := parseCompactShares(txShares, SupportedShareVersions)
if err != nil {
Expand All @@ -110,18 +110,9 @@ func Test_processCompactShares(t *testing.T) {
}
}

func TestAllSplit(t *testing.T) {
txs := generateRandomlySizedTxs(1000, 150)
txShares, _, _, err := SplitTxs(txs)
require.NoError(t, err)
resTxs, err := ParseTxs(txShares)
require.NoError(t, err)
assert.Equal(t, resTxs, txs)
}

func TestParseRandomOutOfContextShares(t *testing.T) {
txs := generateRandomlySizedTxs(1000, 150)
txShares, _, _, err := SplitTxs(txs)
txShares, _, err := splitTxs(txs)
require.NoError(t, err)

for i := 0; i < 1000; i++ {
Expand Down Expand Up @@ -160,7 +151,7 @@ func checkSubArray(txList [][]byte, subTxList [][]byte) bool {

func TestParseOutOfContextSharesUsingShareRanges(t *testing.T) {
txs := generateRandomlySizedTxs(1000, 150)
txShares, _, shareRanges, err := SplitTxs(txs)
txShares, shareRanges, err := splitTxs(txs)
require.NoError(t, err)

for key, r := range shareRanges {
Expand Down Expand Up @@ -228,7 +219,7 @@ func Test_parseCompactSharesErrors(t *testing.T) {
}

txs := generateRandomTxs(2, ContinuationCompactShareContentSize*4)
txShares, _, _, err := SplitTxs(txs)
txShares, _, err := splitTxs(txs)
require.NoError(t, err)
rawShares := ToBytes(txShares)

Expand Down Expand Up @@ -268,3 +259,18 @@ func generateRandomlySizedTxs(count, maxSize int) [][]byte {
}
return txs
}

// splitTxs writes each transaction through a compact share splitter and
// returns the resulting shares together with the per-transaction share
// ranges (computed relative to share index 0). It returns an error if
// writing any transaction or exporting the shares fails.
func splitTxs(txs [][]byte) ([]Share, map[[sha256.Size]byte]Range, error) {
	splitter := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
	for _, rawTx := range txs {
		if err := splitter.WriteTx(rawTx); err != nil {
			return nil, nil, err
		}
	}
	shares, err := splitter.Export()
	if err != nil {
		return nil, nil, err
	}
	return shares, splitter.ShareRanges(0), nil
}
20 changes: 15 additions & 5 deletions share/parse_sparse_shares_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func Test_parseSparseShares(t *testing.T) {

SortBlobs(blobs)

shares, err := SplitBlobs(blobs...)
shares, err := splitBlobs(blobs...)
require.NoError(t, err)
parsedBlobs, err := parseSparseShares(shares, SupportedShareVersions)
if err != nil {
Expand All @@ -74,8 +74,8 @@ func Test_parseSparseShares(t *testing.T) {

// run the same tests using randomly sized blobs with caps of tc.blobSize
t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
blobs := GenerateRandomlySizedBlobs(tc.blobCount, tc.blobSize)
shares, err := SplitBlobs(blobs...)
blobs := generateRandomlySizedBlobs(tc.blobCount, tc.blobSize)
shares, err := splitBlobs(blobs...)
require.NoError(t, err)
parsedBlobs, err := parseSparseShares(shares, SupportedShareVersions)
if err != nil {
Expand Down Expand Up @@ -154,7 +154,7 @@ func Test_parseSparseSharesWithNamespacedPadding(t *testing.T) {
func Test_parseShareVersionOne(t *testing.T) {
v1blob, err := NewV1Blob(MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize)), []byte("data"), bytes.Repeat([]byte{1}, SignerSize))
require.NoError(t, err)
v1shares, err := SplitBlobs(v1blob)
v1shares, err := splitBlobs(v1blob)
require.NoError(t, err)

parsedBlobs, err := parseSparseShares(v1shares, SupportedShareVersions)
Expand All @@ -181,7 +181,7 @@ func generateRandomBlob(dataSize int) *Blob {
return generateRandomBlobWithNamespace(ns, dataSize)
}

func GenerateRandomlySizedBlobs(count, maxBlobSize int) []*Blob {
func generateRandomlySizedBlobs(count, maxBlobSize int) []*Blob {
blobs := make([]*Blob, count)
for i := 0; i < count; i++ {
blobs[i] = generateRandomBlob(rand.Intn(maxBlobSize-1) + 1)
Expand All @@ -198,3 +198,13 @@ func GenerateRandomlySizedBlobs(count, maxBlobSize int) []*Blob {
SortBlobs(blobs)
return blobs
}

// splitBlobs converts the given blobs into their sparse-share
// representation, failing fast on the first blob that cannot be written.
func splitBlobs(blobs ...*Blob) ([]Share, error) {
	splitter := NewSparseShareSplitter()
	for _, b := range blobs {
		if err := splitter.Write(b); err != nil {
			return nil, err
		}
	}
	return splitter.Export(), nil
}
6 changes: 3 additions & 3 deletions share/parse_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,17 @@ func TestParseShares(t *testing.T) {
ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
ns2 := MustNewV0Namespace(bytes.Repeat([]byte{2}, NamespaceVersionZeroIDSize))

txShares, _, _, err := SplitTxs(generateRandomTxs(2, 1000))
txShares, _, err := splitTxs(generateRandomTxs(2, 1000))
require.NoError(t, err)
txShareStart := txShares[0]
txShareContinuation := txShares[1]

blobOneShares, err := SplitBlobs(generateRandomBlobWithNamespace(ns1, 1000))
blobOneShares, err := splitBlobs(generateRandomBlobWithNamespace(ns1, 1000))
require.NoError(t, err)
blobOneStart := blobOneShares[0]
blobOneContinuation := blobOneShares[1]

blobTwoShares, err := SplitBlobs(generateRandomBlobWithNamespace(ns2, 1000))
blobTwoShares, err := splitBlobs(generateRandomBlobWithNamespace(ns2, 1000))
require.NoError(t, err)
blobTwoStart := blobTwoShares[0]
blobTwoContinuation := blobTwoShares[1]
Expand Down
19 changes: 12 additions & 7 deletions share/share_benchmark_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,16 @@ func BenchmarkBlobsToShares(b *testing.B) {
b.Run(fmt.Sprintf("ShareEncoding%dBlobs%dBytes", numBlobs, size), func(b *testing.B) {
b.ReportAllocs()
blobs := test.GenerateBlobs(test.Repeat(size, numBlobs)...)

b.ResetTimer()
for i := 0; i < b.N; i++ {
// Convert blob to shares
_, err := share.SplitBlobs(blobs...)
if err != nil {
b.Fatal("Failed to split blob into shares:", err)
writer := share.NewSparseShareSplitter()
for _, blob := range blobs {
if err := writer.Write(blob); err != nil {
b.Fatal("Failed to write blob into shares:", err)
}
}
_ = writer.Export()
}
})
}
Expand All @@ -38,10 +40,13 @@ func BenchmarkSharesToBlobs(b *testing.B) {
b.Run(fmt.Sprintf("ShareDecoding%dBlobs%dBytes", numBlobs, size), func(b *testing.B) {
b.ReportAllocs()
blobs := test.GenerateBlobs(test.Repeat(size, numBlobs)...)
s, err := share.SplitBlobs(blobs...)
if err != nil {
b.Fatal("Failed to split blob into shares:", err)
writer := share.NewSparseShareSplitter()
for _, blob := range blobs {
if err := writer.Write(blob); err != nil {
b.Fatal("Failed to write blob into shares:", err)
}
}
s := writer.Export()

b.ResetTimer()
for i := 0; i < b.N; i++ {
Expand Down
5 changes: 5 additions & 0 deletions share/share_sequence_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,3 +250,8 @@ func FuzzValidSequenceLen(f *testing.F) {
assert.NoError(t, err)
})
}

// padShare returns the given share with its unused content padded using
// trailing zero bytes.
func padShare(share Share) Share {
	return fillShare(share, 0)
}
93 changes: 0 additions & 93 deletions share/share_splitting.go

This file was deleted.

Loading
Loading