chore!: remove NamespacedShares (#822)
Closes #721
rootulp authored Sep 30, 2022
1 parent 56e96cf commit 906fdf7
Showing 13 changed files with 188 additions and 167 deletions.
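The thrust of the change: block data now moves through the app as typed []shares.Share values instead of raw [][]byte, and raw bytes are produced only at the boundaries (erasure coding, compact-share parsing) via shares.ToBytes, with shares.FromBytes wrapping raw shares back into the typed form. The sketch below shows the shape those pieces presumably take; the names ToBytes and FromBytes come from this diff, but the Share definition and the helper bodies are assumptions, not the actual pkg/shares source.

// Assumed sketch of the typed share representation this commit builds on.
package shares

// Share is assumed to be a byte-slice-backed share (namespace, info byte,
// payload), which is consistent with the indexing and assignments that
// appear in the hunks below.
type Share []byte

// ToBytes unwraps typed shares into raw [][]byte for callers such as rsmt2d
// that still operate on plain byte slices.
func ToBytes(typed []Share) [][]byte {
    raw := make([][]byte, len(typed))
    for i, s := range typed {
        raw[i] = []byte(s)
    }
    return raw
}

// FromBytes wraps raw byte slices (for example erasure-coded parity shares)
// back into the typed representation.
func FromBytes(raw [][]byte) []Share {
    typed := make([]Share, len(raw))
    for i, b := range raw {
        typed[i] = Share(b)
    }
    return typed
}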
2 changes: 1 addition & 1 deletion app/prepare_proposal.go
@@ -59,7 +59,7 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr
}

// erasure the data square which we use to create the data root.
eds, err := da.ExtendShares(squareSize, dataSquare)
eds, err := da.ExtendShares(squareSize, shares.ToBytes(dataSquare))
if err != nil {
app.Logger().Error(
"failure to erasure the data square while creating a proposal block",
2 changes: 1 addition & 1 deletion app/process_proposal.go
@@ -50,7 +50,7 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) abci.ResponsePr
}

cacher := inclusion.NewSubtreeCacher(data.OriginalSquareSize)
eds, err := rsmt2d.ComputeExtendedDataSquare(dataSquare, appconsts.DefaultCodec(), cacher.Constructor)
eds, err := rsmt2d.ComputeExtendedDataSquare(shares.ToBytes(dataSquare), appconsts.DefaultCodec(), cacher.Constructor)
if err != nil {
logInvalidPropBlockError(app.Logger(), req.Header, "failure to erasure the data square", err)
return abci.ResponseProcessProposal{
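Both ABCI hunks above follow the same pattern: the square stays a []shares.Share inside the handlers and is unwrapped with shares.ToBytes only when it is handed to the erasure-coding layer. A minimal sketch of that boundary follows; only the ExtendShares call shape is taken from the prepare_proposal.go hunk, while the import paths, parameter types, and return type are assumptions.

// Minimal sketch of the new boundary pattern; the surrounding types and
// imports are assumed, not taken from this commit.
package app

import (
    "fmt"

    "github.com/celestiaorg/celestia-app/pkg/da"
    "github.com/celestiaorg/celestia-app/pkg/shares"
    "github.com/celestiaorg/rsmt2d"
)

func extendSquare(squareSize uint64, dataSquare []shares.Share) (*rsmt2d.ExtendedDataSquare, error) {
    // The typed shares are unwrapped to [][]byte only here, at the DA boundary.
    eds, err := da.ExtendShares(squareSize, shares.ToBytes(dataSquare))
    if err != nil {
        return nil, fmt.Errorf("extending the data square: %w", err)
    }
    return eds, nil
}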
29 changes: 14 additions & 15 deletions pkg/prove/proof.go
@@ -37,7 +37,7 @@ func TxInclusion(codec rsmt2d.Codec, data types.Data, txIndex uint64) (types.TxP
}

var proofs []*tmproto.NMTProof //nolint:prealloc // rarely will this contain more than a single proof
var shares [][]byte //nolint:prealloc // rarely will this contain more than a single share
var rawShares [][]byte //nolint:prealloc // rarely will this contain more than a single share
var rowRoots []tmbytes.HexBytes //nolint:prealloc // rarely will this contain more than a single root
for i, row := range rowShares {
// create an nmt to use to generate a proof
@@ -64,8 +64,7 @@ func TxInclusion(codec rsmt2d.Codec, data types.Data, txIndex uint64) (types.TxP
endLeafPos = data.OriginalSquareSize - 1
}

shares = append(shares, row[startLeafPos:endLeafPos+1]...)

rawShares = append(rawShares, shares.ToBytes(row[startLeafPos:endLeafPos+1])...)
proof, err := tree.Tree().ProveRange(int(startLeafPos), int(endLeafPos+1))
if err != nil {
return types.TxProof{}, err
@@ -85,7 +84,7 @@ func TxInclusion(codec rsmt2d.Codec, data types.Data, txIndex uint64) (types.TxP

return types.TxProof{
RowRoots: rowRoots,
Data: shares,
Data: rawShares,
Proofs: proofs,
}, nil
}
@@ -132,7 +131,7 @@ func txShareIndex(totalTxLen int) (index uint64) {
}

// genRowShares progressively generates data square rows from block data
func genRowShares(codec rsmt2d.Codec, data types.Data, startRow, endRow uint64) ([][][]byte, error) {
func genRowShares(codec rsmt2d.Codec, data types.Data, startRow, endRow uint64) ([][]shares.Share, error) {
if endRow > data.OriginalSquareSize {
return nil, errors.New("cannot generate row shares past the original square size")
}
@@ -141,17 +140,17 @@ func genRowShares(codec rsmt2d.Codec, data types.Data, startRow, endRow uint64)
genOrigRowShares(data, startRow, endRow),
)

encodedRowShares := make([][][]byte, len(origRowShares))
encodedRowShares := make([][]shares.Share, len(origRowShares))
for i, row := range origRowShares {
encRow, err := codec.Encode(row)
encRow, err := codec.Encode(shares.ToBytes(row))
if err != nil {
panic(err)
}
encodedRowShares[i] = append(
append(
make([][]byte, 0, len(row)+len(encRow)),
make([]shares.Share, 0, len(row)+len(encRow)),
row...,
), encRow...,
), shares.FromBytes(encRow)...,
)
}

@@ -161,7 +160,7 @@ func genRowShares(codec rsmt2d.Codec, data types.Data, startRow, endRow uint64)
// genOrigRowShares progressively generates data square rows for the original
// data square, meaning the rows are only half the full square length, as there
// is no erasure data
func genOrigRowShares(data types.Data, startRow, endRow uint64) [][]byte {
func genOrigRowShares(data types.Data, startRow, endRow uint64) []shares.Share {
wantLen := (endRow + 1) * data.OriginalSquareSize
startPos := startRow * data.OriginalSquareSize

@@ -196,17 +195,17 @@ func genOrigRowShares(data types.Data, startRow, endRow uint64) [][]byte {
}

tailShares := shares.TailPaddingShares(int(wantLen) - len(rawShares))
rawShares = append(rawShares, tailShares.RawShares()...)
rawShares = append(rawShares, tailShares...)

return rawShares[startPos:wantLen]
}

// splitIntoRows splits shares into rows of a particular square size
func splitIntoRows(squareSize uint64, shares [][]byte) [][][]byte {
rowCount := uint64(len(shares)) / squareSize
rows := make([][][]byte, rowCount)
func splitIntoRows(squareSize uint64, s []shares.Share) [][]shares.Share {
rowCount := uint64(len(s)) / squareSize
rows := make([][]shares.Share, rowCount)
for i := uint64(0); i < rowCount; i++ {
rows[i] = shares[i*squareSize : (i+1)*squareSize]
rows[i] = s[i*squareSize : (i+1)*squareSize]
}
return rows
}
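genRowShares now keeps each row as []shares.Share and drops to raw bytes only for the codec, wrapping the parity it returns with shares.FromBytes. Below is a condensed sketch of that round trip; it is a hypothetical helper under the same assumptions as above, not code from this commit, and only the Encode call shape and the ToBytes/FromBytes names are taken from the diff.

// Condensed sketch of the row-extension round trip performed above: unwrap a
// typed row for the codec, then wrap the returned parity so the extended row
// stays []shares.Share throughout.
package prove

import (
    "github.com/celestiaorg/celestia-app/pkg/shares"
    "github.com/celestiaorg/rsmt2d"
)

func extendRow(codec rsmt2d.Codec, row []shares.Share) ([]shares.Share, error) {
    parity, err := codec.Encode(shares.ToBytes(row))
    if err != nil {
        return nil, err
    }
    extended := make([]shares.Share, 0, len(row)+len(parity))
    extended = append(extended, row...)
    extended = append(extended, shares.FromBytes(parity)...)
    return extended, nil
}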
2 changes: 1 addition & 1 deletion pkg/prove/proof_test.go
@@ -226,7 +226,7 @@ func TestTxShareIndex(t *testing.T) {
// stripCompactShares strips the universal prefix (namespace, info byte, data length) and
// reserved byte from a list of compact shares and joins them into a single byte
// slice.
func stripCompactShares(compactShares [][]byte, start uint64, end uint64) (result []byte) {
func stripCompactShares(compactShares []shares.Share, start uint64, end uint64) (result []byte) {
for i := start; i <= end; i++ {
if i == 0 {
// the first compact share includes a total data length varint
19 changes: 12 additions & 7 deletions pkg/shares/compact_shares_test.go
@@ -21,8 +21,9 @@ func TestCompactShareWriter(t *testing.T) {
rawTx, _ := MarshalDelimitedTx(tx)
w.WriteBytes(rawTx)
}
resShares := w.Export()
rawResTxs, err := parseCompactShares(resShares.RawShares())
shares := w.Export()
rawShares := ToBytes(shares)
rawResTxs, err := parseCompactShares(rawShares)
resTxs := coretypes.ToTxs(rawResTxs)
require.NoError(t, err)

@@ -92,8 +93,9 @@ func Test_processCompactShares(t *testing.T) {
txs := generateRandomTransaction(tc.txCount, tc.txSize)

shares := SplitTxs(txs)
rawShares := ToBytes(shares)

parsedTxs, err := parseCompactShares(shares)
parsedTxs, err := parseCompactShares(rawShares)
if err != nil {
t.Error(err)
}
@@ -109,8 +111,9 @@ func Test_processCompactShares(t *testing.T) {
txs := generateRandomlySizedTransactions(tc.txCount, tc.txSize)

shares := SplitTxs(txs)
rawShares := ToBytes(shares)

parsedTxs, err := parseCompactShares(shares)
parsedTxs, err := parseCompactShares(rawShares)
if err != nil {
t.Error(err)
}
@@ -131,7 +134,7 @@ func TestCompactShareContainsInfoByte(t *testing.T) {
css.WriteTx(tx)
}

shares := css.Export().RawShares()
shares := css.Export()
assert.Condition(t, func() bool { return len(shares) == 1 })

infoByte := shares[0][appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0]
@@ -151,7 +154,7 @@ func TestContiguousCompactShareContainsInfoByte(t *testing.T) {
css.WriteTx(tx)
}

shares := css.Export().RawShares()
shares := css.Export()
assert.Condition(t, func() bool { return len(shares) > 1 })

infoByte := shares[1][appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0]
@@ -166,6 +169,8 @@ func TestContiguousCompactShareContainsInfoByte(t *testing.T) {
func Test_parseCompactSharesReturnsErrForShareWithStartIndicatorFalse(t *testing.T) {
txs := generateRandomTransaction(2, appconsts.ContinuationCompactShareContentSize*4)
shares := SplitTxs(txs)
_, err := parseCompactShares(shares[1:]) // the second share has the message start indicator set to false
rawShares := ToBytes(shares)

_, err := parseCompactShares(rawShares[1:]) // the second share has the message start indicator set to false
assert.Error(t, err)
}
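The tests in this file now convert with ToBytes before handing shares to parseCompactShares. The sketch below is an illustrative round-trip test following that pattern; it reuses generateRandomTransaction from the file above, and the package layout and import paths are assumptions rather than part of this commit.

// Illustrative round trip using the updated pattern: split txs into typed
// shares, unwrap them with ToBytes, and parse them back out.
package shares

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    coretypes "github.com/tendermint/tendermint/types"
)

func TestCompactShareRoundTrip(t *testing.T) {
    // generateRandomTransaction is the helper already used by the tests above.
    txs := generateRandomTransaction(10, 100)

    shares := SplitTxs(txs)
    rawShares := ToBytes(shares)

    rawParsedTxs, err := parseCompactShares(rawShares)
    require.NoError(t, err)
    assert.Equal(t, txs, coretypes.ToTxs(rawParsedTxs))
}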
42 changes: 31 additions & 11 deletions pkg/shares/share_splitting.go
@@ -1,6 +1,7 @@
package shares

import (
"bytes"
"errors"
"fmt"
"sort"
@@ -21,7 +22,7 @@ var (
// Split converts block data into encoded shares, optionally using share indexes
// that are encoded as wrapped transactions. Most use cases out of this package
// should use these share indexes and therefore set useShareIndexes to true.
func Split(data coretypes.Data, useShareIndexes bool) ([][]byte, error) {
func Split(data coretypes.Data, useShareIndexes bool) ([]Share, error) {
if data.OriginalSquareSize == 0 || !isPowerOf2(data.OriginalSquareSize) {
return nil, fmt.Errorf("square size is not a power of two: %d", data.OriginalSquareSize)
}
@@ -43,7 +44,7 @@ func Split(data coretypes.Data, useShareIndexes bool) ([][]byte, error) {
msgIndexes := ExtractShareIndexes(data.Txs)
sort.Slice(msgIndexes, func(i, j int) bool { return msgIndexes[i] < msgIndexes[j] })

var padding [][]byte
var padding []Share
if len(data.Messages.MessagesList) > 0 {
msgShareStart, _ := NextAlignedPowerOfTwo(
currentShareCount,
@@ -54,11 +55,11 @@ func Split(data coretypes.Data, useShareIndexes bool) ([][]byte, error) {
if len(evdShares) > 0 {
ns = appconsts.EvidenceNamespaceID
}
padding = namespacedPaddedShares(ns, msgShareStart-currentShareCount).RawShares()
padding = namespacedPaddedShares(ns, msgShareStart-currentShareCount)
}
currentShareCount += len(padding)

var msgShares [][]byte
var msgShares []Share
if msgIndexes != nil && int(msgIndexes[0]) < currentShareCount {
return nil, ErrUnexpectedFirstMessageShareIndex
}
@@ -68,7 +69,7 @@ func Split(data coretypes.Data, useShareIndexes bool) ([][]byte, error) {
return nil, err
}
currentShareCount += len(msgShares)
tailShares := TailPaddingShares(wantShareCount - currentShareCount).RawShares()
tailShares := TailPaddingShares(wantShareCount - currentShareCount)

// todo: optimize using a predefined slice
shares := append(append(append(append(
@@ -105,26 +106,26 @@ func ExtractShareIndexes(txs coretypes.Txs) []uint32 {
return msgIndexes
}

func SplitTxs(txs coretypes.Txs) [][]byte {
func SplitTxs(txs coretypes.Txs) []Share {
writer := NewCompactShareSplitter(appconsts.TxNamespaceID, appconsts.ShareVersion)
for _, tx := range txs {
writer.WriteTx(tx)
}
return writer.Export().RawShares()
return writer.Export()
}

func SplitEvidence(evd coretypes.EvidenceList) ([][]byte, error) {
func SplitEvidence(evd coretypes.EvidenceList) ([]Share, error) {
writer := NewCompactShareSplitter(appconsts.EvidenceNamespaceID, appconsts.ShareVersion)
for _, ev := range evd {
err := writer.WriteEvidence(ev)
if err != nil {
return nil, err
}
}
return writer.Export().RawShares(), nil
return writer.Export(), nil
}

func SplitMessages(cursor int, indexes []uint32, msgs []coretypes.Message, useShareIndexes bool) ([][]byte, error) {
func SplitMessages(cursor int, indexes []uint32, msgs []coretypes.Message, useShareIndexes bool) ([]Share, error) {
if useShareIndexes && len(indexes) != len(msgs) {
return nil, ErrIncorrectNumberOfIndexes
}
@@ -136,5 +137,24 @@ func SplitMessages(cursor int, indexes []uint32, msgs []coretypes.Message, useSh
writer.WriteNamespacedPaddedShares(paddedShareCount)
}
}
return writer.Export().RawShares(), nil
return writer.Export(), nil
}

var tailPaddingInfo, _ = NewInfoByte(appconsts.ShareVersion, false)

// tail is filler for all tail padded shares
// it is allocated once and used everywhere
var tailPaddingShare = append(append(
append(make([]byte, 0, appconsts.ShareSize), appconsts.TailPaddingNamespaceID...),
byte(tailPaddingInfo)),
bytes.Repeat([]byte{0}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.ShareInfoBytes)...,
)

// TailPaddingShares creates n tail padding shares.
func TailPaddingShares(n int) []Share {
shares := make([]Share, n)
for i := 0; i < n; i++ {
shares[i] = tailPaddingShare
}
return shares
}
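TailPaddingShares, relocated here from the removed NamespacedShares code, now returns typed shares directly; each one is the tail-padding namespace, an info byte with the start indicator unset, and zero fill. The following is an illustrative check of that layout, reusing the constants and constructors referenced above; the import paths are assumed and the test is not part of this commit.

package shares

import (
    "testing"

    "github.com/celestiaorg/celestia-app/pkg/appconsts"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// Illustrative check of the tail padding layout produced above.
func TestTailPaddingShareLayout(t *testing.T) {
    padding := TailPaddingShares(3)
    require.Len(t, padding, 3)

    expectedInfo, err := NewInfoByte(appconsts.ShareVersion, false)
    require.NoError(t, err)

    for _, share := range padding {
        // Each padding share is exactly one share wide.
        require.Len(t, share, appconsts.ShareSize)
        // It begins with the tail padding namespace...
        assert.Equal(t, []byte(appconsts.TailPaddingNamespaceID), []byte(share[:appconsts.NamespaceSize]))
        // ...followed by an info byte whose message start indicator is false.
        assert.Equal(t, byte(expectedInfo), share[appconsts.NamespaceSize])
    }
}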
14 changes: 7 additions & 7 deletions pkg/shares/share_splitting_test.go
@@ -12,18 +12,18 @@ func TestSplitTxs(t *testing.T) {
type testCase struct {
name string
txs coretypes.Txs
want [][]byte
want []Share
}
testCases := []testCase{
{
name: "empty txs",
txs: coretypes.Txs{},
want: [][]byte{},
want: []Share{},
},
{
name: "one small tx",
txs: coretypes.Txs{coretypes.Tx{0xa}},
want: [][]uint8{
want: []Share{
append([]uint8{
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // namespace id
0x1, // info byte
@@ -37,7 +37,7 @@ func TestSplitTxs(t *testing.T) {
{
name: "two small txs",
txs: coretypes.Txs{coretypes.Tx{0xa}, coretypes.Tx{0xb}},
want: [][]uint8{
want: []Share{
append([]uint8{
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // namespace id
0x1, // info byte
@@ -53,7 +53,7 @@ func TestSplitTxs(t *testing.T) {
{
name: "one large tx that spans two shares",
txs: coretypes.Txs{bytes.Repeat([]byte{0xC}, 241)},
want: [][]uint8{
want: []Share{
append([]uint8{
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // namespace id
0x1, // info byte
@@ -72,7 +72,7 @@ func TestSplitTxs(t *testing.T) {
{
name: "one small tx then one large tx that spans two shares",
txs: coretypes.Txs{coretypes.Tx{0xd}, bytes.Repeat([]byte{0xe}, 241)},
want: [][]uint8{
want: []Share{
append([]uint8{
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // namespace id
0x1, // info byte
@@ -93,7 +93,7 @@ func TestSplitTxs(t *testing.T) {
{
name: "one large tx that spans two shares then one small tx",
txs: coretypes.Txs{bytes.Repeat([]byte{0xe}, 241), coretypes.Tx{0xd}},
want: [][]uint8{
want: []Share{
append([]uint8{
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // namespace id
0x1, // info byte
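The want values in this table are raw share layouts whose trailing bytes are cut off by the collapsed regions of the diff view; compact shares are presumably zero-filled out to the full share size. A hypothetical fixture helper like the one below, not part of this commit and assuming the test file's package plus the appconsts import, is one way to keep such expected values readable.

// Hypothetical helper for building expected shares: copy a prefix and let
// zero fill pad the rest of the share (assumes zero padding to ShareSize).
func padShare(prefix []byte) Share {
    share := make([]byte, appconsts.ShareSize)
    copy(share, prefix)
    return Share(share)
}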

