refactor prependNode
renaynay committed Oct 6, 2021
1 parent d4d9db6 commit 5edc350
Showing 5 changed files with 12 additions and 11 deletions.
16 changes: 9 additions & 7 deletions ipld/plugin/nmt.go
@@ -35,11 +35,13 @@ const (
// DagParserFormatName can be used when putting into the IPLD Dag
DagParserFormatName = "extended-square-row-or-col"

+ // ShareSize system wide default size for data shares.
+ ShareSize = 256
+
// Repeated here to avoid a dependency to the wrapping repo as this makes
// it hard to compile and use the plugin against a local ipfs version.
// TODO: plugins have config options; make this configurable instead
namespaceSize = 8
- shareSize = 256
// nmtHashSize is the size of a digest created by an NMT in bytes.
nmtHashSize = 2*namespaceSize + sha256.Size
)
@@ -113,7 +115,7 @@ func DataSquareRowOrColumnRawInputParser(r io.Reader, _mhType uint64, _mhLen int
)

for {
- namespacedLeaf := make([]byte, shareSize+namespaceSize)
+ namespacedLeaf := make([]byte, ShareSize+namespaceSize)
if _, err := io.ReadFull(br, namespacedLeaf); err != nil {
if err == io.EOF {
break
@@ -137,7 +139,7 @@ type nmtNodeCollector struct {

func newNodeCollector() *nmtNodeCollector {
// extendedRowOrColumnSize is hardcoded here to avoid importing
- const extendedRowOrColumnSize = 2 * 128
+ extendedRowOrColumnSize := 2 * 128
return &nmtNodeCollector{nodes: make([]ipld.Node, 0, extendedRowOrColumnSize)}
}

@@ -165,10 +167,10 @@ func (n *nmtNodeCollector) visit(hash []byte, children ...[]byte) {
}

func prependNode(newNode ipld.Node, nodes []ipld.Node) []ipld.Node {
- nodes = append(nodes, ipld.Node(nil))
- copy(nodes[1:], nodes)
- nodes[0] = newNode
- return nodes
+ prepended := make([]ipld.Node, len(nodes)+1)
+ prepended[0] = newNode
+ copy(prepended[1:], nodes)
+ return prepended
}

func NmtNodeParser(block blocks.Block) (ipld.Node, error) {
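
Note on the prependNode refactor above: the previous implementation grew the input slice with append and shifted its elements in place, so whenever the caller's slice had spare capacity the shift wrote into the caller's backing array. The new implementation allocates a fresh slice of the final length and copies into it, leaving the input untouched. Below is a minimal, self-contained sketch of the difference; it uses strings in place of ipld.Node so it runs without IPFS dependencies, and the helper names prependOld and prependNew are illustrative, not part of the commit.

package main

import "fmt"

// prependOld mirrors the pre-refactor approach: append a placeholder, then
// shift elements in place. With spare capacity, append reuses the caller's
// backing array, so the shift is visible to the caller.
func prependOld(newNode string, nodes []string) []string {
	nodes = append(nodes, "")
	copy(nodes[1:], nodes)
	nodes[0] = newNode
	return nodes
}

// prependNew mirrors the refactored approach: allocate a fresh slice up
// front, so the caller's slice is never written to.
func prependNew(newNode string, nodes []string) []string {
	prepended := make([]string, len(nodes)+1)
	prepended[0] = newNode
	copy(prepended[1:], nodes)
	return prepended
}

func main() {
	nodes := make([]string, 2, 4) // spare capacity: append will reuse this backing array
	nodes[0], nodes[1] = "a", "b"

	_ = prependOld("x", nodes)
	fmt.Println(nodes) // [x a] -- the caller's elements were shifted in place

	nodes[0], nodes[1] = "a", "b"
	_ = prependNew("x", nodes)
	fmt.Println(nodes) // [a b] -- the caller's slice is untouched
}
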
2 changes: 1 addition & 1 deletion ipld/read.go
@@ -140,7 +140,7 @@ func (sc *shareCounter) retrieveShare(
}
}

- if len(data) < ShareSize {
+ if len(data) < plugin.ShareSize {
return
}

2 changes: 0 additions & 2 deletions ipld/share.go
@@ -5,8 +5,6 @@ import "github.com/celestiaorg/nmt/namespace"
const (
// MaxSquareSize is currently the maximum size supported for unerasured data in rsmt2d.ExtendedDataSquare.
MaxSquareSize = 128
- // ShareSize system wide default size for data shares.
- ShareSize = 256
// NamespaceSize is a system wide size for NMT namespaces.
// TODO(Wondertan): Should be part of IPLD/NMT plugin
NamespaceSize = 8
2 changes: 1 addition & 1 deletion ipld/test_helpers.go
@@ -73,7 +73,7 @@ func RandNamespacedShares(t *testing.T, total int) NamespacedShares {
shares := make(NamespacedShares, total)
for i := 0; i < total; i++ {
shares[i].ID = data[i]
- shares[i].Share = make([]byte, NamespaceSize+ShareSize)
+ shares[i].Share = make([]byte, NamespaceSize+plugin.ShareSize)
copy(shares[i].Share[:NamespaceSize], data[i])
_, err := mrand.Read(shares[i].Share[NamespaceSize:]) // nolint:gosec // G404: Use of weak random number generator
require.NoError(t, err)
1 change: 1 addition & 0 deletions ipld/write.go
@@ -33,6 +33,7 @@ func PutData(ctx context.Context, shares [][]byte, adder ipld.NodeAdder) (*rsmt2
return eds, batchAdder.Commit()
}

+ // convertEDStoShares returns the original shares of the given ExtendedDataSquare.
func convertEDStoShares(eds *rsmt2d.ExtendedDataSquare) [][]byte {
origWidth := eds.Width() / 2
origShares := make([][]byte, origWidth*origWidth)
