diff --git a/array.go b/array.go index f661e9c4..d4e6fca5 100644 --- a/array.go +++ b/array.go @@ -19,14 +19,18 @@ package atree import ( + "bytes" "encoding/binary" "fmt" "math" "strings" + "sync" "github.com/fxamacker/cbor/v2" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( slabAddressSize = 8 slabIndexSize = 8 @@ -57,6 +61,37 @@ const ( // 32 is faster than 24 and 40. linearScanThreshold = 32 + + // inlined tag number size: CBOR tag number CBORTagInlinedArray or CBORTagInlinedMap + inlinedTagNumSize = 2 + + // inlined CBOR array head size: CBOR array head of 3 elements (extra data index, value id, elements) + inlinedCBORArrayHeadSize = 1 + + // inlined extra data index size: CBOR positive number encoded in 2 bytes [0, 255] (fixed-size for easy computation) + inlinedExtraDataIndexSize = 2 + + // inlined CBOR byte string head size for value ID: CBOR byte string head for byte string of 8 bytes + inlinedCBORValueIDHeadSize = 1 + + // inlined value id size: encoded in 8 bytes + inlinedValueIDSize = 8 + + // inlined array data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data index (2 bytes) [0, 255] + + // value ID index head (1 byte) + + // value ID index (8 bytes) + + // element array head (3 bytes) + inlinedArrayDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize + + arrayDataSlabElementHeadSize + + maxInlinedExtraDataIndex = 255 ) type ArraySlabHeader struct { @@ -69,6 +104,8 @@ type ArrayExtraData struct { TypeInfo TypeInfo // array type } +var _ ExtraData = &ArrayExtraData{} + // ArrayDataSlab is leaf node, implementing ArraySlab. type ArrayDataSlab struct { next SlabID @@ -78,6 +115,10 @@ type ArrayDataSlab struct { // extraData is data that is prepended to encoded slab data. // It isn't included in slab size calculation for splitting and merging. extraData *ArrayExtraData + + // inlined indicates whether this slab is stored inlined in its parent slab. + // This flag affects Encode(), ByteSize(), etc. + inlined bool } func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { @@ -142,24 +183,68 @@ type ArraySlab interface { SetExtraData(*ArrayExtraData) PopIterate(SlabStorage, ArrayPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } -// Array is tree +// Array is a heterogeneous variable-size array, storing any type of values +// into a smaller ordered list of values and provides efficient functionality +// to lookup, insert and remove elements anywhere in the array. +// +// Array elements can be stored in one or more relatively fixed-sized segments. +// +// Array can be inlined into its parent container when the entire content fits in +// parent container's element size limit. Specifically, array with one segment +// which fits in size limit can be inlined, while arrays with multiple segments +// can't be inlined. type Array struct { Storage SlabStorage root ArraySlab + + // parentUpdater is a callback that notifies parent container when this array is modified. + // If this callback is nil, this array has no parent. Otherwise, this array has parent + // and this callback must be used when this array is changed by Append, Insert, Set, Remove, etc. + // + // parentUpdater acts like "parent pointer". 
It is not stored physically and is only in memory. + // It is setup when child array is returned from parent's Get. It is also setup when + // new child is added to parent through Set or Insert. + parentUpdater parentUpdater + + // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. + // This is needed by mutable element to properly update itself through parentUpdater. + // WARNING: since mutableElementIndex is created lazily, we need to create mutableElementIndex + // if it is nil before adding/updating elements. Range, delete, and read are no-ops on nil Go map. + // TODO: maybe optimize by replacing map to get faster updates. + mutableElementIndex map[ValueID]uint64 +} + +var bufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(int(maxThreshold)) + return e + }, +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func putBuffer(e *bytes.Buffer) { + e.Reset() + bufferPool.Put(e) } var _ Value = &Array{} +var _ mutableValueNotifier = &Array{} func (a *Array) Address() Address { return a.root.SlabID().address } -func (a *Array) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { - return SlabIDStorable(a.SlabID()), nil -} - const arrayExtraDataLength = 1 func newArrayExtraDataFromData( @@ -208,6 +293,10 @@ func newArrayExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) return &ArrayExtraData{TypeInfo: typeInfo}, nil } +func (a *ArrayExtraData) isExtraData() bool { + return true +} + // Encode encodes extra data as CBOR array: // // [type info] @@ -353,25 +442,26 @@ func newArrayDataSlabFromDataV0( return nil, NewDecodingError(err) } + // Compute slab size for version 1. + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable - } - - // Compute slab size for version 1. 
- slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize + slabSize += storable.ByteSize() } header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -385,23 +475,18 @@ func newArrayDataSlabFromDataV0( // newArrayDataSlabFromDataV1 decodes data in version 1: // -// Root DataSlab Header: -// -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// DataSlab Header: // -// Non-root DataSlab Header (18 bytes): -// -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded array of elements // // See ArrayExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. func newArrayDataSlabFromDataV1( id SlabID, h head, @@ -415,6 +500,7 @@ func newArrayDataSlabFromDataV1( ) { var err error var extraData *ArrayExtraData + var inlinedExtraData []ExtraData var next SlabID // Decode extra data @@ -426,6 +512,20 @@ func newArrayDataSlabFromDataV1( } } + // Decode inlined slab extra data + if h.hasInlinedSlabs() { + inlinedExtraData, data, err = newInlinedExtraDataFromData( + data, + decMode, + decodeStorable, + decodeTypeInfo, + ) + if err != nil { + // err is categorized already by newInlinedExtraDataFromData. + return nil, err + } + } + // Decode next slab ID if h.hasNextSlabID() { next, err = NewSlabIDFromRawBytes(data) @@ -450,14 +550,20 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingError(err) } + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable + slabSize += storable.ByteSize() } // Check if data reached EOF @@ -465,15 +571,9 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingErrorf("data has %d bytes of extraneous data for array data slab", len(data)-cborDec.NumBytesDecoded()) } - // Compute slab size for version 1. - slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize - } - header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -482,30 +582,234 @@ func newArrayDataSlabFromDataV1( header: header, elements: elements, extraData: extraData, + inlined: false, // this function is only called when slab is not inlined. }, nil } -// Encode encodes this array data slab to the given encoder. +// DecodeInlinedArrayStorable decodes inlined array data slab. 
Encoding is
+// version 1 with a CBOR tag: the tag number is CBORTagInlinedArray, and the tag
+// content is a 3-element array:
 //
-// Root DataSlab Header:
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
 //
-// +-------------------------------+------------+
-// | slab version + flag (2 bytes) | extra data |
-// +-------------------------------+------------+
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
+func DecodeInlinedArrayStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedArrayDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedArrayDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d elements, got %d elements",
+				inlinedArrayDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*ArrayExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect *ArrayExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index SlabIndex
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, index)
+
+	// Decode array elements (CBOR array)
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	size := uint32(inlinedArrayDataSlabPrefixSize)
+
+	elements := make([]Storable, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		storable, err := decodeStorable(dec, slabID, inlinedExtraData)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element")
+		}
+		elements[i] = storable
+
+		size += storable.ByteSize()
+	}
+
+	header := ArraySlabHeader{
+		slabID: slabID,
+		size:   size,
+		count:  uint32(elemCount),
+	}
+
+	return &ArrayDataSlab{
+		header:   header,
+		elements: elements,
+		extraData: &ArrayExtraData{
+			// Make a copy of extraData.TypeInfo because
+			// inlined extra data are shared by all inlined slabs.
+			TypeInfo: extraData.TypeInfo.Copy(),
+		},
+		inlined: true,
+	}, nil
+}
+
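+// For illustration only (editorial sketch, not part of the original patch): the
+// wire layout that DecodeInlinedArrayStorable consumes and encodeAsInlined
+// produces is roughly:
+//
+//	d8 <tag>      CBOR tag number CBORTagInlinedArray (inlinedTagNumSize = 2 bytes)
+//	83            CBOR array head of 3 elements (inlinedCBORArrayHeadSize = 1 byte)
+//	18 <index>    element 0: fixed-size uint extra data index (inlinedExtraDataIndexSize = 2 bytes)
+//	48 <8 bytes>  element 1: byte string head + slab index (1 + inlinedValueIDSize bytes)
+//	99 <2 bytes>  element 2: fixed-size array head for elements (arrayDataSlabElementHeadSize = 3 bytes)
+//
+// Summing the fixed parts gives 2+1+2+1+8+3 = 17 bytes, which is what the
+// inlinedArrayDataSlabPrefixSize constant adds up to.
+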
+// encodeAsInlined encodes inlined array data slab. Encoding is
+// version 1 with a CBOR tag: the tag number is CBORTagInlinedArray,
+// and the tag content is a 3-element array:
 //
-// Non-root DataSlab Header (18 bytes):
+// +------------------+----------------+----------+
+// | extra data index | value ID index | elements |
+// +------------------+----------------+----------+
+func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if a.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root array data slab as inlined"))
+	}
+
+	if !a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone array data slab as inlined"))
+	}
+
+	extraDataIndex := inlinedTypeInfo.addArrayExtraData(a.extraData)
+
+	if extraDataIndex > maxInlinedExtraDataIndex {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedArray,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is a fixed-size CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(a.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: array elements
+	err = a.encodeElements(enc, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+// Encode encodes this array data slab to the given encoder.
 //
-// +-------------------------------+-----------------------------+
-// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-// +-------------------------------+-----------------------------+
+// DataSlab Header:
+//
+// +-------------------------------+----------------------+---------------------------------+-----------------------------+
+// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) |
+// +-------------------------------+----------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded array of elements
 //
 // See ArrayExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 func (a *ArrayDataSlab) Encode(enc *Encoder) error {
+	if a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab as standalone slab"))
+	}
+
+	// Encoding is done in two steps:
+	//
+	// 1. Encode array elements into a fresh buffer while collecting inlined extra data from inlined elements.
+	// 2. Encode the slab with the deduplicated inlined extra data, then copy the encoded elements from that buffer.
+
+	inlinedTypes := newInlinedExtraData()
+
+	// Get a buffer from a pool to encode elements.
+	elementBuf := getBuffer()
+	defer putBuffer(elementBuf)
+
+	elementEnc := NewEncoder(elementBuf, enc.encMode)
+
+	err := a.encodeElements(elementEnc, inlinedTypes)
+	if err != nil {
+		// err is already categorized by ArrayDataSlab.encodeElements().
+ return err + } + + err = elementEnc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + const version = 1 h, err := newArraySlabHead(version, slabArrayData) @@ -525,15 +829,18 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { h.setRoot() } + if !inlinedTypes.empty() { + h.setHasInlinedSlabs() + } + // Encode head (version + flag) _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } - // Encode header + // Encode extra data if a.extraData != nil { - // Encode extra data err = a.extraData.Encode(enc) if err != nil { // err is already categorized by ArrayExtraData.Encode(). @@ -541,6 +848,15 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } } + // Encode inlined extra data + if !inlinedTypes.empty() { + err = inlinedTypes.Encode(enc) + if err != nil { + // err is already categorized by inlinedExtraData.Encode(). + return err + } + } + // Encode next slab ID if a.next != SlabIDUndefined { n, err := a.next.ToRawBytes(enc.Scratch[:]) @@ -555,6 +871,21 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } } + // Encode elements by copying raw bytes from previous buffer + err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { // Encode CBOR array size manually for fix-sized encoding enc.Scratch[0] = 0x80 | 25 @@ -568,14 +899,14 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { // Write scratch content to encoder totalSize := countOffset + countSize - _, err = enc.Write(enc.Scratch[:totalSize]) + err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize]) if err != nil { return NewEncodingError(err) } // Encode data slab content (array of elements) for _, e := range a.elements { - err = e.Encode(enc) + err = encodeStorableAsElement(enc, e, inlinedTypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element") @@ -590,6 +921,85 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return nil } +func (a *ArrayDataSlab) Inlined() bool { + return a.inlined +} + +// Inlinable returns true if +// - array data slab is root slab +// - size of inlined array data slab <= maxInlineSize +func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool { + if a.extraData == nil { + // Non-root data slab is not inlinable. + return false + } + + // At this point, this data slab is either + // - inlined data slab, or + // - not inlined root data slab + + // Compute inlined size from cached slab size + inlinedSize := a.header.size + if !a.inlined { + inlinedSize = inlinedSize - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + } + + // Inlined byte size must be less than max inline size. + return uint64(inlinedSize) <= maxInlineSize +} + +// Inline converts not-inlined ArrayDataSlab to inlined ArrayDataSlab and removes it from storage. +func (a *ArrayDataSlab) Inline(storage SlabStorage) error { + if a.inlined { + return NewFatalError(fmt.Errorf("failed to inline ArrayDataSlab %s: it is inlined already", a.header.slabID)) + } + + id := a.header.slabID + + // Remove slab from storage because it is going to be inlined. + err := storage.Remove(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Update data slab size as inlined slab. + a.header.size = a.header.size - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + + // Update data slab inlined status. + a.inlined = true + + return nil +} + +// Uninline converts an inlined ArrayDataSlab to uninlined ArrayDataSlab and stores it in storage. +func (a *ArrayDataSlab) Uninline(storage SlabStorage) error { + if !a.inlined { + return NewFatalError(fmt.Errorf("failed to un-inline ArrayDataSlab %s: it is not inlined", a.header.slabID)) + } + + // Update data slab size + a.header.size = a.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize + + // Update data slab inlined status + a.inlined = false + + // Store slab in storage + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } + + return nil +} + func (a *ArrayDataSlab) hasPointer() bool { for _, e := range a.elements { if hasPointer(e) { @@ -606,6 +1016,9 @@ func (a *ArrayDataSlab) ChildStorables() []Storable { } func (a *ArrayDataSlab) getPrefixSize() uint32 { + if a.inlined { + return inlinedArrayDataSlabPrefixSize + } if a.extraData != nil { return arrayRootDataSlabPrefixSize } @@ -644,10 +1057,12 @@ func (a *ArrayDataSlab) Set(storage SlabStorage, address Address, index uint64, a.header.size = size - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return oldElem, nil @@ -675,10 +1090,12 @@ func (a *ArrayDataSlab) Insert(storage SlabStorage, address Address, index uint6 a.header.count++ a.header.size += storable.ByteSize() - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return nil @@ -705,10 +1122,12 @@ func (a *ArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, err a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID))
+		}
 	}
 
 	return v, nil
@@ -2201,7 +2620,23 @@ func (a *ArrayMetaDataSlab) CanLendToRight(size uint32) bool {
 	return a.header.size-arraySlabHeaderSize*n > uint32(minThreshold)
 }
 
-func (a ArrayMetaDataSlab) IsData() bool {
+func (a *ArrayMetaDataSlab) Inlined() bool {
+	return false
+}
+
+func (a *ArrayMetaDataSlab) Inlinable(_ uint64) bool {
+	return false
+}
+
+func (a *ArrayMetaDataSlab) Inline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to inline ArrayMetaDataSlab %s: ArrayMetaDataSlab can't be inlined", a.header.slabID))
+}
+
+func (a *ArrayMetaDataSlab) Uninline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to uninline ArrayMetaDataSlab %s: ArrayMetaDataSlab is never inlined", a.header.slabID))
+}
+
+func (a *ArrayMetaDataSlab) IsData() bool {
 	return false
 }
 
@@ -2341,21 +2776,236 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) {
 	}, nil
 }
 
+// TODO: maybe optimize this
+func (a *Array) incrementIndexFrom(index uint64) error {
+	// Although a range loop over a Go map is not deterministic, it is OK
+	// to use here because this operation is free of side effects and
+	// leads to the same results independent of map order.
+	for id, i := range a.mutableElementIndex {
+		if i >= index {
+			if a.mutableElementIndex[id]+1 >= a.Count() {
+				return NewFatalError(fmt.Errorf("failed to increment index of ValueID %s in array %s: new index exceeds array count", id, a.ValueID()))
+			}
+			a.mutableElementIndex[id]++
+		}
+	}
+	return nil
+}
+
+// TODO: maybe optimize this
+func (a *Array) decrementIndexFrom(index uint64) error {
+	// Although a range loop over a Go map is not deterministic, it is OK
+	// to use here because this operation is free of side effects and
+	// leads to the same results independent of map order.
+	for id, i := range a.mutableElementIndex {
+		if i > index {
+			if a.mutableElementIndex[id] <= 0 {
+				return NewFatalError(fmt.Errorf("failed to decrement index of ValueID %s in array %s: new index < 0", id, a.ValueID()))
+			}
+			a.mutableElementIndex[id]--
+		}
+	}
+	return nil
+}
+
+func (a *Array) getIndexByValueID(id ValueID) (uint64, bool) {
+	index, exist := a.mutableElementIndex[id]
+	return index, exist
+}
+
+func (a *Array) setParentUpdater(f parentUpdater) {
+	a.parentUpdater = f
+}
+
+// setCallbackWithChild sets up a callback function with the child value (child)
+// so the parent array (a) can be notified when the child value is modified.
+func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64) {
+	c, ok := child.(mutableValueNotifier)
+	if !ok {
+		return
+	}
+
+	vid := c.ValueID()
+
+	// mutableElementIndex is lazily initialized.
+	if a.mutableElementIndex == nil {
+		a.mutableElementIndex = make(map[ValueID]uint64)
+	}
+
+	// Index i will be updated with array operations, which affects element index.
+	a.mutableElementIndex[vid] = i
+
+	c.setParentUpdater(func() (found bool, err error) {
+
+		// Avoid unnecessary write operation on parent container.
+		// Child value was stored as SlabIDStorable (not inlined) in parent container,
+		// and continues to be stored as SlabIDStorable (still not inlinable),
+		// so no update to parent container is needed.
+		if !c.Inlined() && !c.Inlinable(maxInlineSize) {
+			return true, nil
+		}
+
+		// Get latest adjusted index by child value ID.
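+		// (This lookup can differ from the index captured above: Insert and Remove
+		// shift element positions, and incrementIndexFrom/decrementIndexFrom keep
+		// mutableElementIndex pointing at the child's current position.)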
+ adjustedIndex, exist := a.getIndexByValueID(vid) + if !exist { + return false, nil + } + + storable, err := a.root.Get(a.Storage, adjustedIndex) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). + return false, err + } + + // Verify retrieved element is either SlabIDStorable or Slab, with identical value ID. + switch storable := storable.(type) { + case SlabIDStorable: + sid := SlabID(storable) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := storable.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil + } + + // Set child value with parent array using updated index. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := a.set(adjustedIndex, c) + if err != nil { + return false, err + } + + // Verify overwritten storable has identical value ID. + + switch existingValueStorable := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(existingValueStorable) + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := existingValueStorable.SlabID() + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + + default: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) + } + + return true, nil + }) +} + +// notifyParentIfNeeded calls parent updater if this array (a) is a child element in another container. +func (a *Array) notifyParentIfNeeded() error { + if a.parentUpdater == nil { + return nil + } + + // If parentUpdater() doesn't find child array (a), then no-op on parent container + // and unset parentUpdater callback in child array. This can happen when child + // array is an outdated reference (removed or overwritten in parent container). + found, err := a.parentUpdater() + if err != nil { + return err + } + if !found { + a.parentUpdater = nil + } + return nil +} + func (a *Array) Get(i uint64) (Value, error) { storable, err := a.root.Get(a.Storage, i) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). return nil, err } + v, err := storable.StoredValue(a.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + // As a parent, this array (a) sets up notification callback with child + // value (v) so this array can be notified when child value is modified. + a.setCallbackWithChild(i, v, maxInlineArrayElementSize) + return v, nil } func (a *Array) Set(index uint64, value Value) (Storable, error) { + existingStorable, err := a.set(index, value) + if err != nil { + return nil, err + } + + var existingValueID ValueID + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. 
+ // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. + switch s := existingStorable.(type) { + case ArraySlab: // inlined array slab + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) + + case MapSlab: // inlined map slab + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) + + case SlabIDStorable: // uninlined slab + existingValueID = slabIDToValueID(SlabID(s)) + } + + // Remove overwritten array/map's ValueID from mutableElementIndex if: + // - new value isn't array/map, or + // - new value is array/map with different value ID + if existingValueID != emptyValueID { + newValue, ok := value.(mutableValueNotifier) + if !ok || existingValueID != newValue.ValueID() { + delete(a.mutableElementIndex, existingValueID) + } + } + + return existingStorable, nil +} + +func (a *Array) set(index uint64, value Value) (Storable, error) { existingStorable, err := a.root.Set(a.Storage, a.Address(), index, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Set(). @@ -2368,7 +3018,6 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). return nil, err } - return existingStorable, nil } if !a.root.IsData() { @@ -2382,6 +3031,30 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { } } + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. + // + // If this array is a child, it needs to notify its parent because its + // content (maybe also its size) is changed by this "Set" operation. + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by setting new child. + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. + a.setCallbackWithChild(index, value, maxInlineArrayElementSize) + return existingStorable, nil } @@ -2398,14 +3071,87 @@ func (a *Array) Insert(index uint64, value Value) error { } if a.root.IsFull() { - // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). - return a.splitRoot() + err = a.splitRoot() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). + return err + } } + err = a.incrementIndexFrom(index) + if err != nil { + return err + } + + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. 
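+	// Illustrative example (names are hypothetical): if `child, _ := parent.Get(0)`
+	// returns an inlined *Array, a later `child.Insert(0, v)` triggers child's
+	// parentUpdater callback, which re-stores the child at its adjusted index via
+	// parent.set(), keeping the inlined copy in the parent up to date.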
+ // + // If this array is a child, it needs to notify its parent because its + // content (also its size) is changed by this "Insert" operation. + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by inserting new child. + err = a.notifyParentIfNeeded() + if err != nil { + return err + } + + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. + a.setCallbackWithChild(index, value, maxInlineArrayElementSize) + return nil } func (a *Array) Remove(index uint64) (Storable, error) { + storable, err := a.remove(index) + if err != nil { + return nil, err + } + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. + // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. + switch s := storable.(type) { + case ArraySlab: + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + + case MapSlab: + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + + case SlabIDStorable: + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(SlabID(s)) + delete(a.mutableElementIndex, removedValueID) + } + + return storable, nil +} + +func (a *Array) remove(index uint64) (Storable, error) { storable, err := a.root.Remove(a.Storage, index) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Remove(). @@ -2424,6 +3170,18 @@ func (a *Array) Remove(index uint64) (Storable, error) { } } + err = a.decrementIndexFrom(index) + if err != nil { + return nil, err + } + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by removing element. + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return storable, nil } @@ -2534,14 +3292,75 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { return nil } +func (a *Array) Inlined() bool { + return a.root.Inlined() +} + +func (a *Array) Inlinable(maxInlineSize uint64) bool { + return a.root.Inlinable(maxInlineSize) +} + +// Storable returns array a as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { + + inlined := a.root.Inlined() + inlinable := a.root.Inlinable(maxInlineSize) + + switch { + case inlinable && inlined: + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. + return a.root, nil + + case !inlinable && !inlined: + // Root slab is not inlinable and was not inlined. + // Return root slab ID as storable, no size adjustment and change to storage. 
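+		// (A never-inlined root slab is already stored under its SlabID, so the
+		// returned SlabIDStorable stays resolvable without touching storage.)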
+ return SlabIDStorable(a.SlabID()), nil + + case inlinable && !inlined: + // Root slab is inlinable and was NOT inlined. + + // Inline root data slab. + err := a.root.Inline(a.Storage) + if err != nil { + return nil, err + } + + return a.root, nil + + case !inlinable && inlined: + + // Root slab is NOT inlinable and was previously inlined. + + // Uninline root slab. + err := a.root.Uninline(a.Storage) + if err != nil { + return nil, err + } + + return SlabIDStorable(a.SlabID()), nil + + default: + panic("not reachable") + } +} + var emptyArrayIterator = &ArrayIterator{} type ArrayIterator struct { - storage SlabStorage - id SlabID - dataSlab *ArrayDataSlab - index int - remainingCount int + array *Array + id SlabID + dataSlab *ArrayDataSlab + indexInArray int + indexInDataSlab int + remainingCount int + readOnly bool +} + +func (i *ArrayIterator) CanMutate() bool { + return !i.readOnly } func (i *ArrayIterator) Next() (Value, error) { @@ -2554,7 +3373,7 @@ func (i *ArrayIterator) Next() (Value, error) { return nil, nil } - slab, found, err := i.storage.Retrieve(i.id) + slab, found, err := i.array.Storage.Retrieve(i.id) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) @@ -2564,22 +3383,29 @@ func (i *ArrayIterator) Next() (Value, error) { } i.dataSlab = slab.(*ArrayDataSlab) - i.index = 0 + i.indexInDataSlab = 0 } var element Value var err error - if i.index < len(i.dataSlab.elements) { - element, err = i.dataSlab.elements[i.index].StoredValue(i.storage) + if i.indexInDataSlab < len(i.dataSlab.elements) { + element, err = i.dataSlab.elements[i.indexInDataSlab].StoredValue(i.array.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } - i.index++ + if i.CanMutate() { + // Set up notification callback in child value so + // when child value is modified parent a is notified. + i.array.setCallbackWithChild(uint64(i.indexInArray), element, maxInlineArrayElementSize) + } + + i.indexInDataSlab++ + i.indexInArray++ } - if i.index >= len(i.dataSlab.elements) { + if i.indexInDataSlab >= len(i.dataSlab.elements) { i.id = i.dataSlab.next i.dataSlab = nil } @@ -2597,13 +3423,26 @@ func (a *Array) Iterator() (*ArrayIterator, error) { } return &ArrayIterator{ - storage: a.Storage, + array: a, id: slab.SlabID(), dataSlab: slab, remainingCount: int(a.Count()), }, nil } +// ReadOnlyIterator returns readonly iterator for array elements. +// If elements of child containers are mutated, those changes +// are not guaranteed to persist. +func (a *Array) ReadOnlyIterator() (*ArrayIterator, error) { + iterator, err := a.Iterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Iterator(). 
+ return nil, err + } + iterator.readOnly = true + return iterator, nil +} + func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { count := a.Count() @@ -2646,24 +3485,27 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato } return &ArrayIterator{ - storage: a.Storage, - id: dataSlab.SlabID(), - dataSlab: dataSlab, - index: int(index), - remainingCount: int(numberOfElements), + array: a, + id: dataSlab.SlabID(), + dataSlab: dataSlab, + indexInArray: int(startIndex), + indexInDataSlab: int(index), + remainingCount: int(numberOfElements), }, nil } -type ArrayIterationFunc func(element Value) (resume bool, err error) - -func (a *Array) Iterate(fn ArrayIterationFunc) error { - - iterator, err := a.Iterator() +func (a *Array) ReadOnlyRangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { + iterator, err := a.RangeIterator(startIndex, endIndex) if err != nil { - // Don't need to wrap error as external error because err is already categorized by Array.Iterator(). - return err + return nil, err } + iterator.readOnly = true + return iterator, nil +} + +type ArrayIterationFunc func(element Value) (resume bool, err error) +func iterateArray(iterator *ArrayIterator, fn ArrayIterationFunc) error { for { value, err := iterator.Next() if err != nil { @@ -2684,49 +3526,55 @@ func (a *Array) Iterate(fn ArrayIterationFunc) error { } } -func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { +func (a *Array) Iterate(fn ArrayIterationFunc) error { + iterator, err := a.Iterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.Iterator(). + return err + } + return iterateArray(iterator, fn) +} + +func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyIterator() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyIterator(). + return err + } + return iterateArray(iterator, fn) +} +func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { iterator, err := a.RangeIterator(startIndex, endIndex) if err != nil { // Don't need to wrap error as external error because err is already categorized by Array.RangeIterator(). return err } + return iterateArray(iterator, fn) +} - for { - value, err := iterator.Next() - if err != nil { - // Don't need to wrap error as external error because err is already categorized by ArrayIterator.Next(). - return err - } - if value == nil { - return nil - } - resume, err := fn(value) - if err != nil { - // Wrap err as external error (if needed) because err is returned by ArrayIterationFunc callback. - return wrapErrorAsExternalErrorIfNeeded(err) - } - if !resume { - return nil - } +func (a *Array) IterateReadOnlyRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyRangeIterator(startIndex, endIndex) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyRangeIterator(). 
+ return err } + return iterateArray(iterator, fn) } + func (a *Array) Count() uint64 { return uint64(a.root.Header().count) } func (a *Array) SlabID() SlabID { + if a.root.Inlined() { + return SlabIDUndefined + } return a.root.SlabID() } func (a *Array) ValueID() ValueID { - sid := a.SlabID() - - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) - - return id + return slabIDToValueID(a.root.SlabID()) } func (a *Array) Type() TypeInfo { @@ -2737,7 +3585,7 @@ func (a *Array) Type() TypeInfo { } func (a *Array) String() string { - iterator, err := a.Iterator() + iterator, err := a.ReadOnlyIterator() if err != nil { return err.Error() } @@ -2831,20 +3679,30 @@ func (a *Array) PopIterate(fn ArrayPopIterationFunc) error { extraData := a.root.ExtraData() + inlined := a.root.Inlined() + + size := uint32(arrayRootDataSlabPrefixSize) + if inlined { + size = inlinedArrayDataSlabPrefixSize + } + // Set root to empty data slab a.root = &ArrayDataSlab{ header: ArraySlabHeader{ slabID: rootID, - size: arrayRootDataSlabPrefixSize, + size: size, }, extraData: extraData, + inlined: inlined, } // Save root slab - err = a.Storage.Store(a.root.SlabID(), a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + if !a.Inlined() { + err = a.Storage.Store(a.root.SlabID(), a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + } } return nil @@ -3210,8 +4068,8 @@ func (i *ArrayLoadedValueIterator) Next() (Value, error) { return nil, nil } -// LoadedValueIterator returns iterator to iterate loaded array elements. -func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { +// ReadOnlyLoadedValueIterator returns iterator to iterate loaded array elements. +func (a *Array) ReadOnlyLoadedValueIterator() (*ArrayLoadedValueIterator, error) { switch slab := a.root.(type) { case *ArrayDataSlab: @@ -3249,9 +4107,9 @@ func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { } } -// IterateLoadedValues iterates loaded array values. -func (a *Array) IterateLoadedValues(fn ArrayIterationFunc) error { - iterator, err := a.LoadedValueIterator() +// IterateReadOnlyLoadedValues iterates loaded array values. +func (a *Array) IterateReadOnlyLoadedValues(fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyLoadedValueIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by Array.LoadedValueIterator(). 
return err diff --git a/array_bench_test.go b/array_bench_test.go index 572abff8..b8c06cd0 100644 --- a/array_bench_test.go +++ b/array_bench_test.go @@ -355,7 +355,7 @@ func benchmarkNewArrayFromAppend(b *testing.B, initialArraySize int) { for i := 0; i < b.N; i++ { copied, _ := NewArray(storage, array.Address(), array.Type()) - _ = array.Iterate(func(value Value) (bool, error) { + _ = array.IterateReadOnly(func(value Value) (bool, error) { _ = copied.Append(value) return true, nil }) @@ -379,7 +379,7 @@ func benchmarkNewArrayFromBatchData(b *testing.B, initialArraySize int) { b.StartTimer() for i := 0; i < b.N; i++ { - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(b, err) copied, _ := NewArrayFromBatchData(storage, array.Address(), array.Type(), func() (Value, error) { diff --git a/array_debug.go b/array_debug.go index 64cf0a07..18b88556 100644 --- a/array_debug.go +++ b/array_debug.go @@ -63,16 +63,14 @@ func GetArrayStats(a *Array) (ArrayStats, error) { return ArrayStats{}, err } - if slab.IsData() { + switch slab.(type) { + case *ArrayDataSlab: dataSlabCount++ - childStorables := slab.ChildStorables() - for _, s := range childStorables { - if _, ok := s.(SlabIDStorable); ok { - storableSlabCount++ - } - } - } else { + ids := getSlabIDFromStorable(slab, nil) + storableSlabCount += uint64(len(ids)) + + case *ArrayMetaDataSlab: metaDataSlabCount++ for _, storable := range slab.ChildStorables() { @@ -130,20 +128,14 @@ func DumpArraySlabs(a *Array) ([]string, error) { return nil, err } - if slab.IsData() { - dataSlab := slab.(*ArrayDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) + switch slab := slab.(type) { + case *ArrayDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(slab, overflowIDs) - } else { - meta := slab.(*ArrayMetaDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta)) + case *ArrayMetaDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) for _, storable := range slab.ChildStorables() { id, ok := storable.(SlabIDStorable) @@ -175,8 +167,29 @@ func DumpArraySlabs(a *Array) ([]string, error) { type TypeInfoComparator func(TypeInfo, TypeInfo) bool -func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { + return verifyArray(a, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{}) +} + +func verifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { + // Verify array address (independent of array inlined status) + if address != a.Address() { + return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address())) + } + // Verify array value ID (independent of array inlined status) + err := verifyArrayValueID(a) + if err != nil { + return err + } + + // Verify array slab ID (dependent of array inlined status) + err = verifyArraySlabID(a) + if err != nil { + return err + } + + // Verify array extra data extraData := a.root.ExtraData() if extraData == nil { return NewFatalError(fmt.Errorf("root slab %d doesn't have extra 
data", a.root.SlabID())) @@ -192,10 +205,18 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp )) } - computedCount, dataSlabIDs, nextDataSlabIDs, err := - validArraySlab(tic, hip, a.Storage, a.root.Header().slabID, 0, nil, []SlabID{}, []SlabID{}) + v := &arrayVerifier{ + storage: a.Storage, + address: address, + tic: tic, + hip: hip, + inlineEnabled: inlineEnabled, + } + + // Verify array slabs + computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifySlab(a.root, 0, nil, []SlabID{}, []SlabID{}, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). return err } @@ -213,134 +234,234 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp return nil } -func validArraySlab( - tic TypeInfoComparator, - hip HashInputProvider, - storage SlabStorage, - id SlabID, +type arrayVerifier struct { + storage SlabStorage + address Address + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +// verifySlab verifies ArraySlab in memory which can be inlined or not inlined. +func (v *arrayVerifier) verifySlab( + slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, ) ( elementCount uint32, _dataSlabIDs []SlabID, _nextDataSlabIDs []SlabID, err error, ) { + id := slab.Header().slabID - slab, err := getArraySlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return 0, nil, nil, err + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + + // Verify slab address (independent of array inlined status) + if v.address != id.address { + return 0, nil, nil, NewFatalError(fmt.Errorf("array slab address %v, got %v", v.address, id.address)) + } + + // Verify that inlined slab is not in storage + if slab.Inlined() { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. 
+ return 0, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) + } + if exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) + } } if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %d has extra data", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s has extra data", id)) } // Verify that non-root slab doesn't underflow if underflowSize, underflow := slab.IsUnderflow(); underflow { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d underflows by %d bytes", id, underflowSize)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s underflows by %d bytes", id, underflowSize)) } } // Verify that slab doesn't overflow if slab.IsFull() { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d overflows", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s overflows", id)) } // Verify that header is in sync with header from parent slab if headerFromParentSlab != nil { if !reflect.DeepEqual(*headerFromParentSlab, slab.Header()) { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d header %+v is different from header %+v from parent slab", + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s header %+v is different from header %+v from parent slab", id, slab.Header(), headerFromParentSlab)) } } - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) - } + switch slab := slab.(type) { + case *ArrayDataSlab: + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) - // Verify that element count is the same as header.count - if uint32(len(dataSlab.elements)) != dataSlab.header.count { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header count %d is wrong, want %d", - id, dataSlab.header.count, len(dataSlab.elements))) - } + case *ArrayMetaDataSlab: + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(arrayDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(arrayRootDataSlabPrefixSize) + default: + return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } +} + +func (v *arrayVerifier) verifyDataSlab( + dataSlab *ArrayDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := dataSlab.header.slabID + + if !dataSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayDataSlab %s is not data", id)) + } + + // Verify that element count is the same as header.count + if uint32(len(dataSlab.elements)) != dataSlab.header.count { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", + id, dataSlab.header.count, len(dataSlab.elements))) + } + + // Verify that only root data slab can be inlined + if level > 0 && dataSlab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(arrayDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(arrayRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = 
uint32(inlinedArrayDataSlabPrefixSize) } - for _, e := range dataSlab.elements { + } - // Verify element size is <= inline size - if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d element %s size %d is too large, want < %d", - id, e, e.ByteSize(), maxInlineArrayElementSize)) - } + for _, e := range dataSlab.elements { + computedSize += e.ByteSize() + } + + if computedSize != dataSlab.header.size { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } - computedSize += e.ByteSize() + dataSlabIDs = append(dataSlabIDs, id) + + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + } + + for _, e := range dataSlab.elements { + + value, err := e.StoredValue(v.storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, + fmt.Sprintf( + "data slab %s element %s can't be converted to value", + id, e, + )) } - if computedSize != dataSlab.header.size { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) + // Verify element size <= inline size + if e.ByteSize() > uint32(maxInlineArrayElementSize) { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", + id, e, e.ByteSize(), maxInlineArrayElementSize)) } - dataSlabIDs = append(dataSlabIDs, id) + switch e := e.(type) { + case SlabIDStorable: + // Verify not-inlined element > inline size, or can't be inlined + if v.inlineEnabled { + err = verifyNotInlinedValueStatusAndSize(value, uint32(maxInlineArrayElementSize)) + if err != nil { + return 0, nil, nil, err + } + } + + case *ArrayDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined array inlined status is false")) + } - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + case *MapDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined map inlined status is false")) + } } // Verify element - for _, e := range dataSlab.elements { - v, err := e.StoredValue(storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, - fmt.Sprintf( - "data slab %s element %s can't be converted to value", - id, e, - )) - } - err = ValidValue(v, nil, tic, hip) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). - return 0, nil, nil, fmt.Errorf( - "data slab %d element %s isn't valid: %w", - id, e, err, - ) - } + err = verifyValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
+ return 0, nil, nil, fmt.Errorf( + "data slab %s element %q isn't valid: %w", + id, e, err, + ) } + } - return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil + return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil +} + +func (v *arrayVerifier) verifyMetaDataSlab( + metaSlab *ArrayMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := metaSlab.header.slabID + + if metaSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s is data", id)) } - meta, ok := slab.(*ArrayMetaDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id)) + if metaSlab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s shouldn't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, NewFatalError(fmt.Errorf("root metadata slab %d has %d children, want at least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } // Verify childrenCountSum - if len(meta.childrenCountSum) != len(meta.childrenHeaders) { + if len(metaSlab.childrenCountSum) != len(metaSlab.childrenHeaders) { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d has %d childrenCountSum, want %d", - id, len(meta.childrenCountSum), len(meta.childrenHeaders))) + id, len(metaSlab.childrenCountSum), len(metaSlab.childrenHeaders))) } computedCount := uint32(0) @@ -348,48 +469,54 @@ func validArraySlab( // If we use range, then h would be a temporary object and we'd be passing address of // temporary object to function, which can lead to bugs depending on usage. It's not a bug // with the current usage but it's less fragile to future changes by not using range here. - for i := 0; i < len(meta.childrenHeaders); i++ { - h := meta.childrenHeaders[i] + for i := 0; i < len(metaSlab.childrenHeaders); i++ { + h := metaSlab.childrenHeaders[i] + + childSlab, err := getArraySlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return 0, nil, nil, err + } // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(tic, hip, storage, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return 0, nil, nil, err
		}

		computedCount += count

		// Verify childrenCountSum
-		if meta.childrenCountSum[i] != computedCount {
+		if metaSlab.childrenCountSum[i] != computedCount {
			return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d childrenCountSum[%d] is %d, want %d",
-				id, i, meta.childrenCountSum[i], computedCount))
+				id, i, metaSlab.childrenCountSum[i], computedCount))
		}
	}

	// Verify that aggregated element count is the same as header.count
-	if computedCount != meta.header.count {
+	if computedCount != metaSlab.header.count {
		return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header count %d is wrong, want %d",
-			id, meta.header.count, computedCount))
+			id, metaSlab.header.count, computedCount))
	}

	// Verify that aggregated header size + slab prefix is the same as header.size
-	computedSize := uint32(len(meta.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize
-	if computedSize != meta.header.size {
+	computedSize := uint32(len(metaSlab.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize
+	if computedSize != metaSlab.header.size {
		return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header size %d is wrong, want %d",
-			id, meta.header.size, computedSize))
+			id, metaSlab.header.size, computedSize))
	}

-	return meta.header.count, dataSlabIDs, nextDataSlabIDs, nil
+	return metaSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil
}

-// ValidArraySerialization traverses array tree and verifies serialization
+// VerifyArraySerialization traverses array tree and verifies serialization
// by encoding, decoding, and re-encoding slabs.
// It compares in-memory objects of original slab with decoded slab.
// It also compares encoded data of original slab with encoded data of decoded slab.
-func ValidArraySerialization(
+func VerifyArraySerialization(
	a *Array,
	cborDecMode cbor.DecMode,
	cborEncMode cbor.EncMode,
@@ -397,149 +524,137 @@ func ValidArraySerialization(
	decodeTypeInfo TypeInfoDecoder,
	compare StorableComparator,
) error {
-	return validArraySlabSerialization(
-		a.Storage,
-		a.root.SlabID(),
-		cborDecMode,
-		cborEncMode,
-		decodeStorable,
-		decodeTypeInfo,
-		compare,
-	)
+	v := &serializationVerifier{
+		storage:        a.Storage,
+		cborDecMode:    cborDecMode,
+		cborEncMode:    cborEncMode,
+		decodeStorable: decodeStorable,
+		decodeTypeInfo: decodeTypeInfo,
+		compare:        compare,
+	}
+	return v.verifyArraySlab(a.root)
}

-func validArraySlabSerialization(
-	storage SlabStorage,
-	id SlabID,
-	cborDecMode cbor.DecMode,
-	cborEncMode cbor.EncMode,
-	decodeStorable StorableDecoder,
-	decodeTypeInfo TypeInfoDecoder,
-	compare StorableComparator,
-) error {
+type serializationVerifier struct {
+	storage        SlabStorage
+	cborDecMode    cbor.DecMode
+	cborEncMode    cbor.EncMode
+	decodeStorable StorableDecoder
+	decodeTypeInfo TypeInfoDecoder
+	compare        StorableComparator
+}

-	slab, err := getArraySlab(storage, id)
-	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by getArraySlab().
-		return err
-	}
+// verifyArraySlab verifies serialization of a not-inlined ArraySlab.
+func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
+
+	id := slab.SlabID()

	// Encode slab
-	data, err := Encode(slab, cborEncMode)
+	data, err := Encode(slab, v.cborEncMode)
	if err != nil {
		// Don't need to wrap error as external error because err is already categorized by Encode(). 
return err } // Decode encoded slab - decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo) + decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by DecodeSlab(). return err } // Re-encode decoded slab - dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode) + dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } + // Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab) + if !bytes.Equal(data, dataFromDecodedSlab) { + return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v", + id, dataFromDecodedSlab, data)) + } + // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined compact map because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // compact map extra data section for reuse, and only compact map field + // values are encoded in non-extra data section. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). + return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) - } - - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError(fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError(fmt.Errorf("slab %s encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) } + } + switch slab := slab.(type) { + case *ArrayDataSlab: decodedDataSlab, ok := decodedSlab.(*ArrayDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayDataSlab", id)) } // Compare slabs - err = arrayDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.arrayDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by arrayDataSlabEqual(). 
return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err)
		}

		return nil
-	}

-	metaSlab, ok := slab.(*ArrayMetaDataSlab)
-	if !ok {
-		return NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id))
-	}
+	case *ArrayMetaDataSlab:
+		decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab)
+		if !ok {
+			return NewFatalError(fmt.Errorf("decoded slab %s is not ArrayMetaDataSlab", id))
+		}

-	decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab)
-	if !ok {
-		return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayMetaDataSlab", id))
-	}
+		// Compare slabs
+		err = v.arrayMetaDataSlabEqual(slab, decodedMetaSlab)
+		if err != nil {
+			// Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual().
+			return fmt.Errorf("metadata slab %s round-trip serialization failed: %w", id, err)
+		}

-	// Compare slabs
-	err = arrayMetaDataSlabEqual(metaSlab, decodedMetaSlab)
-	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual().
-		return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err)
-	}
+		for _, h := range slab.childrenHeaders {
+			childSlab, err := getArraySlab(v.storage, h.slabID)
+			if err != nil {
+				// Don't need to wrap error as external error because err is already categorized by getArraySlab().
+				return err
+			}

-	for _, h := range metaSlab.childrenHeaders {
-		// Verify child slabs
-		err = validArraySlabSerialization(
-			storage,
-			h.slabID,
-			cborDecMode,
-			cborEncMode,
-			decodeStorable,
-			decodeTypeInfo,
-			compare,
-		)
-		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by validArraySlabSerialization().
-			return err
+			// Verify child slabs
+			err = v.verifyArraySlab(childSlab)
+			if err != nil {
+				// Don't need to wrap error as external error because err is already categorized by verifyArraySlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } } -func arrayDataSlabEqual( - expected *ArrayDataSlab, - actual *ArrayDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) arrayDataSlabEqual(expected, actual *ArrayDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -548,6 +663,11 @@ func arrayDataSlabEqual( return err } + // Compare inlined status + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -567,34 +687,49 @@ func arrayDataSlabEqual( for i := 0; i < len(expected.elements); i++ { ee := expected.elements[i] ae := actual.elements[i] - if !compare(ee, ae) { - return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) - } - // Compare nested element - if idStorable, ok := ee.(SlabIDStorable); ok { + switch ee := ee.(type) { - ev, err := idStorable.StoredValue(storage) + case SlabIDStorable: // Compare not-inlined element + if !v.compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } + + ev, err := ee.StoredValue(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err } - return ValidValueSerialization( - ev, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyValue(ev) + + case *ArrayDataSlab: // Compare inlined array + ae, ok := ae.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as inlined *ArrayDataSlab, actual %T", ae)) + } + + return v.arrayDataSlabEqual(ee, ae) + + case *MapDataSlab: // Compare inlined map + ae, ok := ae.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as inlined *MapDataSlab, actual %T", ae)) + } + + return v.mapDataSlabEqual(ee, ae) + + default: + if !v.compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } } } return nil } -func arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { +func (v *serializationVerifier) arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -638,39 +773,19 @@ func arrayExtraDataEqual(expected, actual *ArrayExtraData) error { return nil } -func ValidValueSerialization( - value Value, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyValue(value Value) error { - switch v := value.(type) { + switch value := value.(type) { case *Array: - return ValidArraySerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyArraySlab(value.root) + case *OrderedMap: - return ValidMapSerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyMapSlab(value.root) } 
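+	// Values other than *Array and *OrderedMap are leaf storables: their
+	// round-trip is already covered by the storable comparison in
+	// arrayDataSlabEqual / mapDataSlabEqual, so no further verification
+	// is needed for them here.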
return nil
}

-func computeSlabSize(data []byte) (int, error) {
+func computeSize(data []byte) (int, error) {
	if len(data) < versionAndFlagSize {
		return 0, NewDecodingError(fmt.Errorf("data is too short"))
	}
@@ -680,20 +795,23 @@ func computeSlabSize(data []byte) (int, error) {
		return 0, NewDecodingError(err)
	}

-	slabExtraDataSize, err := getExtraDataSize(h, data[versionAndFlagSize:])
+	slabExtraDataSize, inlinedSlabExtraDataSize, err := getExtraDataSizes(h, data[versionAndFlagSize:])
	if err != nil {
		return 0, err
	}

-	// Computed slab size (slab header size):
-	// - excludes slab extra data size
-	// - adds next slab ID for non-root data slab if not encoded
-	size := len(data) - slabExtraDataSize
-
	isDataSlab := h.getSlabArrayType() == slabArrayData || h.getSlabMapType() == slabMapData || h.getSlabMapType() == slabMapCollisionGroup

+	// computed size (slab header size):
+	// - excludes slab extra data size
+	// - excludes inlined slab extra data size
+	// - adds next slab ID for non-root data slab if not encoded
+	size := len(data)
+	size -= slabExtraDataSize
+	size -= inlinedSlabExtraDataSize
+
	if !h.isRoot() && isDataSlab && !h.hasNextSlabID() {
		size += slabIDSize
	}
@@ -701,15 +819,214 @@ func computeSlabSize(data []byte) (int, error) {
	return size, nil
}

-func getExtraDataSize(h head, data []byte) (int, error) {
+func hasInlinedComposite(data []byte) (bool, error) {
+	if len(data) < versionAndFlagSize {
+		return false, NewDecodingError(fmt.Errorf("data is too short"))
+	}
+
+	h, err := newHeadFromData(data[:versionAndFlagSize])
+	if err != nil {
+		return false, NewDecodingError(err)
+	}
+
+	if !h.hasInlinedSlabs() {
+		return false, nil
+	}
+
+	data = data[versionAndFlagSize:]
+
+	// Skip slab extra data if needed.
+	if h.isRoot() {
+		dec := cbor.NewStreamDecoder(bytes.NewBuffer(data))
+		b, err := dec.DecodeRawBytes()
+		if err != nil {
+			return false, NewDecodingError(err)
+		}
+
+		data = data[len(b):]
+	}
+
+	// Parse inlined extra data to find compact map extra data.
+	dec := cbor.NewStreamDecoder(bytes.NewBuffer(data))
+	count, err := dec.DecodeArrayHead()
+	if err != nil {
+		return false, NewDecodingError(err)
+	}
+
+	for i := uint64(0); i < count; i++ {
+		tagNum, err := dec.DecodeTagNumber()
+		if err != nil {
+			return false, NewDecodingError(err)
+		}
+		if tagNum == CBORTagInlinedCompactMapExtraData {
+			return true, nil
+		}
+		err = dec.Skip()
+		if err != nil {
+			return false, NewDecodingError(err)
+		}
+	}
+
+	return false, nil
+}
+
+func getExtraDataSizes(h head, data []byte) (int, int, error) {
+
+	var slabExtraDataSize, inlinedSlabExtraDataSize int
+
	if h.isRoot() {
		dec := cbor.NewStreamDecoder(bytes.NewBuffer(data))
		b, err := dec.DecodeRawBytes()
		if err != nil {
-			return 0, NewDecodingError(err)
+			return 0, 0, NewDecodingError(err)
		}
-		return len(b), nil
+		slabExtraDataSize = len(b)
+
+		data = data[slabExtraDataSize:]
+	}
+
+	if h.hasInlinedSlabs() {
+		dec := cbor.NewStreamDecoder(bytes.NewBuffer(data))
+		b, err := dec.DecodeRawBytes()
+		if err != nil {
+			return 0, 0, NewDecodingError(err)
+		}
+		inlinedSlabExtraDataSize = len(b)
+	}
+
+	return slabExtraDataSize, inlinedSlabExtraDataSize, nil
+}
+
+// getSlabIDFromStorable appends slab IDs from storable to ids.
+// This function traverses child storables. If a child storable
+// is an inlined map or array, it is traversed as well. 
+func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID {
+	childStorables := storable.ChildStorables()
+
+	for _, e := range childStorables {
+		switch e := e.(type) {
+		case SlabIDStorable:
+			ids = append(ids, SlabID(e))
+
+		case *ArrayDataSlab:
+			ids = getSlabIDFromStorable(e, ids)
+
+		case *MapDataSlab:
+			ids = getSlabIDFromStorable(e, ids)
+		}
+	}
+
+	return ids
+}
+
+// verifyArrayValueID verifies array ValueID is always the same as
+// root slab's SlabID independent of array's inlined status.
+func verifyArrayValueID(a *Array) error {
+	rootSlabID := a.root.Header().slabID
+
+	vid := a.ValueID()
+
+	if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect first %d bytes of array value ID as %v, got %v",
+				slabAddressSize,
+				rootSlabID.address[:],
+				vid[:slabAddressSize]))
	}

-	return 0, nil
+	if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect second %d bytes of array value ID as %v, got %v",
+				slabIndexSize,
+				rootSlabID.index[:],
+				vid[slabAddressSize:]))
+	}
+
+	return nil
+}
+
+// verifyArraySlabID verifies array SlabID is either empty for inlined array, or
+// same as root slab's SlabID for not-inlined array.
+func verifyArraySlabID(a *Array) error {
+	sid := a.SlabID()
+
+	if a.Inlined() {
+		if sid != SlabIDUndefined {
+			return NewFatalError(
+				fmt.Errorf(
+					"expect empty slab ID for inlined array, got %v",
+					sid))
+		}
+		return nil
+	}
+
+	rootSlabID := a.root.Header().slabID
+
+	if sid == SlabIDUndefined {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect non-empty slab ID for not-inlined array, got %v",
+				sid))
+	}
+
+	if sid != rootSlabID {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect array slab ID same as root slab's slab ID %s, got %s",
+				rootSlabID,
+				sid))
+	}
+
+	return nil
+}
+
+func verifyNotInlinedValueStatusAndSize(v Value, maxInlineSize uint32) error {
+
+	switch v := v.(type) {
+	case *Array:
+		// Verify not-inlined array's inlined status
+		if v.root.Inlined() {
+			return NewFatalError(
+				fmt.Errorf(
+					"not-inlined array %s has inlined status",
+					v.root.Header().slabID))
+		}
+
+		// Verify not-inlined array size.
+		if v.root.IsData() {
+			inlinableSize := v.root.ByteSize() - arrayRootDataSlabPrefixSize + inlinedArrayDataSlabPrefixSize
+			if inlinableSize <= maxInlineSize {
+				return NewFatalError(
+					fmt.Errorf("not-inlined array root slab %s can be inlined, inlinable size %d <= max inline size %d",
+						v.root.Header().slabID,
+						inlinableSize,
+						maxInlineSize))
+			}
+		}
+
+	case *OrderedMap:
+		// Verify not-inlined map's inlined status
+		if v.Inlined() {
+			return NewFatalError(
+				fmt.Errorf(
+					"not-inlined map %s has inlined status",
+					v.root.Header().slabID))
+		}
+
+		// Verify not-inlined map size. 
+ if v.root.IsData() { + inlinableSize := v.root.ByteSize() - mapRootDataSlabPrefixSize + inlinedMapDataSlabPrefixSize + if inlinableSize <= maxInlineSize { + return NewFatalError( + fmt.Errorf("not-inlined map root slab %s can be inlined, inlinable size %d <= max inline size %d", + v.root.Header().slabID, + inlinableSize, + maxInlineSize)) + } + } + } + + return nil } diff --git a/array_test.go b/array_test.go index ad08ac9d..588f475e 100644 --- a/array_test.go +++ b/array_test.go @@ -29,18 +29,39 @@ import ( "github.com/stretchr/testify/require" ) -func verifyEmptyArray( +func testEmptyArrayV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, array *Array, ) { - verifyArray(t, storage, typeInfo, address, array, nil, false) + testArrayV0(t, storage, typeInfo, address, array, nil, false) } -// verifyArray verifies array elements and validates serialization and in-memory slab tree. -func verifyArray( +func testEmptyArray( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, +) { + testArray(t, storage, typeInfo, address, array, nil, false) +} + +func testArrayV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + values []Value, + hasNestedArrayMapElement bool, +) { + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, false) +} + +func testArray( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -48,40 +69,54 @@ func verifyArray( array *Array, values []Value, hasNestedArrayMapElement bool, +) { + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, true) +} + +// _testArray tests array elements, serialization, and in-memory slab tree. 
+func _testArray( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + expectedValues arrayValue, + hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, array.Type())) require.Equal(t, address, array.Address()) - require.Equal(t, uint64(len(values)), array.Count()) + require.Equal(t, uint64(len(expectedValues)), array.Count()) var err error // Verify array elements - for i, v := range values { - e, err := array.Get(uint64(i)) + for i, expected := range expectedValues { + actual, err := array.Get(uint64(i)) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify array elements by iterator i := 0 - err = array.Iterate(func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, values[i], v) + err = array.IterateReadOnly(func(v Value) (bool, error) { + valueEqual(t, expectedValues[i], v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(values), i) + require.Equal(t, len(expectedValues), i) // Verify in-memory slabs - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintArray(array) } require.NoError(t, err) // Verify slab serializations - err = ValidArraySerialization( + err = VerifyArraySerialization( array, storage.cborDecMode, storage.cborEncMode, @@ -116,7 +151,7 @@ func verifyArray( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(values) == 0 { + if len(expectedValues) == 0 { // Verify slab count for empty array require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -160,7 +195,7 @@ func TestArrayAppendAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArraySetAndGet(t *testing.T) { @@ -183,7 +218,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -195,10 +230,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs splitting and root slab reassignment caused by Set operation. 
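The renamed test helpers above compose in one pattern throughout this file. A minimal sketch (the test name is hypothetical; everything else uses helpers already present in this patch — SetThreshold, newTestPersistentStorage, testTypeInfo, NewStringValue, and testArray):

func TestArraySetSplitSketch(t *testing.T) {
	SetThreshold(256) // shrink the slab threshold so Set forces slab splits
	defer SetThreshold(1024)

	typeInfo := testTypeInfo{42}
	storage := newTestPersistentStorage(t)
	address := Address{1, 2, 3, 4, 5, 6, 7, 8}

	array, err := NewArray(storage, address, typeInfo)
	require.NoError(t, err)

	const arraySize = 50
	values := make([]Value, arraySize)

	// Append small values first, then overwrite them with larger ones so
	// data slabs overflow and split.
	for i := uint64(0); i < arraySize; i++ {
		values[i] = Uint64Value(i)
		require.NoError(t, array.Append(values[i]))
	}
	for i := uint64(0); i < arraySize; i++ {
		values[i] = NewStringValue(strings.Repeat("a", 22))
		existingStorable, err := array.Set(i, values[i])
		require.NoError(t, err)
		require.NotNil(t, existingStorable)
	}

	// testArray checks elements, serialization round-trip, and the
	// in-memory slab tree in one call.
	testArray(t, storage, typeInfo, address, array, values, false)
}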
@@ -229,7 +264,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -241,10 +276,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs merging and root slab reassignment caused by Set operation. @@ -276,7 +311,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -288,10 +323,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -326,7 +361,7 @@ func TestArraySetAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -354,7 +389,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-last", func(t *testing.T) { @@ -376,7 +411,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert", func(t *testing.T) { @@ -409,7 +444,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -443,7 +478,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -481,7 +516,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -491,11 +526,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, arraySize-i-1, array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[i+1:], false) + testArray(t, storage, typeInfo, address, array, values[i+1:], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, 
storage, typeInfo, address, array) }) t.Run("remove-last", func(t *testing.T) { @@ -528,7 +563,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -538,11 +573,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(i), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[:i], false) + testArray(t, storage, typeInfo, address, array, values[:i], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("remove", func(t *testing.T) { @@ -578,7 +613,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + valueEqual(t, v, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -591,13 +626,13 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(len(values)), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } } require.Equal(t, arraySize/2, len(values)) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -629,7 +664,7 @@ func TestArrayRemove(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBounds) require.ErrorAs(t, userError, &indexOutOfBounds) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -644,7 +679,7 @@ func TestArrayIterate(t *testing.T) { require.NoError(t, err) i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { i++ return true, nil }) @@ -671,7 +706,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -708,7 +743,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -741,7 +776,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -777,7 +812,7 @@ func TestArrayIterate(t *testing.T) { i := uint64(0) j := uint64(1) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(j), v) i++ j += 2 @@ -803,7 +838,7 @@ func TestArrayIterate(t *testing.T) { } i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ Value) (bool, error) { if i == count/2 { return false, nil } @@ -832,7 +867,7 @@ func TestArrayIterate(t *testing.T) { testErr := errors.New("test") i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ 
Value) (bool, error) { if i == count/2 { return false, testErr } @@ -847,6 +882,67 @@ func TestArrayIterate(t *testing.T) { require.Equal(t, count/2, i) }) + + t.Run("mutation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.True(t, array.root.IsData()) + + sizeBeforeMutation := array.root.Header().size + + i := 0 + newElement := Uint64Value(0) + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + err := childArray.Append(newElement) + require.NoError(t, err) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) } func testArrayIterateRange(t *testing.T, array *Array, values []Value) { @@ -858,7 +954,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { count := array.Count() // If startIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(count+1, count+1, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(count+1, count+1, func(v Value) (bool, error) { i++ return true, nil }) @@ -871,7 +967,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { require.Equal(t, uint64(0), i) // If endIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(0, count+1, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(0, count+1, func(v Value) (bool, error) { i++ return true, nil }) @@ -883,7 +979,7 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { // If startIndex > endIndex, IterateRange returns InvalidSliceIndexError if count > 0 { - err = array.IterateRange(1, 0, func(v Value) (bool, error) { + err = array.IterateReadOnlyRange(1, 0, func(v Value) (bool, error) { i++ return true, nil }) @@ -898,8 +994,8 @@ func testArrayIterateRange(t *testing.T, array *Array, values []Value) { for startIndex := uint64(0); startIndex <= count; startIndex++ { for endIndex := startIndex; endIndex <= count; endIndex++ { i = uint64(0) - err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, v, values[int(startIndex+i)]) + err = array.IterateReadOnlyRange(startIndex, endIndex, func(v Value) (bool, error) { + valueEqual(t, v, values[int(startIndex+i)]) i++ return true, nil }) @@ -980,7 +1076,7 @@ func TestArrayIterateRange(t *testing.T) { startIndex := uint64(1) endIndex := uint64(5) count := endIndex - 
startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { if i == count/2 { return false, nil } @@ -1009,7 +1105,7 @@ func TestArrayIterateRange(t *testing.T) { startIndex := uint64(1) endIndex := uint64(5) count := endIndex - startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { if i == count/2 { return false, testErr } @@ -1023,6 +1119,70 @@ func TestArrayIterateRange(t *testing.T) { require.Equal(t, testErr, externalError.Unwrap()) require.Equal(t, count/2, i) }) + + t.Run("mutation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + require.True(t, array.root.IsData()) + + sizeBeforeMutation := array.root.Header().size + + i := 0 + startIndex := uint64(1) + endIndex := array.Count() - 2 + newElement := Uint64Value(0) + err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + err := childArray.Append(newElement) + require.NoError(t, err) + + index := int(startIndex) + i + expectedChildArrayValues, ok := expectedValues[index].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[index] = expectedChildArrayValues + + i++ + + require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, endIndex-startIndex, uint64(i)) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) } func TestArrayRootSlabID(t *testing.T) { @@ -1100,10 +1260,10 @@ func TestArraySetRandomValues(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArrayInsertRandomValues(t *testing.T) { @@ -1133,7 +1293,7 @@ func TestArrayInsertRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-last", func(t *testing.T) { @@ -1158,7 +1318,7 @@ func TestArrayInsertRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert-random", func(t *testing.T) { @@ -1186,7 +1346,7 @@ func TestArrayInsertRandomValues(t *testing.T) { 
require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -1216,7 +1376,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) // Remove n elements at random index for i := uint64(0); i < arraySize; i++ { @@ -1227,7 +1387,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) + valueEqual(t, values[k], existingValue) copy(values[k:], values[k+1:]) values = values[:len(values)-1] @@ -1238,7 +1398,7 @@ func TestArrayRemoveRandomValues(t *testing.T) { } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) } func testArrayAppendSetInsertRemoveRandomValues( @@ -1295,7 +1455,7 @@ func testArrayAppendSetInsertRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldV, existingValue) + valueEqual(t, oldV, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1325,7 +1485,7 @@ func testArrayAppendSetInsertRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) + valueEqual(t, values[k], existingValue) copy(values[k:], values[k+1:]) values = values[:len(values)-1] @@ -1358,10 +1518,10 @@ func TestArrayAppendSetInsertRemoveRandomValues(t *testing.T) { address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } -func TestArrayNestedArrayMap(t *testing.T) { +func TestArrayWithChildArrayMap(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) @@ -1370,143 +1530,152 @@ func TestArrayNestedArrayMap(t *testing.T) { const arraySize = 4096 - nestedTypeInfo := testTypeInfo{43} + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create a list of arrays with 2 elements. - nestedArrays := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - nested, err := NewArray(storage, address, nestedTypeInfo) - require.NoError(t, err) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err = nested.Append(Uint64Value(i)) + // Create child arrays with 1 element. 
+	expectedValues := make([]Value, arraySize)
+	for i := uint64(0); i < arraySize; i++ {
+		childArray, err := NewArray(storage, address, childTypeInfo)
		require.NoError(t, err)

-		require.True(t, nested.root.IsData())
-
-		nestedArrays[i] = nested
-	}
+		v := Uint64Value(i)

-	typeInfo := testTypeInfo{42}
+		err = childArray.Append(v)
+		require.NoError(t, err)

-	array, err := NewArray(storage, address, typeInfo)
-	require.NoError(t, err)
+		require.True(t, childArray.root.IsData())
+		require.False(t, childArray.Inlined())

-	for _, a := range nestedArrays {
-		err := array.Append(a)
+		err = array.Append(childArray)
		require.NoError(t, err)
+		require.True(t, childArray.Inlined())
+
+		expectedValues[i] = arrayValue{v}
	}

-	verifyArray(t, storage, typeInfo, address, array, nestedArrays, false)
+	testArray(t, storage, typeInfo, address, array, expectedValues, false)
	})

	t.Run("big array", func(t *testing.T) {

		const arraySize = 4096
+		const childArraySize = 40

-		nestedTypeInfo := testTypeInfo{43}
+		typeInfo := testTypeInfo{42}
+		childTypeInfo := testTypeInfo{43}

		storage := newTestPersistentStorage(t)

		address := Address{1, 2, 3, 4, 5, 6, 7, 8}

-		values := make([]Value, arraySize)
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		// Create child arrays with 40 elements.
+		expectedValues := make([]Value, arraySize)
		for i := uint64(0); i < arraySize; i++ {
-			nested, err := NewArray(storage, address, nestedTypeInfo)
+			childArray, err := NewArray(storage, address, childTypeInfo)
			require.NoError(t, err)

-			for i := uint64(0); i < 40; i++ {
-				err := nested.Append(Uint64Value(math.MaxUint64))
-				require.NoError(t, err)
-			}
+			expectedChildArrayValues := make([]Value, childArraySize)
+			for i := uint64(0); i < childArraySize; i++ {
+				v := Uint64Value(math.MaxUint64)

-			require.False(t, nested.root.IsData())
+				err := childArray.Append(v)
+				require.NoError(t, err)

-			values[i] = nested
-		}
+				expectedChildArrayValues[i] = v
+			}

-		typeInfo := testTypeInfo{42}
+			require.False(t, childArray.root.IsData())

-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
-		for _, a := range values {
-			err := array.Append(a)
+			err = array.Append(childArray)
			require.NoError(t, err)
+			require.False(t, childArray.Inlined())
+
+			expectedValues[i] = arrayValue(expectedChildArrayValues)
		}

-		verifyArray(t, storage, typeInfo, address, array, values, true)
+		testArray(t, storage, typeInfo, address, array, expectedValues, true)
	})

	t.Run("small map", func(t *testing.T) {

		const arraySize = 4096

-		nestedTypeInfo := testTypeInfo{43}
-
+		typeInfo := testTypeInfo{42}
+		childArrayTypeInfo := testTypeInfo{43}
		storage := newTestPersistentStorage(t)
-
		address := Address{1, 2, 3, 4, 5, 6, 7, 8}

-		nestedMaps := make([]Value, arraySize)
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		expectedValues := make([]Value, arraySize)
		for i := uint64(0); i < arraySize; i++ {
-			nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo)
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childArrayTypeInfo)
			require.NoError(t, err)

-			storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2))
+			k := Uint64Value(i)
+			v := Uint64Value(i * 2)
+			storable, err := childMap.Set(compare, hashInputProvider, k, v)
			require.NoError(t, err)
			require.Nil(t, storable)

-			require.True(t, nested.root.IsData())
-
-			nestedMaps[i] = nested
-		}
-
-		typeInfo := testTypeInfo{42}
-
-		array, err := NewArray(storage, address, typeInfo)
-
require.NoError(t, err) + require.True(t, childMap.root.IsData()) - for _, a := range nestedMaps { - err := array.Append(a) + err = array.Append(childMap) require.NoError(t, err) + + expectedValues[i] = mapValue{k: v} } - verifyArray(t, storage, typeInfo, address, array, nestedMaps, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) t.Run("big map", func(t *testing.T) { const arraySize = 4096 + typeInfo := testTypeInfo{42} nestedTypeInfo := testTypeInfo{43} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) require.NoError(t, err) + expectedChildMapValues := mapValue{} for i := uint64(0); i < 25; i++ { - storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2)) + k := Uint64Value(i) + v := Uint64Value(i * 2) + + storable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, storable) - } - - require.False(t, nested.root.IsData()) - values[i] = nested - } + expectedChildMapValues[k] = v + } - typeInfo := testTypeInfo{42} + require.False(t, childMap.root.IsData()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - for _, a := range values { - err := array.Append(a) + err = array.Append(childMap) require.NoError(t, err) + + expectedValues[i] = expectedChildMapValues } - verifyArray(t, storage, typeInfo, address, array, values, true) + testArray(t, storage, typeInfo, address, array, expectedValues, true) }) } @@ -1552,7 +1721,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArrayV0(t, storage, typeInfo, address, array) }) t.Run("dataslab as root", func(t *testing.T) { @@ -1598,19 +1767,20 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage, arraySlabID) require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArrayV0(t, storage, typeInfo, address, array, values, false) }) t.Run("metadataslab as root", func(t *testing.T) { storage := newTestBasicStorage(t) typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} address := Address{1, 2, 3, 4, 5, 6, 7, 8} arraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} arrayDataSlabID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} arrayDataSlabID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - nestedArraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + childArraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} const arraySize = 20 values := make([]Value, arraySize) @@ -1618,16 +1788,15 @@ func TestArrayDecodeV0(t *testing.T) { values[i] = NewStringValue(strings.Repeat("a", 22)) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - nestedArray.root.SetSlabID(nestedArraySlabID) + childArray, err := NewArray(storage, address, childTypeInfo) + childArray.root.SetSlabID(childArraySlabID) require.NoError(t, err) - err = 
nestedArray.Append(Uint64Value(0)) + v := Uint64Value(0) + err = childArray.Append(v) require.NoError(t, err) - values[arraySize-1] = nestedArray + values[arraySize-1] = arrayValue{v} slabData := map[SlabID][]byte{ // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] @@ -1705,7 +1874,7 @@ func TestArrayDecodeV0(t *testing.T) { }, // (data slab) next: 0, data: [0] - nestedArraySlabID: { + childArraySlabID: { // extra data // version 0x00, @@ -1734,7 +1903,7 @@ func TestArrayDecodeV0(t *testing.T) { array, err := NewArrayWithRootID(storage2, arraySlabID) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array, values, false) + testArrayV0(t, storage2, typeInfo, address, array, values, false) }) } @@ -1779,10 +1948,10 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyEmptyArray(t, storage2, typeInfo, address, array2) + testEmptyArray(t, storage2, typeInfo, address, array2) }) - t.Run("dataslab as root", func(t *testing.T) { + t.Run("root dataslab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1825,10 +1994,10 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + testArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("has pointers", func(t *testing.T) { + t.Run("root metadata slab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1836,35 +2005,19 @@ func TestArrayEncodeDecode(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - const arraySize = 20 + const arraySize = 18 values := make([]Value, arraySize) - for i := uint64(0); i < arraySize-1; i++ { + for i := uint64(0); i < arraySize; i++ { v := NewStringValue(strings.Repeat("a", 22)) values[i] = v + err := array.Append(v) require.NoError(t, err) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - require.NoError(t, err) - - err = nestedArray.Append(Uint64Value(0)) - require.NoError(t, err) - - values[arraySize-1] = nestedArray - - err = array.Append(nestedArray) - require.NoError(t, err) - - require.Equal(t, uint64(arraySize), array.Count()) - require.Equal(t, uint64(1), nestedArray.Count()) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ @@ -1892,8 +2045,8 @@ func TestArrayEncodeDecode(t *testing.T) { 0x00, 0xe4, // child header 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x0b, - 0x01, 0x0e, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, }, // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] @@ -1918,14 +2071,14 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + // (data slab) data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] id3: { // version 0x10, // array data slab flag - 0x40, + 0x00, // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x0b, + 0x99, 0x00, 0x09, // CBOR encoded array elements 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, @@ -1936,27 +2089,6 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - - // (data slab) next: 0, data: [0] - id4: { - // version - 0x10, - // extra data flag - 0x80, - - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, }, } @@ -1966,7 +2098,6 @@ func TestArrayEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], m[id1]) require.Equal(t, expected[id2], m[id2]) require.Equal(t, expected[id3], m[id3]) - require.Equal(t, expected[id4], m[id4]) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, m) @@ -1975,1741 +2106,5337 @@ func TestArrayEncodeDecode(t *testing.T) { array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + testArray(t, storage2, typeInfo, address, array2, values, false) }) -} - -func TestArrayEncodeDecodeRandomValues(t *testing.T) { - - SetThreshold(256) - defer SetThreshold(1024) - const opCount = 8192 - - r := newRand(t) + // Same type info is reused. 
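+	// Both inlined child arrays share childTypeInfo, so the expected
+	// encoding below carries a single inlined extra data entry (0x81) and
+	// each inlined element refers to it by extra data index 0 instead of
+	// re-encoding the type info.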
+ t.Run("root data slab, inlined child array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - verifyArray(t, storage, typeInfo, address, array, values, false) + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) - // Decode data to new storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + err = childArray.Append(v) + require.NoError(t, err) - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + err = parentArray.Append(childArray) + require.NoError(t, err) - verifyArray(t, storage2, typeInfo, address, array2, values, false) -} + expectedValues[i] = arrayValue{v} + } -func TestEmptyArray(t *testing.T) { + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - t.Parallel() + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestBasicStorage(t) + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - t.Run("get", func(t *testing.T) { - s, err := array.Get(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } - t.Run("set", func(t *testing.T) { - s, err := array.Set(0, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - t.Run("insert", func(t *testing.T) { - err := array.Insert(1, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - }) + // 
Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - t.Run("remove", func(t *testing.T) { - s, err := array.Remove(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) - t.Run("iterate", func(t *testing.T) { - i := uint64(0) - err := array.Iterate(func(v Value) (bool, error) { - i++ - return true, nil - }) + // Different type info are encoded. + t.Run("root data slab, inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), i) - }) - t.Run("count", func(t *testing.T) { - count := array.Count() - require.Equal(t, uint64(0), count) + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = childArray.Append(v) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, array.Type())) + // Same type info is reused. 
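+	// For example, the first element of the fixture below decodes as
+	// (annotated from the expected bytes; indentation shows nesting):
+	//
+	//   0xd8, 0xfa, 0x83          inlined child array
+	//     0x18, 0x00              extra data index 0 (child type info)
+	//     0x48, ... 0x03          value ID index 3
+	//     0x99, 0x00, 0x01        1 element:
+	//       0xd8, 0xfa, 0x83      inlined grandchild array
+	//         0x18, 0x01          extra data index 1 (grandchild type info)
+	//         0x48, ... 0x02      value ID index 2
+	//         0x99, 0x00, 0x01    1 element:
+	//           0xd8, 0xa4, 0x00  Uint64Value(0)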
+ t.Run("root data slab, multiple levels of inlined array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + err = gchildArray.Append(v) + require.NoError(t, err) + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) }) - // TestArrayEncodeDecode/empty tests empty array encoding and decoding -} + t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + typeInfo4 := testTypeInfo{45} + typeInfo5 := testTypeInfo{46} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} -func TestArrayStringElement(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - t.Parallel() + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + var ti TypeInfo + if i == 0 { + ti = typeInfo2 + } else { + ti = typeInfo4 + } + gchildArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = gchildArray.Append(v) + require.NoError(t, err) + + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo5 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + 
require.NoError(t, err) + + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x84, + // typeInfo3 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // typeInfo2 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // typeInfo5 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2e, + // typeInfo4 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2d, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x03, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + for i := 0; i < 2; i++ { + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, + + // (data slab) next: 3, data: 
[aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... [0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + 
require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + for i := 0; i < 2; i++ { + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + v := Uint64Value(i) + + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... [0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + 
require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("has pointers", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + const childArraySize = 5 + + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for i := 0; i < childArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = childArray.Append(v) + require.NoError(t, err) + expectedChildArrayValues[i] = v + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue(expectedChildArrayValues)) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(5), childArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + id3: { + // version (no next slab ID, no inlined slabs) + 0x10, + // array data slab flag + 0x40, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // (data slab) next: 0, data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("has pointers in inlined slab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + const gchildArraySize = 5 + + expectedGChildArrayValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + + err = gchildArray.Append(v) + require.NoError(t, err) + + expectedGChildArrayValues[i] = v + } + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{ + arrayValue(expectedGChildArrayValues), + }) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(5), gchildArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:287 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // 
child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x1f, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[SlabID(...)]] + id3: { + // version (no next slab ID, has inlined slabs) + 0x11, + // array data slab flag (has pointer) + 0x40, + + // inlined array of extra data + 0x81, + // type info + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + }, + + // (data slab) data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) +} + +func TestArrayEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const opCount = 8192 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + + testArray(t, storage, typeInfo, address, array, values, false) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, values, false) +} + +func TestEmptyArray(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestBasicStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := array.Get(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("set", func(t *testing.T) { + s, err := array.Set(0, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("insert", func(t *testing.T) { + err := array.Insert(1, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, 
&indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + }) + + t.Run("remove", func(t *testing.T) { + s, err := array.Remove(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("iterate", func(t *testing.T) { + i := uint64(0) + err := array.IterateReadOnly(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("count", func(t *testing.T) { + count := array.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, array.Type())) + }) + + // TestArrayEncodeDecode/empty tests empty array encoding and decoding +} + +func TestArrayStringElement(t *testing.T) { + + t.Parallel() + + t.Run("inline", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize - 3) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(0), stats.StorableSlabCount) + }) + + t.Run("external slab", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize + 512) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(arraySize), stats.StorableSlabCount) + }) +} + +func TestArrayStoredValue(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + rootID := array.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + array2, ok := value.(*Array) + require.True(t, ok) + + testArray(t, storage, typeInfo, address, array2, values, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError 
*FatalError
+			var notValueError *NotValueError
+			require.ErrorAs(t, err, &fatalError)
+			require.ErrorAs(t, err, &notValueError)
+			require.ErrorAs(t, fatalError, &notValueError)
+			require.Nil(t, value)
+		}
+	}
+}
+
+func TestArrayPopIterate(t *testing.T) {
+
+	t.Run("empty", func(t *testing.T) {
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		i := uint64(0)
+		err = array.PopIterate(func(v Storable) {
+			i++
+		})
+		require.NoError(t, err)
+		require.Equal(t, uint64(0), i)
+
+		testEmptyArray(t, storage, typeInfo, address, array)
+	})
+
+	t.Run("root-dataslab", func(t *testing.T) {
+
+		const arraySize = 10
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		values := make([]Value, arraySize)
+		for i := uint64(0); i < arraySize; i++ {
+			v := Uint64Value(i)
+			values[i] = v
+			err := array.Append(v)
+			require.NoError(t, err)
+		}
+
+		i := 0
+		err = array.PopIterate(func(v Storable) {
+			vv, err := v.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, values[arraySize-i-1], vv)
+			i++
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+
+		testEmptyArray(t, storage, typeInfo, address, array)
+	})
+
+	t.Run("root-metaslab", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		const arraySize = 4096
+
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+		array, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)
+
+		values := make([]Value, arraySize)
+		for i := uint64(0); i < arraySize; i++ {
+			v := Uint64Value(i)
+			values[i] = v
+			err := array.Append(v)
+			require.NoError(t, err)
+		}
+
+		i := 0
+		err = array.PopIterate(func(v Storable) {
+			vv, err := v.StoredValue(storage)
+			require.NoError(t, err)
+			valueEqual(t, values[arraySize-i-1], vv)
+			i++
+		})
+		require.NoError(t, err)
+		require.Equal(t, arraySize, i)
+
+		testEmptyArray(t, storage, typeInfo, address, array)
+	})
+}
+
+func TestArrayFromBatchData(t *testing.T) {
+
+	t.Run("empty", func(t *testing.T) {
+		typeInfo := testTypeInfo{42}
+
+		array, err := NewArray(
+			newTestPersistentStorage(t),
+			Address{1, 2, 3, 4, 5, 6, 7, 8},
+			typeInfo)
+		require.NoError(t, err)
+		require.Equal(t, uint64(0), array.Count())
+
+		iter, err := array.ReadOnlyIterator()
+		require.NoError(t, err)
+
+		// Create a new array with new storage, new address, and original array's elements.
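+		// NewArrayFromBatchData allocates a fresh root slab under the target
+		// address, so the copy's SlabID is expected to differ from the
+		// original's (asserted below).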
+ address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + testEmptyArray(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, v) + + err = array.Insert(0, v) + require.NoError(t, err) + + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, 
err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + } + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, nil) + copy(values[25+1:], values[25:]) + values[25] = v + + err = array.Insert(25, v) + require.NoError(t, err) + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. 
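+		// The elements below are near the maximum inline size, so a naive
+		// batch copy could produce a data slab larger than maxThreshold;
+		// NewArrayFromBatchData is expected to split the batch instead.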
+ + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) +} + +func TestArrayNestedStorables(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + + const arraySize = 1024 * 4 + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := strings.Repeat("a", int(i)) + v := SomeValue{Value: NewStringValue(s)} + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, true) +} + +func TestArrayMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var values []Value + for i := 0; i < 2; i++ { + // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. + v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + // Size of root data slab with two elements of max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab), and minus 1 byte + // (for rounding when computing max inline array element size). 
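+	// For example, with the 1024-byte target threshold these tests restore via
+	// SetThreshold(1024) and a 16-byte slab ID (8-byte address + 8-byte index),
+	// the expected root slab size is 1024 - 16 - 1 = 1007 bytes.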
+ require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + + testArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5]` + require.Equal(t, want, array.String()) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` + require.Equal(t, want, array.String()) + }) +} + +func TestArraySlabDump(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + } + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", + "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", + "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("overflow", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 
8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) + require.NoError(t, err) + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func errorCategorizationCount(err error) int { + var fatalError *FatalError + var userError *UserError + var externalError *ExternalError + + count := 0 + if errors.As(err, &fatalError) { + count++ + } + if errors.As(err, &userError) { + count++ + } + if errors.As(err, &externalError) { + count++ + } + return count +} + +func TestArrayLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, nil) + }) + + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[i+1:] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root 
data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[:i] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + unloadValueIndex := 1 + + slabID := childSlabIDs[unloadValueIndex] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + i := 0 + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { + // At this point, iterator returned first element (v). + + // Remove all other nested composite elements (except first element) from storage. + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) + require.NoError(t, err) + } + + require.Equal(t, 0, i) + valueEqual(t, values[0], v) + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 1, i) // Only first element is iterated because other elements are removed during iteration. 
+ }) + + t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 3 + + // Create an array with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { + storage := newTestPersistentStorage(t) + + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) + + // parent array: 1 root data slab + // nested composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element + err := storage.Remove(childSlabID) + require.NoError(t, err) + + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + require.Equal(t, 3, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(childSlabIDs); i++ { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[i+1:] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(childSlabIDs) - 1; i >= 0; i-- { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[:i] + testArrayLoadedElements(t, array, 
expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + for _, index := range []int{4, 14} { + + slabID := childSlabIDs[index] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 20 + + // Create an array with composite value at specified index. + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { + storage := newTestPersistentStorage(t) + + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+1, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite value + err := storage.Remove(childSlabID) + require.NoError(t, err) + + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from front to back + for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from back to front + for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[:len(values)-int(childHeader.count)] + + 
testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + require.True(t, len(metaDataSlab.childrenHeaders) > 2) + + index := 1 + childHeader := metaDataSlab.childrenHeaders[index] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) + values = values[:array.Count()-uint64(childHeader.count)] + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from front to back + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from back to front + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[:len(values)-int(childHeader.count)] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + r := newRand(t) + + // Unload random composite element + for len(values) > 0 { + + i := r.Intn(len(values)) + + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + copy(childSlabIDs[i:], 
childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + type slabInfo struct { + id SlabID + startIndex int + count int + } + + count := 0 + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { + nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + for _, h := range nonrootMetaDataSlab.childrenHeaders { + dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} + dataSlabInfos = append(dataSlabInfos, dataSlabInfo) + count += int(h.count) + } + } + + r := newRand(t) + + // Unload random data slab. + for len(dataSlabInfos) > 0 { + indexToUnload := r.Intn(len(dataSlabInfos)) + + slabInfoToUnload := dataSlabInfos[indexToUnload] + + // Update startIndex for all data slabs after indexToUnload. + for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { + dataSlabInfos[i].startIndex -= slabInfoToUnload.count + } + + // Remove slabInfo to be unloaded from dataSlabInfos. + copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) + dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + + err := storage.Remove(slabInfoToUnload.id) + require.NoError(t, err) + + copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) + values = values[:len(values)-slabInfoToUnload.count] + + testArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) + + t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + type slabInfo struct { + id SlabID + startIndex int + count int + children []*slabInfo + } + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + var dataSlabCount, metadataSlabCount int + nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { + + nonrootMetadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: metadataSlabCount, + count: int(mheader.count), + } + metadataSlabCount += int(mheader.count) + + nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders)) + for i, h := range nonrootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: 
h.slabID, + startIndex: dataSlabCount, + count: int(h.count), + } + dataSlabCount += int(h.count) + } + + nonrootMetadataSlabInfo.children = children + nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo + } + + r := newRand(t) + + const ( + metadataSlabType int = iota + dataSlabType + maxSlabType + ) + + for len(nonrootMetadataSlabInfos) > 0 { + + var slabInfoToBeRemoved *slabInfo + var isLastSlab bool + + // Unload random metadata or data slab. + switch r.Intn(maxSlabType) { + + case metadataSlabType: + // Unload metadata slab at random index. + metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + + slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequent metadata and data slabs. + for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + + case dataSlabType: + // Unload data slab at random index. + metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + + dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + + slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + + isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && + (dataSlabIndex == len(metaSlabInfo.children)-1) + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequent data slabs. + for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { + metaSlabInfo.children[i].startIndex -= count + } + + copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) + metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + + metaSlabInfo.count -= count + + // Update startIndex for all subsequent metadata slabs. 
+ for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + if len(metaSlabInfo.children) == 0 { + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + } + } + + err := storage.Remove(slabInfoToBeRemoved.id) + require.NoError(t, err) + + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + testArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) +} + +func createArrayWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { + + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + r := rune('a') + for i := 0; i < arraySize; i++ { + values[i] = NewStringValue(strings.Repeat(string(r), 20)) + + err := array.Append(values[i]) + require.NoError(t, err) + } + + return array, values +} + +func createArrayWithChildArrays( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value, []SlabID) { + const childArraySize = 50 + + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + childSlabIDs := make([]SlabID, arraySize) + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues[j] = v + } + + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabIDs[i] = childArray.SlabID() + + // Append nested array to parent + err = array.Append(childArray) + require.NoError(t, err) + } + + return array, expectedValues, childSlabIDs +} + +func createArrayWithSimpleAndChildArrayValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, + compositeValueIndex int, +) (*Array, []Value, SlabID) { + const childArraySize = 50 + + require.True(t, compositeValueIndex < arraySize) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + var childSlabID SlabID + r := 'a' + for i := 0; i < arraySize; i++ { + + if compositeValueIndex == i { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues[j] = v + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabID = childArray.SlabID() + } else { + v := NewStringValue(strings.Repeat(string(r), 20)) + r++ + + err = 
array.Append(v) + require.NoError(t, err) + + expectedValues[i] = v + } + } + + return array, expectedValues, childSlabID +} + +func testArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { + i := 0 + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, expectedValues[i], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} + +func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*ArrayMetaDataSlab); ok { + counter++ + } + } + return counter +} + +func TestArrayID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + sid := array.SlabID() + id := array.ValueID() + + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} + +func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { + const ( + arraySize = 3 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]*testMutableValue, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := newTestMutableValue(initialStorableSize) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + mv := values[i] + mv.updateStorableSize(mutatedStorableSize) + + existingStorable, err := array.Set(i, mv) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) +} + +func TestChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, with one child array", func(t *testing.T) { + const arraySize = 1 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
+ + // Test parent slab size with 1 empty inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + err = childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Add one more element to child array which triggers the inlined child array slab to become a standalone slab + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined. + + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove elements from child array which triggers the standalone array slab to become an inlined slab again. 
+ for childArray.Count() > 0 { + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues = expectedChildValues[1:] + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root data slab, with two child arrays", func(t *testing.T) { + const arraySize = 2 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 2 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + children[i].valueID = valueID + } + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. 
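+ // Size bookkeeping for the loop below: every append grows the inlined
+ // child slab by vSize (the element's encoded size), and because the child
+ // is stored inline, the parent root slab grows by the same vSize.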
+ for i := 0; i < 10; i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + expectedStoredDeltas := 1 + + // Add one more element to child array which triggers the inlined child array slab to become a standalone slab + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues + + expectedStoredDeltas++ + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) // There are more stored slabs because child array is no longer inlined. + + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + //expectedParentSize := arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + expectedParentSize -= inlinedArrayDataSlabPrefixSize + uint32(childArray.Count()-1)*vSize + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Remove one element from child array which triggers the standalone array slab to become an inlined slab again. 
+ for i, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues + + require.True(t, childArray.Inlined()) + + expectedStoredDeltas-- + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + expectedParentSize -= SlabIDStorable{}.ByteSize() + expectedParentSize += expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root metadata slab, with four child arrays", func(t *testing.T) { + const arraySize = 4 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
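+ // Sketch of the round trip exercised below (same accessors as above):
+ //
+ //	childArray.Append(v) // grows past max inline size -> uninlined;
+ //	                     // SlabID() == valueIDToSlabID(childArray.ValueID())
+ //	childArray.Remove(0) // shrinks under the limit -> inlined again;
+ //	                     // SlabID() == SlabIDUndefined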
+ + // Test parent slab size with 4 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + children[i].valueID = valueID + } + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues + + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has one data slab and all child arrays are not inlined. + require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) + + // Remove one element from child array which triggers standalone array slab becomes inlined slab again. + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Parent array has 1 data slab. + // All child arrays are inlined. 
+ require.Equal(t, 1, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) + + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) +} + +func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element, which has empty child array. + parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + // Get inlined grand child array + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. 
+ for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Add one more element to grand child array which triggers the inlined child array slab (NOT grand child array slab) to become a standalone slab + err = gchildArray.Append(v) + require.NoError(t, err) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined. + + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove elements from grand child array which triggers the standalone child array slab to become an inlined slab again. + for gchildArray.Count() > 0 { + existingStorable, err := gchildArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = expectedGChildValues[1:] + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers grand child array slab to become standalone slab", func(t *testing.T) { + const 
arraySize = 1 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element, which has empty child array. + parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + // Get inlined grand child array + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. 
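+ // Unlike the previous case, the loop below keeps both levels inlined; the
+ // later append of a larger value pushes the grand child itself over the
+ // limit, so the grand child uninlines while the child stays inlined and
+ // holds only a SlabIDStorable reference to it.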
+ for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Add one more element to grand child array which triggers the inlined grand child array slab (NOT child array slab) to become a standalone slab + largeValue := NewStringValue(strings.Repeat("b", 20)) + largeValueSize := largeValue.ByteSize() + err = gchildArray.Append(largeValue) + require.NoError(t, err) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, largeValue) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.False(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because grand child array is no longer inlined. + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab 
+		require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
+
+		expectedSlabID := valueIDToSlabID(gValueID)
+		require.Equal(t, expectedSlabID, gchildArray.SlabID()) // Slab ID is the same bytewise as value ID for standalone slab
+		require.Equal(t, gValueID, gchildArray.ValueID())      // Value ID is unchanged
+
+		// Test standalone grand child slab size
+		expectedStandaloneGrandChildSize := arrayRootDataSlabPrefixSize + uint32(gchildArray.Count()-1)*vSize + largeValueSize
+		require.Equal(t, expectedStandaloneGrandChildSize, gchildArray.root.ByteSize())
+
+		// Test inlined child slab size (child array stores a SlabIDStorable reference to the standalone grand child array)
+		expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize()
+		require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+		expectedParentSize = arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+		// Test array's mutableElementIndex
+		require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+		require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+		require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
+
+		testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+
+		// Remove elements from the grand child array, which triggers the standalone grand child array slab to become an inlined slab again.
+		for gchildArray.Count() > 0 {
+			_, err := gchildArray.Remove(gchildArray.Count() - 1)
+			require.NoError(t, err)
+
+			expectedChildValues, ok := expectedValues[0].(arrayValue)
+			require.True(t, ok)
+
+			expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+			require.True(t, ok)
+
+			expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1]
+			expectedChildValues[0] = expectedGChildValues
+			expectedValues[0] = expectedChildValues
+
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 1, getStoredDeltas(storage))
+
+			require.Equal(t, SlabIDUndefined, childArray.SlabID())
+			require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
+
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+			require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
-	t.Run("inline", func(t *testing.T) {
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		const arraySize = 4096
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
-		r := newRand(t)
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-		stringSize := int(maxInlineArrayElementSize - 3)
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			s := randStr(r, stringSize)
-			values[i] = NewStringValue(s)
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
+		require.Equal(t, uint64(0), gchildArray.Count())
+		require.Equal(t, uint64(1), childArray.Count())
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+	})
+
+	t.Run("parent is root data slab, two child arrays, one grand child array each, changes to child arrays trigger child array slabs to become standalone slabs", func(t *testing.T) {
+		const arraySize = 2
+
+		typeInfo := testTypeInfo{42}
 		storage := newTestPersistentStorage(t)
 		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		typeInfo := testTypeInfo{42}
-		array, err := NewArray(storage, address, typeInfo)
+		v := NewStringValue(strings.Repeat("a", 9))
+		vSize := v.size
+
+		// Create parent array
+		parentArray, err := NewArray(storage, address, typeInfo)
 		require.NoError(t, err)
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(values[i])
+		expectedValues := make([]Value, arraySize)
+		for i := 0; i < arraySize; i++ {
+			// Create child array
+			child, err := NewArray(storage, address, typeInfo)
 			require.NoError(t, err)
-		}
-		verifyArray(t, storage, typeInfo, address, array, values, false)
+			// Create grand child array
+			gchild, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)
-		stats, err := GetArrayStats(array)
-		require.NoError(t, err)
-		require.Equal(t, uint64(0), stats.StorableSlabCount)
-	})
+			// Append element to grand child array
+			err = gchild.Append(v)
+			require.NoError(t, err)
-	t.Run("external slab", func(t *testing.T) {
+			// Append grand child array to child array
+			err = child.Append(gchild)
+			require.NoError(t, err)
-		const arraySize = 4096
+			// Append child array to parent
+			err = parentArray.Append(child)
+			require.NoError(t, err)
-		r := newRand(t)
+			expectedValues[i] = arrayValue{arrayValue{v}}
+		}
-		stringSize := int(maxInlineArrayElementSize + 512)
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+		require.True(t, parentArray.root.IsData())
+		require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child arrays are inlined.
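+		// Note: getStoredDeltas is a helper defined elsewhere in this test file; it is assumed
+		// here to count the slabs currently held in the test storage's deltas, so it stays at 1
+		// while the parent root data slab is the only stored slab.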
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			s := randStr(r, stringSize)
-			values[i] = NewStringValue(s)
+		// Test parent slab size with 2 inlined child arrays
+		expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + vSize*arraySize
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+		// Test parent array's mutableElementIndex
+		require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
+
+		testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+
+		type arrayInfo struct {
+			array   *Array
+			valueID ValueID
+			child   *arrayInfo
 		}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		typeInfo := testTypeInfo{42}
+		children := make([]arrayInfo, arraySize)
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+		for i := 0; i < arraySize; i++ {
+			e, err := parentArray.Get(uint64(i))
+			require.NoError(t, err)
+			require.Equal(t, 1, getStoredDeltas(storage))
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(values[i])
+			childArray, ok := e.(*Array)
+			require.True(t, ok)
+			require.True(t, childArray.Inlined())
+			require.Equal(t, SlabIDUndefined, childArray.SlabID())
+
+			valueID := childArray.ValueID()
+			require.Equal(t, address[:], valueID[:slabAddressSize])
+			require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+
+			e, err = childArray.Get(0)
 			require.NoError(t, err)
+			require.Equal(t, 1, getStoredDeltas(storage))
+
+			gchildArray, ok := e.(*Array)
+			require.True(t, ok)
+			require.True(t, gchildArray.Inlined())
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+
+			gValueID := gchildArray.ValueID()
+			require.Equal(t, address[:], gValueID[:slabAddressSize])
+			require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
+			require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+
+			children[i] = arrayInfo{
+				array:   childArray,
+				valueID: valueID,
+				child:   &arrayInfo{array: gchildArray, valueID: gValueID},
+			}
 		}
-		verifyArray(t, storage, typeInfo, address, array, values, false)
+		// Append 7 elements to each child array so that the inlined child arrays reach max inlined size as array elements.
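+		// Rough size accounting (mirrors the assertions below): an inlined child holding its
+		// 1-element inlined grand child plus i+1 appended values is expected to encode to
+		// inlinedArrayDataSlabPrefixSize + (inlinedArrayDataSlabPrefixSize + vSize) + (i+1)*vSize
+		// bytes, so 7 appends bring each child array to its maximum inlinable size.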
+		for i := 0; i < 7; i++ {
+			for j, child := range children {
+				childArray := child.array
+				valueID := child.valueID
+				gchildArray := child.child.array
+				gValueID := child.child.valueID
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+				err := childArray.Append(v)
+				require.NoError(t, err)
+				require.Equal(t, uint64(i+2), childArray.Count())
-		storage := newTestPersistentStorage(t)
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+				expectedChildValues, ok := expectedValues[j].(arrayValue)
+				require.True(t, ok)
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+				expectedChildValues = append(expectedChildValues, v)
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+				expectedValues[j] = expectedChildValues
-func TestArrayStoredValue(t *testing.T) {
+				require.True(t, childArray.Inlined())
+				require.True(t, gchildArray.Inlined())
+				require.Equal(t, 1, getStoredDeltas(storage))
-	const arraySize = 4096
+				require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, valueID, childArray.ValueID())        // Value ID is unchanged
-	typeInfo := testTypeInfo{42}
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-	storage := newTestPersistentStorage(t)
+				require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-	array, err := NewArray(storage, address, typeInfo)
-	require.NoError(t, err)
+				// Test inlined grand child slab size (1 element, unchanged)
+				expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize
+				require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-	values := make([]Value, arraySize)
-	for i := uint64(0); i < arraySize; i++ {
-		v := Uint64Value(i)
-		values[i] = v
-		err := array.Append(v)
-		require.NoError(t, err)
-	}
+				// Test inlined child slab size
+				expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(i+1)
+				require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-	rootID := array.SlabID()
+				// Test parent slab size
+				expectedParentSize += vSize
+				require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-	slabIterator, err := storage.SlabIterator()
-	require.NoError(t, err)
+				// Test array's mutableElementIndex
+				require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+				require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+				require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-	for {
-		id, slab := slabIterator()
+				testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
-		if id == SlabIDUndefined {
-			break
+		}
-		value, err := slab.StoredValue(storage)
+		// Add one more element to each child array, which triggers the inlined child array slab (NOT the grand child array slab) to become a standalone slab
+		for i, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
-		if id == rootID {
+			err = childArray.Append(v)
 			require.NoError(t, err)
-			array2, ok := value.(*Array)
+			expectedChildValues, ok := expectedValues[i].(arrayValue)
 			require.True(t, ok)
-			verifyArray(t, storage, typeInfo, address, array2, values, false)
-		} else {
-			require.Equal(t, 1, errorCategorizationCount(err))
-			var fatalError *FatalError
-			var notValueError *NotValueError
-			require.ErrorAs(t, err, &fatalError)
-			require.ErrorAs(t, err, &notValueError)
-			require.ErrorAs(t, fatalError, &notValueError)
-			require.Nil(t, value)
-		}
-	}
-}
+			expectedChildValues = append(expectedChildValues, v)
-func TestArrayPopIterate(t *testing.T) {
+			expectedValues[i] = expectedChildValues
-	t.Run("empty", func(t *testing.T) {
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			require.True(t, gchildArray.Inlined())
+			require.False(t, childArray.Inlined())
+			require.Equal(t, 2+i, getStoredDeltas(storage)) // There are 2+i stored slabs because child arrays are no longer inlined.
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			expectedSlabID := valueIDToSlabID(valueID)
+			require.Equal(t, expectedSlabID, childArray.SlabID()) // Slab ID is the same bytewise as value ID.
+			require.Equal(t, valueID, childArray.ValueID())       // Value ID is unchanged
-		i := uint64(0)
-		err = array.PopIterate(func(v Storable) {
-			i++
-		})
-		require.NoError(t, err)
-		require.Equal(t, uint64(0), i)
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-		verifyEmptyArray(t, storage, typeInfo, address, array)
-	})
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-	t.Run("root-dataslab", func(t *testing.T) {
+			expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
+			require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize())
-		const arraySize = 10
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+		require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because both child arrays are no longer inlined.
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := Uint64Value(i)
-			values[i] = v
-			err := array.Append(v)
-			require.NoError(t, err)
-		}
+		expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(SlabID{}).ByteSize()*2
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-		i := 0
-		err = array.PopIterate(func(v Storable) {
-			vv, err := v.StoredValue(storage)
+		// Remove one element from each child array to trigger child arrays being inlined again.
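+		// Re-inlining sketch (informal): each Remove shrinks a standalone child back under the
+		// inline size limit, so the stored slab count is expected to step down (2-i inside the
+		// loop below) until only the parent root data slab remains.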
+		expectedParentSize = arrayRootDataSlabPrefixSize
+
+		for i, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
+
+			_, err = childArray.Remove(childArray.Count() - 1)
 			require.NoError(t, err)
-			valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv)
-			i++
-		})
-		require.NoError(t, err)
-		require.Equal(t, arraySize, i)
-		verifyEmptyArray(t, storage, typeInfo, address, array)
-	})
+			expectedChildValues, ok := expectedValues[i].(arrayValue)
+			require.True(t, ok)
-	t.Run("root-metaslab", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+			expectedChildValues = expectedChildValues[:len(expectedChildValues)-1]
-		const arraySize = 4096
+			expectedValues[i] = expectedChildValues
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 2-i, getStoredDeltas(storage))
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, valueID, childArray.ValueID())        // Value ID is unchanged
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := Uint64Value(i)
-			values[i] = v
-			err := array.Append(v)
-			require.NoError(t, err)
-		}
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-		i := 0
-		err = array.PopIterate(func(v Storable) {
-			vv, err := v.StoredValue(storage)
-			require.NoError(t, err)
-			valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv)
-			i++
-		})
-		require.NoError(t, err)
-		require.Equal(t, arraySize, i)
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		verifyEmptyArray(t, storage, typeInfo, address, array)
-	})
-}
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
-func TestArrayFromBatchData(t *testing.T) {
+			expectedParentSize += expectedInlinedChildSize
-	t.Run("empty", func(t *testing.T) {
-		typeInfo := testTypeInfo{42}
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
-		require.Equal(t, uint64(0), array.Count())
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-		// Create a new array with new storage, new address, and original array's elements.
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		storage := newTestPersistentStorage(t)
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
-		require.NoError(t, err)
-		require.NotEqual(t, copied.SlabID(), array.SlabID())
+		// Remove elements from child array.
+		elementCount := children[0].array.Count()
-		verifyEmptyArray(t, storage, typeInfo, address, copied)
-	})
+		for i := uint64(0); i < elementCount-1; i++ {
+			for j, child := range children {
+				childArray := child.array
+				valueID := child.valueID
+				gchildArray := child.child.array
+				gValueID := child.child.valueID
-	t.Run("root-dataslab", func(t *testing.T) {
+				existingStorable, err := childArray.Remove(childArray.Count() - 1)
+				require.NoError(t, err)
+				require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable)
-		const arraySize = 10
+				expectedChildValues, ok := expectedValues[j].(arrayValue)
+				require.True(t, ok)
-		typeInfo := testTypeInfo{42}
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+				expectedChildValues = expectedChildValues[:len(expectedChildValues)-1]
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := Uint64Value(i)
-			values[i] = v
-			err := array.Append(v)
-			require.NoError(t, err)
-		}
+				expectedValues[j] = expectedChildValues
-		require.Equal(t, uint64(arraySize), array.Count())
+				require.True(t, gchildArray.Inlined())
+				require.True(t, childArray.Inlined())
+				require.Equal(t, 1, getStoredDeltas(storage))
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+				require.Equal(t, SlabIDUndefined, childArray.SlabID())
+				require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
-		// Create a new array with new storage, new address, and original array's elements.
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		storage := newTestPersistentStorage(t)
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+				require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+				require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
-		require.NoError(t, err)
-		require.NotEqual(t, copied.SlabID(), array.SlabID())
+				// Test inlined grand child slab size
+				expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+				require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+				// Test inlined child slab size
+				expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1)
+				require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-	t.Run("root-metaslab", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+				// Test parent slab size
+				expectedParentSize -= vSize
+				require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-		const arraySize = 4096
+				// Test array's mutableElementIndex
+				require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+				require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+				require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
+
+				testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
+		}
+
+		for _, child := range children {
+			require.Equal(t, uint64(1), child.child.array.Count())
+			require.Equal(t, uint64(1), child.array.Count())
+		}
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+	})
+
+	t.Run("parent is root metadata slab, with four child arrays, each child array has one grand child array", func(t *testing.T) {
+		const arraySize = 4
 		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
+		v := NewStringValue(strings.Repeat("a", 9))
+		vSize := v.size
+
+		// Create parent array
+		parentArray, err := NewArray(storage, address, typeInfo)
 		require.NoError(t, err)
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := Uint64Value(i)
-			values[i] = v
-			err := array.Append(v)
+		expectedValues := make([]Value, arraySize)
+		for i := 0; i < arraySize; i++ {
+			// Create child array
+			child, err := NewArray(storage, address, typeInfo)
 			require.NoError(t, err)
-		}
-		require.Equal(t, uint64(arraySize), array.Count())
+			// Create grand child array
+			gchild, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+			// Append grand child array to child array
+			err = child.Append(gchild)
+			require.NoError(t, err)
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		storage := newTestPersistentStorage(t)
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+			// Append child array to parent
+			err = parentArray.Append(child)
+			require.NoError(t, err)
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+			expectedValues[i] = arrayValue{arrayValue{}}
+		}
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+		require.True(t, parentArray.root.IsData())
+		require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child arrays are inlined.
-	t.Run("rebalance two data slabs", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+		expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-		typeInfo := testTypeInfo{42}
+		// Test parent array's mutableElementIndex
+		require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+		testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
-		var values []Value
-		var v Value
+		type arrayInfo struct {
+			array   *Array
+			valueID ValueID
+			child   *arrayInfo
+		}
-		v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2)))
-		values = append(values, v)
+		children := make([]arrayInfo, arraySize)
-		err = array.Insert(0, v)
-		require.NoError(t, err)
+		for i := 0; i < arraySize; i++ {
+			e, err := parentArray.Get(uint64(i))
+			require.NoError(t, err)
+			require.Equal(t, 1, getStoredDeltas(storage))
-		for i := 0; i < 35; i++ {
-			v = Uint64Value(i)
-			values = append(values, v)
+			childArray, ok := e.(*Array)
+			require.True(t, ok)
+			require.True(t, childArray.Inlined())
+			require.Equal(t, SlabIDUndefined, childArray.SlabID())
-			err = array.Append(v)
+			valueID := childArray.ValueID()
+			require.Equal(t, address[:], valueID[:slabAddressSize])
+			require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+
+			e, err = childArray.Get(0)
 			require.NoError(t, err)
+			require.Equal(t, 1, getStoredDeltas(storage))
+
+			gchildArray, ok := e.(*Array)
+			require.True(t, ok)
+			require.True(t, gchildArray.Inlined())
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+
+			gValueID := gchildArray.ValueID()
+			require.Equal(t, address[:], gValueID[:slabAddressSize])
+			require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
+			require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+
+			children[i] = arrayInfo{
+				array:   childArray,
+				valueID: valueID,
+				child:   &arrayInfo{array: gchildArray, valueID: gValueID},
+			}
 		}
-		require.Equal(t, uint64(36), array.Count())
+		// Append 6 elements to each grand child array; the parent array root slab stays a data slab with all children inlined.
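+		// Size intuition (informal): each append adds vSize bytes to an inlined grand child,
+		// inflating the parent root data slab toward its split threshold; the assertions in
+		// the loop below check that everything stays inlined in a single stored slab until then.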
+		for i := uint32(0); i < 6; i++ {
+			for j, child := range children {
+				childArray := child.array
+				valueID := child.valueID
+				gchildArray := child.child.array
+				gValueID := child.child.valueID
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+				err := gchildArray.Append(v)
+				require.NoError(t, err)
+				require.Equal(t, uint64(i+1), gchildArray.Count())
-		storage := newTestPersistentStorage(t)
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+				expectedChildValues, ok := expectedValues[j].(arrayValue)
+				require.True(t, ok)
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+				expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+				require.True(t, ok)
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+				expectedGChildValues = append(expectedGChildValues, v)
-	t.Run("merge two data slabs", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+				expectedChildValues[0] = expectedGChildValues
+				expectedValues[j] = expectedChildValues
-		typeInfo := testTypeInfo{42}
+				require.True(t, childArray.Inlined())
+				require.True(t, gchildArray.Inlined())
+				require.Equal(t, 1, getStoredDeltas(storage))
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+				require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, valueID, childArray.ValueID())        // Value ID is unchanged
-		var values []Value
-		var v Value
-		for i := 0; i < 35; i++ {
-			v = Uint64Value(i)
-			values = append(values, v)
-			err = array.Append(v)
-			require.NoError(t, err)
-		}
+				require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-		v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2)))
-		values = append(values, nil)
-		copy(values[25+1:], values[25:])
-		values[25] = v
+				// Test inlined grand child slab size
+				expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize*(i+1)
+				require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		err = array.Insert(25, v)
-		require.NoError(t, err)
+				// Test inlined child slab size
+				expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+				require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-		require.Equal(t, uint64(36), array.Count())
+				// Test array's mutableElementIndex
+				require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+				require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+				require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+				testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
+		}
-		storage := newTestPersistentStorage(t)
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+		// Add one more element to each grand child array, which triggers the parent array root slab to become a metadata slab (all elements are still inlined).
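+		// Expected storage layout after this step (per the assertions below): the parent root
+		// presumably exceeds its size threshold and splits into 1 metadata slab pointing at
+		// 2 data slabs (3 stored slabs in total), while every child and grand child array
+		// remains inlined inside those data slabs.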
+		for i, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+			err = gchildArray.Append(v)
+			require.NoError(t, err)
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+			expectedChildValues, ok := expectedValues[i].(arrayValue)
+			require.True(t, ok)
-	t.Run("random", func(t *testing.T) {
-		SetThreshold(256)
-		defer SetThreshold(1024)
+			expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+			require.True(t, ok)
-		const arraySize = 4096
+			expectedGChildValues = append(expectedGChildValues, v)
-		r := newRand(t)
+			expectedChildValues[0] = expectedGChildValues
+			expectedValues[i] = expectedChildValues
-		typeInfo := testTypeInfo{42}
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because parent root slab is a metadata slab.
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, valueID, childArray.ValueID())        // Value ID is unchanged
-		values := make([]Value, arraySize)
-		for i := uint64(0); i < arraySize; i++ {
-			v := randomValue(r, int(maxInlineArrayElementSize))
-			values[i] = v
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-			err := array.Append(v)
-			require.NoError(t, err)
-		}
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		require.Equal(t, uint64(arraySize), array.Count())
+			expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize())
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		storage := newTestPersistentStorage(t)
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+		require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs: 1 metadata slab + 2 data slabs; all child arrays are still inlined.
+		require.False(t, parentArray.root.IsData())
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+		// Add two more elements to each grand child array, which triggers:
+		// - child array slabs to become standalone slabs (grand child arrays are still inlined)
+		// - parent array root slab to become a data slab again
+		for i, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
+			expectedChildValues, ok := expectedValues[i].(arrayValue)
+			require.True(t, ok)
-	t.Run("data slab too large", func(t *testing.T) {
-		// Slab size must not exceed maxThreshold.
-		// We cannot make this problem happen after Atree Issue #193
-		// was fixed by PR #194 & PR #197. This test is to catch regressions.
+			expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+			require.True(t, ok)
-		SetThreshold(256)
-		defer SetThreshold(1024)
+			for j := 0; j < 2; j++ {
+				err = gchildArray.Append(v)
+				require.NoError(t, err)
-		r := newRand(t)
+				expectedGChildValues = append(expectedGChildValues, v)
+			}
-		typeInfo := testTypeInfo{42}
-		array, err := NewArray(
-			newTestPersistentStorage(t),
-			Address{1, 2, 3, 4, 5, 6, 7, 8},
-			typeInfo)
-		require.NoError(t, err)
+			expectedChildValues[0] = expectedGChildValues
+			expectedValues[i] = expectedChildValues
-		var values []Value
-		var v Value
+			require.True(t, gchildArray.Inlined())
+			require.False(t, childArray.Inlined())
-		v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2)))
-		values = append(values, v)
-		err = array.Append(v)
-		require.NoError(t, err)
+			expectedSlabID := valueIDToSlabID(valueID)
+			require.Equal(t, expectedSlabID, childArray.SlabID()) // Slab ID is the same bytewise as value ID.
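+			// Identity bookkeeping (informal): the uninlined child keeps its 16-byte identity;
+			// its new slab ID is assumed to be the byte-wise reinterpretation of its value ID
+			// (via valueIDToSlabID), while the parent now stores a SlabIDStorable reference.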
+			require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-		v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2)))
-		values = append(values, v)
-		err = array.Append(v)
-		require.NoError(t, err)
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-		v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2)))
-		values = append(values, v)
-		err = array.Append(v)
-		require.NoError(t, err)
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		iter, err := array.Iterator()
-		require.NoError(t, err)
+			expectedStandaloneChildSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedStandaloneChildSlabSize, childArray.root.ByteSize())
-		storage := newTestPersistentStorage(t)
-		address := Address{2, 3, 4, 5, 6, 7, 8, 9}
-		copied, err := NewArrayFromBatchData(
-			storage,
-			address,
-			array.Type(),
-			func() (Value, error) {
-				return iter.Next()
-			})
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		require.NoError(t, err)
-		require.NotEqual(t, array.SlabID(), copied.SlabID())
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-		verifyArray(t, storage, typeInfo, address, copied, values, false)
-	})
-}
+		// Parent array has one root data slab; each of the 4 child arrays has a standalone root data slab.
+		require.Equal(t, 1+arraySize, getStoredDeltas(storage))
+		require.True(t, parentArray.root.IsData())
-func TestArrayNestedStorables(t *testing.T) {
+		// Remove elements from each grand child array to trigger the child arrays becoming inlined again.
+		for i, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
-	t.Parallel()
+			expectedChildValues, ok := expectedValues[i].(arrayValue)
+			require.True(t, ok)
-	typeInfo := testTypeInfo{42}
+			expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+			require.True(t, ok)
-	const arraySize = 1024 * 4
+			for j := 0; j < 2; j++ {
+				_, err = gchildArray.Remove(0)
+				require.NoError(t, err)
-	storage := newTestPersistentStorage(t)
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+				expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1]
+			}
-	array, err := NewArray(storage, address, typeInfo)
-	require.NoError(t, err)
+			expectedChildValues[0] = expectedGChildValues
+			expectedValues[i] = expectedChildValues
-	values := make([]Value, arraySize)
-	for i := uint64(0); i < arraySize; i++ {
-		s := strings.Repeat("a", int(i))
-		v := SomeValue{Value: NewStringValue(s)}
-		values[i] = v
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
-		err := array.Append(v)
-		require.NoError(t, err)
-	}
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-	verifyArray(t, storage, typeInfo, address, array, values, true)
-}
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID())       // Value ID is unchanged
-func TestArrayMaxInlineElement(t *testing.T) {
-	t.Parallel()
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-	r := newRand(t)
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-	typeInfo := testTypeInfo{42}
-	storage := newTestPersistentStorage(t)
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-	array, err := NewArray(storage, address, typeInfo)
-	require.NoError(t, err)
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-	var values []Value
-	for i := 0; i < 2; i++ {
-		// String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead.
-		v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3)))
-		values = append(values, v)
+		// Parent array has 1 metadata slab and 2 data slabs; all child and grand child arrays are inlined.
+		require.Equal(t, 3, getStoredDeltas(storage))
+		require.False(t, parentArray.root.IsData())
-		err = array.Append(v)
-		require.NoError(t, err)
-	}
+		// Remove elements from grand child array.
+		elementCount := children[0].child.array.Count()
-	require.True(t, array.root.IsData())
+		for i := uint64(0); i < elementCount; i++ {
+			for j, child := range children {
+				childArray := child.array
+				valueID := child.valueID
+				gchildArray := child.child.array
+				gValueID := child.child.valueID
-	// Size of root data slab with two elements of max inlined size is target slab size minus
-	// slab id size (next slab id is omitted in root slab), and minus 1 byte
-	// (for rounding when computing max inline array element size).
-	require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size))
+				existingStorable, err := gchildArray.Remove(0)
+				require.NoError(t, err)
+				require.Equal(t, v, existingStorable)
-	verifyArray(t, storage, typeInfo, address, array, values, false)
-}
+				expectedChildValues, ok := expectedValues[j].(arrayValue)
+				require.True(t, ok)
-func TestArrayString(t *testing.T) {
+				expectedGChildValues, ok := expectedChildValues[0].(arrayValue)
+				require.True(t, ok)
-	SetThreshold(256)
-	defer SetThreshold(1024)
+				expectedGChildValues = expectedGChildValues[1:]
-	t.Run("small", func(t *testing.T) {
-		const arraySize = 6
+				expectedChildValues[0] = expectedGChildValues
+				expectedValues[j] = expectedChildValues
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+				require.True(t, gchildArray.Inlined())
+				require.True(t, childArray.Inlined())
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+				require.Equal(t, SlabIDUndefined, childArray.SlabID())
+				require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(Uint64Value(i))
-			require.NoError(t, err)
-		}
+				require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+				require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
-		want := `[0 1 2 3 4 5]`
-		require.Equal(t, want, array.String())
-	})
+				// Test inlined grand child slab size
+				expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+				require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-	t.Run("large", func(t *testing.T) {
-		const arraySize = 120
+				// Test inlined child slab size
+				expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+				require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+				// Test array's mutableElementIndex
+				require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+				require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count())
+				require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(Uint64Value(i))
-			require.NoError(t, err)
+				testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
		}
+		for _, child := range children {
+			require.Equal(t, uint64(0), child.child.array.Count())
+			require.Equal(t, uint64(1), child.array.Count())
		}
+		require.Equal(t, uint64(arraySize), parentArray.Count())
+		require.Equal(t, 1, getStoredDeltas(storage))
-		want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]`
-		require.Equal(t, want, array.String())
+		expectedParentSize = uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize*2
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
	})
}
-func TestArraySlabDump(t *testing.T) {
-	SetThreshold(256)
-	defer SetThreshold(1024)
+func TestChildArrayWhenParentArrayIsModified(t *testing.T) {
+
+	const arraySize = 2
+
+	typeInfo := testTypeInfo{42}
+	storage := newTestPersistentStorage(t)
+	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+	// Create an array with empty child arrays as elements.
+	parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize)
+
+	require.Equal(t, uint64(arraySize), parentArray.Count())
+	require.True(t, parentArray.root.IsData())
+	require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child arrays are inlined.
+
+	// Test parent slab size with empty inlined child arrays
+	expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize
+	require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+	// Test array's mutableElementIndex
+	require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-	t.Run("small", func(t *testing.T) {
-		const arraySize = 6
+	testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+	children := make([]*struct {
+		array       *Array
+		valueID     ValueID
+		parentIndex int
+	}, arraySize)
-		array, err := NewArray(storage, address, typeInfo)
+	for i := 0; i < arraySize; i++ {
+		e, err := parentArray.Get(uint64(i))
 		require.NoError(t, err)
+		require.Equal(t, 1, getStoredDeltas(storage))
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(Uint64Value(i))
-			require.NoError(t, err)
+		childArray, ok := e.(*Array)
+		require.True(t, ok)
+		require.True(t, childArray.Inlined())
+		require.Equal(t, SlabIDUndefined, childArray.SlabID())
+
+		valueID := childArray.ValueID()
+		require.Equal(t, address[:], valueID[:slabAddressSize])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+
+		children[i] = &struct {
+			array       *Array
+			valueID     ValueID
+			parentIndex int
+		}{
+			childArray, valueID, i,
		}
+	}
-		want := []string{
-			"level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]",
-		}
-		dumps, err := DumpArraySlabs(array)
+	t.Run("insert elements in parent array", func(t *testing.T) {
+		// insert value at index 0, so all child array indexes are moved by +1
+		v := Uint64Value(0)
+		err := parentArray.Insert(0, v)
 		require.NoError(t, err)
-		require.Equal(t, want, dumps)
-	})
+		expectedValues = append(expectedValues, nil)
+		copy(expectedValues[1:], expectedValues)
+		expectedValues[0] = v
-	t.Run("large", func(t *testing.T) {
-		const arraySize = 120
+		for i, child := range children {
+			childArray := child.array
+			childValueID := child.valueID
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			v := Uint64Value(i)
+			vSize := v.ByteSize()
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			err := childArray.Append(v)
+			require.NoError(t, err)
+			require.Equal(t, uint64(1), childArray.Count())
-		for i := uint64(0); i < arraySize; i++ {
-			err := array.Append(Uint64Value(i))
-			require.NoError(t, err)
-		}
+			child.parentIndex = i + 1
-		want := []string{
-			"level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]",
-			"level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]",
-			"level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]",
-		}
+			expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue)
+			require.True(t, ok)
-		dumps, err := DumpArraySlabs(array)
-		require.NoError(t, err)
-		require.Equal(t, want, dumps)
-	})
+			expectedChildValues = append(expectedChildValues, v)
+			expectedValues[child.parentIndex] = expectedChildValues
-	t.Run("overflow", func(t *testing.T) {
+			require.True(t, childArray.Inlined())
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, childValueID, childArray.ValueID())   // Value ID is unchanged
-		typeInfo := testTypeInfo{42}
-		storage := newTestPersistentStorage(t)
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			// Test inlined child slab size
+			expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize
+			require.Equal(t, expectedInlinedSize, childArray.root.ByteSize())
-		array, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize))))
-		require.NoError(t, err)
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
		}
-		want := []string{
-			"level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]",
-			"StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-		}
-		dumps, err := DumpArraySlabs(array)
+		// insert value at index 2, so only the second child array index is moved by +1
+		v = Uint64Value(2)
+		err = parentArray.Insert(2, v)
 		require.NoError(t, err)
-		require.Equal(t, want, dumps)
-	})
-}
-func errorCategorizationCount(err error) int {
-	var fatalError *FatalError
-	var userError *UserError
-	var externalError *ExternalError
+		expectedValues = append(expectedValues, nil)
+		copy(expectedValues[3:], expectedValues[2:])
+		expectedValues[2] = v
-	count := 0
-	if errors.As(err, &fatalError) {
-		count++
-	}
-	if errors.As(err, &userError) {
-		count++
-	}
-	if errors.As(err, &externalError) {
-		count++
-	}
-	return count
-}
+		for i, child := range children {
+			childArray := child.array
+			childValueID := child.valueID
-func TestArrayLoadedValueIterator(t *testing.T) {
+			v := Uint64Value(i)
+			vSize := v.ByteSize()
-	SetThreshold(256)
-	defer SetThreshold(1024)
+			err := childArray.Append(v)
+			require.NoError(t, err)
+			require.Equal(t, uint64(2), childArray.Count())
-	typeInfo := testTypeInfo{42}
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+			if i > 0 {
+				child.parentIndex++
+			}
-	t.Run("empty", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+			expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue)
+			require.True(t, ok)
-		array, err := NewArray(storage, address, typeInfo)
+			expectedChildValues = append(expectedChildValues, v)
+			expectedValues[child.parentIndex] = expectedChildValues
+
+			require.True(t, childArray.Inlined())
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, childValueID, childArray.ValueID())   // Value ID is unchanged
+
+			// Test inlined child slab size
+			expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize
+			require.Equal(t, expectedInlinedSize, childArray.root.ByteSize())
+
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
+
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
+
+		// insert value at index 4, so no child array indexes are affected.
+		v = Uint64Value(4)
+		err = parentArray.Insert(4, v)
 		require.NoError(t, err)
-		// parent array: 1 root data slab
-		require.Equal(t, 1, len(storage.deltas))
-		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+		expectedValues = append(expectedValues, nil)
+		expectedValues[4] = v
-		verifyArrayLoadedElements(t, array, nil)
-	})
+		for i, child := range children {
+			childArray := child.array
+			childValueID := child.valueID
-	t.Run("root data slab with simple values", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+			v := Uint64Value(i)
+			vSize := v.ByteSize()
-		const arraySize = 3
-		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+			err := childArray.Append(v)
+			require.NoError(t, err)
+			require.Equal(t, uint64(3), childArray.Count())
-		// parent array: 1 root data slab
-		require.Equal(t, 1, len(storage.deltas))
-		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+			expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue)
+			require.True(t, ok)
-		verifyArrayLoadedElements(t, array, values)
-	})
+			expectedChildValues = append(expectedChildValues, v)
+			expectedValues[child.parentIndex] = expectedChildValues
-	t.Run("root data slab with composite values", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+			require.True(t, childArray.Inlined())
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, childValueID, childArray.ValueID())   // Value ID is unchanged
-		const arraySize = 3
-		array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+			// Test inlined child slab size
+			expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize
+			require.Equal(t, expectedInlinedSize, childArray.root.ByteSize())
-		// parent array: 1 root data slab
-		// nested composite elements: 1 root data slab for each
-		require.Equal(t, 1+arraySize, len(storage.deltas))
-		require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+			// Test array's mutableElementIndex
+			require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+			require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
-		verifyArrayLoadedElements(t, array, values)
+			testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
	})
-	t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+	t.Run("remove elements from parent array", func(t *testing.T) {
+		// remove value at index 0, so all child array indexes are moved by -1.
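+		// Index bookkeeping (what this subtest exercises): removing a parent element in front
+		// of a tracked child must shift that child's index in the parent, so each subsequent
+		// child Append still reaches the correct parent slot; parentIndex mirrors that here.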
+ existingStorable, err := parentArray.Remove(0) + require.NoError(t, err) + require.Equal(t, Uint64Value(0), existingStorable) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + copy(expectedValues, expectedValues[1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - verifyArrayLoadedElements(t, array, values) + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(4), childArray.Count()) - nestedArray, ok := v.(*Array) + child.parentIndex-- + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) - } - }) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - verifyArrayLoadedElements(t, array, values) + // Remove value at index 1, so only second child array index is moved by -1 + existingStorable, err = parentArray.Remove(1) + require.NoError(t, err) + require.Equal(t, Uint64Value(2), existingStorable) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + copy(expectedValues[1:], expectedValues[2:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - nestedArray, ok := v.(*Array) - require.True(t, ok) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + v := Uint64Value(i) + vSize := v.ByteSize() - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) - } - }) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, 
- t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ if i > 0 {
+ child.parentIndex--
+ }
- const arraySize = 3
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue)
+ require.True(t, ok)
- // parent array: 1 root data slab
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+arraySize, len(storage.deltas))
- require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+ expectedChildValues = append(expectedChildValues, v)
+ expectedValues[child.parentIndex] = expectedChildValues
- verifyArrayLoadedElements(t, array, values)
+ require.True(t, childArray.Inlined())
+ require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged
- // Unload composite element in the middle
- unloadValueIndex := 1
+ // Test inlined child slab size
+ expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize
+ require.Equal(t, expectedInlinedSize, childArray.root.ByteSize())
- v := values[unloadValueIndex]
+ // Test array's mutableElementIndex
+ require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count())
+ require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+ }
- err := storage.Remove(nestedArray.SlabID())
+ // Remove value at index 2 (last element), so none of the child array indexes are affected.
+ existingStorable, err = parentArray.Remove(2)
require.NoError(t, err)
+ require.Equal(t, Uint64Value(4), existingStorable)
- copy(values[unloadValueIndex:], values[unloadValueIndex+1:])
- values = values[:len(values)-1]
-
- verifyArrayLoadedElements(t, array, values)
- })
+ expectedValues[len(expectedValues)-1] = nil
+ expectedValues = expectedValues[:len(expectedValues)-1]
- t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ for i, child := range children {
+ childArray := child.array
+ childValueID := child.valueID
- const arraySize = 3
- array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize)
+ v := Uint64Value(i)
+ vSize := v.ByteSize()
- // parent array: 1 root data slab
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+arraySize, len(storage.deltas))
- require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+ err := childArray.Append(v)
+ require.NoError(t, err)
+ require.Equal(t, uint64(6), childArray.Count())
- verifyArrayLoadedElements(t, array, values)
+ expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue)
+ require.True(t, ok)
- i := 0
- err := array.IterateLoadedValues(func(v Value) (bool, error) {
- // At this point, iterator returned first element (v).
+ expectedChildValues = append(expectedChildValues, v)
+ expectedValues[child.parentIndex] = expectedChildValues
- // Remove all other nested composite elements (except first element) from storage.
- for _, value := range values[1:] { - nestedArray, ok := value.(*Array) - require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0], v) - i++ - return true, nil - }) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } }) +} - t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { - const arraySize = 3 +func createArrayWithEmptyChildArray( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { - // Create an array with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root data slab - // nested composite element: 1 root data slab - require.Equal(t, 2, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + // Append child array to parent + err = array.Append(child) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + expectedValues[i] = arrayValue{} + } - // Unload composite element - v := values[nestedCompositeIndex].(*Array) + return array, expectedValues +} - err := storage.Remove(v.SlabID()) - require.NoError(t, err) +func createArrayWithEmpty2LevelChildArray( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - } - }) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - t.Run("root metadata slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - const arraySize = 20 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Append grand child array to child array + err = 
child.Append(gchild) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - require.Equal(t, 3, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Append child array to parent + err = array.Append(child) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - }) + expectedValues[i] = arrayValue{arrayValue{}} + } - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + return array, expectedValues +} - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) +func getStoredDeltas(storage *PersistentSlabStorage) int { + count := 0 + for _, slab := range storage.deltas { + if slab != nil { + count++ + } + } + return count +} - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) +func TestArraySetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyArrayLoadedElements(t, array, values) - }) + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 - t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { storage := newTestPersistentStorage(t) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + err = parentArray.Append(childArray) + require.NoError(t, err) - nestedArray, ok := v.(*Array) - require.True(t, ok) + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + err = childArray.Append(v) + require.NoError(t, err) - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) - } - }) + expectedChildValues = append(expectedChildValues, v) - t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + if !childArray.Inlined() { + break + } + } - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + expectedValues = append(expectedValues, expectedChildValues) + } - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) + testArray(t, storage, typeInfo, 
address, parentArray, expectedValues, true) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + // Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - nestedArray, ok := v.(*Array) + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + + expectedValues[i] = Uint64Value(0) + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) } + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) - t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + storage := newTestPersistentStorage(t) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + var expectedValues arrayValue - // Unload composite element in the middle - for _, index := range []int{4, 14} { + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - v := values[index] + err = parentArray.Append(childArray) + require.NoError(t, err) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) - err := storage.Remove(nestedArray.SlabID()) + err = childArray.Append(v) require.NoError(t, err) + require.True(t, childArray.Inlined()) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] - - verifyArrayLoadedElements(t, array, values) + expectedValues = append(expectedValues, arrayValue{v}) } - }) - t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { - const arraySize = 20 + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // Create an array with composite value at specified index. 
- for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + // Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+1, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + valueEqual(t, expectedValues[i], child) - // Unload composite value - v := values[nestedCompositeIndex].(*Array) + expectedValues[i] = Uint64Value(0) - err := storage.Remove(v.SlabID()) + err = storage.Remove(SlabID(id)) require.NoError(t, err) + } - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) - } + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 + storage := newTestPersistentStorage(t) - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + err = parentArray.Append(childMap) + require.NoError(t, err) - // Unload data slabs from front to back - for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) - childHeader := metaDataSlab.childrenHeaders[i] + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - values = values[childHeader.count:] + expectedChildValues[k] = v - verifyArrayLoadedElements(t, array, values) + if !childMap.Inlined() { + break + } + } } - }) - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= 
parentArray.Count())
- const arraySize = 30
- array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+ testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
- // parent array (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+ // Overwrite existing child map value
+ for i := 0; i < arraySize; i++ {
+ existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0))
+ require.NoError(t, err)
+ require.NotNil(t, existingStorable)
- verifyArrayLoadedElements(t, array, values)
+ id, ok := existingStorable.(SlabIDStorable)
+ require.True(t, ok)
- metaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ child, err := id.StoredValue(storage)
+ require.NoError(t, err)
- // Unload data slabs from back to front
- for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+ valueEqual(t, expectedValues[i], child)
- childHeader := metaDataSlab.childrenHeaders[i]
+ expectedValues[i] = Uint64Value(0)
- err := storage.Remove(childHeader.slabID)
+ err = storage.Remove(SlabID(id))
require.NoError(t, err)
+ }
- values = values[:len(values)-int(childHeader.count)]
+ // Test array's mutableElementIndex
+ require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
- verifyArrayLoadedElements(t, array, values)
- }
+ testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
})
- t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) {
+ t.Run("child map is inlined", func(t *testing.T) {
+ const arraySize = 2
+ storage := newTestPersistentStorage(t)
- const arraySize = 30
- array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+ // Create parent array
+ parentArray, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
- // parent array (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getArrayMetaDataSlabCount(storage))
+ var expectedValues arrayValue
- verifyArrayLoadedElements(t, array, values)
+ for i := 0; i < arraySize; i++ {
+ // Create child map
+ childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+ require.NoError(t, err)
- metaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
- require.True(t, ok)
+ k := Uint64Value(i)
- require.True(t, len(metaDataSlab.childrenHeaders) > 2)
+ err = parentArray.Append(childMap)
+ require.NoError(t, err)
- index := 1
- childHeader := metaDataSlab.childrenHeaders[index]
+ expectedChildValues := make(mapValue)
+ expectedValues = append(expectedValues, expectedChildValues)
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ // Insert one element to child map, so child map stays inlined
+ v := NewStringValue(strings.Repeat("a", 10))
- copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:])
- values = values[:array.Count()-uint64(childHeader.count)]
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- verifyArrayLoadedElements(t, array, values)
- })
+ expectedChildValues[k] = v
+ }
- t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Test array's mutableElementIndex
+ require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
- const arraySize =
250 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs - require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + // Overwrite existing child map value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) - // Unload non-root metadata slabs from front to back - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + child, err := id.StoredValue(storage) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + valueEqual(t, expectedValues[i], child) - err := storage.Remove(childHeader.slabID) + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) require.NoError(t, err) + } - values = values[childHeader.count:] + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) - } + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) +} - t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) - - const arraySize = 250 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) +func TestArrayRemoveReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs - require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + storage := newTestPersistentStorage(t) - // Unload non-root metadata slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + var expectedValues arrayValue - err := storage.Remove(childHeader.slabID) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values = values[childHeader.count:] - - verifyArrayLoadedElements(t, array, values) - } - }) - - t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + err = parentArray.Append(childArray) + require.NoError(t, err) - storage := newTestPersistentStorage(t) + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + err = childArray.Append(v) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + expectedChildValues = append(expectedChildValues, v) - verifyArrayLoadedElements(t, 
array, values) + if !childArray.Inlined() { + break + } + } - r := newRand(t) + expectedValues = append(expectedValues, expectedChildValues) + } - // Unload random composite element - for len(values) > 0 { + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - i := r.Intn(len(values)) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - v := values[i] + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - nestedArray, ok := v.(*Array) + id, ok := valueStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + valueEqual(t, expectedValues[i], child) - verifyArrayLoadedElements(t, array, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + + testEmptyArray(t, storage, typeInfo, address, parentArray) }) - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 storage := newTestPersistentStorage(t) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + err = parentArray.Append(childArray) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - } + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) - count := 0 - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { - nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) - for _, h := range nonrootMetaDataSlab.childrenHeaders { - dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} - dataSlabInfos = append(dataSlabInfos, dataSlabInfo) - count += int(h.count) - } + expectedValues = append(expectedValues, arrayValue{v}) } - r := newRand(t) - - // Unload random data slab. - for len(dataSlabInfos) > 0 { - indexToUnload := r.Intn(len(dataSlabInfos)) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - slabInfoToUnload := dataSlabInfos[indexToUnload] + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Update startIndex for all data slabs after indexToUnload. 
- for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { - dataSlabInfos[i].startIndex -= slabInfoToUnload.count - } + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - // Remove slabInfo to be unloaded from dataSlabInfos. - copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) - dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - err := storage.Remove(slabInfoToUnload.id) + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) - values = values[:len(values)-slabInfoToUnload.count] + valueEqual(t, expectedValues[i], child) - verifyArrayLoadedElements(t, array, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } - require.Equal(t, 0, len(values)) + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + + testEmptyArray(t, storage, typeInfo, address, parentArray) }) - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 storage := newTestPersistentStorage(t) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - children []*slabInfo - } + err = parentArray.Append(childMap) + require.NoError(t, err) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) - var dataSlabCount, metadataSlabCount int - nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ - nonrootMetadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: metadataSlabCount, - count: int(mheader.count), - } - metadataSlabCount += int(mheader.count) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + expectedChildValues[k] = v - children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders)) - for i, h := range nonrootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: dataSlabCount, - count: int(h.count), + if !childMap.Inlined() { + break } - dataSlabCount += int(h.count) } - - nonrootMetadataSlabInfo.children = children - 
nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo
}
- r := newRand(t)
+ // Test array's mutableElementIndex
+ require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count())
- const (
- metadataSlabType int = iota
- dataSlabType
- maxSlabType
- )
+ testArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
- for len(nonrootMetadataSlabInfos) > 0 {
+ // Remove child map value
+ for i := 0; i < arraySize; i++ {
+ valueStorable, err := parentArray.Remove(uint64(0))
+ require.NoError(t, err)
- var slabInfoToBeRemoved *slabInfo
- var isLastSlab bool
+ id, ok := valueStorable.(SlabIDStorable)
+ require.True(t, ok)
- // Unload random metadata or data slab.
- switch r.Intn(maxSlabType) {
+ child, err := id.StoredValue(storage)
+ require.NoError(t, err)
- case metadataSlabType:
- // Unload metadata slab at random index.
- metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos))
+ valueEqual(t, expectedValues[i], child)
- isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1
+ err = storage.Remove(SlabID(id))
+ require.NoError(t, err)
+ }
- slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex]
+ // Test array's mutableElementIndex
+ require.Equal(t, 0, len(parentArray.mutableElementIndex))
- count := slabInfoToBeRemoved.count
+ testEmptyArray(t, storage, typeInfo, address, parentArray)
+ })
- // Update startIndex for subsequence metadata and data slabs.
- for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ {
- nonrootMetadataSlabInfos[i].startIndex -= count
+ t.Run("child map is inlined", func(t *testing.T) {
+ const arraySize = 2
- for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ {
- nonrootMetadataSlabInfos[i].children[j].startIndex -= count
- }
- }
+ storage := newTestPersistentStorage(t)
- copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:])
- nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1]
+ // Create parent array
+ parentArray, err := NewArray(storage, address, typeInfo)
+ require.NoError(t, err)
- case dataSlabType:
- // Unload data slab at randome index.
- metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos))
+ var expectedValues arrayValue
- metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex]
+ for i := 0; i < arraySize; i++ {
+ // Create child map
+ childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+ require.NoError(t, err)
- dataSlabIndex := r.Intn(len(metaSlabInfo.children))
+ k := Uint64Value(i)
- slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex]
+ err = parentArray.Append(childMap)
+ require.NoError(t, err)
- isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) &&
- (dataSlabIndex == len(metaSlabInfo.children)-1)
+ expectedChildValues := make(mapValue)
+ expectedValues = append(expectedValues, expectedChildValues)
- count := slabInfoToBeRemoved.count
+ // Insert one element to child map, so child map stays inlined
+ v := NewStringValue(strings.Repeat("a", 10))
- // Update startIndex for subsequence data slabs.
- for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { - metaSlabInfo.children[i].startIndex -= count - } + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) - metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + expectedChildValues[k] = v + } - metaSlabInfo.count -= count + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // Update startIndex for all subsequence metadata slabs. - for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + // Remove child map value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - if len(metaSlabInfo.children) == 0 { - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] - } - } + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - err := storage.Remove(slabInfoToBeRemoved.id) + child, err := id.StoredValue(storage) require.NoError(t, err) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + valueEqual(t, expectedValues[i], child) - verifyArrayLoadedElements(t, array, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } - require.Equal(t, 0, len(values)) + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + + testEmptyArray(t, storage, typeInfo, address, parentArray) }) } -func createArrayWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { +func TestArrayWithOutdatedCallback(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + t.Run("overwritten child array", func(t *testing.T) { - values := make([]Value, arraySize) - r := rune('a') - for i := 0; i < arraySize; i++ { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) + storage := newTestPersistentStorage(t) - err := array.Append(values[i]) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - return array, values -} - -func createArrayWithCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { - - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + var expectedValues arrayValue - expectedValues := make([]Value, arraySize) - for i := 0; i < arraySize; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) + // Create child array + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - err = 
nested.Append(Uint64Value(i)) + // Insert child array to parent array + err = parentArray.Append(childArray) require.NoError(t, err) - expectedValues[i] = nested + v := NewStringValue(strings.Repeat("a", 10)) - // Append nested array to parent - err = array.Append(nested) + err = childArray.Append(v) require.NoError(t, err) - } - return array, expectedValues -} + expectedValues = append(expectedValues, arrayValue{v}) -func createArrayWithSimpleAndCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, - compositeValueIndex int, -) (*Array, []Value) { - require.True(t, compositeValueIndex < arraySize) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Overwrite child array value from parent + valueStorable, err := parentArray.Set(0, Uint64Value(0)) + require.NoError(t, err) - values := make([]Value, arraySize) - r := 'a' - for i := 0; i < arraySize; i++ { + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + child, err := id.StoredValue(storage) + require.NoError(t, err) - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + valueEqual(t, expectedValues[0], child) - values[i] = a - } else { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) - r++ - } + expectedValues[0] = Uint64Value(0) + + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) - err = array.Append(values[i]) + // modify overwritten child array + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) - } - return array, values -} + // childArray.parentUpdater is nil after callback is invoked. 
+ require.Nil(t, childArray.parentUpdater) -func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i], v) - i++ - return true, nil + // No-op on parent + valueEqual(t, expectedValues, parentArray) }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) -} -func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int - for _, slab := range storage.deltas { - if _, ok := slab.(*ArrayMetaDataSlab); ok { - counter++ - } - } - return counter -} + t.Run("removed child array", func(t *testing.T) { -func TestArrayID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - sid := array.SlabID() - id := array.ValueID() + var expectedValues arrayValue - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) -} + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) -func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { - const ( - arraySize = 3 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) + // Insert child array to parent array + err = parentArray.Append(childArray) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + v := NewStringValue(strings.Repeat("a", 10)) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + err = childArray.Append(v) + require.NoError(t, err) - values := make([]*mutableValue, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := newMutableValue(initialStorableSize) - values[i] = v + expectedValues = append(expectedValues, arrayValue{v}) - err := array.Append(v) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove child array value from parent + valueStorable, err := parentArray.Remove(0) require.NoError(t, err) - } - require.True(t, array.root.IsData()) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + child, err := id.StoredValue(storage) + require.NoError(t, err) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) + valueEqual(t, expectedValues[0], child) - for i := uint64(0); i < arraySize; i++ { - mv := values[i] - mv.updateStorableSize(mutatedStorableSize) + expectedValues = arrayValue{} - existingStorable, err := array.Set(i, mv) - require.NoError(t, err) - require.NotNil(t, existingStorable) - } + // childArray.parentUpdater isn't nil before callback is invoked. 
+ require.NotNil(t, childArray.parentUpdater) - require.True(t, array.root.IsData()) + // modify removed child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) - expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) + // No-op on parent + valueEqual(t, expectedValues, parentArray) + }) } diff --git a/basicarray.go b/basicarray.go index b5267e4c..143bec35 100644 --- a/basicarray.go +++ b/basicarray.go @@ -76,7 +76,7 @@ func newBasicArrayDataSlabFromData( ) } - cborDec := decMode.NewByteStreamDecoder(data[2:]) + cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) elemCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -85,7 +85,7 @@ func newBasicArrayDataSlabFromData( elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") @@ -101,10 +101,17 @@ func newBasicArrayDataSlabFromData( func (a *BasicArrayDataSlab) Encode(enc *Encoder) error { - flag := maskBasicArray | maskSlabRoot + const version = 1 + + h, err := newArraySlabHead(version, slabBasicArray) + if err != nil { + return NewEncodingError(err) + } + + h.setRoot() // Encode flag - _, err := enc.Write([]byte{0x0, flag}) + _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } diff --git a/cmd/main/main.go b/cmd/main/main.go index 3e0cf470..f94db511 100644 --- a/cmd/main/main.go +++ b/cmd/main/main.go @@ -73,20 +73,34 @@ func (v Uint64Value) String() string { return fmt.Sprintf("%d", uint64(v)) } -type testTypeInfo struct{} +type testTypeInfo struct { + value uint64 +} var _ atree.TypeInfo = testTypeInfo{} -func (testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint8(42) +func (i testTypeInfo) Copy() atree.TypeInfo { + return i +} + +func (testTypeInfo) IsComposite() bool { + return false +} + +func (i testTypeInfo) ID() string { + return fmt.Sprintf("uint64(%d)", i.value) +} + +func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { + return e.EncodeUint64(i.value) } func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - _, ok := other.(testTypeInfo) - return ok + otherTestTypeInfo, ok := other.(testTypeInfo) + return ok && i.value == otherTestTypeInfo.value } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { tagNumber, err := dec.DecodeTagNumber() if err != nil { return nil, err diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go index b3fba90a..a2bdf1da 100644 --- a/cmd/stress/storable.go +++ b/cmd/stress/storable.go @@ -413,7 +413,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { t, err := dec.NextType() if err != nil { return nil, err diff --git 
a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go
index 4618dc12..ec78239f 100644
--- a/cmd/stress/typeinfo.go
+++ b/cmd/stress/typeinfo.go
@@ -19,6 +19,8 @@ package main
import (
+ "fmt"
+
"github.com/onflow/atree"
"github.com/fxamacker/cbor/v2"
@@ -30,6 +32,18 @@ type testTypeInfo struct {
var _ atree.TypeInfo = testTypeInfo{}
+func (i testTypeInfo) Copy() atree.TypeInfo {
+ return i
+}
+
+func (i testTypeInfo) IsComposite() bool {
+ return false
+}
+
+func (i testTypeInfo) ID() string {
+ return fmt.Sprintf("uint64(%d)", i.value)
+}
+
func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error {
return e.EncodeUint64(i.value)
}
diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go
index c75296fe..96f72584 100644
--- a/cmd/stress/utils.go
+++ b/cmd/stress/utils.go
@@ -132,7 +132,7 @@ func copyValue(storage *atree.PersistentSlabStorage, address atree.Address, valu
}
func copyArray(storage *atree.PersistentSlabStorage, address atree.Address, array *atree.Array) (*atree.Array, error) {
- iterator, err := array.Iterator()
+ iterator, err := array.ReadOnlyIterator()
if err != nil {
return nil, err
}
@@ -149,7 +149,7 @@ func copyMap(storage *atree.PersistentSlabStorage, address atree.Address, m *atree.OrderedMap) (*atree.OrderedMap, error) {
- iterator, err := m.Iterator()
+ iterator, err := m.ReadOnlyIterator()
if err != nil {
return nil, err
}
@@ -260,12 +260,12 @@ func arrayEqual(a atree.Value, b atree.Value) error {
return fmt.Errorf("array %s count %d != array %s count %d", array1, array1.Count(), array2, array2.Count())
}
- iterator1, err := array1.Iterator()
+ iterator1, err := array1.ReadOnlyIterator()
if err != nil {
return fmt.Errorf("failed to get array1 iterator: %w", err)
}
- iterator2, err := array2.Iterator()
+ iterator2, err := array2.ReadOnlyIterator()
if err != nil {
return fmt.Errorf("failed to get array2 iterator: %w", err)
}
@@ -309,12 +309,12 @@ func mapEqual(a atree.Value, b atree.Value) error {
return fmt.Errorf("map %s count %d != map %s count %d", m1, m1.Count(), m2, m2.Count())
}
- iterator1, err := m1.Iterator()
+ iterator1, err := m1.ReadOnlyIterator()
if err != nil {
return fmt.Errorf("failed to get m1 iterator: %w", err)
}
- iterator2, err := m2.Iterator()
+ iterator2, err := m2.ReadOnlyIterator()
if err != nil {
return fmt.Errorf("failed to get m2 iterator: %w", err)
}
diff --git a/encode.go b/encode.go
index c88fa3a8..5f46505c 100644
--- a/encode.go
+++ b/encode.go
@@ -30,19 +30,45 @@ type Encoder struct {
io.Writer
CBOR *cbor.StreamEncoder
Scratch [64]byte
+ encMode cbor.EncMode
}
func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
streamEncoder := encMode.NewStreamEncoder(w)
return &Encoder{
- Writer: w,
- CBOR: streamEncoder,
+ Writer: w,
+ CBOR: streamEncoder,
+ encMode: encMode,
}
}
+// encodeStorableAsElement encodes storable as Array or OrderedMap element.
+// A storable is encoded as an inlined ArrayDataSlab or MapDataSlab if it is an ArrayDataSlab or MapDataSlab.
+func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *inlinedExtraData) error {
+
+ switch storable := storable.(type) {
+
+ case *ArrayDataSlab:
+ return storable.encodeAsInlined(enc, inlinedTypeInfo)
+
+ case *MapDataSlab:
+ return storable.encodeAsInlined(enc, inlinedTypeInfo)
+
+ default:
+ err := storable.Encode(enc)
+ if err != nil {
+ // Wrap err as external error (if needed) because err is returned by Storable interface.
+ return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + } + } + + return nil +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, + inlinedExtraData []ExtraData, ) ( Storable, error, @@ -101,7 +127,7 @@ func DecodeSlab( case slabStorable: cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) - storable, err := decodeStorable(cborDec, id) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode slab storable") @@ -116,7 +142,6 @@ func DecodeSlab( } } -// TODO: make it inline func GetUintCBORSize(n uint64) uint32 { if n <= 23 { return 1 diff --git a/map.go b/map.go index 7d9eabf8..13654279 100644 --- a/map.go +++ b/map.go @@ -29,6 +29,8 @@ import ( "github.com/fxamacker/circlehash" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( digestSize = 8 @@ -83,6 +85,18 @@ const ( // CircleHash64fx and SipHash might use this const as part of their // 128-bit seed (when they don't use 64-bit -> 128-bit seed expansion func). typicalRandomConstant = uint64(0x1BD11BDAA9FC1A22) // DO NOT MODIFY + + // inlined map data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data ref index (2 bytes) [0, 255] + + // value index head (1 byte) + + // value index (8 bytes) + inlinedMapDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize ) // MaxCollisionLimitPerDigest is the noncryptographic hash collision limit @@ -107,7 +121,7 @@ type element interface { hkey Digest, comparator ValueComparator, key Value, - ) (MapValue, error) + ) (MapKey, MapValue, error) // Set returns updated element, which may be a different type of element because of hash collision. Set( @@ -121,7 +135,7 @@ type element interface { hip HashInputProvider, key Value, value Value, - ) (newElem element, existingValue MapValue, err error) + ) (newElem element, keyStorable MapKey, existingMapValueStorable MapValue, err error) // Remove returns matched key, value, and updated element. // Updated element may be nil, modified, or a different type of element. 
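// Aside: a quick sanity check of the inlined map data slab prefix arithmetic
// introduced above (tag number 2 + array head 1 + extra data index 2 +
// value ID head 1 + value ID 8 = 14 bytes). This is a self-contained sketch
// with the constant values copied from this diff's comments, not repository code:
package main

import "fmt"

const (
	inlinedTagNumSize          = 2 // CBOR tag number (CBORTagInlinedMap)
	inlinedCBORArrayHeadSize   = 1 // CBOR array head for the 3-element array
	inlinedExtraDataIndexSize  = 2 // fixed-size extra data index, range [0, 255]
	inlinedCBORValueIDHeadSize = 1 // CBOR byte string head for the 8-byte value ID
	inlinedValueIDSize         = 8 // encoded value ID

	inlinedMapDataSlabPrefixSize = inlinedTagNumSize +
		inlinedCBORArrayHeadSize +
		inlinedExtraDataIndexSize +
		inlinedCBORValueIDHeadSize +
		inlinedValueIDSize
)

func main() {
	fmt.Println(inlinedMapDataSlabPrefixSize) // prints 14
}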
@@ -134,7 +148,7 @@ type element interface { key Value, ) (MapKey, MapValue, element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error hasPointer() bool @@ -159,9 +173,36 @@ type elementGroup interface { type elements interface { fmt.Stringer - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) - Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) Merge(elements) error Split() (elements, elements, error) @@ -174,7 +215,7 @@ type elements interface { Element(int) (element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error hasPointer() bool @@ -239,6 +280,8 @@ type MapExtraData struct { Seed uint64 } +var _ ExtraData = &MapExtraData{} + // MapDataSlab is leaf node, implementing MapSlab. // anySize is true for data slab that isn't restricted by size requirement. type MapDataSlab struct { @@ -253,9 +296,11 @@ type MapDataSlab struct { anySize bool collisionGroup bool + inlined bool } var _ MapSlab = &MapDataSlab{} +var _ Storable = &MapDataSlab{} // MapMetaDataSlab is internal node, implementing MapSlab. type MapMetaDataSlab struct { @@ -272,9 +317,35 @@ var _ MapSlab = &MapMetaDataSlab{} type MapSlab interface { Slab - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) - Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) IsData() bool @@ -292,15 +363,42 @@ type MapSlab interface { SetExtraData(*MapExtraData) PopIterate(SlabStorage, MapPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } +// OrderedMap is an ordered map of key-value pairs; keys can be any hashable type +// and values can be any serializable value type. It supports heterogeneous key +// or value types (e.g. 
the first key storing a boolean and the second key storing a string).
+// OrderedMap keeps values in a specific sorted order, and operations are deterministic,
+// so the state of the segments after a sequence of operations is always unique.
+//
+// OrderedMap key-value pairs can be stored in one or more relatively fixed-sized segments.
+//
+// OrderedMap can be inlined into its parent container when the entire content fits in
+// the parent container's element size limit. Specifically, an OrderedMap with one segment
+// which fits in the size limit can be inlined, while an OrderedMap with multiple segments
+// can't be inlined.
type OrderedMap struct {
Storage SlabStorage
root MapSlab
digesterBuilder DigesterBuilder
+
+ // parentUpdater is a callback that notifies parent container when this map is modified.
+ // If this callback is nil, this map has no parent. Otherwise, this map has a parent
+ // and this callback must be used when this map is changed by Set and Remove.
+ //
+ // parentUpdater acts like a "parent pointer". It is not stored physically and is only in memory.
+ // It is set up when a child map is returned from the parent's Get. It is also set up when
+ // a new child is added to the parent through Set or Insert.
+ parentUpdater parentUpdater
}
var _ Value = &OrderedMap{}
+var _ mutableValueNotifier = &OrderedMap{}
const mapExtraDataLength = 3
@@ -365,6 +463,10 @@ func newMapExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (*
}, nil
}
+func (m *MapExtraData) isExtraData() bool {
+ return true
+}
+
// Encode encodes extra data as CBOR array:
//
// [type info, count, seed]
@@ -399,7 +501,7 @@ func (m *MapExtraData) Encode(enc *Encoder) error {
return nil
}
-func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (element, error) {
+func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (element, error) {
nt, err := cborDec.NextType()
if err != nil {
return nil, NewDecodingError(err)
}
@@ -408,7 +510,7 @@
switch nt {
case cbor.ArrayType:
// Don't need to wrap error as external error because err is already categorized by newSingleElementFromData().
- return newSingleElementFromData(cborDec, decodeStorable)
+ return newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData)
case cbor.TagType:
tagNum, err := cborDec.DecodeTagNumber()
@@ -418,10 +520,10 @@
switch tagNum {
case CBORTagInlineCollisionGroup:
// Don't need to wrap error as external error because err is already categorized by newInlineCollisionGroupFromData().
- return newInlineCollisionGroupFromData(cborDec, decodeStorable)
+ return newInlineCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData)
case CBORTagExternalCollisionGroup:
// Don't need to wrap error as external error because err is already categorized by newExternalCollisionGroupFromData().
- return newExternalCollisionGroupFromData(cborDec, decodeStorable) + return newExternalCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) default: return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized tag number %d", tagNum)) } @@ -452,7 +554,7 @@ func newSingleElement(storage SlabStorage, address Address, key Value, value Val }, nil } -func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*singleElement, error) { +func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*singleElement, error) { elemCount, err := cborDec.DecodeArrayHead() if err != nil { return nil, NewDecodingError(err) @@ -462,13 +564,13 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab return nil, NewDecodingError(fmt.Errorf("failed to decode single element: expect array of 2 elements, got %d elements", elemCount)) } - key, err := decodeStorable(cborDec, SlabIDUndefined) + key, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - value, err := decodeStorable(cborDec, SlabIDUndefined) + value, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode value's storable") @@ -484,7 +586,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab // Encode encodes singleElement to the given encoder. // // CBOR encoded array of 2 elements (key, value). -func (e *singleElement) Encode(enc *Encoder) error { +func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { // Encode CBOR array head for 2 elements err := enc.CBOR.EncodeRawBytes([]byte{0x82}) @@ -500,7 +602,7 @@ func (e *singleElement) Encode(enc *Encoder) error { } // Encode value - err = e.value.Encode(enc) + err = encodeStorableAsElement(enc, e.value, inlinedTypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") @@ -514,16 +616,16 @@ func (e *singleElement) Encode(enc *Encoder) error { return nil } -func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return e.value, nil + return e.key, e.value, nil } - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } // Set updates value if key matches, otherwise returns inlineCollisionGroup with existing and new elements. 
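Note on singleElement.Encode above: values are no longer encoded with Storable.Encode directly but through encodeStorableAsElement, which lets an inlined child slab register its extra data in the shared inlinedExtraData section. That helper is defined in another file of this change; a minimal sketch of the expected dispatch, assuming it is a plain type switch over the storable:

func encodeStorableAsElementSketch(enc *Encoder, storable Storable, inlinedTypeInfo *inlinedExtraData) error {
	switch s := storable.(type) {
	case *MapDataSlab:
		// Inlined map slab: encode as a tagged CBOR value and register its
		// extra data in inlinedTypeInfo (see encodeAsInlined later in this diff).
		return s.encodeAsInlined(enc, inlinedTypeInfo)
	case *ArrayDataSlab:
		// Inlined array slab: assumed to expose the same hook in array.go.
		return s.encodeAsInlined(enc, inlinedTypeInfo)
	default:
		// Every other storable encodes itself as before.
		return storable.Encode(enc)
	}
}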
@@ -542,27 +644,27 @@ func (e *singleElement) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } // Key matches, overwrite existing value if equal { - existingValue := e.value + existingMapValueStorable := e.value valueStorable, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(e.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } e.value = valueStorable e.size = singleElementPrefixSize + e.key.ByteSize() + e.value.ByteSize() - return e, existingValue, nil + return e, e.key, existingMapValueStorable, nil } // Hash collision detected @@ -586,20 +688,20 @@ func (e *singleElement) Set( kv, err := e.key.StoredValue(storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") } existingKeyDigest, err := b.Digest(hip, kv) if err != nil { // Wrap err as external error (if needed) because err is returned by DigestBuilder interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") } defer putDigester(existingKeyDigest) d, err := existingKeyDigest.Digest(level + 1) if err != nil { // Wrap err as external error (if needed) because err is returned by Digester interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) } group := &inlineCollisionGroup{ @@ -648,8 +750,8 @@ func (e *singleElement) String() string { return fmt.Sprintf("%s:%s", e.key, e.value) } -func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*inlineCollisionGroup, error) { - elements, err := newElementsFromData(cborDec, decodeStorable) +func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*inlineCollisionGroup, error) { + elements, err := newElementsFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). return nil, err @@ -661,7 +763,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable // Encode encodes inlineCollisionGroup to the given encoder. 
// // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) -func (e *inlineCollisionGroup) Encode(enc *Encoder) error { +func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagInlineCollisionGroup @@ -671,7 +773,7 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = e.elements.Encode(enc) + err = e.elements.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -686,12 +788,12 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error { return nil } -func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -711,19 +813,19 @@ func (e *inlineCollisionGroup) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). - return nil, nil, err + return nil, nil, nil, err } if level == 1 { @@ -734,7 +836,7 @@ func (e *inlineCollisionGroup) Set( id, err := storage.GenerateSlabID(address) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded( + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded( err, fmt.Sprintf("failed to generate slab ID for address 0x%x", address)) } @@ -754,18 +856,18 @@ func (e *inlineCollisionGroup) Set( err = storage.Store(id, slab) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) } // Create and return externalCollisionGroup (wrapper of newly created MapDataSlab) return &externalCollisionGroup{ slabID: id, size: externalCollisionGroupPrefixSize + SlabIDStorable(id).ByteSize(), - }, existingValue, nil + }, keyStorable, existingMapValueStorable, nil } } - return e, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -829,9 +931,9 @@ func (e *inlineCollisionGroup) String() string { return "inline[" + e.elements.String() + "]" } -func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*externalCollisionGroup, error) { +func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*externalCollisionGroup, error) { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode Storable") @@ -851,7 +953,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab // Encode encodes externalCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder) error { +func (e *externalCollisionGroup) Encode(enc *Encoder, _ *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -875,17 +977,17 @@ func (e *externalCollisionGroup) Encode(enc *Encoder) error { return nil } -func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 
- return nil, err + return nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -894,26 +996,37 @@ func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, lev return slab.Get(storage, digester, level, hkey, comparator, key) } -func (e *externalCollisionGroup) Set(storage SlabStorage, _ Address, b DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (element, MapValue, error) { +func (e *externalCollisionGroup) Set( + storage SlabStorage, + _ Address, + b DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (element, MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, nil, err + return nil, nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). - return nil, nil, err + return nil, nil, nil, err } - return e, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -1029,7 +1142,7 @@ func (e *externalCollisionGroup) String() string { return fmt.Sprintf("external(%s)", e.slabID) } -func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (elements, error) { +func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (elements, error) { arrayCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -1076,7 +1189,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(singleElementsPrefixSize) elems := make([]*singleElement, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newSingleElementFromData(cborDec, decodeStorable) + elem, err := newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). 
return nil, err @@ -1102,7 +1215,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newElementFromData(cborDec, decodeStorable) + elem, err := newElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementFromData(). return nil, err @@ -1146,7 +1259,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder) error { +func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1200,7 +1313,7 @@ func (e *hkeyElements) Encode(enc *Encoder) error { // Encode each element for _, e := range e.elems { - err = e.Encode(enc) + err = e.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Encode(). return err @@ -1216,10 +1329,10 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } -func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } // binary search by hkey @@ -1241,7 +1354,7 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h // No matching hkey if equalIndex == -1 { - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } elem := e.elems[equalIndex] @@ -1250,11 +1363,22 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h return elem.Get(storage, digester, level, hkey, comparator, key) } -func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (e *hkeyElements) Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { // Check hkeys are not empty if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } if len(e.hkeys) == 0 { @@ -1263,7 +1387,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). 
- return nil, err + return nil, nil, err } e.hkeys = []Digest{hkey} @@ -1272,7 +1396,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey < e.hkeys[0] { @@ -1281,7 +1405,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, Digest(0)) @@ -1294,7 +1418,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey > e.hkeys[len(e.hkeys)-1] { @@ -1303,7 +1427,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, hkey) @@ -1312,7 +1436,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } equalIndex := -1 // first index that m.hkeys[h] == hkey @@ -1347,10 +1471,10 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild elementCount, err := elem.Count(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Count(). - return nil, err + return nil, nil, err } if elementCount == 0 { - return nil, NewMapElementCountError("expect element count > 0, got element count == 0") + return nil, nil, NewMapElementCountError("expect element count > 0, got element count == 0") } // collisionCount is elementCount-1 because: @@ -1362,22 +1486,22 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild // Check if existing collision count reached MaxCollisionLimitPerDigest if collisionCount >= MaxCollisionLimitPerDigest { // Enforce collision limit on inserts and ignore updates. - _, err = elem.Get(storage, digester, level, hkey, comparator, key) + _, _, err = elem.Get(storage, digester, level, hkey, comparator, key) if err != nil { var knfe *KeyNotFoundError if errors.As(err, &knfe) { // Don't allow any more collisions for a digest that // already reached MaxCollisionLimitPerDigest. - return nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) + return nil, nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) } } } } - elem, existingValue, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + elem, keyStorable, existingMapValueStorable, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). 
- return nil, err + return nil, nil, err } e.elems[equalIndex] = elem @@ -1391,7 +1515,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild } e.size = size - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } // No matching hkey @@ -1399,7 +1523,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } // insert into sorted hkeys @@ -1414,7 +1538,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } func (e *hkeyElements) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -1797,7 +1921,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen // 1: hkeys (0 length byte string) // 2: elements (array) // ] -func (e *singleElements) Encode(enc *Encoder) error { +func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1828,7 +1952,7 @@ func (e *singleElements) Encode(enc *Encoder) error { // Encode each element for _, e := range e.elems { - err = e.Encode(enc) + err = e.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by singleElement.Encode(). return err @@ -1844,10 +1968,10 @@ func (e *singleElements) Encode(enc *Encoder) error { return nil } -func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search by key @@ -1855,20 +1979,31 @@ func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return elem.value, nil + return elem.key, elem.value, nil } } - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } -func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, _ HashInputProvider, key Value, value Value) (MapValue, error) { +func (e *singleElements) Set( + storage SlabStorage, + address Address, + _ DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + _ HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search key and update value @@ -1878,16 +2013,17 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - existingValue := elem.value + existingKeyStorable := elem.key + existingValueStorable := elem.value vs, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(elem.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } elem.value = vs @@ -1902,7 +2038,7 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui } e.size = size - return existingValue, nil + return existingKeyStorable, existingValueStorable, nil } } @@ -1910,12 +2046,12 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.elems = append(e.elems, newElem) e.size += newElem.size - return nil, nil + return newElem.key, nil, nil } func (e *singleElements) Remove(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -2147,7 +2283,7 @@ func newMapDataSlabFromDataV0( // Decode elements cborDec := decMode.NewByteStreamDecoder(data) - elements, err := newElementsFromData(cborDec, decodeStorable) + elements, err := newElementsFromData(cborDec, decodeStorable, id, nil) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromDataV0(). 
 		return nil, err
 	}
 
@@ -2177,23 +2313,18 @@ func newMapDataSlabFromDataV0(
 
 // newMapDataSlabFromDataV1 decodes data in version 1:
 //
-//	Root DataSlab Header:
-//
-//	+-------------------------------+------------+
-//	| slab version + flag (2 bytes) | extra data |
-//	+-------------------------------+------------+
-//
-//	Non-root DataSlab Header (18 bytes):
+//	DataSlab Header:
 //
-//	+-------------------------------+-----------------------------+
-//	| slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-//	+-------------------------------+-----------------------------+
+//	+-------------------------------+----------------------+---------------------------------+-----------------------------+
+//	| slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) |
+//	+-------------------------------+----------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded elements
 //
 // See MapExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 // See hkeyElements.Encode() and singleElements.Encode() for elements section format.
 func newMapDataSlabFromDataV1(
 	id SlabID,
@@ -2208,6 +2339,7 @@ func newMapDataSlabFromDataV1(
 ) {
 	var err error
 	var extraData *MapExtraData
+	var inlinedExtraData []ExtraData
 	var next SlabID
 
 	// Decode extra data
@@ -2219,7 +2351,21 @@ func newMapDataSlabFromDataV1(
 		}
 	}
 
-	// Decode next slab ID
+	// Decode inlined extra data
+	if h.hasInlinedSlabs() {
+		inlinedExtraData, data, err = newInlinedExtraDataFromData(
+			data,
+			decMode,
+			decodeStorable,
+			decodeTypeInfo,
+		)
+		if err != nil {
+			// Don't need to wrap error as external error because err is already categorized by newInlinedExtraDataFromData().
+			return nil, err
+		}
+	}
+
+	// Decode next slab ID for non-root slab
 	if h.hasNextSlabID() {
 		if len(data) < slabIDSize {
 			return nil, NewDecodingErrorf("data is too short for map data slab")
@@ -2236,7 +2382,7 @@ func newMapDataSlabFromDataV1(
 
 	// Decode elements
 	cborDec := decMode.NewByteStreamDecoder(data)
-	elements, err := newElementsFromData(cborDec, decodeStorable)
+	elements, err := newElementsFromData(cborDec, decodeStorable, id, inlinedExtraData)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by newElementsFromDataV1().
 		return nil, err
@@ -2264,28 +2410,289 @@ func newMapDataSlabFromDataV1(
 	}, nil
 }
 
-// Encode encodes this map data slab to the given encoder.
+// DecodeInlinedCompactMapStorable decodes inlined compact map data. Encoding is
+// version 1 with a CBOR tag having tag number CBORTagInlinedCompactMap, and tag content
+// as a 3-element array:
 //
-//	Root DataSlab Header:
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
 //
-//	+-------------------------------+------------+
-//	| slab version + flag (2 bytes) | extra data |
-//	+-------------------------------+------------+
+// NOTE: This function doesn't decode the tag number because it is decoded
+// by the caller, and the decoder only contains the tag content.
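+//
+// Illustrative tag content for a compact map with 2 values (the CBOR heads
+// 0x83, 0x18, 0x48, and 0x82 follow from the encoder later in this diff;
+// the index values are examples only):
+//
+//	0x83                 array head of 3 elements
+//	0x18 0x00            element 0: extra data index 0 (fixed-size CBOR uint)
+//	0x48 <8-byte index>  element 1: slab index as a byte string of 8 bytes
+//	0x82 <v0> <v1>       element 2: 2 values ordered by the cached keys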
+func DecodeInlinedCompactMapStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined compact map data, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined compact map data: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*compactMapExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined compact map data: expect *compactMapExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index SlabIndex
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, index)
+
+	// Decode values
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if elemCount != uint64(len(extraData.keys)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode compact map values: got %d, expect %d",
+				elemCount,
+				len(extraData.keys)))
+	}
+
+	// Make a copy of digests because extraData is shared by all inlined compact map data referring to the same type.
+	hkeys := make([]Digest, len(extraData.hkeys))
+	copy(hkeys, extraData.hkeys)
+
+	// Decode values and reconstruct elements.
+	elementsSize := uint32(hkeyElementsPrefixSize)
+	elems := make([]element, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		value, err := decodeStorable(dec, slabID, inlinedExtraData)
+		if err != nil {
+			return nil, err
+		}
+
+		// Make a copy of key in case it is shared.
+		key := extraData.keys[i].Copy()
+
+		elemSize := singleElementPrefixSize + key.ByteSize() + value.ByteSize()
+		elem := &singleElement{key, value, elemSize}
+
+		elems[i] = elem
+		elementsSize += digestSize + elem.Size()
+	}
+
+	// Create hkeyElements
+	elements := &hkeyElements{
+		hkeys: hkeys,
+		elems: elems,
+		level: 0,
+		size:  elementsSize,
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	return &MapDataSlab{
+		header:   header,
+		elements: elements,
+		extraData: &MapExtraData{
+			// Make a copy of extraData.TypeInfo because
+			// inlined extra data are shared by all inlined slabs.
+			TypeInfo: extraData.mapExtraData.TypeInfo.Copy(),
+			Count:    extraData.mapExtraData.Count,
+			Seed:     extraData.mapExtraData.Seed,
+		},
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
+
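Both decode helpers assume the caller has already consumed the CBOR tag number. A minimal sketch of how an application's StorableDecoder callback might route the two inlined-map tags to them (the error fallback is an assumption; a real decoder would fall through to its own storable types):

func decodeInlinedStorableSketch(dec *cbor.StreamDecoder, id SlabID, extra []ExtraData) (Storable, error) {
	tagNum, err := dec.DecodeTagNumber()
	if err != nil {
		return nil, err
	}
	switch tagNum {
	case CBORTagInlinedCompactMap:
		// Tag content is the 3-element array described above.
		return DecodeInlinedCompactMapStorable(dec, decodeInlinedStorableSketch, id, extra)
	case CBORTagInlinedMap:
		return DecodeInlinedMapStorable(dec, decodeInlinedStorableSketch, id, extra)
	default:
		return nil, fmt.Errorf("unexpected tag number %d", tagNum)
	}
}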
+// DecodeInlinedMapStorable decodes an inlined map data slab. Encoding is
+// version 1 with a CBOR tag having tag number CBORTagInlinedMap, and tag content
+// as a 3-element array:
 //
-//	Non-root DataSlab Header (18 bytes):
+//	+------------------+----------------+----------+
+//	| extra data index | value ID index | elements |
+//	+------------------+----------------+----------+
 //
-//	+-------------------------------+-------------------------+
-//	| slab version + flag (2 bytes) | next slab ID (16 bytes) |
-//	+-------------------------------+-------------------------+
+// NOTE: This function doesn't decode the tag number because it is decoded
+// by the caller, and the decoder only contains the tag content.
+func DecodeInlinedMapStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+	extraData, ok := inlinedExtraData[extraDataIndex].(*MapExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"extra data (%T) is wrong type, expect *MapExtraData",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index SlabIndex
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, index)
+
+	// Decode elements
+	elements, err := newElementsFromData(dec, decodeStorable, slabID, inlinedExtraData)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by newElementsFromData().
+		return nil, err
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	return &MapDataSlab{
+		header:   header,
+		elements: elements,
+		extraData: &MapExtraData{
+			// Make a copy of extraData.TypeInfo because
+			// inlined extra data are shared by all inlined slabs.
+			TypeInfo: extraData.TypeInfo.Copy(),
+			Count:    extraData.Count,
+			Seed:     extraData.Seed,
+		},
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
+
+// Encode encodes this map data slab to the given encoder.
+//
+// DataSlab Header:
+//
+//	+-------------------------------+----------------------+---------------------------------+-----------------------------+
+//	| slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) |
+//	+-------------------------------+----------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded elements
 //
 // See MapExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 // See hkeyElements.Encode() and singleElements.Encode() for elements section format.
 func (m *MapDataSlab) Encode(enc *Encoder) error {
+
+	if m.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined map data slab as standalone slab"))
+	}
+
+	// Encoding is done in two steps:
+	//
+	// 1. Encode map elements using a new buffer while collecting inlined extra data from inlined elements.
+	// 2. Encode slab with deduplicated inlined extra data and copy encoded elements from the previous buffer.
+
+	inlinedTypes := newInlinedExtraData()
+
+	// Get a buffer from a pool to encode elements.
+	elementBuf := getBuffer()
+	defer putBuffer(elementBuf)
+
+	elemEnc := NewEncoder(elementBuf, enc.encMode)
+
+	err := m.encodeElements(elemEnc, inlinedTypes)
+	if err != nil {
+		return err
+	}
+
 	const version = 1
 
 	slabType := slabMapData
@@ -2314,7 +2721,11 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
 		h.setRoot()
 	}
 
-	// Write head (version + flag)
+	if !inlinedTypes.empty() {
+		h.setHasInlinedSlabs()
+	}
+
+	// Encode head
 	_, err = enc.Write(h[:])
 	if err != nil {
 		return NewEncodingError(err)
@@ -2329,7 +2740,15 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
 		}
 	}
 
-	// Encode next slab ID
+	// Encode inlined types
+	if !inlinedTypes.empty() {
+		err = inlinedTypes.Encode(enc)
+		if err != nil {
+			return NewEncodingError(err)
+		}
+	}
+
+	// Encode next slab ID for non-root slab
 	if m.next != SlabIDUndefined {
 		n, err := m.next.ToRawBytes(enc.Scratch[:])
 		if err != nil {
@@ -2345,7 +2764,21 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
 	}
 
 	// Encode elements
-	err = m.elements.Encode(enc)
+	err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes())
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraData) error {
+	err := m.elements.Encode(enc, inlinedTypes)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by elements.Encode().
 		return err
@@ -2359,19 +2792,325 @@ func (m *MapDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
-func (m *MapDataSlab) hasPointer() bool {
-	return m.elements.hasPointer()
-}
+// encodeAsInlined encodes an inlined map data slab. Encoding is
+// version 1 with a CBOR tag having tag number CBORTagInlinedMap,
+// and tag content as a 3-element array:
+//
+//	+------------------+----------------+----------+
+//	| extra data index | value ID index | elements |
+//	+------------------+----------------+----------+
+func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if m.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root map data slab as inlined"))
+	}
+
+	if !m.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone map data slab as inlined"))
+	}
+
+	if hkeys, keys, values, ok := m.canBeEncodedAsCompactMap(); ok {
+		return encodeAsInlinedCompactMap(enc, m.header.slabID, m.extraData, hkeys, keys, values, inlinedTypeInfo)
+	}
+
+	return m.encodeAsInlinedMap(enc, inlinedTypeInfo)
+}
+
+func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+
+	extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData)
+
+	if extraDataIndex > maxInlinedExtraDataIndex {
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedMap,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed-size CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(m.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: map elements
+	err = m.elements.Encode(enc, inlinedTypeInfo)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by elements.Encode().
+		return err
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+// encodeAsInlinedCompactMap encodes hkeys, keys, and values as an inlined compact map value.
+func encodeAsInlinedCompactMap(
+	enc *Encoder,
+	slabID SlabID,
+	extraData *MapExtraData,
+	hkeys []Digest,
+	keys []ComparableStorable,
+	values []Storable,
+	inlinedTypeInfo *inlinedExtraData,
+) error {
+
+	extraDataIndex, cachedKeys := inlinedTypeInfo.addCompactMapExtraData(extraData, hkeys, keys)
+
+	if len(keys) != len(cachedKeys) {
+		return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys)))
+	}
+
+	if extraDataIndex > maxInlinedExtraDataIndex {
+		// This should never happen because of slab size.
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedCompactMap,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed-size CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: compact map values in the order of cachedKeys
+	err = encodeCompactMapValues(enc, cachedKeys, keys, values, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+// encodeCompactMapValues encodes compact map values as an array of values ordered by cachedKeys.
+func encodeCompactMapValues(
+	enc *Encoder,
+	cachedKeys []ComparableStorable,
+	keys []ComparableStorable,
+	values []Storable,
+	inlinedTypeInfo *inlinedExtraData,
+) error {
+
+	var err error
+
+	err = enc.CBOR.EncodeArrayHead(uint64(len(cachedKeys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	keyIndexes := make([]int, len(keys))
+	for i := 0; i < len(keys); i++ {
+		keyIndexes[i] = i
+	}
+
+	// Encode values in the same order as cachedKeys.
+	for i, cachedKey := range cachedKeys {
+		found := false
+		for j := i; j < len(keyIndexes); j++ {
+			index := keyIndexes[j]
+			key := keys[index]
+
+			if cachedKey.Equal(key) {
+				found = true
+				keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i]
+
+				err = encodeStorableAsElement(enc, values[index], inlinedTypeInfo)
+				if err != nil {
+					// Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement().
+					return err
+				}
+
+				break
+			}
+		}
+		if !found {
+			return NewEncodingError(fmt.Errorf("failed to find key %v", cachedKey))
+		}
+	}
+
+	return nil
+}
+
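The nested loop above is a selection-style reorder: keyIndexes keeps the not-yet-emitted key positions in its tail, so each cached key is found by scanning only the remaining candidates. A small self-contained illustration of the same reordering, with plain strings standing in for ComparableStorable keys and Storable values:

func reorderExample() []string {
	cachedKeys := []string{"a", "b", "c"}
	keys := []string{"b", "c", "a"}
	values := []string{"vb", "vc", "va"} // values[i] belongs to keys[i]

	keyIndexes := make([]int, len(keys))
	for i := range keyIndexes {
		keyIndexes[i] = i
	}

	out := make([]string, 0, len(cachedKeys))
	for i, cachedKey := range cachedKeys {
		for j := i; j < len(keyIndexes); j++ {
			index := keyIndexes[j]
			if keys[index] == cachedKey {
				// Swap so positions before i+1 hold already-emitted keys.
				keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i]
				out = append(out, values[index])
				break
			}
		}
	}
	return out // ["va", "vb", "vc"]
}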
+// canBeEncodedAsCompactMap returns true if:
+// - map data slab is inlined
+// - map type is composite type
+// - no collision elements
+// - keys are stored inline (not in a separate slab)
+func (m *MapDataSlab) canBeEncodedAsCompactMap() ([]Digest, []ComparableStorable, []Storable, bool) {
+	if !m.inlined {
+		return nil, nil, nil, false
+	}
+
+	if !m.extraData.TypeInfo.IsComposite() {
+		return nil, nil, nil, false
+	}
+
+	elements, ok := m.elements.(*hkeyElements)
+	if !ok {
+		return nil, nil, nil, false
+	}
+
+	keys := make([]ComparableStorable, m.extraData.Count)
+	values := make([]Storable, m.extraData.Count)
+
+	for i, e := range elements.elems {
+		se, ok := e.(*singleElement)
+		if !ok {
+			// Has collision element
+			return nil, nil, nil, false
+		}
+
+		if _, ok = se.key.(SlabIDStorable); ok {
+			// Key is stored in a separate slab
+			return nil, nil, nil, false
+		}
+
+		key, ok := se.key.(ComparableStorable)
+		if !ok {
+			// Key can't be compared (sorted)
+			return nil, nil, nil, false
+		}
+
+		keys[i] = key
+		values[i] = se.value
+	}
+
+	return elements.hkeys, keys, values, true
+}
+
+func (m *MapDataSlab) hasPointer() bool {
+	return m.elements.hasPointer()
+}
+
+func (m *MapDataSlab) ChildStorables() []Storable {
+	return elementsStorables(m.elements, nil)
+}
+
+func (m *MapDataSlab) getPrefixSize() uint32 {
+	if m.inlined {
+		return inlinedMapDataSlabPrefixSize
+	}
+	if m.extraData != nil {
+		return mapRootDataSlabPrefixSize
+	}
+	return mapDataSlabPrefixSize
+}
+
+func (m *MapDataSlab) Inlined() bool {
+	return m.inlined
+}
+
+// Inlinable returns true if:
+// - map data slab is root slab
+// - size of inlined map data slab <= maxInlineSize
+func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool {
+	if m.extraData == nil {
+		// Non-root data slab is not inlinable.
+		return false
+	}
+
+	inlinedSize := inlinedMapDataSlabPrefixSize + m.elements.Size()
+
+	// Inlined byte size must be less than or equal to max inline size.
+	return uint64(inlinedSize) <= maxInlineSize
+}
+
+// Inline converts a not-inlined MapDataSlab to an inlined MapDataSlab and removes it from storage.
+func (m *MapDataSlab) Inline(storage SlabStorage) error {
+	if m.inlined {
+		return NewFatalError(fmt.Errorf("failed to inline MapDataSlab %s: it is inlined already", m.header.slabID))
+	}
+
+	id := m.header.slabID
+
+	// Remove slab from storage because it is going to be inlined.
+	err := storage.Remove(id)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+		return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id))
+	}
+
+	// Update data slab size from not inlined to inlined.
+	m.header.size = inlinedMapDataSlabPrefixSize + m.elements.Size()
+
+	// Update data slab inlined status.
+	m.inlined = true
+
+	return nil
+}
+
+// Uninline converts an inlined MapDataSlab to a not-inlined MapDataSlab and stores it in storage.
+func (m *MapDataSlab) Uninline(storage SlabStorage) error {
+	if !m.inlined {
+		return NewFatalError(fmt.Errorf("failed to uninline MapDataSlab %s: it is not inlined", m.header.slabID))
+	}
 
-func (m *MapDataSlab) ChildStorables() []Storable {
-	return elementsStorables(m.elements, nil)
-}
+	// Update data slab size from inlined to not inlined.
+ m.header.size = mapRootDataSlabPrefixSize + m.elements.Size() -func (m *MapDataSlab) getPrefixSize() uint32 { - if m.extraData != nil { - return mapRootDataSlabPrefixSize + // Update data slab inlined status. + m.inlined = false + + // Store slab in storage + err := storage.Store(m.header.slabID, m) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) } - return mapDataSlabPrefixSize + + return nil } func elementsStorables(elems elements, childStorables []Storable) []Storable { @@ -2426,12 +3165,22 @@ func (m *MapDataSlab) StoredValue(storage SlabStorage) (Value, error) { }, nil } -func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (m *MapDataSlab) Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { - existingValue, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). - return nil, err + return nil, nil, err } // Adjust header's first key @@ -2441,13 +3190,15 @@ func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Diges m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storage.Store(m.header.slabID, m) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + } } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -2465,10 +3216,12 @@ func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storage.Store(m.header.slabID, m) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+			return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID))
+		}
 	}
 
 	return k, v, nil
@@ -3027,6 +3780,22 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (m *MapMetaDataSlab) Inlined() bool {
+	return false
+}
+
+func (m *MapMetaDataSlab) Inlinable(_ uint64) bool {
+	return false
+}
+
+func (m *MapMetaDataSlab) Inline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to inline MapMetaDataSlab %s: MapMetaDataSlab can't be inlined", m.header.slabID))
+}
+
+func (m *MapMetaDataSlab) Uninline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to uninline MapMetaDataSlab %s: MapMetaDataSlab is already uninlined", m.header.slabID))
+}
+
 func (m *MapMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) {
 	if m.extraData == nil {
 		return nil, NewNotValueError(m.SlabID())
@@ -3053,7 +3822,7 @@ func (m *MapMetaDataSlab) ChildStorables() []Storable {
 	return childIDs
 }
 
-func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) {
+func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) {
 
 	ans := -1
 	i, j := 0, len(m.childrenHeaders)
@@ -3068,7 +3837,7 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint
 	}
 
 	if ans == -1 {
-		return nil, NewKeyNotFoundError(key)
+		return nil, nil, NewKeyNotFoundError(key)
 	}
 
 	childHeaderIndex := ans
@@ -3078,14 +3847,24 @@ func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint
 	child, err := getMapSlab(storage, childID)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by getMapSlab().
-		return nil, err
+		return nil, nil, err
 	}
 
 	// Don't need to wrap error as external error because err is already categorized by MapSlab.Get().
 	return child.Get(storage, digester, level, hkey, comparator, key)
 }
 
-func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) {
+func (m *MapMetaDataSlab) Set(
+	storage SlabStorage,
+	b DigesterBuilder,
+	digester Digester,
+	level uint,
+	hkey Digest,
+	comparator ValueComparator,
+	hip HashInputProvider,
+	key Value,
+	value Value,
+) (MapKey, MapValue, error) {
 
 	ans := 0
 	i, j := 0, len(m.childrenHeaders)
@@ -3106,13 +3885,13 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D
 	child, err := getMapSlab(storage, childID)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by getMapSlab().
- return nil, err + return nil, nil, err } m.childrenHeaders[childHeaderIndex] = child.Header() @@ -3126,26 +3905,26 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D err := m.SplitChildSlab(storage, child, childHeaderIndex) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.SplitChildSlab(). - return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } if underflowSize, underflow := child.IsUnderflow(); underflow { err := m.MergeOrRebalanceChildSlab(storage, child, childHeaderIndex, underflowSize) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.MergeOrRebalanceChildSlab(). - return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } err = storage.Store(m.header.slabID, m) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -3888,8 +4667,142 @@ func NewMapWithRootID(storage SlabStorage, rootID SlabID, digestBuilder Digester }, nil } +func (m *OrderedMap) Inlined() bool { + return m.root.Inlined() +} + +func (m *OrderedMap) Inlinable(maxInlineSize uint64) bool { + return m.root.Inlinable(maxInlineSize) +} + +func (m *OrderedMap) setParentUpdater(f parentUpdater) { + m.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value (child) +// so parent map (m) can be notified when child value is modified. +func (m *OrderedMap) setCallbackWithChild( + comparator ValueComparator, + hip HashInputProvider, + key Value, + child Value, + maxInlineSize uint64, +) { + c, ok := child.(mutableValueNotifier) + if !ok { + return + } + + vid := c.ValueID() + + c.setParentUpdater(func() (found bool, err error) { + + // Avoid unnecessary write operation on parent container. + // Child value was stored as SlabIDStorable (not inlined) in parent container, + // and continues to be stored as SlabIDStorable (still not inlinable), + // so no update to parent container is needed. + if !c.Inlined() && !c.Inlinable(maxInlineSize) { + return true, nil + } + + // Retrieve element value under the same key and + // verify retrieved value is this child (c). + _, valueStorable, err := m.get(comparator, hip, key) + if err != nil { + var knf *KeyNotFoundError + if errors.As(err, &knf) { + return false, nil + } + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Get(). + return false, err + } + + // Verify retrieved element value is either SlabIDStorable or Slab, with identical value ID. + switch valueStorable := valueStorable.(type) { + case SlabIDStorable: + sid := SlabID(valueStorable) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := valueStorable.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil + } + + // Set child value with parent map using same key. 
+ // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := m.set(comparator, hip, key, c) + if err != nil { + return false, err + } + + // Verify overwritten storable has identical value ID. + + switch existingValueStorable := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(existingValueStorable) + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := existingValueStorable.SlabID() + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + + default: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) + } + + return true, nil + }) +} + +// notifyParentIfNeeded calls parent updater if this map (m) is a child +// element in another container. +func (m *OrderedMap) notifyParentIfNeeded() error { + if m.parentUpdater == nil { + return nil + } + + // If parentUpdater() doesn't find child map (m), then no-op on parent container + // and unset parentUpdater callback in child map. This can happen when child + // map is an outdated reference (removed or overwritten in parent container). + found, err := m.parentUpdater() + if err != nil { + return err + } + if !found { + m.parentUpdater = nil + } + return nil +} + func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { - _, err := m.get(comparator, hip, key) + _, _, err := m.get(comparator, hip, key) if err != nil { var knf *KeyNotFoundError if errors.As(err, &knf) { @@ -3903,26 +4816,32 @@ func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key Value) (Value, error) { - storable, err := m.get(comparator, hip, key) + keyStorable, valueStorable, err := m.get(comparator, hip, key) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). return nil, err } - v, err := storable.StoredValue(m.Storage) + v, err := valueStorable.StoredValue(m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + // As a parent, this map (m) sets up notification callback with child + // value (v) so this map can be notified when child value is modified. + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize) + return v, nil } -func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, error) { +func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. 
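The parent-updater callback above is the core of mutation propagation: a mutated child re-Sets itself under its original key so the parent can re-inline or uninline it. A minimal sketch of the flow from the application side, assuming a SlabStorage (storage), Address (address), DigesterBuilder (digesterBuilder), TypeInfo (typeInfo), ValueComparator (compare), HashInputProvider (hashInput), and placeholder keys and values; all of these names are hypothetical and not part of this change:

```go
// Sketch under the stated assumptions; not part of this diff.
parent, _ := NewMap(storage, address, digesterBuilder, typeInfo)
child, _ := NewMap(storage, address, digesterBuilder, typeInfo)

// Storing child inside parent registers a parentUpdater callback on child.
_, _ = parent.Set(compare, hashInput, parentKey, child)

// A later mutation of child triggers notifyParentIfNeeded(), which runs the
// callback above and re-Sets child under the same key in parent, letting the
// parent re-inline or uninline the child as its size changes.
_, _ = child.Set(compare, hashInput, childKey, childValue)
```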
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
+ return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
 }
 defer putDigester(keyDigest)
@@ -3931,7 +4850,7 @@ func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key
 hkey, err := keyDigest.Digest(level)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Digester interface.
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
+ return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
 }

 // Don't need to wrap error as external error because err is already categorized by MapSlab.Get().
@@ -3939,6 +4858,34 @@ func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key
 }

 func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {
+ storable, err := m.set(comparator, hip, key, value)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the overwritten storable is an inlined slab, uninline the slab and store it in storage.
+ // This is to prevent potential data loss because the overwritten inlined slab was not in
+ // storage and any future changes to it would have been lost.
+ switch s := storable.(type) {
+ case ArraySlab: // inlined array slab
+ err = s.Uninline(m.Storage)
+ if err != nil {
+ return nil, err
+ }
+ storable = SlabIDStorable(s.SlabID())
+
+ case MapSlab: // inlined map slab
+ err = s.Uninline(m.Storage)
+ if err != nil {
+ return nil, err
+ }
+ storable = SlabIDStorable(s.SlabID())
+ }
+
+ return storable, nil
+}
+
+func (m *OrderedMap) set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {

 keyDigest, err := m.digesterBuilder.Digest(hip, key)
 if err != nil {
@@ -3955,13 +4902,13 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key
 return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
 }

- existingValue, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value)
+ keyStorable, existingMapValueStorable, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by MapSlab.Set().
 return nil, err
 }

- if existingValue == nil {
+ if existingMapValueStorable == nil {
 m.root.ExtraData().incrementCount()
 }
@@ -3974,7 +4921,6 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key
 // Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot().
 return nil, err
 }
- return existingValue, nil
 }
 }
@@ -3986,10 +4932,63 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key
 }
 }

- return existingValue, nil
+ // This map (m) is a parent to the new child (value), and this map
+ // can also be a child in another container.
+ //
+ // As a parent, this map needs to set up a notification callback with
+ // the new child value, so it can be notified when the child is modified.
+ //
+ // If this map is a child, it needs to notify its parent because its
+ // content (maybe also its size) is changed by this "Set" operation.
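The Set wrapper above only hands back the overwritten storable after giving a still-inlined slab its own identity in storage. The same pattern, written as a standalone helper; ensureOwnSlab is a hypothetical name, not part of this change:

```go
// ensureOwnSlab mirrors the switch in OrderedMap.Set above: an overwritten
// storable that is still an inlined slab is written to storage under its own
// slab ID, so later reads through the returned storable can't lose data.
func ensureOwnSlab(storage SlabStorage, storable Storable) (Storable, error) {
	switch s := storable.(type) {
	case ArraySlab: // still-inlined array slab
		if err := s.Uninline(storage); err != nil {
			return nil, err
		}
		return SlabIDStorable(s.SlabID()), nil
	case MapSlab: // still-inlined map slab
		if err := s.Uninline(storage); err != nil {
			return nil, err
		}
		return SlabIDStorable(s.SlabID()), nil
	default:
		return storable, nil
	}
}
```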
+
+ // If this map is a child, it notifies the parent by invoking the callback because
+ // this map is changed by setting the new child.
+ err = m.notifyParentIfNeeded()
+ if err != nil {
+ return nil, err
+ }
+
+ // As a parent, this map sets up a notification callback with the child value
+ // so this map can be notified when the child value is modified.
+ //
+ // Setting up the notification with the new child value can happen at any time
+ // (either before or after this map notifies its parent) because
+ // setting up the notification doesn't trigger any read/write ops on parent or child.
+ maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize()))
+ m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize)
+
+ return existingMapValueStorable, nil
 }

 func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) {
+ keyStorable, valueStorable, err := m.remove(comparator, hip, key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // If the removed storable is an inlined slab, uninline the slab and store it in storage.
+ // This is to prevent potential data loss because the removed inlined slab was not in
+ // storage and any future changes to it would have been lost.
+ switch s := valueStorable.(type) {
+ case ArraySlab:
+ err = s.Uninline(m.Storage)
+ if err != nil {
+ return nil, nil, err
+ }
+ valueStorable = SlabIDStorable(s.SlabID())
+
+ case MapSlab:
+ err = s.Uninline(m.Storage)
+ if err != nil {
+ return nil, nil, err
+ }
+ valueStorable = SlabIDStorable(s.SlabID())
+ }
+
+ return keyStorable, valueStorable, nil
+}
+
+func (m *OrderedMap) remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) {

 keyDigest, err := m.digesterBuilder.Digest(hip, key)
 if err != nil {
@@ -4023,7 +5022,6 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k
 // Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot().
 return nil, nil, err
 }
- return k, v, nil
 }
 }
@@ -4035,6 +5033,13 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k
 }
 }

+ // If this map is a child, it notifies the parent by invoking the callback because
+ // this map is changed by removing an element.
+ err = m.notifyParentIfNeeded()
+ if err != nil {
+ return nil, nil, err
+ }
+
 return k, v, nil
 }

@@ -4142,25 +5147,61 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error {
 }

 func (m *OrderedMap) SlabID() SlabID {
+ if m.root.Inlined() {
+ return SlabIDUndefined
+ }
 return m.root.SlabID()
 }

 func (m *OrderedMap) ValueID() ValueID {
- sid := m.SlabID()
+ return slabIDToValueID(m.root.SlabID())
+}

- var id ValueID
- copy(id[:], sid.address[:])
- copy(id[8:], sid.index[:])
+// Storable returns OrderedMap m as either:
+// - SlabIDStorable, or
+// - inlined data slab storable
+func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) {

- return id
-}
+ inlined := m.root.Inlined()
+ inlinable := m.root.Inlinable(maxInlineSize)

-func (m *OrderedMap) StoredValue(_ SlabStorage) (Value, error) {
- return m, nil
-}
+ switch {
+
+ case inlinable && inlined:
+ // Root slab is inlinable and was inlined.
+ // Return root slab as storable, no size adjustment and change to storage.
+ return m.root, nil
+
+ case !inlinable && !inlined:
+ // Root slab is not inlinable and was not inlined.
+ // Return root slab ID as storable, no size adjustment and change to storage.
+ return SlabIDStorable(m.SlabID()), nil
+
+ case inlinable && !inlined:
+ // Root slab is inlinable and was NOT inlined.
+
+ // Inline root data slab.
+ err := m.root.Inline(m.Storage)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.root, nil
+
+ case !inlinable && inlined:
+ // Root slab is NOT inlinable and was inlined.
+
+ // Uninline root slab.
+ err := m.root.Uninline(m.Storage)
+ if err != nil {
+ return nil, err
+ }

-func (m *OrderedMap) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) {
- return SlabIDStorable(m.SlabID()), nil
+ return SlabIDStorable(m.SlabID()), nil
+
+ default:
+ panic("not reachable")
+ }
 }

 func (m *OrderedMap) Count() uint64 {
@@ -4179,7 +5220,7 @@ func (m *OrderedMap) Type() TypeInfo {
 }

 func (m *OrderedMap) String() string {
- iterator, err := m.Iterator()
+ iterator, err := m.ReadOnlyIterator()
 if err != nil {
 return err.Error()
 }
@@ -4238,19 +5279,19 @@ func (m *MapExtraData) decrementCount() {
 m.Count--
 }

-type MapElementIterator struct {
+type mapElementIterator struct {
 storage SlabStorage
 elements elements
 index int
- nestedIterator *MapElementIterator
+ nestedIterator *mapElementIterator
 }

-func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) {
+func (i *mapElementIterator) next() (key MapKey, value MapValue, err error) {

 if i.nestedIterator != nil {
- key, value, err = i.nestedIterator.Next()
+ key, value, err = i.nestedIterator.next()
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next().
+ // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next().
 return nil, nil, err
 }
 if key != nil {
@@ -4281,14 +5322,14 @@ func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) {
 return nil, nil, err
 }

- i.nestedIterator = &MapElementIterator{
+ i.nestedIterator = &mapElementIterator{
 storage: i.storage,
 elements: elems,
 }
 i.index++
 // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next().
- return i.nestedIterator.Next()
+ return i.nestedIterator.next()

 default:
 return nil, nil, NewSlabDataError(fmt.Errorf("unexpected element type %T during map iteration", e))
@@ -4299,9 +5340,11 @@ type MapEntryIterationFunc func(Value, Value) (resume bool, err error)
 type MapElementIterationFunc func(Value) (resume bool, err error)

 type MapIterator struct {
- storage SlabStorage
+ m *OrderedMap
+ comparator ValueComparator
+ hip HashInputProvider
 id SlabID
- elemIterator *MapElementIterator
+ elemIterator *mapElementIterator
 }

 func (i *MapIterator) Next() (key Value, value Value, err error) {
@@ -4318,24 +5361,29 @@ func (i *MapIterator) Next() (key Value, value Value, err error) {
 }

 var ks, vs Storable
- ks, vs, err = i.elemIterator.Next()
+ ks, vs, err = i.elemIterator.next()
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next().
 return nil, nil, err
 }

 if ks != nil {
- key, err = ks.StoredValue(i.storage)
+ key, err = ks.StoredValue(i.m.Storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
 return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value")
 }

- value, err = vs.StoredValue(i.storage)
+ value, err = vs.StoredValue(i.m.Storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
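The switch in Storable above handles all four combinations of the root slab's inlinable and inlined states. As an editorial summary derived from that code:

```
inlinable | inlined | side effect                | returned storable
----------+---------+----------------------------+----------------------------
yes       | yes     | none                       | m.root (inlined data slab)
no        | no      | none                       | SlabIDStorable(m.SlabID())
yes       | no      | m.root.Inline(m.Storage)   | m.root (inlined data slab)
no        | yes     | m.root.Uninline(m.Storage) | SlabIDStorable(m.SlabID())
```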
 return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value")
 }

+ if i.CanMutate() {
+ maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize()))
+ i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize)
+ }
+
 return key, value, nil
 }

@@ -4359,13 +5407,13 @@ func (i *MapIterator) NextKey() (key Value, err error) {
 }

 var ks Storable
- ks, _, err = i.elemIterator.Next()
+ ks, _, err = i.elemIterator.next()
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next().
 return nil, err
 }

 if ks != nil {
- key, err = ks.StoredValue(i.storage)
+ key, err = ks.StoredValue(i.m.Storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
 return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value")
@@ -4393,19 +5441,30 @@ func (i *MapIterator) NextValue() (value Value, err error) {
 }
 }

- var vs Storable
- _, vs, err = i.elemIterator.Next()
+ var ks, vs Storable
+ ks, vs, err = i.elemIterator.next()
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next().
 return nil, err
 }

 if vs != nil {
- value, err = vs.StoredValue(i.storage)
+ value, err = vs.StoredValue(i.m.Storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
 return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value")
 }

+ if i.CanMutate() {
+ key, err := ks.StoredValue(i.m.Storage)
+ if err != nil {
+ // Wrap err as external error (if needed) because err is returned by Storable interface.
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value")
+ }
+
+ maxInlineSize := maxInlineMapValueSize(uint64(ks.ByteSize()))
+ i.m.setCallbackWithChild(i.comparator, i.hip, key, value, maxInlineSize)
+ }
+
 return value, nil
 }

@@ -4416,7 +5475,7 @@ func (i *MapIterator) NextValue() (value Value, err error) {
 }

 func (i *MapIterator) advance() error {
- slab, found, err := i.storage.Retrieve(i.id)
+ slab, found, err := i.m.Storage.Retrieve(i.id)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by SlabStorage interface.
 return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id))
@@ -4432,15 +5491,15 @@ func (i *MapIterator) advance() error {

 i.id = dataSlab.next

- i.elemIterator = &MapElementIterator{
- storage: i.storage,
+ i.elemIterator = &mapElementIterator{
+ storage: i.m.Storage,
 elements: dataSlab.elements,
 }

 return nil
 }

-func (m *OrderedMap) Iterator() (*MapIterator, error) {
+func (m *OrderedMap) iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) {
 slab, err := firstMapDataSlab(m.Storage, m.root)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab().
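With the iterator now carrying the map itself plus a comparator and hash input provider, Next can register the parent-updater callback on each child container it returns. A sketch of mutating during iteration, assuming application-supplied compare and hashInput plus placeholder someKey and someValue:

```go
it, err := m.Iterator(compare, hashInput)
if err != nil {
	return err
}
for {
	k, v, err := it.Next()
	if err != nil {
		return err
	}
	if k == nil {
		break // iteration finished
	}
	if childMap, ok := v.(*OrderedMap); ok {
		// Safe to mutate: Next registered childMap's parentUpdater callback.
		if _, err := childMap.Set(compare, hashInput, someKey, someValue); err != nil {
			return err
		}
	}
}
```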
@@ -4450,23 +5509,41 @@ func (m *OrderedMap) Iterator() (*MapIterator, error) {
 dataSlab := slab.(*MapDataSlab)

 return &MapIterator{
- storage: m.Storage,
- id: dataSlab.next,
- elemIterator: &MapElementIterator{
+ m: m,
+ comparator: comparator,
+ hip: hip,
+ id: dataSlab.next,
+ elemIterator: &mapElementIterator{
 storage: m.Storage,
 elements: dataSlab.elements,
 },
 }, nil
 }

-func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error {
+func (i *MapIterator) CanMutate() bool {
+ return i.comparator != nil && i.hip != nil
+}

- iterator, err := m.Iterator()
+func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (*MapIterator, error) {
+ iterator, err := m.iterator(comparator, hip)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator().
- return err
+ return nil, err
 }
+ if !iterator.CanMutate() {
+ return nil, NewUserError(fmt.Errorf("failed to create MapIterator: ValueComparator or HashInputProvider is nil"))
+ }
+ return iterator, nil
+}
+
+// ReadOnlyIterator returns a read-only iterator for map elements.
+// If elements of child containers are mutated, those changes
+// are not guaranteed to persist.
+func (m *OrderedMap) ReadOnlyIterator() (*MapIterator, error) {
+ return m.iterator(nil, nil)
+}
+
+func iterateMap(iterator *MapIterator, fn MapEntryIterationFunc) error {
+ var err error
 var key, value Value
 for {
 key, value, err = iterator.Next()
@@ -4488,9 +5565,27 @@ func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error {
 }
 }

-func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error {
+func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, fn MapEntryIterationFunc) error {
+ iterator, err := m.Iterator(comparator, hip)
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator().
+ return err
+ }
+ return iterateMap(iterator, fn)
+}
+
+func (m *OrderedMap) IterateReadOnly(fn MapEntryIterationFunc) error {
+ iterator, err := m.ReadOnlyIterator()
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator().
+ return err
+ }
+ return iterateMap(iterator, fn)
+}
+
+func (m *OrderedMap) IterateReadOnlyKeys(fn MapElementIterationFunc) error {

- iterator, err := m.Iterator()
+ iterator, err := m.ReadOnlyIterator()
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator().
 return err
@@ -4517,14 +5612,8 @@ func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error {
 }
 }

-func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error {
-
- iterator, err := m.Iterator()
- if err != nil {
- // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator().
- return err
- }
-
+func iterateMapValues(iterator *MapIterator, fn MapElementIterationFunc) error {
+ var err error
 var value Value
 for {
 value, err = iterator.NextValue()
@@ -4546,6 +5635,24 @@ func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error {
 }
 }

+func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error {
+ iterator, err := m.Iterator(comparator, hip)
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator().
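The split above leaves two entry points: Iterator, which requires a comparator and hash input provider and supports persisting child mutations, and ReadOnlyIterator, which takes no arguments but may lose child mutations. Read-only traversal in the usual callback style:

```go
// Read-only traversal; mutating child containers seen here is not
// guaranteed to persist, per the ReadOnlyIterator doc above.
err := m.IterateReadOnly(func(k Value, v Value) (bool, error) {
	fmt.Printf("%v -> %v\n", k, v)
	return true, nil // resume iteration
})
```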
+ return err
+ }
+ return iterateMapValues(iterator, fn)
+}
+
+func (m *OrderedMap) IterateReadOnlyValues(fn MapElementIterationFunc) error {
+ iterator, err := m.ReadOnlyIterator()
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator().
+ return err
+ }
+ return iterateMapValues(iterator, fn)
+}
+
 type MapPopIterationFunc func(Storable, Storable)

 // PopIterate iterates and removes elements backward.
@@ -4564,22 +5671,33 @@ func (m *OrderedMap) PopIterate(fn MapPopIterationFunc) error {
 extraData := m.root.ExtraData()
 extraData.Count = 0

+ inlined := m.root.Inlined()
+
+ prefixSize := uint32(mapRootDataSlabPrefixSize)
+ if inlined {
+ prefixSize = uint32(inlinedMapDataSlabPrefixSize)
+ }
+
 // Set root to empty data slab
 m.root = &MapDataSlab{
 header: MapSlabHeader{
 slabID: rootID,
- size: mapRootDataSlabPrefixSize + hkeyElementsPrefixSize,
+ size: prefixSize + hkeyElementsPrefixSize,
 },
 elements: newHkeyElements(0),
 extraData: extraData,
+ inlined: inlined,
 }

- // Save root slab
- err = m.Storage.Store(m.root.SlabID(), m.root)
- if err != nil {
- // Wrap err as external error (if needed) because err is returned by SlabStorage interface.
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID()))
+ if !m.Inlined() {
+ // Save root slab
+ err = m.Storage.Store(m.root.SlabID(), m.root)
+ if err != nil {
+ // Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+ return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID()))
+ }
 }
+
 return nil
 }

@@ -4672,12 +5790,12 @@ func NewMapFromBatchData(
 prevElem := elements.elems[lastElementIndex]
 prevElemSize := prevElem.Size()

- elem, existingValue, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value)
+ elem, _, existingMapValueStorable, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by element.Set().
 return nil, err
 }
- if existingValue != nil {
+ if existingMapValueStorable != nil {
 return nil, NewDuplicateKeyError(key)
 }

@@ -5132,8 +6250,8 @@ func (i *MapLoadedValueIterator) Next() (Value, Value, error) {
 return nil, nil, nil
 }

-// LoadedValueIterator returns iterator to iterate loaded map elements.
-func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) {
+// ReadOnlyLoadedValueIterator returns an iterator over already-loaded map elements.
+func (m *OrderedMap) ReadOnlyLoadedValueIterator() (*MapLoadedValueIterator, error) {
 switch slab := m.root.(type) {

 case *MapDataSlab:
@@ -5171,9 +6289,9 @@ func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) {
 }
 }

-// IterateLoadedValues iterates loaded map values.
-func (m *OrderedMap) IterateLoadedValues(fn MapEntryIterationFunc) error {
- iterator, err := m.LoadedValueIterator()
+// IterateReadOnlyLoadedValues iterates loaded map values.
+func (m *OrderedMap) IterateReadOnlyLoadedValues(fn MapEntryIterationFunc) error {
+ iterator, err := m.ReadOnlyLoadedValueIterator()
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyLoadedValueIterator().
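ReadOnlyLoadedValueIterator and its convenience wrapper visit only elements whose slabs are already cached by the storage, which makes them suitable for tasks (such as cache inspection or eviction) that must not fault in cold slabs. For example:

```go
// Visit only entries that are already loaded; no reads are issued to the
// persistent layer for slabs that aren't cached.
err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) {
	// process the already-loaded entry
	return true, nil
})
```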
 return err
diff --git a/map_debug.go b/map_debug.go
index 051b7acb..3b444350 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -66,53 +66,58 @@ func GetMapStats(m *OrderedMap) (MapStats, error) {
 return MapStats{}, err
 }

- if slab.IsData() {
+ switch slab := slab.(type) {
+ case *MapDataSlab:
 dataSlabCount++

- leaf := slab.(*MapDataSlab)
- elementGroups := []elements{leaf.elements}
+ elementGroups := []elements{slab.elements}

 for len(elementGroups) > 0 {

 var nestedElementGroups []elements

- for i := 0; i < len(elementGroups); i++ {
-
- elems := elementGroups[i]
-
- for j := 0; j < int(elems.Count()); j++ {
- elem, err := elems.Element(j)
+ for _, group := range elementGroups {
+ for i := 0; i < int(group.Count()); i++ {
+ elem, err := group.Element(i)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by elements.Element().
 return MapStats{}, err
 }

- if group, ok := elem.(elementGroup); ok {
- if !group.Inline() {
+ switch e := elem.(type) {
+ case elementGroup:
+ nestedGroup := e
+
+ if !nestedGroup.Inline() {
 collisionDataSlabCount++
 }

- nested, err := group.Elements(m.Storage)
+ nested, err := nestedGroup.Elements(m.Storage)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements().
 return MapStats{}, err
 }
+
 nestedElementGroups = append(nestedElementGroups, nested)

- } else {
- e := elem.(*singleElement)
+ case *singleElement:
 if _, ok := e.key.(SlabIDStorable); ok {
 storableDataSlabCount++
 }
 if _, ok := e.value.(SlabIDStorable); ok {
 storableDataSlabCount++
 }
+ // This handles the case of an inlined array or map value that contains SlabIDs
+ ids := getSlabIDFromStorable(e.value, nil)
+ storableDataSlabCount += uint64(len(ids))
 }
 }
 }
+
 elementGroups = nestedElementGroups
 }
- } else {
+
+ case *MapMetaDataSlab:
 metaDataSlabCount++

 for _, storable := range slab.ChildStorables() {
@@ -170,12 +175,12 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) {
 return nil, err
 }

- if slab.IsData() {
- dataSlab := slab.(*MapDataSlab)
- dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab))
+ switch slab := slab.(type) {
+ case *MapDataSlab:
+ dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab))

- for i := 0; i < int(dataSlab.elements.Count()); i++ {
- elem, err := dataSlab.elements.Element(i)
+ for i := 0; i < int(slab.elements.Count()); i++ {
+ elem, err := slab.elements.Element(i)
 if err != nil {
 // Don't need to wrap error as external error because err is already categorized by elements.Element().
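Both debug helpers in this file are typically driven from tests. A usage sketch, assuming only the functions defined above:

```go
stats, err := GetMapStats(m) // aggregate slab counts per kind
if err == nil {
	fmt.Printf("%+v\n", stats)
}
dumps, err := DumpMapSlabs(m) // human-readable slab dump, level by level
if err == nil {
	for _, line := range dumps {
		fmt.Println(line)
	}
}
```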
 return nil, err
@@ -188,16 +193,10 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) {
 }
 }

- childStorables := dataSlab.ChildStorables()
- for _, e := range childStorables {
- if id, ok := e.(SlabIDStorable); ok {
- overflowIDs = append(overflowIDs, SlabID(id))
- }
- }
+ overflowIDs = getSlabIDFromStorable(slab, overflowIDs)

- } else {
- meta := slab.(*MapMetaDataSlab)
- dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta))
+ case *MapMetaDataSlab:
+ dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab))

 for _, storable := range slab.ChildStorables() {
 id, ok := storable.(SlabIDStorable)
@@ -247,8 +246,30 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) {
 return dumps, nil
 }

-func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error {
+func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error {
+ return verifyMap(m, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{})
+}
+
+func verifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error {
+
+ // Verify map address (independent of map inlined status)
+ if address != m.Address() {
+ return NewFatalError(fmt.Errorf("map address %v, got %v", address, m.Address()))
+ }
+
+ // Verify map value ID (independent of map inlined status)
+ err := verifyMapValueID(m)
+ if err != nil {
+ return err
+ }
+
+ // Verify map slab ID (dependent on map inlined status)
+ err = verifyMapSlabID(m)
+ if err != nil {
+ return err
+ }
+
+ // Verify map extra data
 extraData := m.root.ExtraData()
 if extraData == nil {
 return NewFatalError(fmt.Errorf("root slab %d doesn't have extra data", m.root.SlabID()))
@@ -270,10 +291,19 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash
 return NewFatalError(fmt.Errorf("root slab %d seed is uninitialized", m.root.SlabID()))
 }

- computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab(
- m.Storage, m.digesterBuilder, tic, hip, m.root.SlabID(), 0, nil, []SlabID{}, []SlabID{}, []Digest{})
+ v := &mapVerifier{
+ storage: m.Storage,
+ address: address,
+ digesterBuilder: m.digesterBuilder,
+ tic: tic,
+ hip: hip,
+ inlineEnabled: inlineEnabled,
+ }
+
+ computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifySlab(
+ m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}, slabIDs)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by validMapSlab().
+ // Don't need to wrap error as external error because err is already categorized by verifySlab().
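VerifyMap's new signature takes the expected address and an inlineEnabled flag. A usage sketch, e.g. from a test; typeInfo, compareTypeInfo (a TypeInfoComparator), hashInput (a HashInputProvider), and t (a *testing.T) are assumed to come from the caller:

```go
err := VerifyMap(m, address, typeInfo, compareTypeInfo, hashInput, true /* inlineEnabled */)
if err != nil {
	t.Fatal(err) // a map invariant was violated
}
```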
return err } @@ -315,17 +345,23 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash return nil } -func validMapSlab( - storage SlabStorage, - digesterBuilder DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, - id SlabID, +type mapVerifier struct { + storage SlabStorage + address Address + digesterBuilder DigesterBuilder + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +func (v *mapVerifier) verifySlab( + slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -334,10 +370,30 @@ func validMapSlab( err error, ) { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return 0, nil, nil, nil, err + id := slab.Header().slabID + + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + + // Verify slab address (independent of map inlined status) + if v.address != id.address { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("map slab address %v, got %v", v.address, id.address)) + } + + // Verify that inlined slab is not in storage + if slab.Inlined() { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. + return 0, nil, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) + } + if exist { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) + } } if level > 0 { @@ -367,89 +423,147 @@ func validMapSlab( } } - if slab.IsData() { + switch slab := slab.(type) { + case *MapDataSlab: + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) - } + case *MapMetaDataSlab: + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) - // Verify data slab's elements - elementCount, elementSize, err := validMapElements(storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElements(). 
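verifySlab threads one visited-set through the whole traversal so a slab reachable from two parents is reported as a duplicate. The idiom in isolation (sketch; id stands for the slab ID being visited):

```go
visited := map[SlabID]struct{}{}
if _, dup := visited[id]; dup {
	// duplicate slab ID: the same slab is referenced twice
}
visited[id] = struct{}{}
```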
- return 0, nil, nil, nil, err - } + default: + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } +} - // Verify slab's first key - if dataSlab.elements.firstKey() != dataSlab.header.firstKey { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header first key %d is wrong, want %d", - id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) - } +func (v *mapVerifier) verifyDataSlab( + dataSlab *MapDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := dataSlab.header.slabID - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(mapDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(mapRootDataSlabPrefixSize) - } - computedSize += elementSize + if !dataSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapDataSlab %s is not data", id)) + } - if computedSize != dataSlab.header.size { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) - } + // Verify data slab's elements + elementCount, elementSize, err := v.verifyElements(id, dataSlab.elements, 0, nil, slabIDs) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyElements(). + return 0, nil, nil, nil, err + } - // Verify any size flag - if dataSlab.anySize { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d anySize %t is wrong, want false", - id, dataSlab.anySize)) - } + // Verify slab's first key + if dataSlab.elements.firstKey() != dataSlab.header.firstKey { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header first key %d is wrong, want %d", + id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) + } - // Verify collision group flag - if dataSlab.collisionGroup { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", - id, dataSlab.collisionGroup)) + // Verify that only root slab can be inlined + if level > 0 && dataSlab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(mapDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(mapRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedMapDataSlabPrefixSize) } + } + computedSize += elementSize - dataSlabIDs = append(dataSlabIDs, id) + if computedSize != dataSlab.header.size { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) - } + // Verify any size flag + if dataSlab.anySize { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d anySize %t is wrong, want false", + id, dataSlab.anySize)) + } + + // Verify collision group flag + if dataSlab.collisionGroup { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", + id, dataSlab.collisionGroup)) + } - firstKeys = append(firstKeys, dataSlab.header.firstKey) + 
dataSlabIDs = append(dataSlabIDs, id) - return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) } - meta, ok := slab.(*MapMetaDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) + firstKeys = append(firstKeys, dataSlab.header.firstKey) + + return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil +} + +func (v *mapVerifier) verifyMetaDataSlab( + metaSlab *MapMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := metaSlab.header.slabID + + if metaSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s is data", id)) + } + + if metaSlab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s can't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("root metadata slab %d has %d children, want at least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } elementCount = 0 - for i := 0; i < len(meta.childrenHeaders); i++ { - h := meta.childrenHeaders[i] + for i := 0; i < len(metaSlab.childrenHeaders); i++ { + h := metaSlab.childrenHeaders[i] + + childSlab, err := getMapSlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return 0, nil, nil, nil, err + } // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(storage, digesterBuilder, tic, hip, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). return 0, nil, nil, nil, err } @@ -457,53 +571,50 @@ func validMapSlab( } // Verify slab header first key - if meta.childrenHeaders[0].firstKey != meta.header.firstKey { + if metaSlab.childrenHeaders[0].firstKey != metaSlab.header.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header first key %d is wrong, want %d", - id, meta.header.firstKey, meta.childrenHeaders[0].firstKey)) + id, metaSlab.header.firstKey, metaSlab.childrenHeaders[0].firstKey)) } // Verify that child slab's first keys are sorted. - sortedHKey := sort.SliceIsSorted(meta.childrenHeaders, func(i, j int) bool { - return meta.childrenHeaders[i].firstKey < meta.childrenHeaders[j].firstKey + sortedHKey := sort.SliceIsSorted(metaSlab.childrenHeaders, func(i, j int) bool { + return metaSlab.childrenHeaders[i].firstKey < metaSlab.childrenHeaders[j].firstKey }) if !sortedHKey { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, meta.childrenHeaders)) + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, metaSlab.childrenHeaders)) } // Verify that child slab's first keys are unique. 
- if len(meta.childrenHeaders) > 1 { - prev := meta.childrenHeaders[0].firstKey - for _, h := range meta.childrenHeaders[1:] { + if len(metaSlab.childrenHeaders) > 1 { + prev := metaSlab.childrenHeaders[0].firstKey + for _, h := range metaSlab.childrenHeaders[1:] { if prev == h.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d child header first key isn't unique %v", - id, meta.childrenHeaders)) + id, metaSlab.childrenHeaders)) } prev = h.firstKey } } // Verify slab header's size - computedSize := uint32(len(meta.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize - if computedSize != meta.header.size { + computedSize := uint32(len(metaSlab.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize + if computedSize != metaSlab.header.size { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header size %d is wrong, want %d", - id, meta.header.size, computedSize)) + id, metaSlab.header.size, computedSize)) } return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func validMapElements( - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyElements( id SlabID, elements elements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -512,23 +623,20 @@ func validMapElements( switch elems := elements.(type) { case *hkeyElements: - return validMapHkeyElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return v.verifyHkeyElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) case *singleElements: - return validMapSingleElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return v.verifySingleElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } } -func validMapHkeyElements( - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyHkeyElements( id SlabID, elements *hkeyElements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -572,6 +680,10 @@ func validMapHkeyElements( for i := 0; i < len(elements.elems); i++ { e := elements.elems[i] + hkeys := make([]Digest, len(hkeyPrefixes)+1) + copy(hkeys, hkeyPrefixes) + hkeys[len(hkeys)-1] = elements.hkeys[i] + elementSize += digestSize // Verify element size is <= inline size @@ -583,21 +695,17 @@ func validMapHkeyElements( } } - if group, ok := e.(elementGroup); ok { - - ge, err := group.Elements(storage) + switch e := e.(type) { + case elementGroup: + group, err := e.Elements(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements(). return 0, 0, err } - hkeys := make([]Digest, len(hkeyPrefixes)+1) - copy(hkeys, hkeyPrefixes) - hkeys[len(hkeys)-1] = elements.hkeys[i] - - count, size, err := validMapElements(storage, db, tic, hip, id, ge, digestLevel+1, hkeys) + count, size, err := v.verifyElements(id, group, digestLevel+1, hkeys, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElement(). + // Don't need to wrap error as external error because err is already categorized by verifyElements(). 
 return 0, 0, err
 }

@@ -616,21 +724,11 @@ func validMapHkeyElements(
 elementCount += count

- } else {
-
- se, ok := e.(*singleElement)
- if !ok {
- return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want *singleElement", id, e))
- }
-
- hkeys := make([]Digest, len(hkeyPrefixes)+1)
- copy(hkeys, hkeyPrefixes)
- hkeys[len(hkeys)-1] = elements.hkeys[i]
-
+ case *singleElement:
 // Verify element
- computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, se, hkeys)
+ computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeys, slabIDs)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by validSingleElement().
+ // Don't need to wrap error as external error because err is already categorized by verifySingleElement().
 return 0, 0, fmt.Errorf("data slab %d: %w", id, err)
 }

@@ -644,6 +742,9 @@ func validMapHkeyElements(
 elementSize += computedSize
 elementCount++
+
+ default:
+ return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want either elementGroup or *singleElement", id, e))
 }
 }

@@ -655,15 +756,12 @@ func validMapHkeyElements(
 return elementCount, elementSize, nil
 }

-func validMapSingleElements(
- storage SlabStorage,
- db DigesterBuilder,
- tic TypeInfoComparator,
- hip HashInputProvider,
+func (v *mapVerifier) verifySingleElements(
 id SlabID,
 elements *singleElements,
 digestLevel uint,
 hkeyPrefixes []Digest,
+ slabIDs map[SlabID]struct{},
 ) (
 elementCount uint64,
 elementSize uint32,
@@ -682,9 +780,9 @@ func validMapSingleElements(
 for _, e := range elements.elems {

 // Verify element
- computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, e, hkeyPrefixes)
+ computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes, slabIDs)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by validSingleElement().
+ // Don't need to wrap error as external error because err is already categorized by verifySingleElement().
 return 0, 0, fmt.Errorf("data slab %d: %w", id, err)
 }

@@ -713,42 +811,86 @@ func validMapSingleElements(
 return uint64(len(elements.elems)), elementSize, nil
 }

-func validSingleElement(
- storage SlabStorage,
- db DigesterBuilder,
- tic TypeInfoComparator,
- hip HashInputProvider,
+func (v *mapVerifier) verifySingleElement(
 e *singleElement,
 digests []Digest,
+ slabIDs map[SlabID]struct{},
 ) (
 size uint32,
 digestMaxLevel uint,
 err error,
 ) {
+ // Verify key storable's size doesn't exceed the size limit
+ if e.key.ByteSize() > uint32(maxInlineMapKeySize) {
+ return 0, 0, NewFatalError(
+ fmt.Errorf(
+ "map element key %s size %d exceeds size limit %d",
+ e.key, e.key.ByteSize(), maxInlineMapKeySize,
+ ))
+ }
+
+ // Verify value storable's size doesn't exceed the size limit
+ valueSizeLimit := maxInlineMapValueSize(uint64(e.key.ByteSize()))
+ if e.value.ByteSize() > uint32(valueSizeLimit) {
+ return 0, 0, NewFatalError(
+ fmt.Errorf(
+ "map element value %s size %d exceeds size limit %d",
+ e.value, e.value.ByteSize(), valueSizeLimit,
+ ))
+ }

 // Verify key
- kv, err := e.key.StoredValue(storage)
+ kv, err := e.key.StoredValue(v.storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
 return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e))
 }

- err = ValidValue(kv, nil, tic, hip)
+ switch e.key.(type) {
+ case *ArrayDataSlab, *MapDataSlab:
+ // Verify that a key isn't an inlined array or map
+ return 0, 0, NewFatalError(fmt.Errorf("element %s key shouldn't be inlined array or map", e))
+ }
+
+ err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by ValidValue().
+ // Don't need to wrap error as external error because err is already categorized by verifyValue().
 return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err)
 }

 // Verify value
- vv, err := e.value.StoredValue(storage)
+ vv, err := e.value.StoredValue(v.storage)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by Storable interface.
 return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e))
 }

- err = ValidValue(vv, nil, tic, hip)
+ switch e := e.value.(type) {
+ case SlabIDStorable:
+ // Verify that a not-inlined value is larger than the inline size limit, or isn't inlinable
+ if v.inlineEnabled {
+ err = verifyNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit))
+ if err != nil {
+ return 0, 0, err
+ }
+ }
+
+ case *ArrayDataSlab:
+ // Verify inlined element's inlined status
+ if !e.Inlined() {
+ return 0, 0, NewFatalError(fmt.Errorf("inlined array inlined status is false"))
+ }
+
+ case *MapDataSlab:
+ // Verify inlined element's inlined status
+ if !e.Inlined() {
+ return 0, 0, NewFatalError(fmt.Errorf("inlined map inlined status is false"))
+ }
+ }
+
+ err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs)
 if err != nil {
- // Don't need to wrap error as external error because err is already categorized by ValidValue().
+ // Don't need to wrap error as external error because err is already categorized by verifyValue().
 return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err)
 }

@@ -759,7 +901,7 @@ func validSingleElement(
 }

 // Verify digest
- digest, err := db.Digest(hip, kv)
+ digest, err := v.digesterBuilder.Digest(v.hip, kv)
 if err != nil {
 // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface.
 return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create digester")
@@ -778,21 +920,21 @@ func validSingleElement(
 return computedSize, digest.Levels(), nil
 }

-func ValidValue(value Value, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error {
+func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error {
 switch v := value.(type) {
 case *Array:
- return ValidArray(v, typeInfo, tic, hip)
+ return verifyArray(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs)
 case *OrderedMap:
- return ValidMap(v, typeInfo, tic, hip)
+ return verifyMap(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs)
 }
 return nil
 }

-// ValidMapSerialization traverses ordered map tree and verifies serialization
+// VerifyMapSerialization traverses ordered map tree and verifies serialization
 // by encoding, decoding, and re-encoding slabs.
 // It compares in-memory objects of original slab with decoded slab.
 // It also compares encoded data of original slab with encoded data of decoded slab.
-func ValidMapSerialization( +func VerifyMapSerialization( m *OrderedMap, cborDecMode cbor.DecMode, cborEncMode cbor.EncMode, @@ -800,151 +942,128 @@ func ValidMapSerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { - return validMapSlabSerialization( - m.Storage, - m.root.SlabID(), - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + v := &serializationVerifier{ + storage: m.Storage, + cborDecMode: cborDecMode, + cborEncMode: cborEncMode, + decodeStorable: decodeStorable, + decodeTypeInfo: decodeTypeInfo, + compare: compare, + } + return v.verifyMapSlab(m.root) } -func validMapSlabSerialization( - storage SlabStorage, - id SlabID, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return err - } + id := slab.SlabID() // Encode slab - data, err := Encode(slab, cborEncMode) + data, err := Encode(slab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } // Decode encoded slab - decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo) + decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by DecodeSlab(). return err } // Re-encode decoded slab - dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode) + dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } + // Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab) + if !bytes.Equal(data, dataFromDecodedSlab) { + return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v", + id, dataFromDecodedSlab, data)) + } + // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined compact map because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // compact map extra data section for reuse, and only compact map field + // values are encoded in non-extra data section. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
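The round-trip checks above (encode, decode, re-encode, then compare) are driven through VerifyMapSerialization. A usage sketch; the CBOR modes, decoders, and comparator are assumed to be supplied by the application:

```go
err := VerifyMapSerialization(
	m,
	cborDecMode,     // cbor.DecMode
	cborEncMode,     // cbor.EncMode
	decodeStorable,  // StorableDecoder
	decodeTypeInfo,  // TypeInfoDecoder
	compareStorable, // StorableComparator
)
if err != nil {
	return err // serialization is not faithful or not deterministic
}
```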
+ return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError( - fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) - } - - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError( - fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError( + fmt.Errorf("slab %d encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) } + } + switch slab := slab.(type) { + case *MapDataSlab: decodedDataSlab, ok := decodedSlab.(*MapDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not MapDataSlab", id)) } // Compare slabs - err = mapDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapDataSlabEqual(). return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err) } return nil - } - metaSlab, ok := slab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) - } + case *MapMetaDataSlab: + decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) + } - decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) - } + // Compare slabs + err = v.mapMetaDataSlabEqual(slab, decodedMetaSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). + return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) + } - // Compare slabs - err = mapMetaDataSlabEqual(metaSlab, decodedMetaSlab) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). - return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) - } + for _, h := range slab.childrenHeaders { + slab, err := getMapSlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return err + } - for _, h := range metaSlab.childrenHeaders { - // Verify child slabs - err = validMapSlabSerialization( - storage, - h.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization(). - return err + // Verify child slabs + err = v.verifyMapSlab(slab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyMapSlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } } -func mapDataSlabEqual( - expected *MapDataSlab, - actual *MapDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) error { // Compare extra data err := mapExtraDataEqual(expected.extraData, actual.extraData) @@ -953,6 +1072,11 @@ func mapDataSlabEqual( return err } + // Compare inlined + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -974,16 +1098,7 @@ func mapDataSlabEqual( } // Compare elements - err = mapElementsEqual( - expected.elements, - actual.elements, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapElementsEqual(expected.elements, actual.elements) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementsEqual(). return err @@ -992,16 +1107,7 @@ func mapDataSlabEqual( return nil } -func mapElementsEqual( - expected elements, - actual elements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementsEqual(expected, actual elements) error { switch expectedElems := expected.(type) { case *hkeyElements: @@ -1009,48 +1115,21 @@ func mapElementsEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapHkeyElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapHkeyElementsEqual(expectedElems, actualElems) case *singleElements: actualElems, ok := actual.(*singleElements) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementsEqual(expectedElems, actualElems) } return nil } -func mapHkeyElementsEqual( - expected *hkeyElements, - actual *hkeyElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyElements) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("hkeyElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1078,16 +1157,7 @@ func mapHkeyElementsEqual( expectedEle := expected.elems[i] actualEle := actual.elems[i] - err := mapElementEqual( - expectedEle, - actualEle, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err := v.mapElementEqual(expectedEle, actualEle) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). 
return err @@ -1097,16 +1167,7 @@ func mapHkeyElementsEqual( return nil } -func mapSingleElementsEqual( - expected *singleElements, - actual *singleElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapSingleElementsEqual(expected, actual *singleElements) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("singleElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1124,16 +1185,7 @@ func mapSingleElementsEqual( expectedElem := expected.elems[i] actualElem := actual.elems[i] - err := mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err := v.mapSingleElementEqual(expectedElem, actualElem) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapSingleElementEqual(). return err @@ -1143,16 +1195,7 @@ func mapSingleElementsEqual( return nil } -func mapElementEqual( - expected element, - actual element, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementEqual(expected, actual element) error { switch expectedElem := expected.(type) { case *singleElement: @@ -1160,64 +1203,27 @@ func mapElementEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementEqual(expectedElem, actualElem) case *inlineCollisionGroup: actualElem, ok := actual.(*inlineCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapElementsEqual( - expectedElem.elements, - actualElem.elements, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapElementsEqual(expectedElem.elements, actualElem.elements) case *externalCollisionGroup: actualElem, ok := actual.(*externalCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapExternalCollisionElementsEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - + return v.mapExternalCollisionElementsEqual(expectedElem, actualElem) } return nil } -func mapExternalCollisionElementsEqual( - expected *externalCollisionGroup, - actual *externalCollisionGroup, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapExternalCollisionElementsEqual(expected, actual *externalCollisionGroup) error { if expected.size != actual.size { return NewFatalError(fmt.Errorf("externalCollisionGroup size %d is wrong, want %d", actual.size, expected.size)) @@ -1227,97 +1233,93 @@ func mapExternalCollisionElementsEqual( return NewFatalError(fmt.Errorf("externalCollisionGroup id %d is wrong, want %d", actual.slabID, expected.slabID)) } + slab, err := getMapSlab(v.storage, expected.slabID) + if err != nil { + // Don't need 
to wrap error as external error because err is already categorized by getMapSlab().
+		return err
+	}
+
 	// Compare external collision slab
-	err := validMapSlabSerialization(
-		storage,
-		expected.slabID,
-		cborDecMode,
-		cborEncMode,
-		decodeStorable,
-		decodeTypeInfo,
-		compare,
-	)
+	err = v.verifyMapSlab(slab)
 	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization().
+		// Don't need to wrap error as external error because err is already categorized by verifyMapSlab().
 		return err
 	}

 	return nil
 }

-func mapSingleElementEqual(
-	expected *singleElement,
-	actual *singleElement,
-	storage SlabStorage,
-	cborDecMode cbor.DecMode,
-	cborEncMode cbor.EncMode,
-	decodeStorable StorableDecoder,
-	decodeTypeInfo TypeInfoDecoder,
-	compare StorableComparator,
-) error {
+func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleElement) error {

 	if expected.size != actual.size {
 		return NewFatalError(fmt.Errorf("singleElement size %d is wrong, want %d", actual.size, expected.size))
 	}

-	if !compare(expected.key, actual.key) {
+	if !v.compare(expected.key, actual.key) {
 		return NewFatalError(fmt.Errorf("singleElement key %v is wrong, want %v", actual.key, expected.key))
 	}

 	// Compare key stored in a separate slab
 	if idStorable, ok := expected.key.(SlabIDStorable); ok {

-		v, err := idStorable.StoredValue(storage)
+		value, err := idStorable.StoredValue(v.storage)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue().
 			return err
 		}

-		err = ValidValueSerialization(
-			v,
-			cborDecMode,
-			cborEncMode,
-			decodeStorable,
-			decodeTypeInfo,
-			compare,
-		)
+		err = v.verifyValue(value)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ValidValueSerialization().
+			// Don't need to wrap error as external error because err is already categorized by verifyValue().
 			return err
 		}
 	}

-	if !compare(expected.value, actual.value) {
-		return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
-	}
-
-	// Compare value stored in a separate slab
-	if idStorable, ok := expected.value.(SlabIDStorable); ok {
+	// Compare nested element
+	switch ee := expected.value.(type) {
+	case SlabIDStorable: // Compare not-inlined element
+		if !v.compare(expected.value, actual.value) {
+			return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
+		}

-		v, err := idStorable.StoredValue(storage)
+		value, err := ee.StoredValue(v.storage)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue().
 			return err
 		}

-		err = ValidValueSerialization(
-			v,
-			cborDecMode,
-			cborEncMode,
-			decodeStorable,
-			decodeTypeInfo,
-			compare,
-		)
+		err = v.verifyValue(value)
 		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ValidValueSerialization().
+			// Don't need to wrap error as external error because err is already categorized by verifyValue().
 			return err
 		}
+
+	case *ArrayDataSlab: // Compare inlined array element
+		ae, ok := actual.value.(*ArrayDataSlab)
+		if !ok {
+			return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", actual.value))
+		}
+
+		return v.arrayDataSlabEqual(ee, ae)
+
+	case *MapDataSlab: // Compare inlined map element
+		ae, ok := actual.value.(*MapDataSlab)
+		if !ok {
+			return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", actual.value))
+		}
+
+		return v.mapDataSlabEqual(ee, ae)
+
+	default:
+		if !v.compare(expected.value, actual.value) {
+			return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value))
+		}
 	}

 	return nil
 }

-func mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error {
+func (v *serializationVerifier) mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error {

 	// Compare extra data
 	err := mapExtraDataEqual(expected.extraData, actual.extraData)
@@ -1355,3 +1357,66 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error {

 	return nil
 }
+
+// verifyMapValueID verifies map ValueID is always the same as
+// root slab's SlabID independent of map's inlined status.
+func verifyMapValueID(m *OrderedMap) error {
+	rootSlabID := m.root.Header().slabID
+
+	vid := m.ValueID()
+
+	if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect first %d bytes of map value ID as %v, got %v",
+				slabAddressSize,
+				rootSlabID.address[:],
+				vid[:slabAddressSize]))
+	}
+
+	if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect second %d bytes of map value ID as %v, got %v",
+				slabIndexSize,
+				rootSlabID.index[:],
+				vid[slabAddressSize:]))
+	}
+
+	return nil
+}
+
+// verifyMapSlabID verifies map SlabID is either empty for inlined map, or
+// same as root slab's SlabID for not-inlined map.
+func verifyMapSlabID(m *OrderedMap) error {
+	sid := m.SlabID()
+
+	if m.Inlined() {
+		if sid != SlabIDUndefined {
+			return NewFatalError(
+				fmt.Errorf(
+					"expect empty slab ID for inlined map, got %v",
+					sid))
+		}
+		return nil
+	}
+
+	rootSlabID := m.root.Header().slabID
+
+	if sid == SlabIDUndefined {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect non-empty slab ID for not-inlined map, got %v",
+				sid))
+	}
+
+	if sid != rootSlabID {
+		return NewFatalError(
+			fmt.Errorf(
+				"expect map slab ID same as root slab's slab ID %s, got %s",
+				rootSlabID,
+				sid))
+	}
+
+	return nil
+}
diff --git a/map_test.go b/map_test.go
index 6ccf380b..30d25ccb 100644
--- a/map_test.go
+++ b/map_test.go
@@ -89,19 +89,27 @@ func (h *errorDigesterBuilder) Digest(_ HashInputProvider, _ Value) (Digester, e
 	return nil, h.err
 }

-func verifyEmptyMap(
+func testEmptyMapV0(
 	t *testing.T,
 	storage *PersistentSlabStorage,
 	typeInfo TypeInfo,
 	address Address,
 	m *OrderedMap,
 ) {
-	verifyMap(t, storage, typeInfo, address, m, nil, nil, false)
+	testMapV0(t, storage, typeInfo, address, m, nil, nil, false)
 }

-// verifyMap verifies map elements and validates serialization and in-memory slab tree.
-// It also verifies elements ordering if sortedKeys is not nil.
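Taken together, verifyMapValueID and verifyMapSlabID above pin down the identity model for inlinable maps: the ValueID is permanent (always derived from the root slab's address and index), while the SlabID is surrendered when the map is inlined. A minimal sketch of the byte-level relationship, using a hypothetical helper name (the package may decompose this differently):

```go
// valueIDFromSlabID illustrates the invariant checked by verifyMapValueID:
// a ValueID is the slab address (8 bytes) followed by the slab index (8 bytes).
func valueIDFromSlabID(id SlabID) ValueID {
	var vid ValueID
	copy(vid[:slabAddressSize], id.address[:]) // first 8 bytes: address
	copy(vid[slabAddressSize:], id.index[:])   // last 8 bytes: index
	return vid
}
```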
-func verifyMap( +func testEmptyMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, +) { + testMap(t, storage, typeInfo, address, m, nil, nil, false) +} + +func testMapV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -110,49 +118,78 @@ func verifyMap( keyValues map[Value]Value, sortedKeys []Value, hasNestedArrayMapElement bool, +) { + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, false) +} + +func testMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + keyValues mapValue, + sortedKeys []Value, + hasNestedArrayMapElement bool, +) { + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, true) +} + +// _testMap verifies map elements and validates serialization and in-memory slab tree. +// It also verifies elements ordering if sortedKeys is not nil. +func _testMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + expectedKeyValues map[Value]Value, + sortedKeys []Value, + hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, m.Type())) require.Equal(t, address, m.Address()) - require.Equal(t, uint64(len(keyValues)), m.Count()) + require.Equal(t, uint64(len(expectedKeyValues)), m.Count()) var err error // Verify map elements - for k, v := range keyValues { - e, err := m.Get(compare, hashInputProvider, k) + for k, expected := range expectedKeyValues { + actual, err := m.Get(compare, hashInputProvider, k) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify map elements ordering if len(sortedKeys) > 0 { - require.Equal(t, len(keyValues), len(sortedKeys)) + require.Equal(t, len(expectedKeyValues), len(sortedKeys)) i := 0 - err = m.Iterate(func(k, v Value) (bool, error) { + err = m.IterateReadOnly(func(k, v Value) (bool, error) { expectedKey := sortedKeys[i] - expectedValue := keyValues[expectedKey] + expectedValue := expectedKeyValues[expectedKey] - valueEqual(t, typeInfoComparator, expectedKey, k) - valueEqual(t, typeInfoComparator, expectedValue, v) + valueEqual(t, expectedKey, k) + valueEqual(t, expectedValue, v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(keyValues), i) + require.Equal(t, len(expectedKeyValues), i) } // Verify in-memory slabs - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintMap(m) } require.NoError(t, err) // Verify slab serializations - err = ValidMapSerialization( + err = VerifyMapSerialization( m, storage.cborDecMode, storage.cborEncMode, @@ -187,7 +224,7 @@ func verifyMap( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(keyValues) == 0 { + if len(expectedKeyValues) == 0 { // Verify slab count for empty map require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -274,7 +311,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys", func(t *testing.T) { @@ -321,12 +358,12 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := 
existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("random key and value", func(t *testing.T) { @@ -361,7 +398,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("unique keys with hash collision", func(t *testing.T) { @@ -410,7 +447,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys with hash collision", func(t *testing.T) { @@ -469,12 +506,12 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -511,7 +548,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision", func(t *testing.T) { @@ -554,7 +591,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision group", func(t *testing.T) { @@ -597,7 +634,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -682,11 +719,11 @@ func testMapRemoveElement(t *testing.T, m *OrderedMap, k Value, expectedV Value) removedKey, err := removedKeyStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, expectedV, removedValue) + valueEqual(t, expectedV, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = m.Storage.Remove(SlabID(id)) @@ -763,7 +800,7 @@ func TestMapRemove(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) count := len(tc.keyValues) @@ -779,7 +816,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) } @@ -864,7 +901,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + 
testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -878,7 +915,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("collision with data root", func(t *testing.T) { @@ -947,7 +984,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -961,7 +998,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("no collision key not found", func(t *testing.T) { @@ -997,7 +1034,7 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision key not found", func(t *testing.T) { @@ -1041,12 +1078,54 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } func TestMapIterate(t *testing.T) { + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // Iterate key value pairs + i := 0 + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate keys + i = 0 + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate values + i = 0 + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + t.Run("no collision", func(t *testing.T) { const ( mapSize = 2048 @@ -1086,9 +1165,9 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i = uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, typeInfoComparator, keyValues[k], v) + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1098,8 +1177,8 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) @@ -1109,9 +1188,9 @@ func TestMapIterate(t *testing.T) { // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, 
err error) { + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { k := sortedKeys[i] - valueEqual(t, typeInfoComparator, keyValues[k], v) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1119,7 +1198,7 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(mapSize), i) - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) t.Run("collision", func(t *testing.T) { @@ -1163,51 +1242,167 @@ func TestMapIterate(t *testing.T) { } } - t.Log("created map of unique key value pairs") - // Sort keys by digest sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - t.Log("sorted keys by digests") - // Iterate key value pairs i := uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, typeInfoComparator, keyValues[k], v) + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated key value pairs") - // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated keys") - // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], v) + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { + valueEqual(t, keyValues[sortedKeys[i]], v) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated values") + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutation", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + elementSize := digestSize + singleElementPrefixSize + Uint64Value(0).ByteSize() + NewStringValue(randStr(r, valueStringSize)).ByteSize() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, 0, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys = append(sortedKeys, k) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest 
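The mutation subtest here relies on every child map staying inlined, so each inserted (Uint64 key, 16-byte string value) entry grows the parent's root data slab by exactly elementSize bytes: one digest, one single-element prefix, plus the encoded key and value. A sketch of the bookkeeping the iteration callbacks assert, with a hypothetical helper (digestSize and singleElementPrefixSize are the package constants used in elementSize above):

```go
// expectedRootSlabSize restates the size invariant checked inside the
// iteration callbacks: after mutating n inlined child maps, the parent's
// root slab size moves by exactly n * elementSize from the measured baseline.
func expectedRootSlabSize(baseline, n, elementSize uint32) uint32 {
	return baseline + n*elementSize // the removal loop subtracts instead
}
```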
+ sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + sizeBeforeMutation := m.root.Header().size + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + i++ + + require.Equal(t, m.root.Header().size, sizeBeforeMutation+uint32(i)*elementSize) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + sizeAfterInsertionMutation := m.root.Header().size + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + i++ + + require.Equal(t, m.root.Header().size, sizeAfterInsertionMutation-uint32(i)*elementSize) + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + for k := range keyValues { + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + delete(expectedChildMapValues, Uint64Value(0)) + } - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -1273,7 +1468,7 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) stats, err := GetMapStats(m) require.NoError(t, err) @@ -1286,11 +1481,11 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + valueEqual(t, v, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1303,7 +1498,7 @@ func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLeve } } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) } func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { @@ -1345,7 +1540,7 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) 
require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) // Remove all elements for k, v := range keyValues { @@ -1354,11 +1549,11 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + valueEqual(t, v, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1371,7 +1566,7 @@ func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) } } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) } func TestMapHashCollision(t *testing.T) { @@ -1455,7 +1650,7 @@ func testMapSetRemoveRandomValues( existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldv, existingValue) + valueEqual(t, oldv, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = storage.Remove(SlabID(id)) @@ -1478,11 +1673,11 @@ func testMapSetRemoveRandomValues( removedKey, err := removedKeyStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[k], removedValue) + valueEqual(t, keyValues[k], removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err := storage.Remove(SlabID(id)) @@ -1520,7 +1715,7 @@ func TestMapSetRemoveRandomValues(t *testing.T) { m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) } func TestMapDecodeV0(t *testing.T) { @@ -1577,7 +1772,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, NewDefaultDigesterBuilder()) require.NoError(t, err) - verifyEmptyMap(t, storage, typeInfo, address, decodedMap) + testEmptyMapV0(t, storage, typeInfo, address, decodedMap) }) t.Run("dataslab as root", func(t *testing.T) { @@ -1650,7 +1845,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("has pointer no collision", func(t *testing.T) { @@ -1684,16 +1879,16 @@ func TestMapDecodeV0(t *testing.T) { id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} nestedSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} - nested, err := NewArray(storage, address, typeInfo2) - nested.root.SetSlabID(nestedSlabID) + childArray, err := NewArray(storage, address, typeInfo2) + childArray.root.SetSlabID(nestedSlabID) require.NoError(t, err) - err = nested.Append(Uint64Value(0)) + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + + keyValues[k] = arrayValue{Uint64Value(0)} digests := 
[]Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -1863,7 +2058,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 1 level", func(t *testing.T) { @@ -2037,7 +2232,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("inline collision 2 levels", func(t *testing.T) { @@ -2261,7 +2456,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) t.Run("external collision", func(t *testing.T) { @@ -2480,7 +2675,7 @@ func TestMapDecodeV0(t *testing.T) { decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) }) } @@ -2547,7 +2742,7 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, NewDefaultDigesterBuilder()) require.NoError(t, err) - verifyEmptyMap(t, storage2, typeInfo, address, decodedMap) + testEmptyMap(t, storage2, typeInfo, address, decodedMap) }) t.Run("dataslab as root", func(t *testing.T) { @@ -2636,10 +2831,10 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("has pointer no collision", func(t *testing.T) { + t.Run("has inlined array", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) @@ -2671,18 +2866,19 @@ func TestMapEncodeDecode(t *testing.T) { r++ } - // Create nested array + // Create child array typeInfo2 := testTypeInfo{43} - nested, err := NewArray(storage, address, typeInfo2) + childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - err = nested.Append(Uint64Value(0)) + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + v := childArray + + keyValues[k] = arrayValue{Uint64Value(0)} digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) @@ -2697,7 +2893,6 @@ func TestMapEncodeDecode(t *testing.T) { id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ @@ -2731,7 +2926,7 @@ func TestMapEncodeDecode(t *testing.T) { // child header 2 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - 0x00, 0xf2, + 0x00, 0xf3, }, // data slab @@ -2786,9 +2981,16 @@ func TestMapEncodeDecode(t *testing.T) { // data slab id3: { // version - 0x10, - // flag: has pointer + map data - 0x48, + 0x11, + // flag: has inlined slab + map data + 0x08, + + // inlined slab extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, // the following encoded data is valid CBOR @@ -2827,23 +3029,7 @@ func TestMapEncodeDecode(t *testing.T) { // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] 0x82, 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - // array data slab - id4: { - // version - 0x10, - // flag: root + array data - 0x80, - // extra data (CBOR encoded array of 1 elements) - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, }, } @@ -2855,15 +3041,15 @@ func TestMapEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], stored[id1]) require.Equal(t, expected[id2], stored[id2]) require.Equal(t, expected[id3], stored[id3]) - require.Equal(t, expected[id4], stored[id4]) // Verify slab size in header is correct. meta, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) require.Equal(t, 2, len(meta.childrenHeaders)) require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - // Need to add slabIDSize to encoded data slab here because empty slab ID is omitted during encoding. 
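The updated header-size assertion below reflects inlining: the child array's extra data (6 bytes in this test) is encoded inside the parent data slab but excluded from the slab header's size, while the empty (undefined) next-slab ID is counted in the header yet omitted from the stored bytes. A hedged restatement of that arithmetic, using the values from this test:

```go
// Restating the size check that follows:
// header size = stored length - inlined extra data + omitted slab ID.
storedLen := uint32(len(stored[id3]))
const inlinedExtraDataSize = 6 // bytes of hoisted inlined extra data
headerSize := storedLen - inlinedExtraDataSize + slabIDSize
_ = headerSize // expected to equal meta.childrenHeaders[1].size
```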
- require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + const inlinedExtraDataSize = 6 + require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, stored) @@ -2872,61 +3058,99 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("inline collision 1 level", func(t *testing.T) { - + t.Run("root data slab, inlined child map of same type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo := testTypeInfo{43} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 8 + const mapSize = 2 keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) - digests := []Digest{Digest(i % 4), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map metadata slab id1: { - // version - 0x10, + // version, has inlined slab + 0x11, // flag: root + map data 0x88, - // extra data (CBOR encoded array of 3 elements) + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // the following encoded data is valid CBOR // elements (array of 3 elements) @@ -2935,120 +3159,76 @@ func TestMapEncodeDecode(t *testing.T) { // level: 0 0x00, - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 2 elements) - 0x99, 0x00, 0x04, - - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // elements (array of 2 elements) // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 
0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) @@ -3059,61 +3239,107 @@ func TestMapEncodeDecode(t *testing.T) { decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("inline collision 2 levels", func(t *testing.T) { - + t.Run("root data slab, inlined child map of different type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 8 - keyValues := make(map[Value]Value) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) - digests := []Digest{Digest(i % 4), Digest(i % 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + var ti TypeInfo + if i%2 == 0 { + ti = childMapTypeInfo2 + } else { + ti = childMapTypeInfo1 + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map data slab id1: { - // version - 0x10, + // version, has inlined slab + 0x11, // flag: root + map data 0x88, - // extra data (CBOR encoded array of 3 elements) + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" + // type info 0x18, 0x2a, - // count: 8 - 0x08, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined map extra 
data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // the following encoded data is valid CBOR // elements (array of 3 elements) @@ -3122,170 +3348,329 @@ func TestMapEncodeDecode(t *testing.T) { // level: 0 0x00, - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 4 elements) - 0x99, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level 1 - 0x01, - - // hkeys (byte string of length 8 * 1) + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // elements (array of 1 elements) + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, - // inline collision group corresponding to hkey [0, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + }, + } - // level: 2 - 0x02, + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) - // hkeys (empty byte string) - 0x40, + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // Test new map from storage2 + decodedMap, err := 
NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
-				// level: 1
-				0x01,
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
-				// hkeys (byte string of length 8 * 1)
-				0x59, 0x00, 0x08,
-				// hkey: 1
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+	t.Run("root data slab, multiple levels of inlined child map of same type", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
-				// elements (array of 1 elements)
-				0x99, 0x00, 0x01,
+		childMapTypeInfo := testTypeInfo{43}
-				// inline collision group corresponding to hkey [1, 1]
-				// (tag number CBORTagInlineCollisionGroup)
-				0xd8, 0xfd,
-				// (tag content: array of 3 elements)
-				0x83,
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
-				// level: 2
-				0x02,
+		digesterBuilder := &mockDigesterBuilder{}
-				// hkeys (empty byte string)
-				0x40,
+		// Create map
+		parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
-				// elements (array of 2 elements)
-				// each element is encoded as CBOR array of 2 elements (key, value)
-				0x99, 0x00, 0x02,
-				// element: [uint64(1), uint64(2)]
-				0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
-				// element: [uint64(5), uint64(10)]
-				0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
+		const mapSize = 2
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
+		for i := uint64(0); i < mapSize; i++ {
+			// Create grand child map
+			gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+			require.NoError(t, err)
-				// inline collision group corresponding to hkey 2
-				// (tag number CBORTagInlineCollisionGroup)
-				0xd8, 0xfd,
-				// (tag content: array of 3 elements)
-				0x83,
+			gck := Uint64Value(i)
+			gcv := Uint64Value(i * 2)
-				// level: 1
+			// Insert element to grand child map
+			existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
-				// hkeys (byte string of length 8 * 1)
-				0x59, 0x00, 0x08,
-				// hkey: 0
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			// Create child map
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+			require.NoError(t, err)
-				// elements (array of 1 element)
-				0x99, 0x00, 0x01,
+			ck := Uint64Value(i)
-				// inline collision group corresponding to hkey [2, 0]
-				// (tag number CBORTagInlineCollisionGroup)
-				0xd8, 0xfd,
-				// (tag content: array of 3 elements)
-				0x83,
+			// Insert grand child map to child map
+			existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
-				// level: 2
-				0x02,
+			k := NewStringValue(string(r))
+			r++
-				// hkeys (empty byte string)
-				0x40,
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
-				// elements (array of 2 element)
-				0x99, 0x00, 0x02,
-				// element: [uint64(2), uint64(4)]
-				0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
-				// element: [uint64(6), uint64(12)]
-				0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
+			// Insert child map to parent map
+			existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
-				// inline collision group corresponding to hkey 3
-				// (tag number CBORTagInlineCollisionGroup)
-				0xd8, 0xfd,
-				// (tag content: array of 3 elements)
+			keyValues[k] = mapValue{ck: mapValue{gck: gcv}}
+		}
+
+		require.Equal(t, uint64(mapSize), parentMap.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+			id1: {
+				// version, has inlined slab
+				0x11,
+				// flag: root + map data
+				0x88,
+
+				// slab extra data
+				// CBOR encoded array of 3 elements
				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
-				// level: 1
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19,
+				// element 3
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
				0x01,
+				// seed
+				0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50,
-				// hkeys (byte string of length 8 * 1)
-				0x59, 0x00, 0x08,
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
-				// elements (array of 1 element)
-				0x99, 0x00, 0x01,
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
-				// inline collision group corresponding to hkey [3, 1]
-				// (tag number CBORTagInlineCollisionGroup)
-				0xd8, 0xfd,
-				// (tag content: array of 3 elements)
+				// element 0:
+				0x82,
+				// key: "a"
+				0x61, 0x61,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
-				// level: 2
-				0x02,
-
-				// hkeys (empty byte string)
-				0x40,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: inlined grand child map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x1,
+				// inlined map slab index
+				0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2,
+				// inlined grand child map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x0,
+				// value: 0
+				0xd8, 0xa4, 0x0,
-				// elements (array of 2 element)
-				0x99, 0x00, 0x02,
-				// element: [uint64(3), uint64(6)]
-				0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
-				// element: [uint64(7), uint64(14)]
-				0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+				// element 1:
+				0x82,
+				// key: "b"
+				0x61, 0x62,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x01,
+				// value: inlined grand child map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x3,
+				// inlined map slab index
+				0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4,
+				// inlined grand child map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x1,
+				// value: 2
+				0xd8, 0xa4, 0x2,
			},
		}

+		// Verify encoded data
		stored, err := storage.Encode()
		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
		require.Equal(t, expected[id1], stored[id1])
@@ -3296,63 +3681,149 @@ func TestMapEncodeDecode(t *testing.T) {
		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
		require.NoError(t, err)
-		verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
	})
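The expected bytes above follow a fixed layout for every inlined map value: tag number CBORTagInlinedMap (0xd8, 0xfb), then a 3-element CBOR array holding the extra data index, the 8-byte slab index, and the elements. A minimal sketch of that head encoding, using a hypothetical helper that is not part of this change:

    // appendInlinedMapHead writes the fixed-size head of an inlined map value,
    // matching the byte patterns shown in the expected data above.
    func appendInlinedMapHead(buf []byte, extraDataIndex uint8, slabIndex [8]byte) []byte {
        buf = append(buf, 0xd8, 0xfb)           // tag number CBORTagInlinedMap
        buf = append(buf, 0x83)                 // CBOR array head of 3 elements
        buf = append(buf, 0x18, extraDataIndex) // extra data index, fixed 2 bytes
        buf = append(buf, 0x48)                 // CBOR byte string head, 8 bytes
        return append(buf, slabIndex[:]...)     // slab index; the elements array follows
    }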
-	t.Run("external collision", func(t *testing.T) {
-
+	t.Run("root data slab, multiple levels of inlined child map of different type", func(t *testing.T) {
		SetThreshold(256)
		defer SetThreshold(1024)
+		childMapTypeInfo1 := testTypeInfo{43}
+		childMapTypeInfo2 := testTypeInfo{44}
+		gchildMapTypeInfo1 := testTypeInfo{45}
+		gchildMapTypeInfo2 := testTypeInfo{46}
+
		// Create and populate map in memory
		storage := newTestBasicStorage(t)

		digesterBuilder := &mockDigesterBuilder{}

		// Create map
-		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
		require.NoError(t, err)
-		const mapSize = 20
-		keyValues := make(map[Value]Value)
+		const mapSize = 2
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
		for i := uint64(0); i < mapSize; i++ {
-			k := Uint64Value(i)
-			v := Uint64Value(i * 2)
+			var gti TypeInfo
+			if i%2 == 0 {
+				gti = gchildMapTypeInfo2
+			} else {
+				gti = gchildMapTypeInfo1
+			}
-			digests := []Digest{Digest(i % 2), Digest(i)}
-			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+			// Create grand child map
+			gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gti)
-			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			gck := Uint64Value(i)
+			gcv := Uint64Value(i * 2)
+
+			// Insert element to grand child map
+			existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv)
			require.NoError(t, err)
			require.Nil(t, existingStorable)
-			keyValues[k] = v
+			var cti TypeInfo
+			if i%2 == 0 {
+				cti = childMapTypeInfo2
+			} else {
+				cti = childMapTypeInfo1
+			}
+
+			// Create child map
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), cti)
+			require.NoError(t, err)
+
+			ck := Uint64Value(i)
+
+			// Insert grand child map to child map
+			existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			k := NewStringValue(string(r))
+			r++
+
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+			// Insert child map to parent map
+			existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = mapValue{ck: mapValue{gck: gcv}}
		}
-		require.Equal(t, uint64(mapSize), m.Count())
+		require.Equal(t, uint64(mapSize), parentMap.Count())
		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
-		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
-		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}

		// Expected serialized slab data with slab id
		expected := map[SlabID][]byte{
-
-			// map data slab
			id1: {
-				// version
-				0x10,
-				// flag: root + has pointer + map data
-				0xc8,
-				// extra data (CBOR encoded array of 3 elements)
+				// version 1, flag: has inlined slab
+				0x11,
+				// flag: root + map data
+				0x88,
+
+				// slab extra data
+				// CBOR encoded array of 3 elements
				0x83,
-				// type info: "map"
-				0x18, 0x2A,
-				// count: 10
-				0x14,
+				// type info
+				0x18, 0x2a,
+				// count: 2
+				0x02,
				// seed
				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info: 44
+				0x18, 0x2c,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info: 46
+				0x18, 0x2e,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info: 43
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19,
+
+				// element 3
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info: 45
+				0x18, 0x2d,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50,
+				// the following encoded data is valid CBOR

				// elements (array of 3 elements)
@@ -3363,3511 +3834,9821 @@ func TestMapEncodeDecode(t *testing.T) {
				// hkeys (byte string of length 8 * 2)
				0x59, 0x00, 0x10,
-				// hkey: 0
+				// hkey: 0
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
				0x99, 0x00, 0x02,
-				// external collision group corresponding to hkey 0
-				// (tag number CBORTagExternalCollisionGroup)
-				0xd8, 0xfe,
-				// (tag content: slab id)
-				0xd8, 0xff, 0x50,
-				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// element 0:
+				0x82,
+				// key: "a"
+				0x61, 0x61,
+				// value: inlined child map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// inlined child map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: inlined grand child map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x1,
+				// inlined map slab index
+				0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2,
+				// inlined grand child map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: 0
+				0xd8, 0xa4, 0x00,
-				// external collision group corresponding to hkey 1
-				// (tag number CBORTagExternalCollisionGroup)
-				0xd8, 0xfe,
-				// (tag content: slab id)
-				0xd8, 0xff, 0x50,
-				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// element 1:
+				0x82,
+				// key: "b"
+				0x61, 0x62,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x01,
+				// value: inlined grand child map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x3,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// inlined grand child map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x1,
+				// value: 2
+				0xd8, 0xa4, 0x2,
			},
+		}
-			// external collision group
-			id2: {
+		// Verify encoded data
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
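Note that the inlined slabs above carry only their 8-byte slab index; the address half of the slab ID is inherited from the parent slab, which is why no 8-byte address is repeated per inlined value. A sketch of that reconstruction, mirroring how the tests build SlabID values (illustrative only):

    // inlinedSlabID rebuilds a full slab ID for an inlined slab from the
    // parent slab's address and the 8-byte index stored in the encoding.
    func inlinedSlabID(parent SlabID, index SlabIndex) SlabID {
        return SlabID{address: parent.address, index: index}
    }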
+	t.Run("root metadata slab, inlined child map of same type", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		childMapTypeInfo := testTypeInfo{43}
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 8
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
+		for i := uint64(0); i < mapSize; i++ {
+
+			// Create child map
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+			require.NoError(t, err)
+
+			ck := Uint64Value(i)
+			cv := Uint64Value(i * 2)
+
+			// Insert element to child map
+			existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			k := NewStringValue(string(r))
+			r++
+
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+			// Insert child map to parent map
+			existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = mapValue{ck: cv}
+		}
+
+		require.Equal(t, uint64(mapSize), parentMap.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9
+		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+			id1: {
				// version
				0x10,
-				// flag: any size + collision group
-				0x2b,
-
-				// the following encoded data is valid CBOR
+				// flag: root + map metadata
+				0x89,
-				// elements (array of 3 elements)
+				// slab extra data
+				// CBOR encoded array of 3 elements
				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 8
+				0x08,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
-				// level: 1
-				0x01,
+				// child shared address
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
-				// hkeys (byte string of length 8 * 10)
-				0x59, 0x00, 0x50,
-				// hkey: 0
+				// child header count
+				0x00, 0x02,
+				// child header 1 (slab id, first key, size)
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-				// hkey: 2
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
-				// hkey: 4
+				0x00, 0xda,
+				// child header 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
-				// hkey: 6
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
-				// hkey: 8
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
-				// hkey: 10
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
-				// hkey: 12
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
-				// hkey: 14
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
-				// hkey: 16
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
-				// hkey: 18
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
-
-				// elements (array of 10 elements)
-				// each element is encoded as CBOR array of 2 elements (key, value)
-				0x99, 0x00, 0x0a,
-				// element: [uint64(0), uint64(0)]
-				0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
-				// element: [uint64(2), uint64(4)]
-				0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
-				// element: [uint64(4), uint64(8)]
-				0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
-				// element: [uint64(6), uint64(12)]
-				0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
-				// element: [uint64(8), uint64(16)]
-				0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10,
-				// element: [uint64(10), uint64(20)]
-				0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14,
-				// element: [uint64(12), uint64(24)]
-				0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18,
-				// element: [uint64(14), uint64(28)]
-				0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c,
-				// element: [uint64(16), uint64(32)]
-				0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20,
-				// element: [uint64(18), uint64(36)]
-				0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24,
+				0x00, 0xda,
			},
+			id2: {
+				// version, flag: has inlined slab, has next slab ID
+				0x13,
+				// flag: map data
+				0x08,
-			// external collision group
-			id3: {
-				// version
-				0x10,
-				// flag: any size + collision group
-				0x2b,
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50,
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19,
+
+				// next slab ID
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,

				// the following encoded data is valid CBOR

				// elements (array of 3 elements)
				0x83,
-				// level: 1
-				0x01,
+				// level: 0
+				0x00,
-				// hkeys (byte string of length 8 * 10)
-				0x59, 0x00, 0x50,
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				// hkey: 1
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
				// hkey: 3
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
-				// hkey: 5
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
-				// hkey: 7
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
-				// hkey: 9
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
-				// hkey: 11
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
-				// hkey: 13
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d,
-				// hkey: 15
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
-				// hkey: 17
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
-				// hkey: 19
-				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
-				// elements (array of 10 elements)
+				// elements (array of 4 elements)
				// each element is encoded as CBOR array of 2 elements (key, value)
-				0x99, 0x00, 0x0a,
-				// element: [uint64(1), uint64(2)]
-				0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
-				// element: [uint64(3), uint64(6)]
-				0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
-				// element: [uint64(5), uint64(10)]
-				0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
-				// element: [uint64(7), uint64(14)]
-				0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
-				// element: [uint64(9), uint64(18)]
-				0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12,
-				// element: [uint64(11), uint64(22)]
-				0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16,
-				// element: [uint64(13), uint64(26)]
-				0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a,
-				// element: [uint64(15), uint64(30)]
-				0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e,
-				// element: [uint64(17), uint64(34)]
-				0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22,
-				// element: [uint64(19), uint64(38)]
-				0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26,
-			},
-		}
+				0x99, 0x00, 0x04,
-		stored, err := storage.Encode()
-		require.NoError(t, err)
-		require.Equal(t, len(expected), len(stored))
-		require.Equal(t, expected[id1], stored[id1])
-		require.Equal(t, expected[id2], stored[id2])
-		require.Equal(t, expected[id3], stored[id3])
+				// element 0:
+				0x82,
+				// key: "a"
+				0x61, 0x61,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: 0
+				0xd8, 0xa4, 0x00,
-		// Decode data to new storage
-		storage2 := newTestPersistentStorageWithData(t, stored)
+				// element 1:
+				0x82,
+				// key: "b"
+				0x61, 0x62,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x01,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x01,
+				// value: 2
+				0xd8, 0xa4, 0x02,
-		// Test new map from storage2
+				// element 2:
+				0x82,
+				// key: "c"
+				0x61, 0x63,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 2
+				0xd8, 0xa4, 0x02,
+				// value: 4
+				0xd8, 0xa4, 0x04,
+
+				// element 3:
+				0x82,
+				// key: "d"
+				0x61, 0x64,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x03,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 3
+				0xd8, 0xa4, 0x03,
+				// value: 6
+				0xd8, 0xa4, 0x06,
+			},
+
+			id3: {
+				// version, flag: has inlined slab
+				0x11,
+				// flag: map data
+				0x08,
+
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64,
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9,
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4,
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 4
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// hkey: 5
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// hkey: 6
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// hkey: 7
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+
+				// element 0:
+				0x82,
+				// key: "e"
+				0x61, 0x65,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 4
+				0xd8, 0xa4, 0x04,
+				// value: 8
+				0xd8, 0xa4, 0x08,
+
+				// element 1:
+				0x82,
+				// key: "f"
+				0x61, 0x66,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x01,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 5
+				0xd8, 0xa4, 0x05,
+				// value: 10
+				0xd8, 0xa4, 0x0a,
+
+				// element 2:
+				0x82,
+				// key: "g"
+				0x61, 0x67,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 6
+				0xd8, 0xa4, 0x06,
+				// value: 12
+				0xd8, 0xa4, 0x0c,
+
+				// element 3:
+				0x82,
+				// key: "h"
+				0x61, 0x68,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x03,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 7
+				0xd8, 0xa4, 0x07,
+				// value: 14
+				0xd8, 0xa4, 0x0e,
+			},
+		}
+
+		// Verify encoded data
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+		require.Equal(t, expected[id2], stored[id2])
+		require.Equal(t, expected[id3], stored[id3])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
		require.NoError(t, err)
-		verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
	})
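In the root metadata slab above, the children share one 8-byte address ("child shared address"), so each child header needs only 18 bytes after it. A sketch of the decoded form, with hypothetical field names:

    // childHeader mirrors one 18-byte child header from the expected bytes:
    // 8-byte slab index, 8-byte first key, and 2-byte size (0x00da = 218).
    type childHeader struct {
        index    SlabIndex // e.g. {0, 0, 0, 0, 0, 0, 0, 0x0a}
        firstKey Digest    // e.g. 0 for the first child, 4 for the second
        size     uint16    // e.g. 0x00da
    }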
-	t.Run("pointer", func(t *testing.T) {
+	t.Run("root metadata slab, inlined child map of different type", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		childMapTypeInfo1 := testTypeInfo{43}
+		childMapTypeInfo2 := testTypeInfo{44}
+		childMapTypeInfo3 := testTypeInfo{45}
+		childMapTypeInfo4 := testTypeInfo{46}
+
		// Create and populate map in memory
		storage := newTestBasicStorage(t)

		digesterBuilder := &mockDigesterBuilder{}

		// Create map
-		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
		require.NoError(t, err)
-		k := Uint64Value(0)
-		v := Uint64Value(0)
+		const mapSize = 8
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
+		for i := uint64(0); i < mapSize; i++ {
+
+			var ti TypeInfo
+			switch i % 4 {
+			case 0:
+				ti = childMapTypeInfo1
+			case 1:
+				ti = childMapTypeInfo2
+			case 2:
+				ti = childMapTypeInfo3
+			case 3:
+				ti = childMapTypeInfo4
+			}
+
+			// Create child map
+			childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti)
+			require.NoError(t, err)
+
+			ck := Uint64Value(i)
+			cv := Uint64Value(i * 2)
+
+			// Insert element to child map
+			existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			k := NewStringValue(string(r))
+			r++
+
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+			// Insert child map to parent map
+			existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = mapValue{ck: cv}
+		}
+
+		require.Equal(t, uint64(mapSize), parentMap.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9
+		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+			id1: {
+				// version
+				0x10,
+				// flag: root + map metadata
+				0x89,
+
+				// slab extra data
+				// CBOR encoded array of 3 elements
+				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 8
+				0x08,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// child shared address
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+
+				// child header count
+				0x00, 0x02,
+				// child header 1 (slab id, first key, size)
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0xda,
+				// child header 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				0x00, 0xda,
+			},
+			id2: {
+				// version, flag: has inlined slab, has next slab ID
+				0x13,
+				// flag: map data
+				0x08,
+
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2c,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2d,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50,
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2e,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19,
+
+				// next slab ID
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+
+				// element 0:
+				0x82,
+				// key: "a"
+				0x61, 0x61,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: 0
+				0xd8, 0xa4, 0x00,
+
+				// element 1:
+				0x82,
+				// key: "b"
+				0x61, 0x62,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x01,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x01,
+				// value: 2
+				0xd8, 0xa4, 0x02,
+
+				// element 2:
+				0x82,
+				// key: "c"
+				0x61, 0x63,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 2
+				0xd8, 0xa4, 0x02,
+				// value: 4
+				0xd8, 0xa4, 0x04,
+
+				// element 3:
+				0x82,
+				// key: "d"
+				0x61, 0x64,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x03,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 3
+				0xd8, 0xa4, 0x03,
+				// value: 6
+				0xd8, 0xa4, 0x06,
+			},
+
+			id3: {
+				// version, flag: has inlined slab
+				0x11,
+				// flag: map data
+				0x08,
+
+				// 4 inlined slab extra data
+				0x84,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64,
+				// element 1
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2c,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9,
+				// element 2
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2d,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4,
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2e,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 4
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// hkey: 5
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// hkey: 6
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// hkey: 7
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+
+				// element 0:
+				0x82,
+				// key: "e"
+				0x61, 0x65,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 4
+				0xd8, 0xa4, 0x04,
+				// value: 8
+				0xd8, 0xa4, 0x08,
+
+				// element 1:
+				0x82,
+				// key: "f"
+				0x61, 0x66,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 1
+				0x18, 0x01,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 5
+				0xd8, 0xa4, 0x05,
+				// value: 10
+				0xd8, 0xa4, 0x0a,
+
+				// element 2:
+				0x82,
+				// key: "g"
+				0x61, 0x67,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 2
+				0x18, 0x02,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 6
+				0xd8, 0xa4, 0x06,
+				// value: 12
+				0xd8, 0xa4, 0x0c,
+
+				// element 3:
+				0x82,
+				// key: "h"
+				0x61, 0x68,
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 3
+				0x18, 0x03,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 7
+				0xd8, 0xa4, 0x07,
+				// value: 14
+				0xd8, 0xa4, 0x0e,
+			},
+		}
+
+		// Verify encoded data
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+		require.Equal(t, expected[id2], stored[id2])
+		require.Equal(t, expected[id3], stored[id3])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
+
+	t.Run("inline collision 1 level", func(t *testing.T) {
+
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 8
+		keyValues := make(map[Value]Value, mapSize)
+		for i := uint64(0); i < mapSize; i++ {
+			k := Uint64Value(i)
+			v := Uint64Value(i * 2)
+
+			digests := []Digest{Digest(i % 4), Digest(i)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = v
+		}
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// map data slab
+			id1: {
+				// version
+				0x10,
+				// flag: root + map data
+				0x88,
+				// extra data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info: "map"
+				0x18, 0x2A,
+				// count: 8
+				0x08,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// elements (array of 4 elements)
+				0x99, 0x00, 0x04,
+
+				// inline collision group corresponding to hkey 0
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 4
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(0), uint64(0)]
+				0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
+				// element: [uint64(4), uint64(8)]
+				0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
+
+				// inline collision group corresponding to hkey 1
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 5
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(1), uint64(2)]
+				0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
+				// element: [uint64(5), uint64(10)]
+				0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
+
+				// inline collision group corresponding to hkey 2
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 6
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(2), uint64(4)]
+				0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
+				// element: [uint64(6), uint64(12)]
+				0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
+
+				// inline collision group corresponding to hkey 3
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// hkey: 7
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(3), uint64(6)]
+				0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
+				// element: [uint64(7), uint64(14)]
+				0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+			},
+		}
+
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
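Each collision group above is introduced by tag number CBORTagInlineCollisionGroup (0xd8, 0xfd), whose content is another [level, hkeys, elements] triple one digest level deeper. A sketch of a check for that tag (illustrative only, not the library's decoder):

    // isInlineCollisionGroup reports whether encoded element bytes start with
    // tag number CBORTagInlineCollisionGroup (0xd8, 0xfd).
    func isInlineCollisionGroup(b []byte) bool {
        return len(b) >= 2 && b[0] == 0xd8 && b[1] == 0xfd
    }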
+	t.Run("inline collision 2 levels", func(t *testing.T) {
+
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 8
+		keyValues := make(map[Value]Value)
+		for i := uint64(0); i < mapSize; i++ {
+			k := Uint64Value(i)
+			v := Uint64Value(i * 2)
+
+			digests := []Digest{Digest(i % 4), Digest(i % 2)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = v
+		}
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// map data slab
+			id1: {
+				// version
+				0x10,
+				// flag: root + map data
+				0x88,
+				// extra data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info: "map"
+				0x18, 0x2a,
+				// count: 8
+				0x08,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// elements (array of 4 elements)
+				0x99, 0x00, 0x04,
+
+				// inline collision group corresponding to hkey 0
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 1)
+				0x59, 0x00, 0x08,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+				// elements (array of 1 element)
+				0x99, 0x00, 0x01,
+
+				// inline collision group corresponding to hkey [0, 0]
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 2
+				0x02,
+
+				// hkeys (empty byte string)
+				0x40,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(0), uint64(0)]
+				0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
+				// element: [uint64(4), uint64(8)]
+				0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
+
+				// inline collision group corresponding to hkey 1
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 1)
+				0x59, 0x00, 0x08,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+				// elements (array of 1 element)
+				0x99, 0x00, 0x01,
+
+				// inline collision group corresponding to hkey [1, 1]
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 2
+				0x02,
+
+				// hkeys (empty byte string)
+				0x40,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [uint64(1), uint64(2)]
+				0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
+				// element: [uint64(5), uint64(10)]
+				0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
+
+				// inline collision group corresponding to hkey 2
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 1)
+				0x59, 0x00, 0x08,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+				// elements (array of 1 element)
+				0x99, 0x00, 0x01,
+
+				// inline collision group corresponding to hkey [2, 0]
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 2
+				0x02,
+
+				// hkeys (empty byte string)
+				0x40,
+
+				// elements (array of 2 elements)
+				0x99, 0x00, 0x02,
+				// element: [uint64(2), uint64(4)]
+				0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
+				// element: [uint64(6), uint64(12)]
+				0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
+
+				// inline collision group corresponding to hkey 3
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 1)
+				0x59, 0x00, 0x08,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+				// elements (array of 1 element)
+				0x99, 0x00, 0x01,
+
+				// inline collision group corresponding to hkey [3, 1]
+				// (tag number CBORTagInlineCollisionGroup)
+				0xd8, 0xfd,
+				// (tag content: array of 3 elements)
+				0x83,
+
+				// level: 2
+				0x02,
+
+				// hkeys (empty byte string)
+				0x40,
+
+				// elements (array of 2 elements)
+				0x99, 0x00, 0x02,
+				// element: [uint64(3), uint64(6)]
+				0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
+				// element: [uint64(7), uint64(14)]
+				0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+			},
+		}
+
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
+
+	t.Run("external collision", func(t *testing.T) {
+
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 20
+		keyValues := make(map[Value]Value)
+		for i := uint64(0); i < mapSize; i++ {
+			k := Uint64Value(i)
+			v := Uint64Value(i * 2)
+
+			digests := []Digest{Digest(i % 2), Digest(i)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			keyValues[k] = v
+		}
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// map data slab
+			id1: {
+				// version
+				0x10,
+				// flag: root + has pointer + map data
+				0xc8,
+				// extra data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info: "map"
+				0x18, 0x2A,
+				// count: 20
+				0x14,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+				// elements (array of 2 elements)
+				0x99, 0x00, 0x02,
+
+				// external collision group corresponding to hkey 0
+				// (tag number CBORTagExternalCollisionGroup)
+				0xd8, 0xfe,
+				// (tag content: slab id)
+				0xd8, 0xff, 0x50,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+
+				// external collision group corresponding to hkey 1
+				// (tag number CBORTagExternalCollisionGroup)
+				0xd8, 0xfe,
+				// (tag content: slab id)
+				0xd8, 0xff, 0x50,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+			},
+
+			// external collision group
+			id2: {
+				// version
+				0x10,
+				// flag: any size + collision group
+				0x2b,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 10)
+				0x59, 0x00, 0x50,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 4
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// hkey: 6
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// hkey: 8
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+				// hkey: 10
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+				// hkey: 12
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
+				// hkey: 14
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
+				// hkey: 16
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+				// hkey: 18
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
+
+				// elements (array of 10 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x0a,
+				// element: [uint64(0), uint64(0)]
+				0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
+				// element: [uint64(2), uint64(4)]
+				0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
+				// element: [uint64(4), uint64(8)]
+				0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
+				// element: [uint64(6), uint64(12)]
+				0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
+				// element: [uint64(8), uint64(16)]
+				0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10,
+				// element: [uint64(10), uint64(20)]
+				0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14,
+				// element: [uint64(12), uint64(24)]
+				0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18,
+				// element: [uint64(14), uint64(28)]
+				0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c,
+				// element: [uint64(16), uint64(32)]
+				0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20,
+				// element: [uint64(18), uint64(36)]
+				0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24,
+			},
+
+			// external collision group
+			id3: {
+				// version
+				0x10,
+				// flag: any size + collision group
+				0x2b,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 1
+				0x01,
+
+				// hkeys (byte string of length 8 * 10)
+				0x59, 0x00, 0x50,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				// hkey: 5
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// hkey: 7
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+				// hkey: 9
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+				// hkey: 11
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
+				// hkey: 13
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d,
+				// hkey: 15
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
+				// hkey: 17
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+				// hkey: 19
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+
+				// elements (array of 10 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x0a,
+				// element: [uint64(1), uint64(2)]
+				0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
+				// element: [uint64(3), uint64(6)]
+				0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
+				// element: [uint64(5), uint64(10)]
+				0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
+				// element: [uint64(7), uint64(14)]
+				0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+				// element: [uint64(9), uint64(18)]
+				0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12,
+				// element: [uint64(11), uint64(22)]
+				0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16,
+				// element: [uint64(13), uint64(26)]
+				0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a,
+				// element: [uint64(15), uint64(30)]
+				0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e,
+				// element: [uint64(17), uint64(34)]
+				0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22,
+				// element: [uint64(19), uint64(38)]
+				0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26,
+			},
+		}
+
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+		require.Equal(t, expected[id2], stored[id2])
+		require.Equal(t, expected[id3], stored[id3])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
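An external collision group replaces the inline form once it no longer fits in the slab: the element becomes tag number CBORTagExternalCollisionGroup (0xd8, 0xfe) wrapping a slab ID storable (0xd8, 0xff, 0x50, then 16 bytes of address and index), as in id1 above. A sketch of the resulting fixed element size (illustrative):

    // externalCollisionGroupSize returns the encoded size of an external
    // collision group element: two tag numbers, a byte string head (0x50),
    // and the 16-byte slab ID (8-byte address + 8-byte index).
    func externalCollisionGroupSize() int {
        return 2 + 2 + 1 + 16
    }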
+	t.Run("pointer to child map", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 2
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
+		for i := uint64(0); i < mapSize-1; i++ {
+			k := NewStringValue(strings.Repeat(string(r), 22))
+			v := NewStringValue(strings.Repeat(string(r), 22))
+			keyValues[k] = v
+
+			digests := []Digest{Digest(i), Digest(i * 2)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			r++
+		}
+
+		// Create child map
+		typeInfo2 := testTypeInfo{43}
+
+		childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo2)
+		require.NoError(t, err)
+
+		expectedChildMapValues := mapValue{}
+		for i := 0; i < 2; i++ {
+			k := Uint64Value(i)
+			v := NewStringValue(strings.Repeat("b", 22))
+
+			existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			expectedChildMapValues[k] = v
+		}
+
+		k := NewStringValue(strings.Repeat(string(r), 22))
+		v := childMap
+		keyValues[k] = expectedChildMapValues
+
+		digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)}
+		digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+		// Insert child map
+		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		require.NoError(t, err)
+		require.Nil(t, existingStorable)
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		// root slab (data slab) ID
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		// child map slab ID
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// data slab
+			id1: {
+				// version
+				0x10,
+				// flag: root + has pointer + map data
+				0xc8,
+
+				// extra data
+				// CBOR encoded array of 3 elements
+				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa]
+				0x82,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				// element: [bbbbbbbbbbbbbbbbbbbbbb:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,2)]
+				0x82,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+				0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+			},
+
+			// map data slab
+			id2: {
+				// version
+				0x10,
+				// flag: root + map data
+				0x88,
+
+				// extra data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey
+				0x4f, 0x6a, 0x3e, 0x93, 0xdd, 0xb1, 0xbe, 0x5,
+				// hkey
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [1:bbbbbbbbbbbbbbbbbbbbbb]
+				0x82,
+				0xd8, 0xa4, 0x1,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+				// element: [0:bbbbbbbbbbbbbbbbbbbbbb]
+				0x82,
+				0xd8, 0xa4, 0x0,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+			},
+		}
+
+		// Verify encoded data
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+		require.Equal(t, expected[id2], stored[id2])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
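Here the child map is too large to inline, so the parent stores a slab ID storable (0xd8, 0xff, 0x50, 16 bytes) pointing at id2 instead of an inlined value. A sketch of the decision, using a hypothetical predicate rather than the library's actual check:

    // shouldInline reports whether a child's encoded size fits within the
    // parent's element size limit; otherwise the child stays in its own slab
    // and the parent keeps a pointer (slab ID storable).
    func shouldInline(childEncodedSize, maxInlineSize uint64) bool {
        return childEncodedSize <= maxInlineSize
    }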
+	t.Run("pointer to grand child map", func(t *testing.T) {
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 2
+		keyValues := make(map[Value]Value, mapSize)
+		for i := uint64(0); i < mapSize-1; i++ {
+			k := Uint64Value(i)
+			v := Uint64Value(i * 2)
+			keyValues[k] = v
+
+			digests := []Digest{Digest(i), Digest(i * 2)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+		}
+
+		// Create child map
+		childTypeInfo := testTypeInfo{43}
+
+		childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childTypeInfo)
+		require.NoError(t, err)
+
+		// Create grand child map
+		gchildTypeInfo := testTypeInfo{44}
+
+		gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gchildTypeInfo)
+		require.NoError(t, err)
+
+		expectedGChildMapValues := mapValue{}
+		r := 'a'
+		for i := 0; i < 2; i++ {
+			k := NewStringValue(strings.Repeat(string(r), 22))
+			v := NewStringValue(strings.Repeat(string(r), 22))
+
+			existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			expectedGChildMapValues[k] = v
+
+			r++
+		}
+
+		// Insert grand child map to child map
+		existingStorable, err := childMap.Set(compare, hashInputProvider, Uint64Value(0), gchildMap)
+		require.NoError(t, err)
+		require.Nil(t, existingStorable)
+
+		k := Uint64Value(mapSize - 1)
+		v := childMap
+		keyValues[k] = mapValue{Uint64Value(0): expectedGChildMapValues}
+
+		digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)}
+		digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+		// Insert child map
+		existingStorable, err = m.Set(compare, hashInputProvider, k, v)
+		require.NoError(t, err)
+		require.Nil(t, existingStorable)
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		// root slab (data slab) ID
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		// grand child map slab ID
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// data slab
+			id1: {
+				// version, flag: has inlined slab
+				0x11,
+				// flag: root + has pointer + map data
+				0xc8,
+
+				// extra data
+				// CBOR encoded array of 3 elements
+				0x83,
+				// type info
+				0x18, 0x2a,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// array of inlined slab extra data
+				0x81,
+				// element 0
+				// inlined map extra data
+				0xd8, 0xf8,
+				0x83,
+				// type info
+				0x18, 0x2b,
+				// count: 1
+				0x01,
+				// seed
+				0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [0:0]
+				0x82,
+				0xd8, 0xa4, 0x0,
+				0xd8, 0xa4, 0x0,
+				// element: [1:inlined map]
+				0x82,
+				// key: 1
+				0xd8, 0xa4, 0x1,
+
+				// value: inlined map (tag: CBORTagInlinedMap)
+				0xd8, 0xfb,
+				// array of 3 elements
+				0x83,
+				// extra data index 0
+				0x18, 0x00,
+				// inlined map slab index
+				0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// inlined map elements (array of 3 elements)
+				0x83,
+				// level 0
+				0x00,
+				// hkey bytes
+				0x59, 0x00, 0x08,
+				0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+				// 1 element
+				0x99, 0x00, 0x01,
+				0x82,
+				// key: 0
+				0xd8, 0xa4, 0x00,
+				// value: SlabID{...3}
+				0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+			},
+
+			// map data slab
+			id2: {
+				// version
+				0x10,
+				// flag: root + map data
+				0x88,
+
+				// extra data (CBOR encoded array of 3 elements)
+				0x83,
+				// type info
+				0x18, 0x2c,
+				// count: 2
+				0x02,
+				// seed
+				0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 2)
+				0x59, 0x00, 0x10,
+				// hkey
+				0x30, 0x43, 0xc5, 0x14, 0x8f, 0x52, 0x18, 0x43,
+				// hkey
+				0x98, 0x0f, 0x5c, 0xdb, 0x37, 0x71, 0x6c, 0x13,
+
+				// elements (array of 2 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x02,
+				// element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa]
+				0x82,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				// element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb]
+				0x82,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+			},
+		}
+
+		// Verify encoded data
+		stored, err := storage.Encode()
+		require.NoError(t, err)
+
+		require.Equal(t, len(expected), len(stored))
+		require.Equal(t, expected[id1], stored[id1])
+		require.Equal(t, expected[id2], stored[id2])
+
+		// Decode data to new storage
+		storage2 := newTestPersistentStorageWithData(t, stored)
+
+		// Test new map from storage2
+		decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+		require.NoError(t, err)
+
+		testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+	})
+
+	t.Run("pointer to child array", func(t *testing.T) {
+
+		SetThreshold(256)
+		defer SetThreshold(1024)
+
+		// Create and populate map in memory
+		storage := newTestBasicStorage(t)
+
+		digesterBuilder := &mockDigesterBuilder{}
+
+		// Create map
+		m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+		require.NoError(t, err)
+
+		const mapSize = 8
+		keyValues := make(map[Value]Value, mapSize)
+		r := 'a'
+		for i := uint64(0); i < mapSize-1; i++ {
+			k := NewStringValue(strings.Repeat(string(r), 22))
+			v := NewStringValue(strings.Repeat(string(r), 22))
+			keyValues[k] = v
+
+			digests := []Digest{Digest(i), Digest(i * 2)}
+			digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			r++
+		}
+
+		// Create child array
+		const childArraySize = 5
+
+		typeInfo2 := testTypeInfo{43}
+
+		childArray, err := NewArray(storage, address, typeInfo2)
+		require.NoError(t, err)
+
+		expectedChildValues := make([]Value, childArraySize)
+		for i := 0; i < childArraySize; i++ {
+			v := NewStringValue(strings.Repeat("b", 22))
+			err = childArray.Append(v)
+			require.NoError(t, err)
+
+			expectedChildValues[i] = v
+		}
+
+		k := NewStringValue(strings.Repeat(string(r), 22))
+		v := childArray
+
+		keyValues[k] = arrayValue(expectedChildValues)
+
+		digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)}
+		digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+		// Insert nested array
+		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		require.NoError(t, err)
+		require.Nil(t, existingStorable)
+
+		require.Equal(t, uint64(mapSize), m.Count())
+
+		id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+		id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+		id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}}
+		id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}}
+
+		// Expected serialized slab data with slab id
+		expected := map[SlabID][]byte{
+
+			// metadata slab
+			id1: {
+				// version
+				0x10,
+				// flag: root + map meta
+				0x89,
+
+				// extra data
+				// CBOR encoded array of 3 elements
+				0x83,
+				// type info: "map"
+				0x18, 0x2A,
+				// count: 8
+				0x08,
+				// seed
+				0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+				// child shared address
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+
+				// child header count
+				0x00, 0x02,
+				// child header 1 (slab id, first key, size)
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0xf6,
+				// child header 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				0x00, 0xf2,
+			},
+
+			// data slab
+			id2: {
+				// version, flag: has next slab ID
+				0x12,
+				// flag: map data
+				0x08,
+				// next slab id
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 0
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				// hkey: 1
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+				// hkey: 2
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+				// hkey: 3
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+				// element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa]
+				0x82,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+				// element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb]
+				0x82,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+				0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+				// element: [cccccccccccccccccccccc:cccccccccccccccccccccc]
+				0x82,
+				0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+				0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+				// element: [dddddddddddddddddddddd:dddddddddddddddddddddd]
+				0x82,
+				0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
+				0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
+			},
+
+			// data slab
+			id3: {
+				// version
+				0x10,
+				// flag: has pointer + map data
+				0x48,
+
+				// the following encoded data is valid CBOR
+
+				// elements (array of 3 elements)
+				0x83,
+
+				// level: 0
+				0x00,
+
+				// hkeys (byte string of length 8 * 4)
+				0x59, 0x00, 0x20,
+				// hkey: 4
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+				// hkey: 5
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+				// hkey: 6
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+				// hkey: 7
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+				// elements (array of 4 elements)
+				// each element is encoded as CBOR array of 2 elements (key, value)
+				0x99, 0x00, 0x04,
+				// element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee]
+				0x82,
+				0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+				0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + id4: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + require.Equal(t, expected[id4], stored[id4]) + + // Verify slab size in header is correct. 
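+ // (Editorial note, inferred from the encodings above: id3 is the last child and does not encode a next-slab-id field, while the in-memory header size presumably still accounts for one, hence the +slabIDSize in the assertion below.)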
+ meta, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + require.Equal(t, 2, len(meta.childrenHeaders)) + require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) + require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + childTypeInfo := testTypeInfo{43} + + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + // Create grand child array + const gchildArraySize = 5 + + gchildTypeInfo := testTypeInfo{44} + + gchildArray, err := NewArray(storage, address, gchildTypeInfo) + require.NoError(t, err) + + expectedGChildValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = gchildArray.Append(v) + require.NoError(t, err) + + expectedGChildValues[i] = v + } + + // Insert grand child array to child array + err = childArray.Append(gchildArray) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + + keyValues[k] = arrayValue{arrayValue(expectedGChildValues)} + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child array to parent map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // parent map root slab ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child array root slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // array of inlined slab extra data + 0x81, + // element 0 + // inlined array extra data + 0xd8, 0xf7, + 0x81, + // type info + 0x18, 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 
0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa]
+ 0x82,
+ 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ // element: [bbbbbbbbbbbbbbbbbbbbbb:inlined array]
+ 0x82,
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+
+ // value: inlined array (tag: CBORTagInlinedArray)
+ 0xd8, 0xfa,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined array slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined array elements (1 element)
+ 0x99, 0x00, 0x01,
+ // SlabID{...3}
+ 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ },
+
+ // grand child array data slab
+ id2: {
+ // version
+ 0x10,
+ // flag: root + array data
+ 0x80,
+ // extra data (CBOR encoded array of 1 elements)
+ 0x81,
+ // type info
+ 0x18, 0x2c,
+
+ // CBOR encoded array head (fixed size 3 byte)
+ 0x99, 0x00, 0x05,
+ // CBOR encoded array elements
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+ 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+ require.Equal(t, expected[id2], stored[id2])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("pointer to storable slab", func(t *testing.T) {
+
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ k := Uint64Value(0)
+ v := Uint64Value(0)
+
+ digests := []Digest{Digest(0), Digest(1)}
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ require.Equal(t, uint64(1), m.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+ id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}}
+
+ expectedNoPointer := []byte{
+
+ // version
+ 0x10,
+ // flag: root + map data
+ 0x88,
+ // extra data (CBOR encoded array of 3 elements)
+ 0x83,
+ // type info: "map"
+ 0x18, 0x2A,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 1)
+ 0x59, 0x00, 0x08,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ // elements (array of 1 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x01,
+ // element: [uint64(0), uint64(0)]
+ 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+ require.Equal(t, 1, len(stored))
+ require.Equal(t, expectedNoPointer, stored[id1])
+
+ // Overwrite existing value with long string
+ vs := NewStringValue(strings.Repeat("a", 128))
+ existingStorable, err = m.Set(compare, hashInputProvider, k, vs)
+ require.NoError(t, err)
+
+ existingValue, err := existingStorable.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, v, existingValue)
+
+ expectedHasPointer := []byte{
+
+ // version
+ 0x10,
+ // flag: root + pointer + map data
+ 0xc8,
+ // extra data (CBOR encoded array of 3 elements)
+ 0x83,
+ // type info: "map"
+ 0x18, 0x2A,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 1)
+ 0x59, 0x00, 0x08,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ // elements (array of 1 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x01,
+ // element: [uint64(0), slab id]
+ 0x82, 0xd8, 0xa4, 0x00,
+ // (tag content: slab id)
+ 0xd8, 0xff, 0x50,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ }
+
+ expectedStorableSlab := []byte{
+ // version
+ 0x10,
+ // flag: storable + no size limit
+ 0x3f,
+ // "aaaa..."
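+ // (0x78 is the CBOR text-string head whose 1-byte length follows; 0x80 = 128 bytes of "a")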
+ 0x78, 0x80,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+ }
+
+ stored, err = storage.Encode()
+ require.NoError(t, err)
+ require.Equal(t, 2, len(stored))
+ require.Equal(t, expectedHasPointer, stored[id1])
+ require.Equal(t, expectedStorableSlab, stored[id2])
+ })
+
+ t.Run("same composite with one field", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo := testCompositeTypeInfo{43}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 2
+ keyValues := make(map[Value]Value, mapSize)
+ for i := uint64(0); i < mapSize; i++ {
+
+ // Create child map, composite with one field "uuid"
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+ require.NoError(t, err)
+
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ k := Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = mapValue{ck: cv}
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 1 inlined slab extra data
+ 0x81,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x48, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["uuid"]
+ 0x81, 0x64, 0x75, 0x75, 0x69, 0x64,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element 0:
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined composite elements (array of 1 elements)
+ 0x81,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined composite elements (array of 1 elements)
+ 0x81,
+ // value: 1
+ 0xd8, 0xa4, 0x01,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("same composite with two fields (same order)", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo := testCompositeTypeInfo{43}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 2
+ keyValues := make(map[Value]Value, mapSize)
+ for i := uint64(0); i < mapSize; i++ {
+ expectedChildMapValues := mapValue{}
+
+ // Create child map, composite with two fields "uuid" and "amount"
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+ require.NoError(t, err)
+
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ ck = NewStringValue("amount")
+ cv = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ k := Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = expectedChildMapValues
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 1 inlined slab extra data
+ 0x81,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x50, 0x3b, 0xef, 0x5b, 0xe2, 0x9b, 0x8d, 0xf9, 0x65, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["amount", "uuid"]
+ 0x82, 0x66, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x64, 0x75, 0x75, 0x69, 0x64,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element 0:
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined composite elements (array of 2 elements)
+ 0x82,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined composite elements (array of 2 elements)
+ 0x82,
+ // value: 2
+ 0xd8, 0xa4, 0x02,
+ // value: 1
+ 0xd8, 0xa4, 0x01,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("same composite with two fields (different order)", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo := testCompositeTypeInfo{43}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 2
+ keyValues := make(map[Value]Value, mapSize)
+ // fields are ordered differently because of different seed.
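+ // (composite keys are stored in digest order, and digests derive from each child map's seed, so two composites with the same fields can encode their keys in a different order)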
+ for i := uint64(0); i < mapSize; i++ {
+ expectedChildMapValues := mapValue{}
+
+ // Create child map, composite with two fields "uuid" and "a"
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+ require.NoError(t, err)
+
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ ck = NewStringValue("a")
+ cv = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ k := Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = expectedChildMapValues
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 1 inlined slab extra data
+ 0x81,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x50,
+ 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1,
+ 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["a", "uuid"]
+ 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element 0:
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined composite elements (array of 2 elements)
+ 0x82,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined composite elements
(array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 3 + keyValues := make(map[Value]Value, mapSize) + + for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert first element "uuid" to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + // Insert second element to child map (second element is different) + switch i % 3 { + case 0: + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + + case 1: + ck = NewStringValue("b") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + + case 2: + ck = NewStringValue("c") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + } + + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapValues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 3 + 0x03, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 3 inlined slab extra data + 0x83, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, 
+ // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + // composite digests + 0x50, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + 0x82, 0x41, 0xee, 0xef, 0xc7, 0xb3, 0x2f, 0x28, + // composite keys ["uuid", "b"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x62, + + // element 2 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // composite digests + 0x50, + 0x5a, 0x98, 0x80, 0xf4, 0xa6, 0x52, 0x9e, 0x2d, + 0x6d, 0x8a, 0x0a, 0xe7, 0x19, 0xf1, 0xbb, 0x8b, + // composite keys ["uuid", "c"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x63, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 3) + 0x59, 0x00, 0x18, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // elements (array of 3 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x03, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different number of fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + 
// fields are ordered differently because of different seed.
+ for i := uint64(0); i < mapSize; i++ {
+ expectedChildMapValues := mapValue{}
+
+ // Create child map, composite with field "uuid" (the first child map also gets field "a" below)
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo)
+ require.NoError(t, err)
+
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ if i == 0 {
+ ck = NewStringValue("a")
+ cv = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+ }
+
+ k := Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = expectedChildMapValues
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 2 inlined slab extra data
+ 0x82,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x50,
+ 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1,
+ 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["a", "uuid"]
+ 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64,
+ // element 1
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+ // composite digests
+ 0x48,
+ 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e,
+ // composite keys ["uuid"]
+ 0x81, 0x64, 0x75, 0x75, 0x69, 0x64,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element 0:
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined composite elements (array of 2 elements)
+ 0x82,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined composite (tag: CBORTagInlinedCompactMap)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined composite elements (array of 1 elements)
+ 0x81,
+ // value: 1
+ 0xd8, 0xa4, 0x01,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("different composite", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo1 := testCompositeTypeInfo{43}
+ childMapTypeInfo2 := testCompositeTypeInfo{44}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 4
+ keyValues := make(map[Value]Value, mapSize)
+ // fields are ordered differently because of different seed.
+ for i := uint64(0); i < mapSize; i++ {
+ expectedChildMapValues := mapValue{}
+
+ var ti TypeInfo
+ if i%2 == 0 {
+ ti = childMapTypeInfo1
+ } else {
+ ti = childMapTypeInfo2
+ }
+
+ // Create child map, composite with two fields "uuid" and "a"
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti)
+ require.NoError(t, err)
+
+ ck := NewStringValue("uuid")
+ cv := Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ ck = NewStringValue("a")
+ cv = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ expectedChildMapValues[ck] = cv
+
+ k := Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = expectedChildMapValues
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 4
+ 0x04,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 2 inlined slab extra data
+ 0x82,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x50,
+ 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1,
+ 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["a", "uuid"]
+ 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64,
+ // element 1
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+
// type info + 0xd8, 0xf6, 0x18, 0x2c, + // count: 2 + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // composite digests + 0x50, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + 0xea, 0x8e, 0x6f, 0x69, 0x81, 0x19, 0x68, 0x81, + // composite keys ["uuid", "a"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x61, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 4 + 0xd8, 0xa4, 0x04, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) +} + +func TestMapEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Create a new storage with encoded data from base storage + storage2 := 
newTestPersistentStorageWithBaseStorage(t, storage.baseStorage)
+
+ // Create new map from new storage
+ m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder)
+ require.NoError(t, err)
+
+ testMap(t, storage2, typeInfo, address, m2, keyValues, nil, false)
+}
+
+func TestMapStoredValue(t *testing.T) {
+
+ const mapSize = 4096
+
+ r := newRand(t)
+
+ typeInfo := testTypeInfo{42}
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ storage := newTestPersistentStorage(t)
+
+ keyValues := make(map[Value]Value, mapSize)
+ i := 0
+ for len(keyValues) < mapSize {
+ k := NewStringValue(randStr(r, 16))
+ keyValues[k] = Uint64Value(i)
+ i++
+ }
+
+ m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+ require.NoError(t, err)
+
+ for k, v := range keyValues {
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ }
+
+ rootID := m.SlabID()
+
+ slabIterator, err := storage.SlabIterator()
+ require.NoError(t, err)
+
+ for {
+ id, slab := slabIterator()
+
+ if id == SlabIDUndefined {
+ break
+ }
+
+ value, err := slab.StoredValue(storage)
+
+ if id == rootID {
+ require.NoError(t, err)
+
+ m2, ok := value.(*OrderedMap)
+ require.True(t, ok)
+
+ testMap(t, storage, typeInfo, address, m2, keyValues, nil, false)
+ } else {
+ require.Equal(t, 1, errorCategorizationCount(err))
+ var fatalError *FatalError
+ var notValueError *NotValueError
+ require.ErrorAs(t, err, &fatalError)
+ require.ErrorAs(t, err, &notValueError)
+ require.ErrorAs(t, fatalError, &notValueError)
+ require.Nil(t, value)
+ }
+ }
+}
+
+func TestMapPopIterate(t *testing.T) {
+
+ t.Run("empty", func(t *testing.T) {
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+ m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+ require.NoError(t, err)
+
+ err = storage.Commit()
+ require.NoError(t, err)
+
+ require.Equal(t, 1, storage.Count())
+
+ i := uint64(0)
+ err = m.PopIterate(func(k Storable, v Storable) {
+ i++
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+
+ testEmptyMap(t, storage, typeInfo, address, m)
+ })
+
+ t.Run("root-dataslab", func(t *testing.T) {
+ const mapSize = 10
+
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ digesterBuilder := newBasicDigesterBuilder()
+
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ keyValues := make(map[Value]Value, mapSize)
+ sortedKeys := make([]Value, mapSize)
+ for i := uint64(0); i < mapSize; i++ {
+ key, value := Uint64Value(i), Uint64Value(i*10)
+ sortedKeys[i] = key
+ keyValues[key] = value
+
+ existingStorable, err := m.Set(compare, hashInputProvider, key, value)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ }
+
+ require.Equal(t, uint64(mapSize), m.Count())
+
+ err = storage.Commit()
+ require.NoError(t, err)
+
+ require.Equal(t, 1, storage.Count())
+
+ sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+
+ i := mapSize
+ err = m.PopIterate(func(k, v Storable) {
+ i--
+
+ kv, err := k.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, sortedKeys[i], kv)
+
+ vv, err := v.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, keyValues[sortedKeys[i]], vv)
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, 0, i)
+
+ testEmptyMap(t, storage, typeInfo, address, m)
+ })
+
+ t.Run("root-metaslab", func(t *testing.T) {
+ const mapSize = 4096
+
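+ // 4096 random entries are large enough to force a root metadata slab, which this subtest exercises.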
r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + if _, found := keyValues[k]; !found { + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + i++ + } + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + err = storage.Commit() + require.NoError(t, err) + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i = len(keyValues) + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) + + t.Run("collision", func(t *testing.T) { + //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := &mockDigesterBuilder{} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + + if _, found := keyValues[k]; !found { + + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + + digests := []Digest{ + Digest(i % 100), + Digest(i % 5), + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) + require.NoError(t, err) + require.Nil(t, existingStorable) + + i++ + } + } + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + err = storage.Commit() + require.NoError(t, err) + + // Iterate key value pairs + i = mapSize + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) +} + +func TestEmptyMap(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, s) + }) + + t.Run("remove", func(t *testing.T) { + 
existingMapKeyStorable, existingMapValueStorable, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, existingMapKeyStorable) + require.Nil(t, existingMapValueStorable) + }) + + t.Run("iterate", func(t *testing.T) { + i := 0 + err := m.IterateReadOnly(func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("count", func(t *testing.T) { + count := m.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, m.Type())) + }) + + t.Run("address", func(t *testing.T) { + require.Equal(t, address, m.Address()) + }) + + // TestMapEncodeDecode/empty tests empty map encoding and decoding +} + +func TestMapFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. + copied, err := NewMapFromBatchData( + storage, + address, + NewDefaultDigesterBuilder(), + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + testEmptyMap(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. 
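+ // NewMapFromBatchData reads elements from the callback until it returns a nil key; the callback below also records each pair so the copy can be verified against the original.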
+ copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + + k, v, err := iter.Next() + + // Save key value pair + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 8 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 
2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + storable, err := m.Set( + compare, + hashInputProvider, + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + ) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + require.Equal(t, typeInfo, m.Type()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for m.Count() < mapSize { + k := randomValue(r, int(maxInlineMapElementSize)) + v := randomValue(r, int(maxInlineMapElementSize)) + + _, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + var sortedKeys []Value + keyValues := make(map[Value]Value, mapSize) + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("collision", func(t *testing.T) { + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + MaxCollisionLimitPerDigest = mapSize / 2 + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + + k, v := Uint64Value(i), Uint64Value(i*10) + + digests := make([]Digest, 2) + if i%2 == 0 { + digests[0] = 0 + } else { + digests[0] = Digest(i % (mapSize / 2)) + } + digests[1] = Digest(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + storable, 
err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + i := 0 + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + i++ + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + maxStringSize := int(maxInlineMapKeySize - 2) + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue(randStr(r, maxStringSize)) + v = NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") + v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) +} + +func TestMapNestedStorables(t *testing.T) { + + t.Run("SomeValue", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + vs := strings.Repeat("b", int(i)) 
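+ // Wrap the value in SomeValue, like the key above, so the map stores nested storables rather than plain strings.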
+ v := SomeValue{Value: NewStringValue(vs)} + + keyValues[k] = v + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) + + t.Run("Array", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + // Create a child array with one element + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + err = childArray.Append(v) + require.NoError(t, err) + + // Insert nested array into map + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + keyValues[k] = arrayValue{v} + + existingStorable, err := m.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) +} + +func TestMapMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + maxStringSize := int(maxInlineMapKeySize - 2) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for len(keyValues) < 2 { + // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + keyValues[k] = v + + _, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.True(t, m.root.IsData()) + + // Size of root data slab with two elements (key+value pairs) of + // max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab) + require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) +} + +func TestMapString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2]` + require.Equal(t, want, m.String()) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", 
k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` + require.Equal(t, want, m.String()) + }) +} + +func TestMapSlabDump(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", + "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("inline collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: 
[0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", + "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("external collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", + "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", + "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("key overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("value overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: 
[0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func TestMaxCollisionLimitPerDigest(t *testing.T) { + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + + t.Run("collision limit 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 0, + // meaning no collision is allowed at first level. + MaxCollisionLimitPerDigest = uint32(0) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("collision limit > 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 7, + // meaning at most 8 elements in collision group per digest at first level. 
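+ // With first-level digests assigned as i % 128 below, the 1024 keys spread evenly into 1024/128 = 8 elements per digest, exactly at the limit, so the second batch of inserts reusing the same digests must fail.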
+ MaxCollisionLimitPerDigest = uint32(7) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + +func TestMapLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, nil) + }) + + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 
0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values in collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 collision groups, 2 elements in each group. + const mapSize = 6 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values in external collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision group, 4 elements in the group. + const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from front to back. + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload external key from front to back. + for i := 0; i < len(values); i++ { + k := values[i][0] + + s, ok := k.(StringValue) + require.True(t, ok) + + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } + + require.NoError(t, keyID.Valid()) + + err := storage.Remove(keyID) + require.NoError(t, err) + + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 collision groups, 2 elements in each group. 
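+ // The digest function [i/2, i] gives keys 2j and 2j+1 the same first-level digest j, so each pair forms one collision group, while the unique second-level digest keeps elements within a group distinguishable.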
+ const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from front to back. + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload external collision group slab from front to back + + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) + + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + + for i, id := range externalCollisionSlabIDs { + err := storage.Remove(id) + require.NoError(t, err) + + expectedValues := values[i*4+4:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from back to front. + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload external key from back to front. + for i := len(values) - 1; i >= 0; i-- { + k := values[i][0] + + s, ok := k.(StringValue) + require.True(t, ok) + + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } + + require.NoError(t, keyID.Valid()) + + err := storage.Remove(keyID) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 collision groups, 2 elements in each group.
+ const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. 
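+ // Each group of 4 elements is stored as a separate external collision slab, which is why the expected delta count below is 1 (root) + 3 (collision slabs) + mapSize (composite values).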
+ const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload external slabs from back to front + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) + + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + + for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { + err := storage.Remove(externalCollisionSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i*4] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload value in the middle + unloadValueIndex := 1 + + err := storage.Remove(childSlabIDs[unloadValueIndex]) + require.NoError(t, err) + + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload key in the middle. + unloadValueIndex := 1 + + k := values[unloadValueIndex][0] + + s, ok := k.(StringValue) + require.True(t, ok) + + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } + + require.NoError(t, keyID.Valid()) + + err := storage.Remove(keyID) + require.NoError(t, err) + + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) + + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element in the middle + for _, unloadValueIndex := range []int{1, 3, 5} { + err := storage.Remove(childSlabIDs[unloadValueIndex]) + require.NoError(t, err) + } + + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + } + testMapLoadedElements(t, m, expectedValues) + }) + + t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite value in the middle. + for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { + err := storage.Remove(childSlabIDs[unloadValueIndex]) + require.NoError(t, err) + } + + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + values[6], + values[8], + values[10], + } + testMapLoadedElements(t, m, expectedValues) + }) + + t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload external slabs in the middle. 
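+ // External collision slabs are MapDataSlabs flagged as collisionGroup: collect them, sort by slab ID, and remove the middle one, which drops its four elements (values[4:8]) from the loaded set.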
+ var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) + + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + + id := externalCollisionSlabIDs[1] + err := storage.Remove(id) + require.NoError(t, err) + + copy(values[4:], values[8:]) + values = values[:8] + + testMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + i := 0 + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { + // At this point, the iterator has returned the first element (v). + + // Remove all other nested composite elements (except first element) from storage. + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) + require.NoError(t, err) + } + + require.Equal(t, 0, i) + valueEqual(t, values[0][0], k) + valueEqual(t, values[0][1], v) + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 1, i) // Only first element is iterated because other elements were removed during iteration.
+ }) + + t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) { + const mapSize = 3 + + // Create a map with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { + storage := newTestPersistentStorage(t) + + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + childArrayIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite value + err := storage.Remove(childSlabID) + require.NoError(t, err) + + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testMapLoadedElements(t, m, values) + } + }) + + t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + }) + + t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values : 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from back to front + for i := len(values) 
- 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - digests := []Digest{Digest(0), Digest(1)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + testMapLoadedElements(t, m, values) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + // Unload composite element in the middle + for _, index := range []int{4, 14} { + err := storage.Remove(childSlabIDs[index]) + require.NoError(t, err) - require.Equal(t, uint64(1), m.Count()) + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + copy(childSlabIDs[index:], childSlabIDs[index+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - expectedNoPointer := []byte{ + testMapLoadedElements(t, m, values) + } + }) - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { + const mapSize = 20 - // the following encoded data is valid CBOR + // Create a map with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { + storage := newTestPersistentStorage(t) - // elements (array of 3 elements) - 0x83, + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + childArrayIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // level: 0 - 0x00, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 5, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + testMapLoadedElements(t, m, values) - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - } + err := storage.Remove(childSlabID) + require.NoError(t, err) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(stored)) - require.Equal(t, expectedNoPointer, stored[id1]) + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] - // Overwrite existing value with long string - vs := NewStringValue(strings.Repeat("a", 512)) - existingStorable, err = m.Set(compare, hashInputProvider, k, vs) - require.NoError(t, err) + testMapLoadedElements(t, m, values) + } + }) - 
existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - expectedHasPointer := []byte{ + const mapSize = 20 - // version - 0x10, - // flag: root + pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // the following encoded data is valid CBOR + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // elements (array of 3 elements) - 0x83, + testMapLoadedElements(t, m, values) - // level: 0 - 0x00, + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Unload data slabs from front to back + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), slab id] - 0x82, 0xd8, 0xa4, 0x00, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - } + childHeader := rootMetaDataSlab.childrenHeaders[i] - stored, err = storage.Encode() - require.NoError(t, err) - require.Equal(t, 2, len(stored)) - require.Equal(t, expectedHasPointer, stored[id1]) - }) -} + // Get data slab element count before unload it from storage. + // Element count isn't in the header. 
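+ // The element count determines how many expected values to drop from the front of values once this slab is unloaded.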
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab)
+ require.True(t, ok)
-func TestMapEncodeDecodeRandomValues(t *testing.T) {
+ count := mapDataSlab.elements.Count()
- SetThreshold(256)
- defer SetThreshold(1024)
+ err := storage.Remove(childHeader.slabID)
+ require.NoError(t, err)
- r := newRand(t)
+ values = values[count:]
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ testMapLoadedElements(t, m, values)
+ }
+ })
- m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address)
+ t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ const mapSize = 20
- // Create a new storage with encoded data from base storage
- storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage)
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- // Create new map from new storage
- m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder)
- require.NoError(t, err)
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false)
-}
+ testMapLoadedElements(t, m, values)
-func TestMapStoredValue(t *testing.T) {
+ rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
+ require.True(t, ok)
- const mapSize = 4096
+ // Unload data slabs from back to front
+ for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
- r := newRand(t)
+ childHeader := rootMetaDataSlab.childrenHeaders[i]
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ // Get data slab element count before unloading it from storage.
+ // Element count isn't in the header.
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) - keyValues := make(map[Value]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - keyValues[k] = Uint64Value(i) - i++ - } + count := mapDataSlab.elements.Count() - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + values = values[:len(values)-int(count)] - rootID := m.SlabID() + testMapLoadedElements(t, m, values) + } + }) - slabIterator, err := storage.SlabIterator() - require.NoError(t, err) + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - for { - id, slab := slabIterator() + const mapSize = 20 - if id == SlabIDUndefined { - break - } + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - value, err := slab.StoredValue(storage) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - if id == rootID { - require.NoError(t, err) + testMapLoadedElements(t, m, values) - m2, ok := value.(*OrderedMap) - require.True(t, ok) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false) - } else { - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var notValueError *NotValueError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, ¬ValueError) - require.ErrorAs(t, fatalError, ¬ValueError) - require.Nil(t, value) - } - } -} + require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) -func TestMapPopIterate(t *testing.T) { + index := 1 + childHeader := rootMetaDataSlab.childrenHeaders[index] - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() + // Get element count from previous data slab + mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) + require.True(t, ok) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + countAtIndex0 := mapDataSlab.elements.Count() - err = storage.Commit() - require.NoError(t, err) + // Get element count from slab to be unloaded + mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) + require.True(t, ok) - require.Equal(t, 1, storage.Count()) + countAtIndex1 := mapDataSlab.elements.Count() - i := uint64(0) - err = m.PopIterate(func(k Storable, v Storable) { - i++ - }) + err := storage.Remove(childHeader.slabID) require.NoError(t, err) - require.Equal(t, uint64(0), i) - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) + values = values[:m.Count()-uint64(countAtIndex1)] - t.Run("root-dataslab", func(t *testing.T) { - const mapSize = 10 + testMapLoadedElements(t, m, values) + }) - typeInfo := testTypeInfo{42} + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { 
storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + const mapSize = 200 - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - key, value := Uint64Value(i), Uint64Value(i*10) - sortedKeys[i] = key - keyValues[key] = value + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - existingStorable, err := m.Set(compare, hashInputProvider, key, value) + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from front to back. + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) require.NoError(t, err) - require.Nil(t, existingStorable) - } - require.Equal(t, uint64(mapSize), m.Count()) + // Use firstKey to deduce number of elements in slab. + var expectedValues [][2]Value + if i < len(rootMetaDataSlab.childrenHeaders)-1 { + nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] + expectedValues = values[int(nextChildHeader.firstKey):] + } - err = storage.Commit() - require.NoError(t, err) + testMapLoadedElements(t, m, expectedValues) + } + }) - require.Equal(t, 1, storage.Count()) + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + const mapSize = 200 - i := mapSize - err = m.PopIterate(func(k, v Storable) { - i-- + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - require.NoError(t, err) - require.Equal(t, 0, i) + // Unload non-root metadata slabs from back to front. + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + childHeader := rootMetaDataSlab.childrenHeaders[i] - t.Run("root-metaslab", func(t *testing.T) { - const mapSize = 4096 + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - r := newRand(t) + // Use firstKey to deduce number of elements in slabs. 
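+ // (Keys here are Uint64Value(i) with digest Digest(i), so childHeader.firstKey
+ // is also the index in values of that subtree's first element.)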
+ values = values[:childHeader.firstKey] - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - if _, found := keyValues[k]; !found { - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) - i++ - } + testMapLoadedElements(t, m, values) } + }) + + t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + const mapSize = 500 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - err = storage.Commit() - require.NoError(t, err) + testMapLoadedElements(t, m, values) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + r := newRand(t) - // Iterate key value pairs - i = len(keyValues) - err = m.PopIterate(func(k Storable, v Storable) { - i-- + // Unload composite element in random position + for len(values) > 0 { - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + i := r.Intn(len(values)) - vv, err := v.StoredValue(storage) + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) - require.NoError(t, err) - require.Equal(t, 0, i) + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + copy(childSlabIDs[i:], childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - t.Run("collision", func(t *testing.T) { - //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + testMapLoadedElements(t, m, values) + } + }) - const mapSize = 1024 + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { - SetThreshold(512) - defer SetThreshold(1024) + storage := newTestPersistentStorage(t) - r := newRand(t) + const mapSize = 500 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := &mockDigesterBuilder{} - storage := newTestPersistentStorage(t) + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + testMapLoadedElements(t, m, values) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) + 
rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
+ require.True(t, ok)
- if _, found := keyValues[k]; !found {
+ type slabInfo struct {
+ id SlabID
+ startIndex int
+ count int
+ }
- sortedKeys[i] = k
- keyValues[k] = NewStringValue(randStr(r, 16))
+ var dataSlabInfos []*slabInfo
+ for _, mheader := range rootMetaDataSlab.childrenHeaders {
- digests := []Digest{
- Digest(i % 100),
- Digest(i % 5),
- }
+ nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab)
+ require.True(t, ok)
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+ for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ {
+ h := nonRootMetaDataSlab.childrenHeaders[i]
- existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k])
- require.NoError(t, err)
- require.Nil(t, existingStorable)
+ if len(dataSlabInfos) > 0 {
+ // Update previous slabInfo.count
+ dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex
+ }
- i++
+ dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)})
+ }
+ }
- sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+ r := newRand(t)
- err = storage.Commit()
- require.NoError(t, err)
+ for len(dataSlabInfos) > 0 {
+ index := r.Intn(len(dataSlabInfos))
- // Iterate key value pairs
- i = mapSize
- err = m.PopIterate(func(k Storable, v Storable) {
- i--
+ slabToBeRemoved := dataSlabInfos[index]
- kv, err := k.StoredValue(storage)
- require.NoError(t, err)
- valueEqual(t, typeInfoComparator, sortedKeys[i], kv)
+ // Update startIndex for all subsequent data slabs
+ for i := index + 1; i < len(dataSlabInfos); i++ {
+ dataSlabInfos[i].startIndex -= slabToBeRemoved.count
+ }
- vv, err := v.StoredValue(storage)
+ err := storage.Remove(slabToBeRemoved.id)
 require.NoError(t, err)
- valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv)
- })
-
- require.NoError(t, err)
- require.Equal(t, 0, i)
- verifyEmptyMap(t, storage, typeInfo, address, m)
- })
-}
+ if index == len(dataSlabInfos)-1 {
+ values = values[:slabToBeRemoved.startIndex]
+ } else {
+ copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:])
+ values = values[:len(values)-slabToBeRemoved.count]
+ }
-func TestEmptyMap(t *testing.T) {
+ copy(dataSlabInfos[index:], dataSlabInfos[index+1:])
+ dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
- t.Parallel()
+ testMapLoadedElements(t, m, values)
+ }
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ require.Equal(t, 0, len(values))
+ })
- m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo)
- require.NoError(t, err)
+ t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
- t.Run("get", func(t *testing.T) {
- s, err := m.Get(compare, hashInputProvider, Uint64Value(0))
- require.Equal(t, 1, errorCategorizationCount(err))
- var userError *UserError
- var keyNotFoundError *KeyNotFoundError
- require.ErrorAs(t, err, &userError)
- require.ErrorAs(t, err, &keyNotFoundError)
- require.ErrorAs(t, userError, &keyNotFoundError)
- require.Nil(t, s)
- })
+ storage := newTestPersistentStorage(t)
- t.Run("remove", func(t *testing.T) {
- existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0))
- require.Equal(t, 1, errorCategorizationCount(err))
- var userError *UserError
- var keyNotFoundError *KeyNotFoundError
- require.ErrorAs(t, err, &userError)
- require.ErrorAs(t, 
err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, existingKey) - require.Nil(t, existingValue) - }) + const mapSize = 500 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - t.Run("iterate", func(t *testing.T) { - i := 0 - err := m.Iterate(func(k Value, v Value) (bool, error) { - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, 0, i) - }) + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - t.Run("count", func(t *testing.T) { - count := m.Count() - require.Equal(t, uint64(0), count) - }) + testMapLoadedElements(t, m, values) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, m.Type())) - }) + type slabInfo struct { + id SlabID + startIndex int + count int + children []*slabInfo + } - t.Run("address", func(t *testing.T) { - require.Equal(t, address, m.Address()) - }) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // TestMapEncodeDecode/empty tests empty map encoding and decoding -} + metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { -func TestMapFromBatchData(t *testing.T) { + if i > 0 { + prevMetaDataSlabInfo := metadataSlabInfos[i-1] + prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + // Update previous metadata slab count + prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) - require.Equal(t, uint64(0), m.Count()) + // Update previous data slab count + prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex + } - iter, err := m.Iterator() - require.NoError(t, err) + metadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: int(mheader.firstKey), + } - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - // Create a map with new storage, new address, and original map's elements. 
- copied, err := NewMapFromBatchData( - storage, - address, - NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) + for i, h := range nonRootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: int(h.firstKey), + } + if i > 0 { + children[i-1].count = int(h.firstKey) - children[i-1].startIndex + } + } - verifyEmptyMap(t, storage, typeInfo, address, copied) - }) + metadataSlabInfo.children = children + metadataSlabInfos[i] = metadataSlabInfo + } - t.Run("root-dataslab", func(t *testing.T) { - SetThreshold(1024) + const ( + metadataSlabType int = iota + dataSlabType + maxSlabType + ) - const mapSize = 10 + r := newRand(t) - typeInfo := testTypeInfo{42} + for len(metadataSlabInfos) > 0 { - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + var slabInfoToBeRemoved *slabInfo + var isLastSlab bool - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + switch r.Intn(maxSlabType) { - require.Equal(t, uint64(mapSize), m.Count()) + case metadataSlabType: - iter, err := m.Iterator() - require.NoError(t, err) + metadataSlabIndex := r.Intn(len(metadataSlabInfos)) - var sortedKeys []Value - keyValues := make(map[Value]Value) + isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1 - storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex] - // Create a map with new storage, new address, and original map's elements. 
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
+ count := slabInfoToBeRemoved.count
- k, v, err := iter.Next()
+ // Update startIndex for subsequent metadata slabs
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ {
+ metadataSlabInfos[i].startIndex -= count
- // Save key value pair
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
+ for j := 0; j < len(metadataSlabInfos[i].children); j++ {
+ metadataSlabInfos[i].children[j].startIndex -= count
+ }
+ }
- return k, v, err
- })
+ copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:])
+ metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1]
- require.NoError(t, err)
- require.NotEqual(t, copied.SlabID(), m.SlabID())
+ case dataSlabType:
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
- })
+ metadataSlabIndex := r.Intn(len(metadataSlabInfos))
- t.Run("root-metaslab", func(t *testing.T) {
- SetThreshold(256)
- defer SetThreshold(1024)
+ metadataSlabInfo := metadataSlabInfos[metadataSlabIndex]
- const mapSize = 4096
+ dataSlabIndex := r.Intn(len(metadataSlabInfo.children))
- typeInfo := testTypeInfo{42}
+ isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) &&
+ (dataSlabIndex == len(metadataSlabInfo.children)-1)
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- NewDefaultDigesterBuilder(),
- typeInfo,
- )
- require.NoError(t, err)
+ slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex]
- for i := uint64(0); i < mapSize; i++ {
- storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10))
- require.NoError(t, err)
- require.Nil(t, storable)
- }
+ count := slabInfoToBeRemoved.count
- require.Equal(t, uint64(mapSize), m.Count())
+ // Update startIndex for all subsequent data slabs in this metadata slab info
+ for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ {
+ metadataSlabInfo.children[i].startIndex -= count
+ }
- iter, err := m.Iterator()
- require.NoError(t, err)
+ copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:])
+ metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1]
- var sortedKeys []Value
- keyValues := make(map[Value]Value)
+ metadataSlabInfo.count -= count
- storage := newTestPersistentStorage(t)
- digesterBuilder := NewDefaultDigesterBuilder()
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
+ // Update startIndex for all subsequent metadata slabs.
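+ // (Every later metadata slab, and each of its children, covers an element
+ // range that shifts left by the removed slab's count.)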
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { + metadataSlabInfos[i].startIndex -= count - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + for j := 0; j < len(metadataSlabInfos[i].children); j++ { + metadataSlabInfos[i].children[j].startIndex -= count + } + } - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v + if len(metadataSlabInfo.children) == 0 { + copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) + metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] } + } - return k, v, err - }) + err := storage.Remove(slabInfoToBeRemoved.id) + require.NoError(t, err) - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + testMapLoadedElements(t, m, values) + } - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + require.Equal(t, 0, len(values)) }) +} - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) +func createMapWithLongStringKey( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, +) (*OrderedMap, [][2]Value) { - const mapSize = 10 + digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} + // Create parent map. + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + expectedValues := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { + s := strings.Repeat(string(r), int(maxInlineMapElementSize)) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + k := NewStringValue(s) + v := Uint64Value(i) - k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + expectedValues[i] = [2]Value{k, v} - require.Equal(t, uint64(mapSize+1), m.Count()) + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - iter, err := m.Iterator() + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - var sortedKeys []Value - keyValues := make(map[Value]Value) - - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() - - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + r++ + } - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + return m, expectedValues +} - return k, v, err - }) +func createMapWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo 
TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + digesterBuilder := &mockDigesterBuilder{} - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - t.Run("merge two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + expectedValues := make([][2]Value, size) + r := rune('a') + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 20)) - const mapSize = 8 + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - typeInfo := testTypeInfo{42} + expectedValues[i] = [2]Value{k, v} - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) + existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) require.NoError(t, err) + require.Nil(t, existingStorable) + } - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + return m, expectedValues +} - storable, err := m.Set( - compare, - hashInputProvider, - NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), - NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), - ) - require.NoError(t, err) - require.Nil(t, storable) +func createMapWithChildArrayValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value, []SlabID) { + const childArraySize = 50 - require.Equal(t, uint64(mapSize+1), m.Count()) - require.Equal(t, typeInfo, m.Type()) + // Use mockDigesterBuilder to guarantee element order. 
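+ // (The caller-provided newDigests callback assigns deterministic digests per
+ // key, so map iteration order matches the order expectedValues is built in.)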
+ digesterBuilder := &mockDigesterBuilder{}
- iter, err := m.Iterator()
+ // Create parent map
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ slabIDs := make([]SlabID, size)
+ expectedValues := make([][2]Value, size)
+ for i := 0; i < size; i++ {
+ // Create child array
+ childArray, err := NewArray(storage, address, typeInfo)
 require.NoError(t, err)
- var sortedKeys []Value
- keyValues := make(map[Value]Value)
+ expectedChildValues := make([]Value, childArraySize)
+ for j := 0; j < childArraySize; j++ {
+ v := Uint64Value(j)
- storage := newTestPersistentStorage(t)
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
- digesterBuilder := NewDefaultDigesterBuilder()
+ err = childArray.Append(v)
+ require.NoError(t, err)
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
- k, v, err := iter.Next()
+ expectedChildValues[j] = v
+ }
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ k := Uint64Value(i)
+ v := childArray
- return k, v, err
- })
+ expectedValues[i] = [2]Value{k, arrayValue(expectedChildValues)}
+ slabIDs[i] = childArray.SlabID()
+
+ digests := newDigests(i)
+ digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+ // Set child array to parent
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ require.Nil(t, existingStorable)
+ }
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
- })
+ return m, expectedValues, slabIDs
+}
- t.Run("random", func(t *testing.T) {
- SetThreshold(256)
- defer SetThreshold(1024)
+func createMapWithSimpleAndChildArrayValues(
+ t *testing.T,
+ storage SlabStorage,
+ address Address,
+ typeInfo TypeInfo,
+ size int,
+ compositeValueIndex int,
+ newDigests func(i int) []Digest,
+) (*OrderedMap, [][2]Value, SlabID) {
+ const childArraySize = 50
- const mapSize = 4096
+ digesterBuilder := &mockDigesterBuilder{}
- r := newRand(t)
+ // Create parent map
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
- typeInfo := testTypeInfo{42}
+ var slabID SlabID
+ values := make([][2]Value, size)
+ r := 'a'
+ for i := 0; i < size; i++ {
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- NewDefaultDigesterBuilder(),
- typeInfo,
- )
- require.NoError(t, err)
+ k := Uint64Value(i)
- for m.Count() < mapSize {
- k := randomValue(r, int(maxInlineMapElementSize))
- v := randomValue(r, int(maxInlineMapElementSize))
+ digests := newDigests(i)
+ digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- _, err = m.Set(compare, hashInputProvider, k, v)
+ if compositeValueIndex == i {
+ // Create child array with childArraySize elements
+ childArray, err := NewArray(storage, address, typeInfo)
 require.NoError(t, err)
- }
+ expectedChildValues := make([]Value, childArraySize)
+ for j := 0; j < childArraySize; j++ {
+ v := Uint64Value(j)
+ err = childArray.Append(v)
+ require.NoError(t, err)
- require.Equal(t, uint64(mapSize), m.Count())
+ expectedChildValues[j] = v
+ }
- iter, err := m.Iterator()
- require.NoError(t, err)
+ values[i] = [2]Value{k, arrayValue(expectedChildValues)}
- storage := newTestPersistentStorage(t)
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
- digesterBuilder := NewDefaultDigesterBuilder()
- var sortedKeys []Value
- keyValues := make(map[Value]Value, mapSize)
+ existingStorable, err 
:= m.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + slabID = childArray.SlabID() - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + } else { + v := NewStringValue(strings.Repeat(string(r), 18)) + values[i] = [2]Value{k, v} - return k, v, err - }) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + } - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + return m, values, slabID +} - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) +func testMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { + i := 0 + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, expectedValues[i][0], k) + valueEqual(t, expectedValues[i][1], v) + i++ + return true, nil }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} - t.Run("collision", func(t *testing.T) { +func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*MapMetaDataSlab); ok { + counter++ + } + } + return counter +} - const mapSize = 1024 +func TestMaxInlineMapValueSize(t *testing.T) { - SetThreshold(512) + t.Run("small key", func(t *testing.T) { + // Value has larger max inline size when key is less than max map key size. + + SetThreshold(256) defer SetThreshold(1024) - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() - MaxCollisionLimitPerDigest = mapSize / 2 + mapSize := 2 + keyStringSize := 16 // Key size is less than max map key size. + valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. - typeInfo := testTypeInfo{42} + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, - typeInfo, - ) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - k, v := Uint64Value(i), Uint64Value(i*10) + // Both key and value are stored in map slab. + require.Equal(t, 1, len(storage.deltas)) - digests := make([]Digest, 2) - if i%2 == 0 { - digests[0] = 0 - } else { - digests[0] = Digest(i % (mapSize / 2)) - } - digests[1] = Digest(i) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + t.Run("max size key", func(t *testing.T) { + // Value max size is about half of max map element size when key is exactly max map key size. 
- storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) - } + SetThreshold(256) + defer SetThreshold(1024) - require.Equal(t, uint64(mapSize), m.Count()) + mapSize := 1 + keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). + valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). - iter, err := m.Iterator() - require.NoError(t, err) + r := newRand(t) - var sortedKeys []Value - keyValues := make(map[Value]Value) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - - i := 0 - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - i++ - return k, v, err - }) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + // Key is stored in map slab, while value is stored separately in storable slab. + require.Equal(t, 2, len(storage.deltas)) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) - t.Run("data slab too large", func(t *testing.T) { - // Slab size must not exceed maxThreshold. - // We cannot make this problem happen after Atree Issue #193 - // was fixed by PR #194 & PR #197. This test is to catch regressions. + t.Run("large key", func(t *testing.T) { + // Value has larger max inline size when key is more than max map key size because + // when key size exceeds max map key size, it is stored in a separate storable slab, + // and SlabIDStorable is stored as key in the map, which is 19 bytes. 
SetThreshold(256) defer SetThreshold(1024) + mapSize := 1 + keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size + valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + r := newRand(t) - maxStringSize := int(maxInlineMapKeySize - 2) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - digesterBuilder := &mockDigesterBuilder{} - - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, - typeInfo, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - k := NewStringValue(randStr(r, maxStringSize)) - v := NewStringValue(randStr(r, maxStringSize)) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + // Key is stored in separate storable slabs, while value is stored in map slab. + require.Equal(t, 2, len(storage.deltas)) - k = NewStringValue(randStr(r, maxStringSize)) - v = NewStringValue(randStr(r, maxStringSize)) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) +func TestMapID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") - v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + sid := m.SlabID() + id := m.ValueID() + + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} + +func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { + const ( + mapSize = 3 + keyStringSize = 16 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) + + keyValues := make(map[Value]*testMutableValue, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := newTestMutableValue(initialStorableSize) + keyValues[k] = v + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - iter, err := m.Iterator() + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) + } - var sortedKeys []Value - keyValues := make(map[Value]Value) + require.True(t, m.root.IsData()) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + expectedElementSize := singleElementPrefixSize + digestSize 
+ Uint64Value(0).ByteSize() + initialStorableSize
+ expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize
+ require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize())
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
- k, v, err := iter.Next()
+ err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true)
+ require.NoError(t, err)
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ // Reset mutable values after changing their storable size
+ for k, v := range keyValues {
+ v.updateStorableSize(mutatedStorableSize)
+
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.NotNil(t, existingStorable)
+ }
- return k, v, err
- })
+ require.True(t, m.root.IsData())
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize
+ expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize
+ require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize())
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
- })
+ err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true)
+ require.NoError(t, err)
}
-func TestMapNestedStorables(t *testing.T) {
+func TestChildMapInlinabilityInParentMap(t *testing.T) {
- t.Run("SomeValue", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
- const mapSize = 4096
+ const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22
+
+ t.Run("parent is root data slab, with one child map", func(t *testing.T) {
+ const (
+ mapSize = 1
+ keyStringSize = 9
+ valueStringSize = 4
+ )
+
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize()
+ encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize()
+
+ r := newRand(t)
 typeInfo := testTypeInfo{42}
 storage := newTestPersistentStorage(t)
 address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
- require.NoError(t, err)
-
- keyValues := make(map[Value]Value)
- for i := uint64(0); i < mapSize; i++ {
+ parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value {
+ return NewStringValue(randStr(r, keyStringSize))
+ })
- ks := strings.Repeat("a", int(i))
- k := SomeValue{Value: NewStringValue(ks)}
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.
- vs := strings.Repeat("b", int(i))
- v := SomeValue{Value: NewStringValue(vs)}
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- keyValues[k] = v
+ children := getInlinedChildMapsFromParentMap(t, address, parentMap)
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element.
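+ // Each appended element grows the inlined child map by
+ // singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ // bytes; see the expectedInlinedMapSize check below.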
+ for i := 0; i < 3; i++ {
+ for childKey, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true)
- })
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- t.Run("Array", func(t *testing.T) {
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- const mapSize = 4096
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ require.Equal(t, uint64(i+1), childMap.Count())
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ expectedChildMapValues[k] = v
- m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
- require.NoError(t, err)
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
+ require.Equal(t, 1, getStoredDeltas(storage))
- keyValues := make(map[Value]Value)
- for i := uint64(0); i < mapSize; i++ {
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- // Create a nested array with one element
- array, err := NewArray(storage, address, typeInfo)
- require.NoError(t, err)
+ // Test parent slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize
+ expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedParentElementSize*mapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- vs := strings.Repeat("b", int(i))
- v := SomeValue{Value: NewStringValue(vs)}
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
- err = array.Append(v)
- require.NoError(t, err)
+ // Add one more element to child map, which triggers the inlined child map slab to become a standalone slab
+ i := 0
+ for childKey, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- // Insert nested array into map
- ks := strings.Repeat("a", int(i))
- k := SomeValue{Value: NewStringValue(ks)}
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- keyValues[k] = array
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- existingStorable, err := m.Set(compare, hashInputProvider, k, array)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
 require.Nil(t, existingStorable)
- }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true)
- })
-}
+ expectedChildMapValues[k] = v
-func TestMapMaxInlineElement(t *testing.T) {
- t.Parallel()
+ require.False(t, childMap.Inlined())
+ require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slabs because child map is no longer inlined.
- r := newRand(t)
- maxStringSize := int(maxInlineMapKeySize - 2)
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ i++
- m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
- require.NoError(t, err)
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Slab ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- keyValues := make(map[Value]Value)
- for len(keyValues) < 2 {
- // String length is maxInlineMapKeySize - 2 to account for string encoding overhead.
- k := NewStringValue(randStr(r, maxStringSize))
- v := NewStringValue(randStr(r, maxStringSize))
- keyValues[k] = v
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize())
- _, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- }
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(expectedSlabID).ByteSize()
+ expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedParentElementSize*mapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- require.True(t, m.root.IsData())
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- // Size of root data slab with two elements (key+value pairs) of
- // max inlined size is target slab size minus
- // slab id size (next slab id is omitted in root slab)
- require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size))
+ // Remove elements from child map, which triggers the standalone map slab to become an inlined slab again.
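+ // Keys are snapshotted into a slice first, so the removals below don't
+ // depend on ranging over a map that is being mutated.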
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) -} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) -func TestMapString(t *testing.T) { + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - SetThreshold(256) - defer SetThreshold(1024) + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + delete(expectedChildMapValues, k) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - want := `[0:0 1:1 2:2]` - require.Equal(t, want, m.String()) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("parent is root data slab, with two child maps", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 9 + valueStringSize = 4 + ) + + // encoded key size is the same for all string keys of the same length. 
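+ // (One representative string per length is enough to compute expected sizes.)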
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` - require.Equal(t, want, m.String()) - }) -} + children := getInlinedChildMapsFromParentMap(t, address, parentMap) -func TestMapSlabDump(t *testing.T) { + expectedParentSize := parentMap.root.ByteSize() - SetThreshold(256) - defer SetThreshold(1024) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - t.Run("small", func(t *testing.T) { - const mapSize = 3 + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + expectedChildMapValues[k] = v - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + 
expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- t.Run("large", func(t *testing.T) {
- const mapSize = 30
+ // Test parent slab size
+ expectedParentSize += expectedChildElementSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- digesterBuilder := &mockDigesterBuilder{}
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ // Add one more element to child map, which triggers the inlined child map slab to become a standalone slab
+ i := 0
+ for childKey, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
+
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
 require.Nil(t, existingStorable)
- }
- want := []string{
- "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]",
- }
- dumps, err := DumpMapSlabs(m)
- require.NoError(t, err)
- require.Equal(t, want, dumps)
- })
+ expectedChildMapValues[k] = v
- t.Run("large", func(t *testing.T) {
- const mapSize = 30
+ require.False(t, childMap.Inlined())
+ require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slabs because child map is no longer inlined.
- digesterBuilder := &mockDigesterBuilder{}
- typeInfo := testTypeInfo{42}
- storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ i++
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Slab ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // Subtract inlined child map size from expected parent size + expectedParentSize -= uint32(inlinedMapDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()-1) + // Add slab id storable size to expected parent size + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", - "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("external collision", func(t *testing.T) { - const mapSize = 30 + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. 
+ i = 0 + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", - "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", - "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - - t.Run("key overflow", func(t *testing.T) { - - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + delete(expectedChildMapValues, aKey) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.Equal(t, 1+mapSize-1-i, getStoredDeltas(storage)) - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + i++ - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", - "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - t.Run("value overflow", func(t *testing.T) { + // Subtract slab id storable size from expected parent size + expectedParentSize -= SlabIDStorable(SlabID{}).ByteSize() + // Add expected inlined child map to expected parent size + expectedParentSize += expectedInlinedMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := 
newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Remove remaining elements from each inlined child map. + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) -func TestMaxCollisionLimitPerDigest(t *testing.T) { - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() + delete(expectedChildMapValues, k) - t.Run("collision limit 0", func(t *testing.T) { - const mapSize = 1024 + require.Equal(t, 1, getStoredDeltas(storage)) - SetThreshold(256) - defer SetThreshold(1024) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - // Set noncryptographic hash collision limit as 0, - // meaning no collision is allowed at first level. - MaxCollisionLimitPerDigest = uint32(0) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - digesterBuilder := &mockDigesterBuilder{} - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - keyValues[k] = v + expectedParentSize -= expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
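+ // Worked example of the size bookkeeping above, with assumed numbers rather
+ // than the real encoding constants: if the inlined prefix terms
+ // (inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) came to 19 bytes
+ // and each element cost 25 bytes (singleElementPrefixSize + digestSize +
+ // encodedKeySize + encodedValueSize), a 3-element inlined child map would
+ // occupy 19 + 3*25 = 94 bytes of its parent slab, and removing one element
+ // would shrink both the child and the parent by exactly 25 bytes.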
+ }) + + t.Run("parent is root metadata slab, with four child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 9 + valueStringSize = 4 + ) + + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + + r := newRand(t) + typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - // Insert elements within collision limits - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Insert elements exceeding collision limits - collisionKeyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(mapSize + i) - v := Uint64Value(mapSize + i) - collisionKeyValues[k] = v + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - } + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. 
+ for i := 0; i < 3; i++ {
+ for childKey, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- for k, v := range collisionKeyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.Equal(t, 1, errorCategorizationCount(err))
- var fatalError *FatalError
- var collisionLimitError *CollisionLimitError
- require.ErrorAs(t, err, &fatalError)
- require.ErrorAs(t, err, &collisionLimitError)
- require.ErrorAs(t, fatalError, &collisionLimitError)
- require.Nil(t, existingStorable)
- }
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- // Verify that no new elements exceeding collision limit inserted
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- // Update elements within collision limits
- for k := range keyValues {
- v := Uint64Value(0)
- keyValues[k] = v
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.NotNil(t, existingStorable)
- }
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ require.Equal(t, uint64(i+1), childMap.Count())
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
- })
+ expectedChildMapValues[k] = v
- t.Run("collision limit > 0", func(t *testing.T) {
- const mapSize = 1024
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- SetThreshold(256)
- defer SetThreshold(1024)
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
- // Set noncryptographic hash collision limit as 7,
- // meaning at most 8 elements in collision group per digest at first level.
- MaxCollisionLimitPerDigest = uint32(7)
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
- digesterBuilder := &mockDigesterBuilder{}
- keyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- keyValues[k] = v
+ // Parent map has 1 metadata slab and 2 data slabs.
+ // All child maps are inlined.
+ require.Equal(t, 3, getStoredDeltas(storage))
+ require.False(t, parentMap.root.IsData())
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ // Add one more element to each child map, which triggers the inlined child map slab to become a standalone slab
+ for childKey, child := range children {
+ childMap := child.m
+ valueID := child.valueID
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- // Insert elements within collision limits
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
 require.Nil(t, existingStorable)
- }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ expectedChildMapValues[k] = v
- // Insert elements exceeding collision limits
- collisionKeyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(mapSize + i)
- v := Uint64Value(mapSize + i)
- collisionKeyValues[k] = v
+ require.False(t, childMap.Inlined())
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ expectedSlabID := valueIDToSlabID(valueID)
+ require.Equal(t, expectedSlabID, childMap.SlabID()) // Slab ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged
- for k, v := range collisionKeyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.Equal(t, 1, errorCategorizationCount(err))
- var fatalError *FatalError
- var collisionLimitError *CollisionLimitError
- require.ErrorAs(t, err, &fatalError)
- require.ErrorAs(t, err, &collisionLimitError)
- require.ErrorAs(t, fatalError, &collisionLimitError)
- require.Nil(t, existingStorable)
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 }
- // Verify that no new elements exceeding collision limit inserted
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ // Parent map has one root data slab.
+ // Each child map has one root data slab.
+ require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slabs because child maps are no longer inlined.
+ require.True(t, parentMap.root.IsData())
- // Update elements within collision limits
- for k := range keyValues {
- v := Uint64Value(0)
- keyValues[k] = v
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.NotNil(t, existingStorable)
- }
+ // Remove one element from each child map, which triggers the standalone map slab to become an inlined slab again.
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) -} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) -func TestMapLoadedValueIterator(t *testing.T) { + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - SetThreshold(256) - defer SetThreshold(1024) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) + require.NoError(t, err) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + delete(expectedChildMapValues, aKey) - t.Run("empty", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - digesterBuilder := &mockDigesterBuilder{} + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Parent map has one metadata slab + 2 data slabs. + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child map is inlined again. + require.False(t, parentMap.root.IsData()) - verifyMapLoadedElements(t, m, nil) - }) + // Remove remaining elements from each inlined child map. 
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - const mapSize = 3 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - verifyMapLoadedElements(t, m, values) - }) + delete(expectedChildMapValues, k) - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + for _, child := range children { + require.Equal(t, uint64(0), child.m.Count()) + } + + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(mapSize) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) }) +} - t.Run("root data slab with composite values in collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { - // Create parent map with 3 collision groups, 2 elements in each group. 
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 ) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyMapLoadedElements(t, m, values) - }) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - t.Run("root data slab with composite values in external collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - // Create parent map with 3 external collision group, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - verifyMapLoadedElements(t, m, values) - }) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedParentSize := parentMap.root.ByteSize() - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + childMap := child.m + cValueID := child.valueID - verifyMapLoadedElements(t, m, values) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - // Unload composite element from front to back. 
- for i := 0; i < len(values); i++ { - v := values[i][1] + gchildMap := gchild.m + gValueID := gchild.valueID - nestedArray, ok := v.(*Array) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + expectedGChildMapValues[k] = v + + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Add one more element to grand child map which triggers inlined child map slab (NOT grand child map slab) becomes standalone slab + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - // Unload external key from front to back. 
- for i := 0; i < len(values); i++ {
- k := values[i][0]
+ var gchildKey Value
+ var gchild *mapInfo
+ for gk, gv := range child.children {
+ gchildKey = gk
+ gchild = gv
+ break
+ }
- s, ok := k.(StringValue)
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
+
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
 require.True(t, ok)
- // Find storage id for StringValue s.
- var keyID SlabID
- for id, slab := range storage.deltas {
- if sslab, ok := slab.(*StorableSlab); ok {
- if other, ok := sslab.storable.(StringValue); ok {
- if s.str == other.str {
- keyID = id
- break
- }
- }
- }
- }
+ expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue)
+ require.True(t, ok)
- require.NoError(t, keyID.Valid())
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- err := storage.Remove(keyID)
+ existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
 require.NoError(t, err)
+ require.Nil(t, existingStorable)
- expectedValues := values[i+1:]
- verifyMapLoadedElements(t, m, expectedValues)
- }
- })
-
- t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ expectedGChildMapValues[k] = v
- // Create parent map with 3 collision groups, 2 elements in each group.
- const mapSize = 6
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} },
- )
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
- // parent map: 1 root data slab
- // composite elements: 1 root data slab for each
- require.Equal(t, 1+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ // Child map is NOT inlined
+ require.False(t, childMap.Inlined())
+ require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is valid for standalone slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
- verifyMapLoadedElements(t, m, values)
+ // Parent map is standalone
+ require.False(t, parentMap.Inlined())
+ require.Equal(t, 2, getStoredDeltas(storage))
- // Unload composite element from front to back.
- for i := 0; i < len(values); i++ { - v := values[i][1] + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(SlabID{}).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There is 2 stored slab because child map is not inlined. - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. 
+ for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - verifyMapLoadedElements(t, m, values) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + gchildMap := gchild.m + gValueID := gchild.valueID - nestedArray, ok := v.(*Array) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + gchildKeys := make([]Value, 0, len(expectedGChildMapValues)) + for k := range expectedGChildMapValues { + gchildKeys = append(gchildKeys, k) + } + + for _, k := range gchildKeys { + existingMapKey, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKey) + require.NotNil(t, existingMapValueStorable) + + delete(expectedGChildMapValues, k) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.Equal(t, uint64(0), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } - }) - t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(1), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - // Create parent map with 3 external collision groups, 4 elements in the group. 
- const mapSize = 12
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ })
+
+ t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers grand child map slab to become standalone slab", func(t *testing.T) {
+ const (
+ mapSize = 1
+ keyStringSize = 9
+ valueStringSize = 4
+ largeValueStringSize = 40
+ )
- // parent map: 1 root data slab, 3 external collision group
- // composite elements: 1 root data slab for each
- require.Equal(t, 1+3+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize()
+ encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize()
+ encodedLargeValueSize := NewStringValue(strings.Repeat("a", largeValueStringSize)).ByteSize()
+ slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize()
- verifyMapLoadedElements(t, m, values)
+ r := newRand(t)
- // Unload external collision group slab from front to back
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- var externalCollisionSlabIDs []SlabID
- for id, slab := range storage.deltas {
- if dataSlab, ok := slab.(*MapDataSlab); ok {
- if dataSlab.collisionGroup {
- externalCollisionSlabIDs = append(externalCollisionSlabIDs, id)
- }
- }
+ getKeyFunc := func() Value {
+ return NewStringValue(randStr(r, keyStringSize))
 }
- require.Equal(t, 3, len(externalCollisionSlabIDs))
- sort.Slice(externalCollisionSlabIDs, func(i, j int) bool {
- a := externalCollisionSlabIDs[i]
- b := externalCollisionSlabIDs[j]
- if a.address == b.address {
- return a.IndexAsUint64() < b.IndexAsUint64()
- }
- return a.AddressAsUint64() < b.AddressAsUint64()
- })
+ // Create a parent map, with an inlined child map, with an inlined grand child map
+ parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc)
- for i, id := range externalCollisionSlabIDs {
- err := storage.Remove(id)
- require.NoError(t, err)
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.
- expectedValues := values[i*4+4:]
- verifyMapLoadedElements(t, m, expectedValues)
- }
- })
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ children := getInlinedChildMapsFromParentMap(t, address, parentMap)
+ require.Equal(t, mapSize, len(children))
- const mapSize = 3
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ expectedParentSize := parentMap.root.ByteSize()
- // parent map: 1 root data slab
- // composite elements: 1 root data slab for each
- require.Equal(t, 1+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ // Insert 1 element into grand child map so that inlined grand child map reaches max inlined size as a map element.
+ for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - // Unload composite element from back to front. - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - nestedArray, ok := v.(*Array) - require.True(t, ok) + gchildMap := gchild.m + gValueID := gchild.valueID - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedGChildMapValues[k] = v - verifyMapLoadedElements(t, m, values) + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - // Unload composite element from front to back. - for i := len(values) - 1; i >= 0; i-- { - k := values[i][0] + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - s, ok := k.(StringValue) - require.True(t, ok) + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Find storage id for StringValue s. 
- var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - require.NoError(t, keyID.Valid()) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - err := storage.Remove(keyID) - require.NoError(t, err) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + gchildLargeElementKeys := make(map[Value]Value) // key: child map key, value: gchild map key + // Add one large element to grand child map which triggers inlined grand child map slab (NOT child map slab) becomes standalone slab + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - nestedArray, ok := v.(*Array) + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, largeValueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + expectedGChildMapValues[k] = v - t.Run("root data slab with 
composite values in external collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + gchildLargeElementKeys[childKey] = k - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + // Grand child map is NOT inlined + require.False(t, gchildMap.Inlined()) + require.Equal(t, valueIDToSlabID(gValueID), gchildMap.SlabID()) // Slab ID is valid for not inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + // Test standalone grand child slab size + expectedGrandChildElement1Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildElement2Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedLargeValueSize + expectedGrandChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElement1Size + expectedGrandChildElement2Size + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + expectedChildElementSize + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There is 2 stored slab because child map is not inlined. - // Create parent map with 3 external collision groups, 4 elements in the group. 
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - verifyMapLoadedElements(t, m, values) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - // Unload external slabs from back to front - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + // Get all grand child map keys with large element key first + keys := make([]Value, 0, len(expectedGChildMapValues)) + keys = append(keys, gchildLargeElementKeys[childKey]) + for k := range expectedGChildMapValues { + if k != gchildLargeElementKeys[childKey] { + keys = append(keys, k) } } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + // Remove all elements (large element first) to trigger grand child map being inlined again. 
+ for _, k := range keys {
- for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- {
- err := storage.Remove(externalCollisionSlabIDs[i])
- require.NoError(t, err)
+ existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k)
+ require.NoError(t, err)
+ require.Equal(t, k, existingMapKeyStorable)
+ require.NotNil(t, existingMapValueStorable)
+
+ delete(expectedGChildMapValues, k)
+
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ // Test parent slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize
+ expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedParentElementSize*uint32(parentMap.Count())
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
- expectedValues := values[:i*4]
- verifyMapLoadedElements(t, m, expectedValues)
+ require.Equal(t, uint64(0), gchildMap.Count())
+ require.Equal(t, uint64(1), childMap.Count())
 }
+
+ require.Equal(t, uint64(1), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined.
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 })
- t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) {
+ t.Run("parent is root data slab, two child maps, one grand child map each, changes to child map triggers child map slab to become standalone slab", func(t *testing.T) {
+ const (
+ mapSize = 2
+ keyStringSize = 4
+ valueStringSize = 4
+ )
+
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - verifyMapLoadedElements(t, m, values) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Unload value in the middle - unloadValueIndex := 1 + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - v := values[unloadValueIndex][1] + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedParentSize := parentMap.root.ByteSize() - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Insert 1 elements to grand child map (both child map and grand child map are still inlined). + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - verifyMapLoadedElements(t, m, values) - }) + gchildMap := gchild.m + gValueID := gchild.valueID - t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - verifyMapLoadedElements(t, m, values) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Unload key in the middle. 
- unloadValueIndex := 1 + expectedGChildMapValues[k] = v - k := values[unloadValueIndex][0] + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - s, ok := k.(StringValue) - require.True(t, ok) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - require.NoError(t, keyID.Valid()) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - err := storage.Remove(keyID) - require.NoError(t, err) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) - }) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Create parent map with 3 collision groups, 2 elements in each group. 
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedParentSize = parentMap.root.ByteSize() - verifyMapLoadedElements(t, m, values) + // Add 1 element to each child map so child map reaches its max size + for childKey, child := range children { - // Unload composite element in the middle - for _, unloadValueIndex := range []int{1, 3, 5} { - v := values[unloadValueIndex][1] + childMap := child.m + cValueID := child.valueID + + var gchild *mapInfo + for _, gv := range child.children { + gchild = gv + break + } + + gchildMap := gchild.m + gValueID := gchild.valueID - nestedArray, ok := v.(*Array) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) - } + require.Nil(t, existingStorable) - expectedValues := [][2]Value{ - values[0], - values[2], - values[4], + expectedChildMapValues[k] = v + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize += digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - verifyMapLoadedElements(t, m, expectedValues) - }) - t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is 1 stored slab because child map is inlined. 
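+
+ // The size assertions above all share one shape: an inlined map slab costs its
+ // fixed prefix (inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) plus one
+ // (singleElementPrefixSize + digestSize + encoded key + encoded value) unit per
+ // element. A minimal sketch of that arithmetic (the helper name is illustrative
+ // only, not part of the package API):
+ //
+ //	expectedInlinedMapSize := func(elementSizes ...uint32) uint32 {
+ //		size := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize)
+ //		for _, elemSize := range elementSizes {
+ //			size += singleElementPrefixSize + digestSize + elemSize
+ //		}
+ //		return size
+ //	}
+ //
+ // With it, the grand child assertion above is expectedInlinedMapSize with one
+ // (encodedKeySize + encodedValueSize) argument per element.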
- // Create parent map with 3 external collision groups, 4 elements in the group.
- const mapSize = 12
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
- )
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- // parent map: 1 root data slab, 3 external collision group
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+3+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ // Add 1 more element to each child map so child map exceeds its max size and becomes standalone
+ i := 0
+ for childKey, child := range children {
- verifyMapLoadedElements(t, m, values)
+ childMap := child.m
+ cValueID := child.valueID
- // Unload composite value in the middle.
- for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} {
- v := values[unloadValueIndex][1]
+ var gchild *mapInfo
+ for _, gv := range child.children {
+ gchild = gv
+ break
+ }
+
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- nestedArray, ok := v.(*Array)
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
require.True(t, ok)
- err := storage.Remove(nestedArray.SlabID())
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
+
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
require.NoError(t, err)
- }
+ require.Nil(t, existingStorable)
- expectedValues := [][2]Value{
- values[0],
- values[2],
- values[4],
- values[6],
- values[8],
- values[10],
+ expectedChildMapValues[k] = v
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
+
+ // Child map is NOT inlined
+ require.False(t, childMap.Inlined())
+ require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
+
+ // Parent map is standalone
+ require.False(t, parentMap.Inlined())
+ require.Equal(t, (1 + i + 1), getStoredDeltas(storage))
+
+ i++
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test standalone child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*2 + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize)
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
}
- verifyMapLoadedElements(t, m, expectedValues)
- })
- t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are 1+mapSize stored slabs because all child maps are standalone.
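+
+ // When a child map is uninlined, it is written to its own slab, and the new
+ // slab ID is reconstructed from the unchanged value ID: the first 8 bytes act
+ // as the slab address and the last 8 bytes as the slab index. A sketch of that
+ // conversion, assuming ValueID lays out address followed by index (this mirrors
+ // the valueIDToSlabID test helper used above):
+ //
+ //	func valueIDToSlabID(vid ValueID) SlabID {
+ //		var address Address
+ //		var index SlabIndex
+ //		copy(address[:], vid[:8])
+ //		copy(index[:], vid[8:])
+ //		return NewSlabID(address, index)
+ //	}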
- // Create parent map with 3 external collision groups, 4 elements in the group.
- const mapSize = 12
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} },
- )
+ // Test parent slab size
+ expectedParentSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ (singleElementPrefixSize+digestSize+encodedKeySize+slabIDStorableSize)*mapSize
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
- // parent map: 1 root data slab, 3 external collision group
- // nested composite elements: 1 root data slab for each
- require.Equal(t, 1+3+mapSize, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- verifyMapLoadedElements(t, m, values)
+ expectedParentMapSize := parentMap.root.ByteSize()
- // Unload external slabs in the middle.
- var externalCollisionSlabIDs []SlabID
- for id, slab := range storage.deltas {
- if dataSlab, ok := slab.(*MapDataSlab); ok {
- if dataSlab.collisionGroup {
- externalCollisionSlabIDs = append(externalCollisionSlabIDs, id)
+ // Remove one element from child map, which triggers the standalone child map slab to become an inlined slab again.
+ for childKey, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
+
+ var gchildKey Value
+ var gchild *mapInfo
+ for gk, gv := range child.children {
+ gchildKey = gk
+ gchild = gv
+ break
+ }
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
+
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
+
+ var aKey Value
+ for k := range expectedChildMapValues {
+ if k != gchildKey {
+ aKey = k
+ break
}
}
- }
- require.Equal(t, 3, len(externalCollisionSlabIDs))
- sort.Slice(externalCollisionSlabIDs, func(i, j int) bool {
- a := externalCollisionSlabIDs[i]
- b := externalCollisionSlabIDs[j]
- if a.address == b.address {
- return a.IndexAsUint64() < b.IndexAsUint64()
+ // Remove one element
+ existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey)
+ require.NoError(t, err)
+ require.Equal(t, aKey, existingMapKeyStorable)
+ require.NotNil(t, existingMapValueStorable)
+
+ delete(expectedChildMapValues, aKey)
+
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1)
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ // Test parent map slab size
+ expectedParentMapSize = expectedParentMapSize - slabIDStorableSize + expectedChildMapSize
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined.
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+
+ // Remove remaining elements from child map, except for grand child map
+ for childKey, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
+
+ var gchildKey Value
+ var gchild *mapInfo
+ for gk, gv := range child.children {
+ gchildKey = gk
+ gchild = gv
+ break
+ }
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
+
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
+
+ keys := make([]Value, 0, len(expectedChildMapValues)-1)
+ for k := range expectedChildMapValues {
+ if k != gchildKey {
+ keys = append(keys, k)
+ }
+ }
+
+ // Remove all elements, except grand child map
+ for _, k := range keys {
+ existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k)
+ require.NoError(t, err)
+ require.Equal(t, k, existingMapKeyStorable)
+ require.NotNil(t, existingMapValueStorable)
+
+ delete(expectedChildMapValues, k)
+
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1)
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ // Test parent map slab size
+ expectedParentMapSize -= digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
}
- return a.AddressAsUint64() < b.AddressAsUint64()
- })
- id := externalCollisionSlabIDs[1]
- err := storage.Remove(id)
- require.NoError(t, err)
+ require.Equal(t, uint64(1), gchildMap.Count())
+ require.Equal(t, uint64(1), childMap.Count())
+ }
- copy(values[4:], values[8:])
- values = values[:8]
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, 
getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - verifyMapLoadedElements(t, m, values) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + t.Run("parent is root metadata slab, with four child maps, each child map has grand child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 4 + valueStringSize = 8 + ) + + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - verifyMapLoadedElements(t, m, values) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - // At this point, iterator returned first element (v). + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Remove all other nested composite elements (except first element) from storage. - for _, element := range values[1:] { - value := element[1] - nestedArray, ok := value.(*Array) - require.True(t, ok) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Insert 1 element to grand child map + // Both child map and grand child map are still inlined, but parent map's root slab is metadata slab. + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID + + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break } + gchildMap := gchild.m + gValueID := gchild.valueID - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0][0], k) - valueEqual(t, typeInfoComparator, values[0][1], v) - i++ - return true, nil - }) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
- })
+ expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue)
+ require.True(t, ok)
- t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) {
- const mapSize = 3
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- // Create a map with nested composite value at specified index
- for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ {
- storage := newTestPersistentStorage(t)
+ existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- m, values := createMapWithSimpleAndCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- nestedCompositeIndex,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ expectedGChildMapValues[k] = v
+
+ // Grand child map is still inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
+
+ // Child map is still inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+
+ require.False(t, parentMap.Inlined())
+ require.False(t, parentMap.root.IsData())
+ // There are 3 stored slabs: parent metadata slab with 2 data slabs (all child and grand child maps are inlined)
+ require.Equal(t, 3, getStoredDeltas(storage))
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+
+ // Insert 1 element to grand child map
+ // - grand child maps are inlined
+ // - child maps are standalone
+ // - parent map's root slab is data slab.
+ for childKey, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
+
+ var gchildKey Value
+ var gchild *mapInfo
+ for gk, gv := range child.children {
+ gchildKey = gk
+ gchild = gv
+ break
+ }
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
- // parent map: 1 root data slab
- // composite element: 1 root data slab
- require.Equal(t, 2, len(storage.deltas))
- require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- verifyMapLoadedElements(t, m, values)
+ expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue)
+ require.True(t, ok)
- // Unload composite value
- v := values[nestedCompositeIndex][1].(*Array)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := NewStringValue(randStr(r, valueStringSize))
- err := storage.Remove(v.SlabID())
+ existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v)
require.NoError(t, err)
+ require.Nil(t, existingStorable)
- copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
- values = values[:len(values)-1]
+ expectedGChildMapValues[k] = v
+
+ // Grand child map is still inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged
+
+ // Child map is NOT inlined
+ require.False(t, childMap.Inlined())
+ require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab
+ require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test standalone child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+
+ require.False(t, parentMap.Inlined())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1+mapSize, getStoredDeltas(storage))
+
+ // Test parent slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize
+ expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedParentElementSize*uint32(parentMap.Count())
+ require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+
+ // Remove one element from grand child map to trigger child map to become inlined again.
+ // - grand child maps are inlined
+ // - child maps are inlined
+ // - parent map root slab is metadata slab
+ for childKey, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
+
+ var gchildKey Value
+ var gchild *mapInfo
+ for gk, gv := range child.children {
+ gchildKey = gk
+ gchild = gv
+ break
+ }
+ gchildMap := gchild.m
+ gValueID := gchild.valueID
+
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
+
+ expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue)
+ require.True(t, ok)
+
+ var aKey Value
+ for k := range expectedGChildMapValues {
+ aKey = k
+ break
+ }
+
+ // Remove one element from grand child map
+ existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, aKey)
+ require.NoError(t, err)
+ require.Equal(t, aKey, existingMapKeyStorable)
+ require.NotNil(t, existingMapValueStorable)
+
+ delete(expectedGChildMapValues, aKey)
- verifyMapLoadedElements(t, m, values)
+ // Child map is inlined
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID())
+ require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged
+
+ // Grand child map is inlined
+ require.True(t, gchildMap.Inlined())
+ require.Equal(t, SlabIDUndefined, gchildMap.SlabID())
+ require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged
+
+ // Test inlined grand child slab size
+ expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedGrandChildElementSize*uint32(gchildMap.Count())
+ require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize())
+
+ // Test inlined child slab size
+ expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize
+ expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1)
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
+
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
}
- t.Run("root metadata slab with simple values", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.False(t, parentMap.root.IsData())
+ require.Equal(t, 3, getStoredDeltas(storage))
- const mapSize = 20
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ // Remove all grand child elements to trigger
+ // - child maps are inlined
+ // - parent map root slab is data slab
+ for childKey, child := range children {
+ childMap := child.m
+ cValueID := child.valueID
- verifyMapLoadedElements(t, m, values)
- })
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- t.Run("root metadata slab with composite values", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ keys := make([]Value, 0, 
len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Remove grand children + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Grand child map is returned as SlabIDStorable, even if it was stored inlined in the parent. + id, ok := existingMapValueStorable.(SlabIDStorable) + require.True(t, ok) - verifyMapLoadedElements(t, m, values) - }) + v, err := id.StoredValue(storage) + require.NoError(t, err) - t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + gchildMap, ok := v.(*OrderedMap) + require.True(t, ok) - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + expectedGChildMapValues, ok := expectedChildMapValues[k].(mapValue) + require.True(t, ok) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values : 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + valueEqual(t, expectedGChildMapValues, gchildMap) - verifyMapLoadedElements(t, m, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + delete(expectedChildMapValues, k) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + require.Equal(t, uint64(0), childMap.Count()) } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + expectedParentMapSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (digestSize+singleElementPrefixSize+encodedKeySize+expectedChildMapSize)*uint32(mapSize) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) }) +} - t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestChildMapWhenParentMapIsModified(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + 
valueStringSize = 4
+ expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22
+ )
- const mapSize = 20
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ // encoded key size is the same for all string keys of the same length.
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize()
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- // composite values: 1 root data slab for each
- require.Equal(t, 4+mapSize, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ r := newRand(t)
- verifyMapLoadedElements(t, m, values)
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- // Unload composite element from back to front
- for i := len(values) - 1; i >= 0; i-- {
- v := values[i][1]
+ parentMapDigesterBuilder := &mockDigesterBuilder{}
+ parentDigest := 1
- nestedArray, ok := v.(*Array)
- require.True(t, ok)
+ // Create parent map with mock digests
+ parentMap, err := NewMap(storage, address, parentMapDigesterBuilder, typeInfo)
+ require.NoError(t, err)
- err := storage.Remove(nestedArray.SlabID())
- require.NoError(t, err)
+ expectedKeyValues := make(map[Value]Value)
- expectedValues := values[:i]
- verifyMapLoadedElements(t, m, expectedValues)
+ // Insert 2 child maps with digest values of 1 and 3.
+ for i := 0; i < mapSize; i++ {
+ // Create child map
+ childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+ require.NoError(t, err)
+
+ k := NewStringValue(randStr(r, keyStringSize))
+
+ digests := []Digest{
+ Digest(parentDigest),
}
- })
+ parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests})
+ parentDigest += 2
- t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Insert child map to parent map
+ existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- const mapSize = 20
- m, values := createMapWithCompositeValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+ expectedKeyValues[k] = mapValue{}
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- // composite values: 1 root data slab for each
- require.Equal(t, 4+mapSize, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ require.True(t, childMap.Inlined())
+ testInlinedMapIDs(t, address, childMap)
- verifyMapLoadedElements(t, m, values)
+ // Test child map slab size
+ require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize())
- // Unload composite element in the middle
- for _, index := range []int{4, 14} {
+ // Test parent map slab size
+ expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize
+ expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 elements
+ expectedParentElementSize*uint32(i+1)
+ require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
+ }
- v := values[index][1]
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+ require.True(t, parentMap.root.IsData())
+ require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.
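+
+ // The mock digester pins each key to a fixed digest, which is how this test
+ // controls the physical position of every element in the parent map. A sketch
+ // of the pattern used above (mockDigesterBuilder and mockDigester are helpers
+ // in this test file, not part of the public API):
+ //
+ //	db := &mockDigesterBuilder{}
+ //	k := NewStringValue("key0")
+ //	db.On("Digest", k).Return(mockDigester{[]Digest{Digest(1)}})
+ //	m, err := NewMap(storage, address, db, typeInfo) // m places k at digest 1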
- nestedArray, ok := v.(*Array) - require.True(t, ok) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + var keysForNonChildMaps []Value + + t.Run("insert elements in parent map", func(t *testing.T) { - verifyMapLoadedElements(t, m, values) + newDigests := []Digest{ + 0, // insert value at digest 0, so all child map physical positions are moved by +1 + 2, // insert value at digest 2, so second child map physical positions are moved by +1 + 4, // insert value at digest 4, so no child map physical positions are moved } - }) - t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { - const mapSize = 20 + for _, digest := range newDigests { - // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - m, values := createMapWithSimpleAndCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - nestedCompositeIndex, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + digests := []Digest{digest} + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 5, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - verifyMapLoadedElements(t, m, values) + expectedKeyValues[k] = v + keysForNonChildMaps = append(keysForNonChildMaps, k) - v := values[nestedCompositeIndex][1].(*Array) + i := 0 + for childKey, child := range children { + childMap := child.m + childValueID := child.valueID - err := storage.Remove(v.SlabID()) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) - verifyMapLoadedElements(t, m, values) - } - }) + i++ - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - const mapSize = 20 + expectedChildMapValues[k] = v - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() 
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
- verifyMapLoadedElements(t, m, values)
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
- rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
- require.True(t, ok)
+ t.Run("remove elements from parent map", func(t *testing.T) {
+ // Remove element at digest 0, so all child map physical positions are moved by -1.
+ // Remove element at digest 2, so only second child map physical position is moved by -1.
+ // Remove element at digest 4, so no child map physical position is moved.
- // Unload data slabs from front to back
- for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+ for _, k := range keysForNonChildMaps {
- childHeader := rootMetaDataSlab.childrenHeaders[i]
+ existingMapKeyStorable, existingMapValueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
+ require.NoError(t, err)
+ require.NotNil(t, existingMapKeyStorable)
+ require.NotNil(t, existingMapValueStorable)
+
+ delete(expectedKeyValues, k)
+
+ i := 0
+ for childKey, child := range children {
+ childMap := child.m
+ childValueID := child.valueID
+
+ expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue)
+ require.True(t, ok)
- // Get data slab element count before unload it from storage.
- // Element count isn't in the header.
- mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab)
- require.True(t, ok)
+ k := NewStringValue(randStr(r, keyStringSize))
+ v := Uint64Value(i)
- count := mapDataSlab.elements.Count()
+ i++
- err := storage.Remove(childHeader.slabID)
- require.NoError(t, err)
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
- values = values[count:]
+ expectedChildMapValues[k] = v
- verifyMapLoadedElements(t, m, values)
- }
- })
+ require.True(t, childMap.Inlined())
+ require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+ require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged
- t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) {
- storage := newTestPersistentStorage(t)
+ // Test inlined child slab size
+ expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize()
+ expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+ expectedChildElementSize*uint32(childMap.Count())
+ require.Equal(t, expectedChildMapSize, childMap.root.ByteSize())
- const mapSize = 20
+ testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+ }
+ }
+ })
+ })
+}
- m, values := createMapWithSimpleValues(
- t,
- storage,
- address,
- typeInfo,
- mapSize,
- func(i int) []Digest { return []Digest{Digest(i)} },
- )
+func createMapWithEmptyChildMap(
+ t *testing.T,
+ storage SlabStorage,
+ address Address,
+ typeInfo TypeInfo,
+ mapSize int,
+ getKey func() Value,
+) (*OrderedMap, map[Value]Value) {
- // parent map (2 levels): 1 root metadata slab, 3 data slabs
- require.Equal(t, 4, len(storage.deltas))
- require.Equal(t, 1, getMapMetaDataSlabCount(storage))
+ const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22
- verifyMapLoadedElements(t, m, values)
+ // Create parent map
+ parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), 
typeInfo) + require.NoError(t, err) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + expectedKeyValues := make(map[Value]Value) - // Unload data slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + k := getKey() - // Get data slab element count before unload it from storage - // Element count isn't in the header. - mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) - require.True(t, ok) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) + require.NoError(t, err) - count := mapDataSlab.elements.Count() + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + expectedKeyValues[k] = mapValue{} - values = values[:len(values)-int(count)] + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) - verifyMapLoadedElements(t, m, values) - } - }) + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } - const mapSize = 20 + return parentMap, expectedKeyValues +} - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) +func createMapWithEmpty2LevelChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - verifyMapLoadedElements(t, m, values) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + expectedKeyValues := make(map[Value]Value) - require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - index := 1 - childHeader := rootMetaDataSlab.childrenHeaders[index] + // Create grand child map + gchildMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Get element count from previous data slab - mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) - require.True(t, ok) + k := getKey() - countAtIndex0 := mapDataSlab.elements.Count() + ks, err := k.Storable(storage, address, 
maxInlineMapElementSize) + require.NoError(t, err) - // Get element count from slab to be unloaded - mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) - require.True(t, ok) + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - countAtIndex1 := mapDataSlab.elements.Count() + require.True(t, gchildMap.Inlined()) + testInlinedMapIDs(t, address, gchildMap) - err := storage.Remove(childHeader.slabID) + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) + require.Nil(t, existingStorable) - copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) - values = values[:m.Count()-uint64(countAtIndex1)] + expectedKeyValues[k] = mapValue{k: mapValue{}} - verifyMapLoadedElements(t, m, values) - }) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test grand child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, gchildMap.root.ByteSize()) - const mapSize = 200 + // Test child map slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedChildMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + testNotInlinedMapIDs(t, address, parentMap) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + return parentMap, expectedKeyValues +} - // Unload non-root metadata slabs from front to back. - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { +type mapInfo struct { + m *OrderedMap + valueID ValueID + children map[Value]*mapInfo +} - childHeader := rootMetaDataSlab.childrenHeaders[i] +func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) map[Value]*mapInfo { - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + children := make(map[Value]*mapInfo) - // Use firstKey to deduce number of elements in slab. 
- var expectedValues [][2]Value - if i < len(rootMetaDataSlab.childrenHeaders)-1 { - nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] - expectedValues = values[int(nextChildHeader.firstKey):] - } + err := parentMap.IterateReadOnlyKeys(func(k Value) (bool, error) { + if k == nil { + return false, nil + } + + e, err := parentMap.Get(compare, hashInputProvider, k) + require.NoError(t, err) - verifyMapLoadedElements(t, m, expectedValues) + childMap, ok := e.(*OrderedMap) + if !ok { + return true, nil } - }) - t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + if childMap.Inlined() { + testInlinedMapIDs(t, address, childMap) + } else { + testNotInlinedMapIDs(t, address, childMap) + } - const mapSize = 200 + children[k] = &mapInfo{ + m: childMap, + valueID: childMap.ValueID(), + children: getInlinedChildMapsFromParentMap(t, address, childMap), + } - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + return true, nil + }) + require.NoError(t, err) - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + return children +} - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) +func TestMapSetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Unload non-root metadata slabs from back to front. - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + t.Run("child array is not inlined", func(t *testing.T) { + const mapSize = 2 - childHeader := rootMetaDataSlab.childrenHeaders[i] + storage := newTestPersistentStorage(t) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Use firstKey to deduce number of elements in slabs. 
-			values = values[:childHeader.firstKey]
+		expectedKeyValues := make(map[Value]Value)

-			verifyMapLoadedElements(t, m, values)
-		}
-	})
+		for i := 0; i < mapSize; i++ {
+			// Create child array
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)

-	t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) {
+			k := Uint64Value(i)

-		storage := newTestPersistentStorage(t)
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-		const mapSize = 500
-		m, values := createMapWithCompositeValues(
-			t,
-			storage,
-			address,
-			typeInfo,
-			mapSize,
-			func(i int) []Digest { return []Digest{Digest(i)} },
-		)
+			var expectedChildValues arrayValue
+			for {
+				v := NewStringValue(strings.Repeat("a", 10))

-		// parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
-		// nested composite elements: 1 root data slab for each
-		require.True(t, len(storage.deltas) > 1+mapSize)
-		require.True(t, getMapMetaDataSlabCount(storage) > 1)
+				err = childArray.Append(v)
+				require.NoError(t, err)

-		verifyMapLoadedElements(t, m, values)
+				expectedChildValues = append(expectedChildValues, v)

-		r := newRand(t)
+				if !childArray.Inlined() {
+					break
+				}
+			}

-		// Unload composite element in random position
-		for len(values) > 0 {
+			expectedKeyValues[k] = expectedChildValues
+		}

-			i := r.Intn(len(values))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-			v := values[i][1]
+		// Overwrite existing child array value
+		for k := range expectedKeyValues {
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0))
+			require.NoError(t, err)
+			require.NotNil(t, existingStorable)

-			nestedArray, ok := v.(*Array)
+			id, ok := existingStorable.(SlabIDStorable)
 			require.True(t, ok)

-			err := storage.Remove(nestedArray.SlabID())
+			child, err := id.StoredValue(storage)
 			require.NoError(t, err)

-			copy(values[i:], values[i+1:])
-			values = values[:len(values)-1]
+			valueEqual(t, expectedKeyValues[k], child)

-			verifyMapLoadedElements(t, m, values)
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
+
+			expectedKeyValues[k] = Uint64Value(0)
 		}
+
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 	})

-	t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) {
+	t.Run("child array is inlined", func(t *testing.T) {
+		const mapSize = 2
 		storage := newTestPersistentStorage(t)

-		const mapSize = 500
-		m, values := createMapWithCompositeValues(
-			t,
-			storage,
-			address,
-			typeInfo,
-			mapSize,
-			func(i int) []Digest { return []Digest{Digest(i)} },
-		)
-
-		// parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
-		// composite values: 1 root data slab for each
-		require.True(t, len(storage.deltas) > 1+mapSize)
-		require.True(t, getMapMetaDataSlabCount(storage) > 1)
-
-		verifyMapLoadedElements(t, m, values)
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)

-		rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
-		require.True(t, ok)
+		expectedKeyValues := make(map[Value]Value)

-		type slabInfo struct {
-			id         SlabID
-			startIndex int
-			count      int
-		}
+		for i := 0; i < mapSize; i++ {
+			// Create child array
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)

-		var dataSlabInfos []*slabInfo
-		for _, mheader := range rootMetaDataSlab.childrenHeaders {
+			k := Uint64Value(i)

-			nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab)
-			require.True(t, ok)
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-			for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ {
-				h := nonRootMetaDataSlab.childrenHeaders[i]
+			// Insert one element to child array
+			v := NewStringValue(strings.Repeat("a", 10))

-				if len(dataSlabInfos) > 0 {
-					// Update previous slabInfo.count
-					dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex
-				}
+			err = childArray.Append(v)
+			require.NoError(t, err)
+			require.True(t, childArray.Inlined())

-				dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)})
-			}
+			expectedKeyValues[k] = arrayValue{v}
 		}

-		r := newRand(t)
-
-		for len(dataSlabInfos) > 0 {
-			index := r.Intn(len(dataSlabInfos))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-			slabToBeRemoved := dataSlabInfos[index]
+		// Overwrite existing child array value
+		for k := range expectedKeyValues {
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0))
+			require.NoError(t, err)
+			require.NotNil(t, existingStorable)

-			// Update startIndex for all subsequence data slabs
-			for i := index + 1; i < len(dataSlabInfos); i++ {
-				dataSlabInfos[i].startIndex -= slabToBeRemoved.count
-			}
+			id, ok := existingStorable.(SlabIDStorable)
+			require.True(t, ok)

-			err := storage.Remove(slabToBeRemoved.id)
+			child, err := id.StoredValue(storage)
 			require.NoError(t, err)

-			if index == len(dataSlabInfos)-1 {
-				values = values[:slabToBeRemoved.startIndex]
-			} else {
-				copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:])
-				values = values[:len(values)-slabToBeRemoved.count]
-			}
+			valueEqual(t, expectedKeyValues[k], child)

-			copy(dataSlabInfos[index:], dataSlabInfos[index+1:])
-			dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+			expectedKeyValues[k] = Uint64Value(0)

-			verifyMapLoadedElements(t, m, values)
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
 		}

-		require.Equal(t, 0, len(values))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 	})

-	t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
+	t.Run("child map is not inlined", func(t *testing.T) {
+		const mapSize = 2
 		storage := newTestPersistentStorage(t)

-		const mapSize = 500
-		m, values := createMapWithCompositeValues(
-			t,
-			storage,
-			address,
-			typeInfo,
-			mapSize,
-			func(i int) []Digest { return []Digest{Digest(i)} },
-		)
-
-		// parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
-		// composite values: 1 root data slab for each
-		require.True(t, len(storage.deltas) > 1+mapSize)
-		require.True(t, getMapMetaDataSlabCount(storage) > 1)
-
-		verifyMapLoadedElements(t, m, values)
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)

-		type slabInfo struct {
-			id         SlabID
-			startIndex int
-			count      int
-			children   []*slabInfo
-		}
+		expectedKeyValues := make(map[Value]Value)

-		rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
-		require.True(t, ok)
+		for i := 0; i < mapSize; i++ {
+			// Create child map
+			childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+			require.NoError(t, err)

-		metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders))
-		for i, mheader := range rootMetaDataSlab.childrenHeaders {
+			k := Uint64Value(i)

-			if i > 0 {
-				prevMetaDataSlabInfo := metadataSlabInfos[i-1]
-				prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1]
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-				// Update previous metadata slab count
-				prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex
+			expectedChildValues := make(mapValue)
+			expectedKeyValues[k] = expectedChildValues

-				// Update previous data slab count
-				prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex
-			}
+			// Insert into child map until child map is not inlined
+			j := 0
+			for {
+				k := Uint64Value(j)
+				v := NewStringValue(strings.Repeat("a", 10))
+				j++

-			metadataSlabInfo := &slabInfo{
-				id:         mheader.slabID,
-				startIndex: int(mheader.firstKey),
-			}
+				existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)

-			nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab)
-			require.True(t, ok)
+				expectedChildValues[k] = v

-			children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders))
-			for i, h := range nonRootMetadataSlab.childrenHeaders {
-				children[i] = &slabInfo{
-					id:         h.slabID,
-					startIndex: int(h.firstKey),
-				}
-				if i > 0 {
-					children[i-1].count = int(h.firstKey) - children[i-1].startIndex
+				if !childMap.Inlined() {
+					break
 				}
 			}
-
-			metadataSlabInfo.children = children
-			metadataSlabInfos[i] = metadataSlabInfo
 		}

-		const (
-			metadataSlabType int = iota
-			dataSlabType
-			maxSlabType
-		)
-
-		r := newRand(t)
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		for len(metadataSlabInfos) > 0 {
+		// Overwrite existing child map value
+		for k := range expectedKeyValues {
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0))
+			require.NoError(t, err)
+			require.NotNil(t, existingStorable)

-			var slabInfoToBeRemoved *slabInfo
-			var isLastSlab bool
+			id, ok := existingStorable.(SlabIDStorable)
+			require.True(t, ok)

-			switch r.Intn(maxSlabType) {
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)

-			case metadataSlabType:
+			valueEqual(t, expectedKeyValues[k], child)

-				metadataSlabIndex := r.Intn(len(metadataSlabInfos))
+			expectedKeyValues[k] = Uint64Value(0)

-				isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
+		}

-				slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex]
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+	})

-				count := slabInfoToBeRemoved.count
+	t.Run("child map is inlined", func(t *testing.T) {
+		const mapSize = 2

-				// Update startIndex for subsequence metadata slabs
-				for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ {
-					metadataSlabInfos[i].startIndex -= count
+		storage := newTestPersistentStorage(t)

-					for j := 0; j < len(metadataSlabInfos[i].children); j++ {
-						metadataSlabInfos[i].children[j].startIndex -= count
-					}
-				}
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)

-				copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:])
-				metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1]
+		expectedKeyValues := make(map[Value]Value)

-			case dataSlabType:
+		for i := 0; i < mapSize; i++ {
+			// Create child map
+			childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+			require.NoError(t, err)

-				metadataSlabIndex := r.Intn(len(metadataSlabInfos))
+			k := Uint64Value(i)

-				metadataSlabInfo := metadataSlabInfos[metadataSlabIndex]
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-				dataSlabIndex := r.Intn(len(metadataSlabInfo.children))
+			expectedChildValues := make(mapValue)
+			expectedKeyValues[k] = expectedChildValues

-				isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) &&
-					(dataSlabIndex == len(metadataSlabInfo.children)-1)
+			// Insert one element to child map
+			v := NewStringValue(strings.Repeat("a", 10))

-				slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex]
+			existingStorable, err = childMap.Set(compare, hashInputProvider, k, v)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-				count := slabInfoToBeRemoved.count
+			expectedChildValues[k] = v
+		}

-				// Update startIndex for all subsequence data slabs in this metadata slab info
-				for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ {
-					metadataSlabInfo.children[i].startIndex -= count
-				}
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-				copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:])
-				metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1]
+		// Overwrite existing child map value
+		for k := range expectedKeyValues {
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0))
+			require.NoError(t, err)
+			require.NotNil(t, existingStorable)

-				metadataSlabInfo.count -= count
+			id, ok := existingStorable.(SlabIDStorable)
+			require.True(t, ok)

-				// Update startIndex for all subsequence metadata slabs.
-				for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ {
-					metadataSlabInfos[i].startIndex -= count
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)

-					for j := 0; j < len(metadataSlabInfos[i].children); j++ {
-						metadataSlabInfos[i].children[j].startIndex -= count
-					}
-				}
+			valueEqual(t, expectedKeyValues[k], child)

-				if len(metadataSlabInfo.children) == 0 {
-					copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:])
-					metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1]
-				}
-			}
+			expectedKeyValues[k] = Uint64Value(0)

-			err := storage.Remove(slabInfoToBeRemoved.id)
+			err = storage.Remove(SlabID(id))
 			require.NoError(t, err)
-
-			if isLastSlab {
-				values = values[:slabInfoToBeRemoved.startIndex]
-			} else {
-				copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:])
-				values = values[:len(values)-slabInfoToBeRemoved.count]
-			}
-
-			verifyMapLoadedElements(t, m, values)
 		}

-		require.Equal(t, 0, len(values))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
 	})
 }

-func createMapWithLongStringKey(
-	t *testing.T,
-	storage SlabStorage,
-	address Address,
-	typeInfo TypeInfo,
-	size int,
-) (*OrderedMap, [][2]Value) {
+func TestMapRemoveReturnedValue(t *testing.T) {
+	typeInfo := testTypeInfo{42}
+	address := Address{1, 2, 3, 4, 5, 6, 7, 8}

-	digesterBuilder := &mockDigesterBuilder{}
+	t.Run("child array is not inlined", func(t *testing.T) {
+		const mapSize = 2

-	// Create parent map.
-	m, err := NewMap(storage, address, digesterBuilder, typeInfo)
-	require.NoError(t, err)
+		storage := newTestPersistentStorage(t)

-	expectedValues := make([][2]Value, size)
-	r := 'a'
-	for i := 0; i < size; i++ {
-		s := strings.Repeat(string(r), int(maxInlineMapElementSize))
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)

-		k := NewStringValue(s)
-		v := Uint64Value(i)
+		expectedKeyValues := make(map[Value]Value)

-		expectedValues[i] = [2]Value{k, v}
+		for i := 0; i < mapSize; i++ {
+			// Create child array
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)

-		digests := []Digest{Digest(i)}
-		digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+			k := Uint64Value(i)

-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
-		require.NoError(t, err)
-		require.Nil(t, existingStorable)
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-		r++
-	}
+			var expectedChildValues arrayValue
+			for {
+				v := NewStringValue(strings.Repeat("a", 10))

-	return m, expectedValues
-}
+				err = childArray.Append(v)
+				require.NoError(t, err)

-func createMapWithSimpleValues(
-	t *testing.T,
-	storage SlabStorage,
-	address Address,
-	typeInfo TypeInfo,
-	size int,
-	newDigests func(i int) []Digest,
-) (*OrderedMap, [][2]Value) {
+				expectedChildValues = append(expectedChildValues, v)

-	digesterBuilder := &mockDigesterBuilder{}
+				if !childArray.Inlined() {
+					break
+				}
+			}

-	m, err := NewMap(storage, address, digesterBuilder, typeInfo)
-	require.NoError(t, err)
+			expectedKeyValues[k] = expectedChildValues
+		}

-	expectedValues := make([][2]Value, size)
-	r := rune('a')
-	for i := 0; i < size; i++ {
-		k := Uint64Value(i)
-		v := NewStringValue(strings.Repeat(string(r), 20))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		digests := newDigests(i)
-		digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+		// Remove child array value
+		for k := range expectedKeyValues {
+			keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
+			require.NoError(t, err)
+			require.Equal(t, keyStorable, k)

-		expectedValues[i] = [2]Value{k, v}
+			id, ok := valueStorable.(SlabIDStorable)
+			require.True(t, ok)

-		existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1])
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)
+
+			valueEqual(t, expectedKeyValues[k], child)
+
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
+
+			delete(expectedKeyValues, k)
+		}
+
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+	})
+
+	t.Run("child array is inlined", func(t *testing.T) {
+		const mapSize = 2
+
+		storage := newTestPersistentStorage(t)
+
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
 		require.NoError(t, err)
-		require.Nil(t, existingStorable)
-	}

-	return m, expectedValues
-}
+		expectedKeyValues := make(map[Value]Value)

-func createMapWithCompositeValues(
-	t *testing.T,
-	storage SlabStorage,
-	address Address,
-	typeInfo TypeInfo,
-	size int,
-	newDigests func(i int) []Digest,
-) (*OrderedMap, [][2]Value) {
+		for i := 0; i < mapSize; i++ {
+			// Create child array
+			childArray, err := NewArray(storage, address, typeInfo)
+			require.NoError(t, err)

-	// Use mockDigesterBuilder to guarantee element order.
-	digesterBuilder := &mockDigesterBuilder{}
+			k := Uint64Value(i)

-	// Create parent map
-	m, err := NewMap(storage, address, digesterBuilder, typeInfo)
-	require.NoError(t, err)
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-	expectedValues := make([][2]Value, size)
-	for i := 0; i < size; i++ {
-		// Create nested array
-		nested, err := NewArray(storage, address, typeInfo)
-		require.NoError(t, err)
+			// Insert one element to child array
+			v := NewStringValue(strings.Repeat("a", 10))

-		err = nested.Append(Uint64Value(i))
-		require.NoError(t, err)
+			err = childArray.Append(v)
+			require.NoError(t, err)
+			require.True(t, childArray.Inlined())

-		k := Uint64Value(i)
-		v := nested
+			expectedKeyValues[k] = arrayValue{v}
+		}

-		expectedValues[i] = [2]Value{k, v}
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		//digests := []Digest{Digest(i)}
-		digests := newDigests(i)
-		digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+		// Remove child array value
+		for k := range expectedKeyValues {
+			keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
+			require.NoError(t, err)
+			require.Equal(t, keyStorable, k)

-		// Set nested array to parent
-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
-		require.NoError(t, err)
-		require.Nil(t, existingStorable)
-	}
+			id, ok := valueStorable.(SlabIDStorable)
+			require.True(t, ok)

-	return m, expectedValues
-}
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)

-func createMapWithSimpleAndCompositeValues(
-	t *testing.T,
-	storage SlabStorage,
-	address Address,
-	typeInfo TypeInfo,
-	size int,
-	compositeValueIndex int,
-	newDigests func(i int) []Digest,
-) (*OrderedMap, [][2]Value) {
+			valueEqual(t, expectedKeyValues[k], child)

-	digesterBuilder := &mockDigesterBuilder{}
+			delete(expectedKeyValues, k)

-	// Create parent map
-	m, err := NewMap(storage, address, digesterBuilder, typeInfo)
-	require.NoError(t, err)
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
+		}

-	values := make([][2]Value, size)
-	r := 'a'
-	for i := 0; i < size; i++ {
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+	})

-		k := Uint64Value(i)
+	t.Run("child map is not inlined", func(t *testing.T) {
+		const mapSize = 2

-		digests := newDigests(i)
-		digesterBuilder.On("Digest", k).Return(mockDigester{digests})
+		storage := newTestPersistentStorage(t)

-		if compositeValueIndex == i {
-			// Create nested array with one element
-			a, err := NewArray(storage, address, typeInfo)
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)
+
+		expectedKeyValues := make(map[Value]Value)
+
+		for i := 0; i < mapSize; i++ {
+			// Create child map
+			childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
 			require.NoError(t, err)

-			err = a.Append(Uint64Value(i))
+			k := Uint64Value(i)
+
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap)
 			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-			values[i] = [2]Value{k, a}
-		} else {
-			values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))}
-		}
+			expectedChildValues := make(mapValue)
+			expectedKeyValues[k] = expectedChildValues

-		existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1])
-		require.NoError(t, err)
-		require.Nil(t, existingStorable)
-	}
+			// Insert into child map until child map is not inlined
+			j := 0
+			for {
+				k := Uint64Value(j)
+				v := NewStringValue(strings.Repeat("a", 10))
+				j++

-	return m, values
-}
+				existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)

-func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) {
-	i := 0
-	err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) {
-		require.True(t, i < len(expectedValues))
-		valueEqual(t, typeInfoComparator, expectedValues[i][0], k)
-		valueEqual(t, typeInfoComparator, expectedValues[i][1], v)
-		i++
-		return true, nil
-	})
-	require.NoError(t, err)
-	require.Equal(t, len(expectedValues), i)
-}
+				expectedChildValues[k] = v

-func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int {
-	var counter int
-	for _, slab := range storage.deltas {
-		if _, ok := slab.(*MapMetaDataSlab); ok {
-			counter++
+				if !childMap.Inlined() {
+					break
+				}
+			}
 		}
-	}
-	return counter
-}

-func TestMaxInlineMapValueSize(t *testing.T) {
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-	t.Run("small key", func(t *testing.T) {
-		// Value has larger max inline size when key is less than max map key size.
+		// Remove child map value
+		for k := range expectedKeyValues {
+			keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
+			require.NoError(t, err)
+			require.Equal(t, keyStorable, k)

-		SetThreshold(256)
-		defer SetThreshold(1024)
+			id, ok := valueStorable.(SlabIDStorable)
+			require.True(t, ok)

-		mapSize := 2
-		keyStringSize := 16                               // Key size is less than max map key size.
-		valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size.
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)

-		r := newRand(t)
+			valueEqual(t, expectedKeyValues[k], child)

-		keyValues := make(map[Value]Value, mapSize)
-		for len(keyValues) < mapSize {
-			k := NewStringValue(randStr(r, keyStringSize))
-			v := NewStringValue(randStr(r, int(valueStringSize)))
-			keyValues[k] = v
+			delete(expectedKeyValues, k)
+
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
 		}

-		typeInfo := testTypeInfo{42}
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+	})
+
+	t.Run("child map is inlined", func(t *testing.T) {
+		const mapSize = 2
+
 		storage := newTestPersistentStorage(t)

-		m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
 		require.NoError(t, err)

-		for k, v := range keyValues {
-			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		expectedKeyValues := make(map[Value]Value)
+
+		for i := 0; i < mapSize; i++ {
+			// Create child map
+			childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+			require.NoError(t, err)
+
+			k := Uint64Value(i)
+
+			existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+
+			expectedChildValues := make(mapValue)
+			expectedKeyValues[k] = expectedChildValues
+
+			// Insert one element to child map
+			v := NewStringValue(strings.Repeat("a", 10))
+
+			existingStorable, err = childMap.Set(compare, hashInputProvider, k, v)
 			require.NoError(t, err)
 			require.Nil(t, existingStorable)
+
+			expectedChildValues[k] = v
 		}

-		// Both key and value are stored in map slab.
-		require.Equal(t, 1, len(storage.deltas))
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
-	})
+		// Remove child map value
+		for k := range expectedKeyValues {
+			keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
+			require.NoError(t, err)
+			require.Equal(t, keyStorable, k)

-	t.Run("max size key", func(t *testing.T) {
-		// Value max size is about half of max map element size when key is exactly max map key size.
+			id, ok := valueStorable.(SlabIDStorable)
+			require.True(t, ok)

-		SetThreshold(256)
-		defer SetThreshold(1024)
+			child, err := id.StoredValue(storage)
+			require.NoError(t, err)

-		mapSize := 1
-		keyStringSize := maxInlineMapKeySize - 2         // Key size is exactly max map key size (2 bytes is string encoding overhead).
-		valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half).
+			valueEqual(t, expectedKeyValues[k], child)

-		r := newRand(t)
+			delete(expectedKeyValues, k)

-		keyValues := make(map[Value]Value, mapSize)
-		for len(keyValues) < mapSize {
-			k := NewStringValue(randStr(r, int(keyStringSize)))
-			v := NewStringValue(randStr(r, int(valueStringSize)))
-			keyValues[k] = v
+			err = storage.Remove(SlabID(id))
+			require.NoError(t, err)
 		}

-		typeInfo := testTypeInfo{42}
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+	})
+}
+
+func TestMapWithOutdatedCallback(t *testing.T) {
+	typeInfo := testTypeInfo{42}
+	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+
+	t.Run("overwritten child array", func(t *testing.T) {
+		storage := newTestPersistentStorage(t)

-		m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
 		require.NoError(t, err)

-		for k, v := range keyValues {
-			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
-			require.NoError(t, err)
-			require.Nil(t, existingStorable)
-		}
+		expectedKeyValues := make(mapValue)

-		// Key is stored in map slab, while value is stored separately in storable slab.
-		require.Equal(t, 2, len(storage.deltas))
+		// Create child array
+		childArray, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)

-		verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
-	})
+		k := Uint64Value(0)

-	t.Run("large key", func(t *testing.T) {
-		// Value has larger max inline size when key is more than max map key size because
-		// when key size exceeds max map key size, it is stored in a separate storable slab,
-		// and SlabIDStorable is stored as key in the map, which is 19 bytes.
+		// Insert child array to parent map
+		existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
+		require.NoError(t, err)
+		require.Nil(t, existingStorable)

-		SetThreshold(256)
-		defer SetThreshold(1024)
+		v := NewStringValue(strings.Repeat("a", 10))

-		mapSize := 1
-		keyStringSize := maxInlineMapKeySize + 10         // key size is more than max map key size
-		valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size
+		err = childArray.Append(v)
+		require.NoError(t, err)

-		r := newRand(t)
+		expectedKeyValues[k] = arrayValue{v}

-		keyValues := make(map[Value]Value, mapSize)
-		for len(keyValues) < mapSize {
-			k := NewStringValue(randStr(r, int(keyStringSize)))
-			v := NewStringValue(randStr(r, int(valueStringSize)))
-			keyValues[k] = v
-		}
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		typeInfo := testTypeInfo{42}
-		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		storage := newTestPersistentStorage(t)
+		// Overwrite child array value from parent
+		valueStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0))
+		require.NoError(t, err)

-		m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		id, ok := valueStorable.(SlabIDStorable)
+		require.True(t, ok)
+
+		child, err := id.StoredValue(storage)
 		require.NoError(t, err)

-		for k, v := range keyValues {
-			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
-			require.NoError(t, err)
-			require.Nil(t, existingStorable)
-		}
+		valueEqual(t, expectedKeyValues[k], child)

-		// Key is stored in separate storable slabs, while value is stored in map slab.
-		require.Equal(t, 2, len(storage.deltas))
+		expectedKeyValues[k] = Uint64Value(0)

-		verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
-	})
-}
+		// childArray.parentUpdater isn't nil before callback is invoked.
+		require.NotNil(t, childArray.parentUpdater)

-func TestMapID(t *testing.T) {
-	typeInfo := testTypeInfo{42}
-	storage := newTestPersistentStorage(t)
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+		// Modify overwritten child array
+		err = childArray.Append(Uint64Value(0))
+		require.NoError(t, err)

-	m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
-	require.NoError(t, err)
+		// childArray.parentUpdater is nil after callback is invoked.
+		require.Nil(t, childArray.parentUpdater)

-	sid := m.SlabID()
-	id := m.ValueID()
+		// No-op on parent
+		valueEqual(t, expectedKeyValues, parentMap)
+	})

-	require.Equal(t, sid.address[:], id[:8])
-	require.Equal(t, sid.index[:], id[8:])
-}
+	t.Run("removed child array", func(t *testing.T) {

-func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) {
-	const (
-		mapSize             = 3
-		keyStringSize       = 16
-		initialStorableSize = 1
-		mutatedStorableSize = 5
-	)
+		storage := newTestPersistentStorage(t)

-	keyValues := make(map[Value]*mutableValue, mapSize)
-	for i := 0; i < mapSize; i++ {
-		k := Uint64Value(i)
-		v := newMutableValue(initialStorableSize)
-		keyValues[k] = v
-	}
+		// Create parent map
+		parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
+		require.NoError(t, err)

-	typeInfo := testTypeInfo{42}
-	address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-	storage := newTestPersistentStorage(t)
+		expectedKeyValues := make(mapValue)

-	m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
-	require.NoError(t, err)
+		// Create child array
+		childArray, err := NewArray(storage, address, typeInfo)
+		require.NoError(t, err)

-	for k, v := range keyValues {
-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		k := Uint64Value(0)
+
+		// Insert child array to parent map
+		existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray)
 		require.NoError(t, err)
 		require.Nil(t, existingStorable)
-	}

-	require.True(t, m.root.IsData())
+		v := NewStringValue(strings.Repeat("a", 10))

-	expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize
-	expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize
-	require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize())
+		err = childArray.Append(v)
+		require.NoError(t, err)

-	err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider)
-	require.NoError(t, err)
+		expectedKeyValues[k] = arrayValue{v}

-	// Reset mutable values after changing its storable size
-	for k, v := range keyValues {
-		v.updateStorableSize(mutatedStorableSize)
+		testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-		existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+		// Remove child array value from parent
+		keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k)
 		require.NoError(t, err)
-		require.NotNil(t, existingStorable)
-	}
+		require.Equal(t, keyStorable, k)

-	require.True(t, m.root.IsData())
+		id, ok := valueStorable.(SlabIDStorable)
+		require.True(t, ok)

-	expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize
-	expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize
-	require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize())
+		child, err := id.StoredValue(storage)
+		require.NoError(t, err)

-	err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider)
-	require.NoError(t, err)
+		valueEqual(t, expectedKeyValues[k], child)
+
+		delete(expectedKeyValues, k)
+
+		// childArray.parentUpdater isn't nil before callback is invoked.
+		require.NotNil(t, childArray.parentUpdater)
+
+		// Modify removed child array
+		err = childArray.Append(Uint64Value(0))
+		require.NoError(t, err)
+
+		// childArray.parentUpdater is nil after callback is invoked.
+		require.Nil(t, childArray.parentUpdater)
+
+		// No-op on parent
+		valueEqual(t, expectedKeyValues, parentMap)
+	})
 }
diff --git a/storable.go b/storable.go
index 2d19fefd..02888130 100644
--- a/storable.go
+++ b/storable.go
@@ -37,6 +37,23 @@ type Storable interface {
 	ChildStorables() []Storable
 }
 
+// ComparableStorable is an interface that supports comparison and cloning of Storable.
+// This is only used for compact keys.
+type ComparableStorable interface {
+	Storable
+
+	// Equal returns true if the given storable is equal to this storable.
+	Equal(Storable) bool
+
+	// Less returns true if this storable is less than the given storable.
+	Less(Storable) bool
+
+	// ID returns a unique identifier.
+	ID() string
+
+	Copy() Storable
+}
+
 type containerStorable interface {
 	Storable
 	hasPointer() bool
@@ -50,6 +67,19 @@ func hasPointer(storable Storable) bool {
 }
 
 const (
+	// WARNING: tag numbers defined here in github.com/onflow/atree
+	// MUST not overlap with tag numbers used by Cadence internal value encoding.
+	// As of Oct. 2, 2023, Cadence uses tag numbers from 128 to 224.
+	// See runtime/interpreter/encode.go at github.com/onflow/cadence.
+
+	CBORTagInlinedArrayExtraData      = 247
+	CBORTagInlinedMapExtraData        = 248
+	CBORTagInlinedCompactMapExtraData = 249
+
+	CBORTagInlinedArray      = 250
+	CBORTagInlinedMap        = 251
+	CBORTagInlinedCompactMap = 252
+
 	CBORTagInlineCollisionGroup   = 253
 	CBORTagExternalCollisionGroup = 254
 
@@ -59,6 +89,7 @@ const (
 type SlabIDStorable SlabID
 
 var _ Storable = SlabIDStorable{}
+var _ containerStorable = SlabIDStorable{}
 
 func (v SlabIDStorable) hasPointer() bool {
 	return true
diff --git a/storable_test.go b/storable_test.go
index 9f4d6ece..12b732f2 100644
--- a/storable_test.go
+++ b/storable_test.go
@@ -333,6 +333,7 @@ type StringValue struct {
 var _ Value = StringValue{}
 var _ Storable = StringValue{}
 var _ HashableValue = StringValue{}
+var _ ComparableStorable = StringValue{}
 
 func NewStringValue(s string) StringValue {
 	size := GetUintCBORSize(uint64(len(s))) + uint32(len(s))
@@ -345,6 +346,28 @@ func (v StringValue) StoredValue(_ SlabStorage) (Value, error) {
 	return v, nil
 }
 
+func (v StringValue) Equal(other Storable) bool {
+	if _, ok := other.(StringValue); !ok {
+		return false
+	}
+	return v.str == other.(StringValue).str
+}
+
+func (v StringValue) Less(other Storable) bool {
+	if _, ok := other.(StringValue); !ok {
+		return false
+	}
+	return v.str < other.(StringValue).str
+}
+
+func (v StringValue) ID() string {
+	return v.str
+}
+
+func (v StringValue) Copy() Storable {
+	return v
+}
+
 func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) {
 	if uint64(v.ByteSize()) > maxInlineSize {
 
@@ -430,7 +453,7 @@ func (v StringValue) String() string {
 	return v.str
 }
 
-func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) {
+func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlinedExtraData []ExtraData) (Storable, error) {
 	t, err := dec.NextType()
 	if err != nil {
 		return nil, err
@@ -451,6 +474,15 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) {
 	}
 
 	switch tagNumber {
+	case CBORTagInlinedArray:
+		return DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData)
+
+	case CBORTagInlinedMap:
+		return DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData)
+
+	case CBORTagInlinedCompactMap:
+		return DecodeInlinedCompactMapStorable(dec, decodeStorable, id, inlinedExtraData)
+
 	case CBORTagSlabID:
 		return DecodeSlabIDStorable(dec)
 
@@ -492,7 +524,7 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) {
 		return Uint64Value(n), nil
 
 	case cborTagSomeValue:
-		storable, err := decodeStorable(dec, id)
+		storable, err := decodeStorable(dec, id, inlinedExtraData)
 		if err != nil {
 			return nil, err
 		}
@@ -507,12 +539,43 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) {
 }
 
 func decodeTypeInfo(dec *cbor.StreamDecoder) (TypeInfo, error) {
-	value, err := dec.DecodeUint64()
+	t, err := dec.NextType()
 	if err != nil {
 		return nil, err
 	}
 
-	return testTypeInfo{value: value}, nil
+	switch t {
+	case cbor.UintType:
+		value, err := dec.DecodeUint64()
+		if err != nil {
+			return nil, err
+		}
+
+		return testTypeInfo{value: value}, nil
+
+	case cbor.TagType:
+		tagNum, err := dec.DecodeTagNumber()
+		if err != nil {
+			return nil, err
+		}
+
+		switch tagNum {
+		case testCompositeTypeInfoTagNum:
+			value, err := dec.DecodeUint64()
+			if err != nil {
+				return nil, err
+			}
+
+			return testCompositeTypeInfo{value: value}, nil
+
+		default:
+			return nil, fmt.Errorf("failed to decode type info")
+		}
+
+	default:
+		return nil, fmt.Errorf("failed to decode type info")
+	}
+
 }
 
 func compare(storage SlabStorage, value Value, storable Storable) (bool, error) {
@@ -677,25 +740,25 @@ func (v SomeStorable) String() string {
 	return fmt.Sprintf("%s", v.Storable)
 }
 
-type mutableValue struct {
+type testMutableValue struct {
 	storable *mutableStorable
 }
 
-var _ Value = &mutableValue{}
+var _ Value = &testMutableValue{}
 
-func newMutableValue(storableSize uint32) *mutableValue {
-	return &mutableValue{
+func newTestMutableValue(storableSize uint32) *testMutableValue {
+	return &testMutableValue{
 		storable: &mutableStorable{
 			size: storableSize,
 		},
 	}
 }
 
-func (v *mutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
+func (v *testMutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
 	return v.storable, nil
 }
 
-func (v *mutableValue) updateStorableSize(n uint32) {
+func (v *testMutableValue) updateStorableSize(n uint32) {
 	v.storable.size = n
 }
 
@@ -710,7 +773,7 @@ func (s *mutableStorable) ByteSize() uint32 {
 }
 
 func (s *mutableStorable) StoredValue(SlabStorage) (Value, error) {
-	return &mutableValue{s}, nil
+	return &testMutableValue{s}, nil
 }
 
 func (*mutableStorable) ChildStorables() []Storable {
diff --git a/storage.go b/storage.go
index 005e69fd..42ca7490 100644
--- a/storage.go
+++ b/storage.go
@@ -25,15 +25,45 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"unsafe"
 
 	"github.com/fxamacker/cbor/v2"
 )
 
 const LedgerBaseStorageSlabPrefix = "$"
 
-// ValueID identifies Array and OrderedMap.
-type ValueID [16]byte
+// ValueID identifies an Array or OrderedMap. ValueID is consistent:
+// it stays the same regardless of whether the value is inlined.
+// By contrast, SlabID is affected by inlining because it identifies
+// a slab in storage. Although ValueID and SlabID have the same size
+// and content under the hood, they are used differently, and ValueID
+// should be used for resource tracking, etc.
+type ValueID [unsafe.Sizeof(Address{}) + unsafe.Sizeof(SlabIndex{})]byte
+
+var emptyValueID = ValueID{}
+
+func slabIDToValueID(sid SlabID) ValueID {
+	var id ValueID
+	n := copy(id[:], sid.address[:])
+	copy(id[n:], sid.index[:])
+	return id
+}
+
+func (vid ValueID) equal(sid SlabID) bool {
+	return bytes.Equal(vid[:len(sid.address)], sid.address[:]) &&
+		bytes.Equal(vid[len(sid.address):], sid.index[:])
+}
+
+func (vid ValueID) String() string {
+	return fmt.Sprintf(
+		"0x%x.%d",
+		binary.BigEndian.Uint64(vid[:8]),
+		binary.BigEndian.Uint64(vid[8:]),
+	)
+}
 
+// WARNING: Any changes to SlabID or its components (Address and SlabIndex)
+// require updates to ValueID definition and functions.
 type (
 	Address   [8]byte
 	SlabIndex [8]byte
@@ -448,6 +478,8 @@ func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map
 			atLeastOneExternalSlab = true
 		}
 
+		// This handles inlined slabs because an inlined slab is a child storable (s) and
+		// we traverse s.ChildStorables() for its inlined elements.
 		next = append(next, s.ChildStorables()...)
 	}
 
@@ -574,6 +606,11 @@ func (s *PersistentSlabStorage) SlabIterator() (SlabIterator, error) {
 
 			slabIDStorable, ok := childStorable.(SlabIDStorable)
 			if !ok {
+				// Append child storables of this childStorable to handle inlined slab containing SlabIDStorable.
+				nextChildStorables = append(
+					nextChildStorables,
+					childStorable.ChildStorables()...,
+				)
 				continue
 			}
 
@@ -989,12 +1026,18 @@ func (s *PersistentSlabStorage) Retrieve(id SlabID) (Slab, bool, error) {
 }
 
 func (s *PersistentSlabStorage) Store(id SlabID, slab Slab) error {
+	if id == SlabIDUndefined {
+		return NewSlabIDError("failed to store slab with undefined slab ID")
+	}
 	// add to deltas
 	s.deltas[id] = slab
 	return nil
}
 
 func (s *PersistentSlabStorage) Remove(id SlabID) error {
+	if id == SlabIDUndefined {
+		return NewSlabIDError("failed to remove slab with undefined slab ID")
+	}
 	// add to nil to deltas under that id
 	s.deltas[id] = nil
 	return nil
diff --git a/storage_test.go b/storage_test.go
index 40a4e6c8..2cd2a929 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -900,7 +900,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) {
 	data := map[SlabID][]byte{
 
 		// (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ]
 		id1: {
-			// extra data
 			// version
 			0x10,
 			// extra data flag
@@ -970,7 +969,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) {
 		// (data slab) next: 0, data: [0]
 		id4: {
-			// extra data
 			// version
 			0x10,
 			// extra data flag
diff --git a/typeinfo.go b/typeinfo.go
index 35eb718d..cabb1469 100644
--- a/typeinfo.go
+++ b/typeinfo.go
@@ -19,11 +19,18 @@ package atree
 
 import (
+	"encoding/binary"
+	"fmt"
+	"sort"
+
 	"github.com/fxamacker/cbor/v2"
 )
 
 type TypeInfo interface {
 	Encode(*cbor.StreamEncoder) error
+	IsComposite() bool
+	ID() string
+	Copy() TypeInfo
 }
 
 type TypeInfoDecoder func(
@@ -32,3 +39,388 @@ type TypeInfoDecoder func(
 	TypeInfo,
 	error,
 )
+
+type ExtraData interface {
+	isExtraData() bool
+	Encode(enc *Encoder) error
+}
+
+// compactMapExtraData is used for inlining compact values.
+// compactMapExtraData includes hkeys and keys with map extra data
+// because hkeys and keys are the same in order and content for
+// all values with the same compact type and map seed.
+type compactMapExtraData struct {
+	mapExtraData *MapExtraData
+	hkeys        []Digest             // hkeys is ordered by mapExtraData.Seed
+	keys         []ComparableStorable // keys is ordered by mapExtraData.Seed
+}
+
+var _ ExtraData = &compactMapExtraData{}
+
+const compactMapExtraDataLength = 3
+
+func (c *compactMapExtraData) isExtraData() bool {
+	return true
+}
+
+func (c *compactMapExtraData) Encode(enc *Encoder) error {
+	err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: map extra data
+	err = c.mapExtraData.Encode(enc)
+	if err != nil {
+		return err
+	}
+
+	// element 1: digests
+	totalDigestSize := len(c.hkeys) * digestSize
+
+	var digests []byte
+	if totalDigestSize <= len(enc.Scratch) {
+		digests = enc.Scratch[:totalDigestSize]
+	} else {
+		digests = make([]byte, totalDigestSize)
+	}
+
+	for i := 0; i < len(c.hkeys); i++ {
+		binary.BigEndian.PutUint64(digests[i*digestSize:], uint64(c.hkeys[i]))
+	}
+
+	err = enc.CBOR.EncodeBytes(digests)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: keys (field names)
+	err = enc.CBOR.EncodeArrayHead(uint64(len(c.keys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	for _, key := range c.keys {
+		err = key.Encode(enc)
+		if err != nil {
+			return NewEncodingError(err)
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func newCompactMapExtraData(
+	dec *cbor.StreamDecoder,
+	decodeTypeInfo TypeInfoDecoder,
+	decodeStorable StorableDecoder,
+) (*compactMapExtraData, error) {
+
+	length, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if length != compactMapExtraDataLength {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"compact extra data has invalid length %d, want %d",
+				length,
+				compactMapExtraDataLength,
+			))
+	}
+
+	// element 0: map extra data
+	mapExtraData, err := newMapExtraData(dec, decodeTypeInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	// element 1: digests
+	digestBytes, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if len(digestBytes)%digestSize != 0 {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding digests failed: number of bytes %d is not multiple of %d",
+				len(digestBytes),
+				digestSize))
+	}
+
+	digestCount := len(digestBytes) / digestSize
+
+	// element 2: keys
+	keyCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if keyCount != uint64(digestCount) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding compact map key failed: number of keys %d is different from number of digests %d",
+				keyCount,
+				digestCount))
+	}
+
+	hkeys := make([]Digest, digestCount)
+	for i := 0; i < digestCount; i++ {
+		hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:]))
+	}
+
+	keys := make([]ComparableStorable, keyCount)
+	for i := uint64(0); i < keyCount; i++ {
+		// Decode compact map key
+		key, err := decodeStorable(dec, SlabIDUndefined, nil)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable")
+		}
+		compactMapKey, ok := key.(ComparableStorable)
+		if !ok {
+			return nil, NewDecodingError(fmt.Errorf("failed to decode key's storable: got %T, expect ComparableStorable", key))
+		}
+		keys[i] = compactMapKey
+	}
+
+	return &compactMapExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil
+}
+
+type compactMapTypeInfo struct {
+	index int
+	keys  []ComparableStorable
+}
+
+type inlinedExtraData struct {
+	extraData       []ExtraData
+	compactMapTypes map[string]compactMapTypeInfo
+	arrayTypes      map[string]int
+}
+
+func newInlinedExtraData() *inlinedExtraData {
+	return &inlinedExtraData{}
+}
+
+// Encode encodes inlined extra data as CBOR array.
+func (ied *inlinedExtraData) Encode(enc *Encoder) error {
+	err := enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	var tagNum uint64
+
+	for _, extraData := range ied.extraData {
+		switch extraData.(type) {
+		case *ArrayExtraData:
+			tagNum = CBORTagInlinedArrayExtraData
+
+		case *MapExtraData:
+			tagNum = CBORTagInlinedMapExtraData
+
+		case *compactMapExtraData:
+			tagNum = CBORTagInlinedCompactMapExtraData
+
+		default:
+			return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraData))
+		}
+
+		err = enc.CBOR.EncodeTagHead(tagNum)
+		if err != nil {
+			return NewEncodingError(err)
+		}
+
+		err = extraData.Encode(enc)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func newInlinedExtraDataFromData(
+	data []byte,
+	decMode cbor.DecMode,
+	decodeStorable StorableDecoder,
+	decodeTypeInfo TypeInfoDecoder,
+) ([]ExtraData, []byte, error) {
+
+	dec := decMode.NewByteStreamDecoder(data)
+
+	count, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, nil, NewDecodingError(err)
+	}
+
+	if count == 0 {
+		return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined extra data"))
+	}
+
+	inlinedExtraData := make([]ExtraData, count)
+	for i := uint64(0); i < count; i++ {
+		tagNum, err := dec.DecodeTagNumber()
+		if err != nil {
+			return nil, nil, NewDecodingError(err)
+		}
+
+		switch tagNum {
+		case CBORTagInlinedArrayExtraData:
+			inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo)
+			if err != nil {
+				return nil, nil, err
+			}
+
+		case CBORTagInlinedMapExtraData:
+			inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo)
+			if err != nil {
+				return nil, nil, err
+			}
+
+		case CBORTagInlinedCompactMapExtraData:
+			inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable)
+			if err != nil {
+				return nil, nil, err
+			}
+
+		default:
+			return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: unsupported tag number %d", tagNum))
+		}
+	}
+
+	return inlinedExtraData, data[dec.NumBytesDecoded():], nil
+}
+
+// addArrayExtraData returns index of deduplicated array extra data.
+// Array extra data is deduplicated by array type info ID because array
+// extra data only contains type info.
+func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int {
+	if ied.arrayTypes == nil {
+		ied.arrayTypes = make(map[string]int)
+	}
+
+	id := data.TypeInfo.ID()
+	index, exist := ied.arrayTypes[id]
+	if exist {
+		return index
+	}
+
+	index = len(ied.extraData)
+	ied.extraData = append(ied.extraData, data)
+	ied.arrayTypes[id] = index
+	return index
+}
+
+// addMapExtraData returns index of map extra data.
+// Map extra data is not deduplicated because it also contains count and seed.
+func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int {
+	index := len(ied.extraData)
+	ied.extraData = append(ied.extraData, data)
+	return index
+}
+
+// addCompactMapExtraData returns index of deduplicated compact map extra data.
+// Compact map extra data is deduplicated by TypeInfo.ID() with sorted field names.
+func (ied *inlinedExtraData) addCompactMapExtraData(
+	data *MapExtraData,
+	digests []Digest,
+	keys []ComparableStorable,
+) (int, []ComparableStorable) {
+
+	if ied.compactMapTypes == nil {
+		ied.compactMapTypes = make(map[string]compactMapTypeInfo)
+	}
+
+	id := makeCompactMapTypeID(data.TypeInfo, keys)
+	info, exist := ied.compactMapTypes[id]
+	if exist {
+		return info.index, info.keys
+	}
+
+	compactMapData := &compactMapExtraData{
+		mapExtraData: data,
+		hkeys:        digests,
+		keys:         keys,
+	}
+
+	index := len(ied.extraData)
+	ied.extraData = append(ied.extraData, compactMapData)
+
+	ied.compactMapTypes[id] = compactMapTypeInfo{
+		keys:  keys,
+		index: index,
+	}
+
+	return index, keys
+}
+
+func (ied *inlinedExtraData) empty() bool {
+	return len(ied.extraData) == 0
+}
+
+// makeCompactMapTypeID returns an ID built by concatenating t.ID() and the sorted names, using "," as separator.
+func makeCompactMapTypeID(t TypeInfo, names []ComparableStorable) string {
+	const separator = ","
+
+	if len(names) == 1 {
+		return t.ID() + separator + names[0].ID()
+	}
+
+	sorter := newFieldNameSorter(names)
+
+	sort.Sort(sorter)
+
+	return t.ID() + separator + sorter.join(separator)
+}
+
+// fieldNameSorter sorts names by index (it does not sort the names slice in place).
+type fieldNameSorter struct {
+	names []ComparableStorable
+	index []int
+}
+
+func newFieldNameSorter(names []ComparableStorable) *fieldNameSorter {
+	index := make([]int, len(names))
+	for i := 0; i < len(names); i++ {
+		index[i] = i
+	}
+	return &fieldNameSorter{
+		names: names,
+		index: index,
+	}
+}
+
+func (fn *fieldNameSorter) Len() int {
+	return len(fn.names)
+}
+
+func (fn *fieldNameSorter) Less(i, j int) bool {
+	i = fn.index[i]
+	j = fn.index[j]
+	return fn.names[i].Less(fn.names[j])
+}
+
+func (fn *fieldNameSorter) Swap(i, j int) {
+	fn.index[i], fn.index[j] = fn.index[j], fn.index[i]
+}
+
+func (fn *fieldNameSorter) join(sep string) string {
+	var s string
+	for i, index := range fn.index {
+		if i > 0 {
+			s += sep
+		}
+		s += fn.names[index].ID()
+	}
+	return s
+}
diff --git a/utils_test.go b/utils_test.go
index a40a3599..5762b3d3 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -20,6 +20,7 @@ package atree
 
 import (
 	"flag"
+	"fmt"
 	"math/rand"
 	"testing"
 	"time"
@@ -91,6 +92,18 @@ type testTypeInfo struct {
 
 var _ TypeInfo = testTypeInfo{}
 
+func (i testTypeInfo) Copy() TypeInfo {
+	return i
+}
+
+func (i testTypeInfo) IsComposite() bool {
+	return false
+}
+
+func (i testTypeInfo) ID() string {
+	return fmt.Sprintf("uint64(%d)", i.value)
+}
+
 func (i testTypeInfo) Encode(enc *cbor.StreamEncoder) error {
 	return enc.EncodeUint64(i.value)
 }
@@ -100,13 +113,50 @@ func (i testTypeInfo) Equal(other TypeInfo) bool {
 	return ok && i.value == otherTestTypeInfo.value
 }
 
+const testCompositeTypeInfoTagNum = 246
+
+type testCompositeTypeInfo struct {
+	value uint64
+}
+
+var _ TypeInfo = testCompositeTypeInfo{}
+
+func (i testCompositeTypeInfo) Copy() TypeInfo {
+	return i
+}
+
+func (i testCompositeTypeInfo) IsComposite() bool {
+	return true
+}
+
+func (i testCompositeTypeInfo) ID() string {
+	return fmt.Sprintf("composite(%d)", i.value)
+}
+
+func (i testCompositeTypeInfo) Encode(enc *cbor.StreamEncoder) error {
+	err := enc.EncodeTagHead(testCompositeTypeInfoTagNum)
+	if err != nil {
+		return err
+	}
+	return enc.EncodeUint64(i.value)
+}
+
+func (i testCompositeTypeInfo) Equal(other TypeInfo) bool {
+	otherTestTypeInfo, ok := other.(testCompositeTypeInfo)
+	return ok && i.value == otherTestTypeInfo.value
+}
+
 func typeInfoComparator(a, b TypeInfo) bool {
-	x, ok := a.(testTypeInfo)
-	if !ok {
+	switch a := a.(type) {
+	case testTypeInfo:
+		return a.Equal(b)
+
+	case testCompositeTypeInfo:
+		return a.Equal(b)
+
+	default:
 		return false
 	}
-	y, ok := b.(testTypeInfo)
-	return ok && x.value == y.value
 }
 
 func newTestPersistentStorage(t testing.TB) *PersistentSlabStorage {
@@ -264,80 +314,118 @@ func (s *InMemBaseStorage) ResetReporter() {
 	s.segmentsTouched = make(map[SlabID]struct{})
 }
 
-func valueEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) {
-	switch a.(type) {
+func valueEqual(t *testing.T, expected Value, actual Value) {
+	switch expected := expected.(type) {
+	case arrayValue:
+		actual, ok := actual.(*Array)
+		require.True(t, ok)
+
+		arrayEqual(t, expected, actual)
+
 	case *Array:
-		arrayEqual(t, tic, a, b)
+		require.FailNow(t, "expected value shouldn't be *Array")
+
+	case mapValue:
+		actual, ok := actual.(*OrderedMap)
+		require.True(t, ok)
+
+		mapEqual(t, expected, actual)
+
 	case *OrderedMap:
-		mapEqual(t, tic, a, b)
+		require.FailNow(t, "expected value shouldn't be *OrderedMap")
+
 	default:
-		require.Equal(t, a, b)
+		require.Equal(t, expected, actual)
 	}
 }
 
-func arrayEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) {
-	array1, ok := a.(*Array)
-	require.True(t, ok)
+func arrayEqual(t *testing.T, expected arrayValue, actual *Array) {
+	require.Equal(t, uint64(len(expected)), actual.Count())
 
-	array2, ok := b.(*Array)
-	require.True(t, ok)
+	iterator, err := actual.ReadOnlyIterator()
+	require.NoError(t, err)
 
-	require.True(t, tic(array1.Type(), array2.Type()))
-	require.Equal(t, array1.Address(), array2.Address())
-	require.Equal(t, array1.Count(), array2.Count())
-	require.Equal(t, array1.SlabID(), array2.SlabID())
+	i := 0
+	for {
+		actualValue, err := iterator.Next()
+		require.NoError(t, err)
 
-	iterator1, err := array1.Iterator()
-	require.NoError(t, err)
+		if actualValue == nil {
+			break
+		}
+
+		valueEqual(t, expected[i], actualValue)
+		i++
+	}
+	require.Equal(t, len(expected), i)
+}
 
-	iterator2, err := array2.Iterator()
+func mapEqual(t *testing.T, expected mapValue, actual *OrderedMap) {
+	require.Equal(t, uint64(len(expected)), actual.Count())
+
+	iterator, err := actual.ReadOnlyIterator()
 	require.NoError(t, err)
 
+	i := 0
 	for {
-		value1, err := iterator1.Next()
-		require.NoError(t, err)
-
-		value2, err := iterator2.Next()
+		actualKey, actualValue, err := iterator.Next()
 		require.NoError(t, err)
 
-		valueEqual(t, tic, value1, value2)
-
-		if value1 == nil || value2 == nil {
+		if actualKey == nil {
 			break
 		}
+
+		expectedValue, exist := expected[actualKey]
+		require.True(t, exist)
+
+		valueEqual(t, expectedValue, actualValue)
+		i++
 	}
+	require.Equal(t, len(expected), i)
 }
 
-func mapEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) {
-	m1, ok := a.(*OrderedMap)
-	require.True(t, ok)
+func valueIDToSlabID(vid ValueID) SlabID {
+	var id SlabID
+	copy(id.address[:], vid[:slabAddressSize])
+	copy(id.index[:], vid[slabAddressSize:])
+	return id
+}
 
-	m2, ok := b.(*OrderedMap)
-	require.True(t, ok)
+func testInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) {
+	testInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID())
+}
 
-	require.True(t, tic(m1.Type(), m2.Type()))
-	require.Equal(t, m1.Address(), m2.Address())
-	require.Equal(t, m1.Count(), m2.Count())
-	require.Equal(t, m1.SlabID(), m2.SlabID())
+func testNotInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) {
+	testNotInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID())
+}
 
-	iterator1, err := m1.Iterator()
-	require.NoError(t, err)
+func testInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) {
+	require.Equal(t, SlabIDUndefined, slabID)
 
-	iterator2, err := m2.Iterator()
-	require.NoError(t, err)
+	require.Equal(t, expectedAddress[:], valueID[:slabAddressSize])
+	require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+}
 
 	for {
-		key1, value1, err := iterator1.Next()
-		require.NoError(t, err)
+func testNotInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) {
+	require.Equal(t, expectedAddress, slabID.address)
+	require.NotEqual(t, SlabIndexUndefined, slabID.index)
 
-		key2, value2, err := iterator2.Next()
-		require.NoError(t, err)
+	require.Equal(t, slabID.address[:], valueID[:slabAddressSize])
+	require.Equal(t, slabID.index[:], valueID[slabAddressSize:])
+}
 
-		valueEqual(t, tic, key1, key2)
-		valueEqual(t, tic, value1, value2)
+type arrayValue []Value
 
-		if key1 == nil || key2 == nil {
-			break
-		}
-	}
+var _ Value = &arrayValue{}
+
+func (v arrayValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
+	panic("not reachable")
+}
+
+type mapValue map[Value]Value
+
+var _ Value = &mapValue{}
+
+func (v mapValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
+	panic("not reachable")
 }
diff --git a/value.go b/value.go
index 06ce3a5c..c8be86e3 100644
--- a/value.go
+++ b/value.go
@@ -25,3 +25,14 @@ type Value interface {
 }
 
 type ValueComparator func(SlabStorage, Value, Storable) (bool, error)
 
 type StorableComparator func(Storable, Storable) bool
+
+type parentUpdater func() (found bool, err error)
+
+// mutableValueNotifier is an interface that allows a mutable child value to notify and update its parent.
+type mutableValueNotifier interface {
+	Value
+	ValueID() ValueID
+	setParentUpdater(parentUpdater)
+	Inlined() bool
+	Inlinable(uint64) bool
+}