From 657379f484e6e0597a61af21f0613d36b5000aee Mon Sep 17 00:00:00 2001
From: Jordan Hrycaj
Date: Mon, 4 Dec 2023 20:39:26 +0000
Subject: [PATCH] Aristo db update merkle hasher (#1925)

* Register paths for added leafs because of trie re-balancing

  why:
    While the payload would not change, the prefix in the leaf vertex would.
    So it needs to be flagged for hash recompilation by the `hashify()`
    module.

  also:
    Make sure that `Hike` paths which might have vertex links into the
    backend filter are replaced by vertex copies before being manipulated.
    Otherwise the vertices on the immutable filter might be changed
    inadvertently.

* Also check for paths where the leaf vertex is on the backend, already

  why:
    A path can have some vertices on the top layer cache with the `Leaf`
    vertex on the backend.

* Re-define a void `HashLabel` type.

  why:
    A `HashLabel` type is a pair `(root-vertex-ID, Keccak-hash)`. Previously,
    a valid `HashLabel` consisted of a non-empty hash and a non-zero vertex
    ID. This definition leads to a non-unique representation of a void
    `HashLabel` with either the root-ID or the hash being void. It has been
    changed so that a `HashLabel` is void exactly when its hash entry is
    void.

* Update consistency checkers

* Re-org `hashify()` procedure

  why:
    Syncing against the block chain showed serious deficiencies which
    produced wrong hashes or simply bailed out with an error. Now all fringe
    cases (mainly due to deleted entries) are integrated into the labelling
    schedule rather than being handled as separate fringe cases.
---
 nimbus/db/aristo/aristo_check/check_be.nim  |  28 +-
 nimbus/db/aristo/aristo_check/check_top.nim |  22 +-
 nimbus/db/aristo/aristo_constants.nim       |   2 +-
 nimbus/db/aristo/aristo_debug.nim           |  84 ++-
 nimbus/db/aristo/aristo_delete.nim          |  31 +-
 nimbus/db/aristo/aristo_desc.nim            |   2 +-
 nimbus/db/aristo/aristo_desc/desc_error.nim |  17 +-
 .../aristo/aristo_desc/desc_identifiers.nim |   2 +-
 nimbus/db/aristo/aristo_get.nim             |   9 +
 nimbus/db/aristo/aristo_hashify.nim         | 578 ++++++++----------
 nimbus/db/aristo/aristo_hike.nim            |   4 +
 nimbus/db/aristo/aristo_merge.nim           |  38 +-
 nimbus/db/aristo/aristo_utils.nim           |  38 +-
 tests/test_aristo/test_filter.nim           |  11 +-
 tests/test_aristo/test_tx.nim               |  37 +-
 15 files changed, 496 insertions(+), 407 deletions(-)

diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim
index 316a1f7ef1..56d829f367 100644
--- a/nimbus/db/aristo/aristo_check/check_be.nim
+++ b/nimbus/db/aristo/aristo_check/check_be.nim
@@ -117,14 +117,12 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
     for (_,vid,key) in T.walkKeyBE db:
       if not key.isvalid:
         return err((vid,CheckBeKeyInvalid))
-      let rc = db.getVtxBE vid
-      if rc.isErr or not rc.value.isValid:
+      let vtx = db.getVtxBE(vid).valueOr:
         return err((vid,CheckBeVtxMissing))
-      let rx = rc.value.toNodeBE db # backend only
-      if rx.isErr:
+      let node = vtx.toNodeBE(db).valueOr: # backend links only
         return err((vid,CheckBeKeyCantCompile))
       if not relax:
-        let expected = rx.value.digestTo(HashKey)
+        let expected = node.digestTo(HashKey)
         if expected != key:
           return err((vid,CheckBeKeyMismatch))
       discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
@@ -186,19 +184,21 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
       lastTrg = filter.trg

   # Check key table
+  var list: seq[VertexID]
   for (vid,lbl) in db.top.kMap.pairs:
+    list.add vid
     let vtx = db.getVtx vid
     if not db.top.sTab.hasKey(vid) and not vtx.isValid:
       return err((vid,CheckBeCacheKeyDangling))
-    if lbl.isValid and not relax:
-      if not vtx.isValid:
-        return err((vid,CheckBeCacheVtxDangling))
-      let rc = vtx.toNode db # compile cache first
-      if rc.isErr:
-        return err((vid,CheckBeCacheKeyCantCompile))
-      let expected = rc.value.digestTo(HashKey)
-      if expected != lbl.key:
-        return err((vid,CheckBeCacheKeyMismatch))
+    if not lbl.isValid or relax:
+      continue
+    if not vtx.isValid:
+      return err((vid,CheckBeCacheVtxDangling))
+    let node = vtx.toNode(db).valueOr: # compile cache first
+      return err((vid,CheckBeCacheKeyCantCompile))
+    let expected = node.digestTo(HashKey)
+    if expected != lbl.key:
+      return err((vid,CheckBeCacheKeyMismatch))

   # Check vGen
   let

diff --git a/nimbus/db/aristo/aristo_check/check_top.nim b/nimbus/db/aristo/aristo_check/check_top.nim
index 63c026051a..e1919061ae 100644
--- a/nimbus/db/aristo/aristo_check/check_top.nim
+++ b/nimbus/db/aristo/aristo_check/check_top.nim
@@ -40,9 +40,11 @@ proc checkTopStrict*(
       if vid notin revVids:
         return err((vid,CheckStkRevKeyMismatch))

-  let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
-  if 0 < pAmkVtxCount and pAmkVtxCount < db.top.sTab.len:
-    # Cannot have less changes than cached entries
+  let
+    pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
+    sTabVtxCount = db.top.sTab.values.toSeq.filterIt(it.isValid).len
+  # Non-zero values must sum up to the same
+  if pAmkVtxCount < sTabVtxCount:
     return err((VertexID(0),CheckStkVtxCountMismatch))

   ok()
@@ -95,13 +97,17 @@ proc checkTopCommon*(
     kMapCount = db.top.kMap.values.toSeq.filterIt(it.isValid).len
     kMapNilCount = db.top.kMap.len - kMapCount

-  # Check deleted entries
-  var nNilVtx = 0
+  # Collect leafs and check deleted entries
+  var
+    nNilVtx = 0
+    leafs = db.top.lTab.values.toSeq.filterIt(it.isValid).toHashSet
   for (vid,vtx) in db.top.sTab.pairs:
     if vtx.isValid:
       case vtx.vType:
       of Leaf:
-        discard
+        if vid notin leafs:
+          return err((vid,CheckAnyLeafUnregistered))
+        leafs.excl vid
       of Branch:
         block check42Links:
           var seen = false
@@ -123,6 +129,10 @@ proc checkTopCommon*(
       if db.top.kMap.getOrVoid(vid).isValid:
         return err((vid,CheckAnyVtxEmptyKeyExpected))

+  # Check for dangling leaf records
+  if 0 < leafs.len:
+    return err((leafs.toSeq[0],CheckAnyLeafVidDangling))
+
   # If present, there are at least as many deleted hashes as there are deleted
   # vertices.
   if kMapNilCount != 0 and kMapNilCount < nNilVtx:

diff --git a/nimbus/db/aristo/aristo_constants.nim b/nimbus/db/aristo/aristo_constants.nim
index f6cb0d9193..3055400d82 100644
--- a/nimbus/db/aristo/aristo_constants.nim
+++ b/nimbus/db/aristo/aristo_constants.nim
@@ -34,7 +34,7 @@ const
   VOID_HASH_KEY* = HashKey()
     ## Void equivalent for Merkle hash value

-  VOID_HASH_LABEL* = HashLabel()
+  VOID_HASH_LABEL* = HashLabel(key: VOID_HASH_KEY)
     ## Void equivalent for Merkle hash value

   EmptyQidPairSeq* = seq[(QueueID,QueueID)].default

diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim
index 1320a61d63..3a096c9365 100644
--- a/nimbus/db/aristo/aristo_debug.nim
+++ b/nimbus/db/aristo/aristo_debug.nim
@@ -15,7 +15,7 @@ import
   eth/[common, trie/nibbles],
   results,
   stew/byteutils,
-  "."/[aristo_constants, aristo_desc, aristo_hike],
+  "."/[aristo_constants, aristo_desc, aristo_get, aristo_hike],
   ./aristo_desc/desc_backend,
   ./aristo_init/[memory_db, memory_only, rocks_db],
   ./aristo_filter/filter_scheduler
@@ -85,8 +85,36 @@ proc squeeze(s: string; hex = false; ignLen = false): string =
       result &= "..(" & $s.len & ")"
     result &= ".." & s[s.len-16 ..
^1] -proc stripZeros(a: string): string = - a.strip(leading=true, trailing=false, chars={'0'}) +proc stripZeros(a: string; toExp = false): string = + if 0 < a.len: + result = a.strip(leading=true, trailing=false, chars={'0'}) + if result.len == 0: + result = "0" + elif result[^1] == '0' and toExp: + var n = 0 + while result[^1] == '0': + let w = result.len + result.setLen(w-1) + n.inc + if n == 1: + result &= "0" + elif n == 2: + result &= "00" + elif 2 < n: + result &= "↑" & $n + +proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 = + if lbl.isValid: + if not db.top.isNil: + let vids = db.top.pAmk.getOrVoid lbl + if vids.isValid: + return vids.sortedKeys[0].uint64 + block: + let vids = db.xMap.getOrVoid lbl + if vids.isValid: + return vids.sortedKeys[0].uint64 + +# --------------------- proc ppVid(vid: VertexID; pfx = true): string = if pfx: @@ -96,6 +124,15 @@ proc ppVid(vid: VertexID; pfx = true): string = else: result &= "ø" +func ppCodeHash(h: Hash256): string = + result = "¢" + if h == Hash256(): + result &= "©" + elif h == EMPTY_CODE_HASH: + result &= "ø" + else: + result &= h.data.toHex.squeeze(hex=true,ignLen=true) + proc ppFid(fid: FilterID): string = if not fid.isValid: return "ø" @@ -130,17 +167,6 @@ proc ppVidList(vGen: openArray[VertexID]): string = #proc ppVidList(vGen: HashSet[VertexID]): string = # "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}" -proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 = - if lbl.isValid: - if not db.top.isNil: - let vids = db.top.pAmk.getOrVoid lbl - if vids.isValid: - return vids.sortedKeys[0].uint64 - block: - let vids = db.xMap.getOrVoid lbl - if vids.isValid: - return vids.sortedKeys[0].uint64 - proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string = proc getVids: HashSet[VertexID] = if not db.top.isNil: @@ -153,10 +179,10 @@ proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string = return vids if pfx: result = "£" - if key == VOID_HASH_KEY: - result &= "ø" + if key.len == 0 or key.to(Hash256) == Hash256(): + result &= "©" elif not key.isValid: - result &= "r" + result &= "ø" else: let tag = if key.len < 32: "[#" & $key.len & "]" else: "" @@ -179,11 +205,9 @@ proc ppLabel(lbl: HashLabel; db: AristoDbRef): string = "%ø" proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string = - if not db.top.isNil: - let vid = db.top.lTab.getOrVoid lty - if vid.isValid: - return "@" & vid.ppVid - "@" & $lty + let pfx = lty.path.to(NibblesSeq) + "@" & lty.root.ppVid(pfx=false) & ":" & + ($pfx).squeeze(hex=true,ignLen=(pfx.len==64)) proc ppPathPfx(pfx: NibblesSeq): string = let s = $pfx @@ -203,10 +227,10 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string = result &= "[#" & p.rlpBlob.toHex.squeeze(hex=true) & "]" of AccountData: result = "(" - result &= $p.account.nonce & "," - result &= $p.account.balance & "," + result &= ($p.account.nonce).stripZeros(toExp=true) & "," + result &= ($p.account.balance).stripZeros(toExp=true) & "," result &= p.account.storageID.ppVid & "," - result &= $p.account.codeHash & ")" + result &= p.account.codeHash.ppCodeHash & ")" proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string = if not nd.isValid: @@ -581,7 +605,7 @@ proc pp*(leg: Leg; db = AristoDbRef()): string = result &= lbl.ppLabel(db) result &= "," if leg.backend: - result &= "*" + result &= "¶" result &= "," if 0 <= leg.nibble: result &= $leg.nibble.ppNibble @@ -628,6 +652,9 @@ proc pp*(tx: AristoTxRef): string = result &= ", par=" & $tx.parent.txUid result &= ")" +proc pp*(wp: VidVtxPair; db: 
AristoDbRef): string =
+  "(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")"
+
 # ---------------------

 proc pp*(
@@ -700,14 +727,15 @@ proc pp*(

 proc pp*(
     db: AristoDbRef;
-    backendOk = false;
     root = VertexID(1);
     indent = 4;
+    backendOk = false;
+    filterOk = true;
   ): string =
   result = db.top.pp(db, indent=indent) & indent.toPfx
   if backendOk:
     result &= db.backend.pp(db)
-  else:
+  elif filterOk:
     result &= db.roFilter.ppFilter(db, root, indent+1)

 proc pp*(sdb: MerkleSignRef; indent = 4): string =

diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim
index 249384fbb1..6f3ca3adf6 100644
--- a/nimbus/db/aristo/aristo_delete.nim
+++ b/nimbus/db/aristo/aristo_delete.nim
@@ -214,13 +214,20 @@ proc collapseLeaf(
   if 2 < hike.legs.len:                        # (1), (2), or (3)
     # Merge `br` into the leaf `vtx` and unlink `br`.
-    let par = hike.legs[^3].wp
+    let par = hike.legs[^3].wp.dup             # Writable vertex
     case par.vtx.vType:
     of Branch:                                 # (1)
       # Replace `vtx` by `^2 & vtx` (use `lf` as-is)
       par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
       db.top.sTab[par.vid] = par.vtx
       db.top.sTab[lf.vid] = lf.vtx
+      # Make sure that there is a cache entry in case the leaf was pulled
+      # from the backend.
+      let
+        lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
+        tag = lfPath.pathToTag.valueOr:
+          return err((lf.vid,error))
+      db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
       return ok()

     of Extension:                              # (2) or (3)
@@ -230,13 +237,20 @@ proc collapseLeaf(

       if 3 < hike.legs.len:                    # (2)
         # Grandparent exists
-        let gpr = hike.legs[^4].wp
+        let gpr = hike.legs[^4].wp.dup         # Writable vertex
         if gpr.vtx.vType != Branch:
           return err((gpr.vid,DelBranchExpexted))
         db.doneWith par.vid                    # `par` is obsolete now
         gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
         db.top.sTab[gpr.vid] = gpr.vtx
         db.top.sTab[lf.vid] = lf.vtx
+        # Make sure that there is a cache entry in case the leaf was pulled
+        # from the backend.
+        let
+          lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
+          tag = lfPath.pathToTag.valueOr:
+            return err((lf.vid,error))
+        db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
         return ok()

       # No grandparent, so ^3 is root vertex   # (3)
@@ -264,6 +278,17 @@ proc collapseLeaf(

   # Clean up stale leaf vertex which has moved to root position
   db.doneWith lf.vid
+
+  # If some `Leaf` vertex was installed as root, there must be an extra
+  # `LeafTie` lookup entry.
+  let rootVtx = db.getVtx hike.root
+  if rootVtx.isValid and
+     rootVtx != hike.legs[0].wp.vtx and
+     rootVtx.vType == Leaf:
+    let tag = rootVtx.lPfx.pathToTag.valueOr:
+      return err((hike.root,error))
+    db.top.lTab[LeafTie(root: hike.root, path: tag)] = hike.root
+
   ok()

 # -------------------------
@@ -380,7 +405,7 @@ proc delete*(
     root: VertexID;
     path: openArray[byte];
   ): Result[void,(VertexID,AristoError)] =
-  ## Variant of `fetchPayload()`
+  ## Variant of `delete()`
   ##
   db.delete(?
path.initNibbleRange.hikeUp(root, db).mapErr toVae) diff --git a/nimbus/db/aristo/aristo_desc.nim b/nimbus/db/aristo/aristo_desc.nim index deee0b5930..8930c799f5 100644 --- a/nimbus/db/aristo/aristo_desc.nim +++ b/nimbus/db/aristo/aristo_desc.nim @@ -128,7 +128,7 @@ func isValid*(vid: VertexID): bool = vid != VertexID(0) func isValid*(lbl: HashLabel): bool = - lbl.root.isValid and lbl.key.isValid + lbl.key.isValid func isValid*(sqv: HashSet[VertexID]): bool = sqv != EmptyVidSet diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim index e6c5b15991..98f6526a80 100644 --- a/nimbus/db/aristo/aristo_desc/desc_error.nim +++ b/nimbus/db/aristo/aristo_desc/desc_error.nim @@ -80,8 +80,8 @@ type PathExpectedLeaf # Merge leaf `merge()` - MergeBrLinkLeafGarbled - MergeBrLinkVtxPfxTooShort + MergeBranchLinkLeafGarbled + MergeBranchLinkVtxPfxTooShort MergeBranchGarbledNibble MergeBranchGarbledTail MergeBranchLinkLockedKey @@ -90,6 +90,7 @@ type MergeBranchRootExpected MergeLeafGarbledHike MergeLeafPathCachedAlready + MergeLeafPathOnBackendAlready MergeNonBranchProofModeLock MergeRootBranchLinkBusy MergeAssemblyFailed # Ooops, internal error @@ -106,15 +107,11 @@ type MergeNodeVtxDuplicates # Update `Merkle` hashes `hashify()` - HashifyCannotComplete - HashifyCannotHashRoot + HashifyEmptyHike HashifyExistingHashMismatch - HashifyDownVtxlevelExceeded - HashifyDownVtxLeafUnexpected + HashifyNodeUnresolved HashifyRootHashMismatch - HashifyRootVidMismatch - HashifyVidCircularDependence - HashifyVtxMissing + HashifyRootNodeUnresolved # Cache checker `checkCache()` CheckStkVtxIncomplete @@ -131,6 +128,8 @@ type CheckRlxRevKeyMissing CheckRlxRevKeyMismatch + CheckAnyLeafUnregistered + CheckAnyLeafVidDangling CheckAnyVidVtxMissing CheckAnyVtxEmptyKeyMissing CheckAnyVtxEmptyKeyExpected diff --git a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim index 550922c33d..6cfbed39ab 100644 --- a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim +++ b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim @@ -96,7 +96,7 @@ type ## `Aristo Trie`. They are used temporarily and in caches or backlog ## tables. root*: VertexID ## Root ID for the sub-trie. - key*: HashKey ## Merkle hash or encoded small node data + key*: HashKey ## Merkle hash or encoded small node data # ------------------------------------------------------------------------------ # Chronicles formatters diff --git a/nimbus/db/aristo/aristo_get.nim b/nimbus/db/aristo/aristo_get.nim index 885a7be901..205c985dd6 100644 --- a/nimbus/db/aristo/aristo_get.nim +++ b/nimbus/db/aristo/aristo_get.nim @@ -23,6 +23,15 @@ type vid*: VertexID ## Table lookup vertex ID (if any) vtx*: VertexRef ## Reference to vertex +# ------------------------------------------------------------------------------ +# Public helpers +# ------------------------------------------------------------------------------ + +func dup*(wp: VidVtxPair): VidVtxPair = + VidVtxPair( + vid: wp.vid, + vtx: wp.vtx.dup) + # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_hashify.nim b/nimbus/db/aristo/aristo_hashify.nim index 622c19c004..aaadd009df 100644 --- a/nimbus/db/aristo/aristo_hashify.nim +++ b/nimbus/db/aristo/aristo_hashify.nim @@ -16,29 +16,42 @@ ## (i.e. recalculated and compared) unless the ID is locked. 
In the latter
 ## case, the key is assumed to be correct without checking.
 ##
+## The following properties are required from the top layer cache.
+##
+## * All recently (i.e. not saved to backend) added entries must have an
+##   `lTab[]` entry with `(root-vertex,path,leaf-vertex-ID)`.
+##
+## * All recently (i.e. not saved to backend) deleted entries must have an
+##   `lTab[]` entry with `(root-vertex,path,VertexID(0))`.
+##
+## * All vertices where the key (aka Merkle hash) has changed must have a
+##   top layer cache `kMap[]` entry `(vertex-ID,VOID_HASH_LABEL)` indicating
+##   that there is no key available for this vertex. This also applies for
+##   backend vertices where the key has changed while the structural logic
+##   did not change.
+##
 ## The association algorithm is an optimised version of:
 ##
-## * For all leaf vertices, label them with parent vertex so that there are
-##   chains from the leafs to the root vertex.
+## * For all leaf vertices which have all child links on the top layer cache
+##   where the node keys (aka hashes) can be compiled, proceed with the parent
+##   vertex. Note that a top layer cache vertex can only have a key on the top
+##   layer cache (whereas a backend vertex has its key on the backend.)
 ##
-## * Apply a width-first traversal starting with the set of leafs vertices
-##   compiling the keys to associate with by hashing the current vertex.
+## Apparently, keys (aka hashes) can be compiled for leaf vertices. The same
+## holds for follow up vertices where the child keys were available, already.
+## This process stops when a vertex has children on the backend or children
+## lead to a chain that is not sorted yet.
 ##
-## Apperently, keys (aka hashes) can be compiled for leaf vertices. For the
-## other vertices, the keys can be compiled if all the children keys are
-## known which is assured by the nature of the width-first traversal method.
+## * For the remaining vertex chains (where the process stopped) up to the root
+##   vertex, set up a width-first schedule starting at the vertex where the
+##   previous chain broke off and follow up to the root vertex.
 ##
-## For production, this algorithm is slightly optimised:
+## * Follow the width-first schedule for labelling all vertices with a hash
+##   key.
 ##
-## * For each leaf vertex, calculate the chain from the leaf to the root vertex.
-##   + Starting at the leaf, calculate the key for each vertex towards the root
-##     vertex as long as possible.
-##   + Stash the rest of the partial chain to be completed later
+## Note that there are some tweaks for `proof` nodes with incomplete tries and
+## handling of possible stray vertices on the top layer cache left over from
+## deletion processes.
 ##
-## * While there is a partial chain left, use the ends towards the leaf
-##   vertices and calculate the remaining keys (which results in a width-first
- {.push raises: [].} import @@ -46,25 +59,30 @@ import chronicles, eth/common, results, - stew/interval_set, + stew/byteutils, "."/[aristo_desc, aristo_get, aristo_hike, aristo_serialise, aristo_utils, aristo_vid] type - BackVidValRef = ref object - root: VertexID ## Root vertex - onBe: bool ## Table key vid refers to backend - toVid: VertexID ## Next/follow up vertex + FollowUpVid = object + ## Link item: VertexID -> VertexID + root: VertexID ## Root vertex, might be void unless known + toVid: VertexID ## Valid next/follow up vertex BackVidTab = - Table[VertexID,BackVidValRef] + Table[VertexID,FollowUpVid] - BackWVtxRef = ref object - w: BackVidValRef - vtx: VertexRef + WidthFirstForest = object + ## Collected width first search trees + root: HashSet[VertexID] ## Top level, root targets + pool: BackVidTab ## Upper links pool + base: BackVidTab ## Width-first leaf level links - BackWVtxTab = - Table[VertexID,BackWVtxRef] + DfReport = object + ## Depth first traversal report tracing back a hike with + ## `leafToRootCrawler()` + legInx: int ## First leg that failed to resolve + unresolved: seq[VertexID] ## List of unresolved links const SubTreeSearchDepthMax = 64 @@ -80,11 +98,14 @@ template logTxt(info: static[string]): static[string] = "Hashify " & info -func getOrVoid(tab: BackVidTab; vid: VertexID): BackVidValRef = - tab.getOrDefault(vid, BackVidValRef(nil)) +func getOrVoid(tab: BackVidTab; vid: VertexID): FollowUpVid = + tab.getOrDefault(vid, FollowUpVid()) -func isValid(brv: BackVidValRef): bool = - brv != BackVidValRef(nil) +func isValid(w: FollowUpVid): bool = + w.toVid.isValid + +func contains(wff: WidthFirstForest; vid: VertexID): bool = + vid in wff.base or vid in wff.pool or vid in wff.root # ------------------------------------------------------------------------------ # Private functions @@ -119,8 +140,7 @@ proc updateHashKey( # Ok, vertex is on the backend. let rc = db.getKeyBE vid if rc.isOk: - let key = rc.value - if key == expected: + if rc.value == expected: return ok() # Changes on the upper layers overload the lower layers. Some hash keys @@ -142,209 +162,120 @@ proc updateHashKey( ok() -proc leafToRootHasher( +proc leafToRootCrawler( db: AristoDbRef; # Database, top layer hike: Hike; # Hike for labelling leaf..root - ): Result[int,(VertexID,AristoError)] = - ## Returns the index of the first node that could not be hashed + ): Result[DfReport,(VertexID,AristoError)] = + ## Returns the index of the first node that could not be hashed by + ## vertices all from the top layer cache. 
+ ## for n in (hike.legs.len-1).countDown(0): let wp = hike.legs[n].wp bg = hike.legs[n].backend - rc = wp.vtx.toNode db - if rc.isErr: - return ok n + node = wp.vtx.toNode(db, stopEarly=false, beKeyOk=false).valueOr: + return ok DfReport(legInx: n, unresolved: error) # Vertices marked proof nodes need not be checked - if wp.vid in db.top.pPrf: - continue + if wp.vid notin db.top.pPrf: - # Check against existing key, or store new key - let key = rc.value.digestTo(HashKey) - db.updateHashKey(hike.root, wp.vid, key, bg).isOkOr: - return err((wp.vid,error)) + # Check against existing key, or store new key + let key = node.digestTo(HashKey) + db.updateHashKey(hike.root, wp.vid, key, bg).isOkOr: + return err((wp.vid,error)) - ok -1 # all could be hashed + ok DfReport(legInx: -1) # all could be hashed -# ------------------ -proc deletedLeafHasher( +proc cloudConnect( + cloud: HashSet[VertexID]; # Vertex IDs to start connecting from db: AristoDbRef; # Database, top layer - hike: Hike; # Hike for labelling leaf..root - ): Result[void,(VertexID,AristoError)] = - var - todo = hike.legs.reversed.mapIt(it.wp) - solved: HashSet[VertexID] - # Edge case for empty `hike` - if todo.len == 0: - let vtx = db.getVtx hike.root - if not vtx.isValid: - return err((hike.root,HashifyVtxMissing)) - todo = @[VidVtxPair(vid: hike.root, vtx: vtx)] - while 0 < todo.len: - var - delayed: seq[VidVtxPair] - didHere: HashSet[VertexID] # avoid duplicates - for wp in todo: - let rc = wp.vtx.toNode(db, stopEarly=false) - if rc.isOk: - let - expected = rc.value.digestTo(HashKey) - key = db.getKey wp.vid - if key.isValid: - if key != expected: - return err((wp.vid,HashifyExistingHashMismatch)) - else: - db.vidAttach(HashLabel(root: hike.root, key: expected), wp.vid) - solved.incl wp.vid - else: - # Resolve follow up vertices first - for vid in rc.error: - let vtx = db.getVtx vid - if not vtx.isValid: - return err((vid,HashifyVtxMissing)) - if vid in solved: - discard wp.vtx.toNode(db, stopEarly=false) - return err((vid,HashifyVidCircularDependence)) - if vid notin didHere: - didHere.incl vid - delayed.add VidVtxPair(vid: vid, vtx: vtx) - - # Followed by this vertex which relies on the ones registered above. - if wp.vid notin didHere: - didHere.incl wp.vid - delayed.add wp - - todo = delayed - - ok() - -# ------------------ - -proc resolveStateRoots( - db: AristoDbRef; # Database, top layer - uVids: BackVidTab; # Unresolved vertex IDs - ): Result[void,(VertexID,AristoError)] = - ## Resolve unresolved nodes. There might be a sub-tree on the backend which - ## blocks resolving the current structure. So search the `uVids` argument - ## list for missing vertices and resolve it. - # - # Update out-of-path hashes, i.e. fill gaps caused by branching out from - # `downMost` table vertices. - # - # Example - # :: - # $1 ^ - # \ | - # $7 -- $6 -- leaf $8 | on top layer, - # \ `--- leaf $9 | $5..$9 were inserted, - # $5 | $1 was redefined - # \ v - # \ - # \ ^ - # $4 -- leaf $2 | from - # `--- leaf $3 | backend (BE) - # v - # backLink[] = {$7} - # downMost[] = {$7} - # top.kMap[] = {£1, £6, £8, £9} - # BE.kMap[] = {£1, £2, £3, £4} - # - # So `$5` (needed for `$7`) cannot be resolved because it is neither on - # the path `($1..$8)`, nor is it on `($1..$9)`. 
-  #
-  var follow: BackWVtxTab
-
-  proc wVtxRef(db: AristoDbRef; root, vid, toVid: VertexID): BackWVtxRef =
-    let vtx = db.getVtx vid
-    if vtx.isValid:
-      return BackWVtxRef(
-        vtx: vtx,
-        w: BackVidValRef(
-          root:  root,
-          onBe:  not db.top.sTab.getOrVoid(vid).isValid,
-          toVid: toVid))
-
-  # Init `follow` table by unresolved `Branch` leafs from `vidTab`
-  for (uVid,uVal) in uVids.pairs:
-    let uVtx = db.getVtx uVid
-    if uVtx.isValid and uVtx.vType == Branch:
-      var didSomething = false
-      for vid in uVtx.bVid:
-        if vid.isValid and not db.getKey(vid).isValid:
-          let w = db.wVtxRef(root=uVal.root, vid=vid, toVid=uVid)
-          if not w.isNil:
-            follow[vid] = w
-            didSomething = true
-      # Add state root to be resolved, as well
-      if didSomething and not follow.hasKey uVal.root:
-        let w = db.wVtxRef(root=uVal.root, vid=uVal.root, toVid=uVal.root)
-        if not w.isNil:
-          follow[uVal.root] = w
-
-  # Update and re-collect into `follow` table
-  var level = 0
-  while 0 < follow.len:
-    var
-      changes = false
-      redo: BackWVtxTab
-    for (fVid,fVal) in follow.pairs:
-      # Resolve or keep for later
-      let rc = fVal.vtx.toNode db
-      if rc.isOk:
-        # Update Merkle hash
-        let key = rc.value.digestTo(HashKey)
-        db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe).isOkOr:
-          return err((fVid, error))
-        changes = true
-      else:
-        # Cannot complete with this vertex, so dig deeper and do it later
-        redo[fVid] = fVal
-
-        case fVal.vtx.vType:
-        of Branch:
-          for vid in fVal.vtx.bVid:
-            if vid.isValid and not db.getKey(vid).isValid:
-              let w = db.wVtxRef(root=fVal.w.root, vid=vid, toVid=fVid)
-              if not w.isNil:
-                changes = true
-                redo[vid] = w
-        of Extension:
-          let vid = fVal.vtx.eVid
-          if vid.isValid and not db.getKey(vid).isValid:
-            let w = db.wVtxRef(root=fVal.w.root,vid=vid, toVid=fVid)
-            if not w.isNil:
-              changes = true
-              redo[vid] = w
-        of Leaf:
-          # Should habe been hashed earlier
-          return err((fVid,HashifyDownVtxLeafUnexpected))
-
-    # Beware of loops
-    if not changes or SubTreeSearchDepthMax < level:
-      return err((VertexID(0),HashifyDownVtxlevelExceeded))
-
-    # Restart with a new instance of `follow`
-    redo.swap follow
-    level.inc
-
-  ok()
+    target: BackVidTab;                # Vertices to arrive to
+  ): tuple[paths: WidthFirstForest, unresolved: HashSet[VertexID]] =
+  ## For each vertex ID from argument `cloud` find a chain of `FollowUpVid`
+  ## type links reaching into argument `target`. The `paths` entry from the
+  ## `result` tuple contains the connections to the `target` argument and the
+  ## `unresolved` entries the IDs left over from `cloud`.
+  if 0 < cloud.len:
+    result.unresolved = cloud
+    var hold = target
+    while 0 < hold.len:
+      # Greedily trace back `bottomUp[]` entries for finding parents of
+      # unresolved vertices from `cloud`
+      var redo: BackVidTab
+      for (vid,val) in hold.pairs:
+        let vtx = db.getVtx vid
+        if vtx.isValid:
+          result.paths.pool[vid] = val
+          # Grab child links
+          for sub in vtx.subVids:
+            let w = FollowUpVid(
+              root:  val.root,
+              toVid: vid)
+            if sub notin cloud:
+              redo[sub] = w
+            else:
+              result.paths.base[sub] = w # ok, use this
+              result.unresolved.excl sub
+              if result.unresolved.len == 0:
+                return
+      redo.swap hold
+
+
+proc updateWFF(
+    wff: var WidthFirstForest;         # Search tree to update
+    hike: Hike;                        # Chain of vertices
+    ltr: DfReport;                     # Index and extra vertex IDs for `hike`
+  ) =
+  ## Use vertices from the `hike` argument and link them leaf-to-root in a way
+  ## so that they can be traversed later in a width-first search.
+  ##
+  ## The `ltr` argument augments the `hike` path in that it defines a set of
+  ## extra vertices where the width-first search is supposed to start.
+  ##
+  ##    ..unresolved hash keys..  |  ..all set here..
+  ##                              |
+  ##    hike.legs: (leg[0], leg[1], ..leg[legInx], ..)
+  ##                  |       |         |
+  ##                  | <---- | <------ |
+  ##                  |       |
+  ##                  |  wff.pool[]     |
+  ##
+  ## and the set `unresolved{} × leg[legInx]` will be registered in `base[]`.
+  ##
+  # Root target to reach via width-first search
+  wff.root.incl hike.root
+
+  # Add unresolved nodes for top level links
+  for u in 1 .. ltr.legInx:
+    let vid = hike.legs[u].wp.vid
+    # Make sure that `base[]` and `pool[]` are disjoint, possibly moving
+    # `base[]` entries to the `pool[]`.
+    wff.base.del vid
+    wff.pool[vid] = FollowUpVid(
+      root:  hike.root,
+      toVid: hike.legs[u-1].wp.vid)
+
+  # These ones have been resolved, already
+  for u in ltr.legInx+1 ..< hike.legs.len:
+    let vid = hike.legs[u].wp.vid
+    wff.pool.del vid
+    wff.base.del vid
+
+  assert 0 < ltr.unresolved.len # debugging, only
+  let vid = hike.legs[ltr.legInx].wp.vid
+  for sub in ltr.unresolved:
+    # Update request for unresolved sub-links by adding a new tail
+    # entry (unless registered, already.)
+    if sub notin wff:
+      wff.base[sub] = FollowUpVid(
+        root:  hike.root,
+        toVid: vid)

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

-proc hashifyClear*(
-    db: AristoDbRef;                   # Database, top layer
-    locksOnly = false;                 # If `true`, then clear only proof locks
-  ) =
-  ## Clear all `Merkle` hashes from the `db` argument database top layer.
-  if not locksOnly:
-    db.top.pAmk.clear
-    db.top.kMap.clear
-  db.top.pPrf.clear
-
-
 proc hashify*(
     db: AristoDbRef;                   # Database, top layer
   ): Result[HashSet[VertexID],(VertexID,AristoError)] =
@@ -352,104 +283,129 @@ proc hashify*(
   ## Tree`. If successful, the function returns the keys (aka Merkle hash) of
   ## the root vertices.
   var
-    roots: HashSet[VertexID]
-    completed: HashSet[VertexID]
-
-    # Width-first leaf-to-root traversal structure
-    backLink: BackVidTab
-    downMost: BackVidTab
+    deleted = false                    # Need extra check for orphaned vertices
+    completed: HashSet[VertexID]       # Root targets reached, already
+    wff: WidthFirstForest              # Leaf-to-root traversal structure

-  # Unconditionally mark the top layer
-  db.top.dirty = true
+  if not db.top.dirty:
+    return ok completed

-  for (lky,vid) in db.top.lTab.pairs:
-    let rc = lky.hikeUp db
+  for (lky,lfVid) in db.top.lTab.pairs:
+    let
+      rc = lky.hikeUp db
+      hike = rc.to(Hike)
+
+    if not lfVid.isValid:
+      # Remember that there are left overs from a delete procedure which have
+      # to be eventually found before starting width-first processing.
+      deleted = true
+
+    if hike.legs.len == 0:
+      # Ignore left over path from deleted entry.
+      if not lfVid.isValid:
+        # FIXME: Is there a case for adding child-to-root links to the `wff`
+        #        schedule?
        continue
+      if rc.isErr:
+        return err((lfVid,rc.error[1]))
+      return err((hike.root,HashifyEmptyHike))

-    # There might be deleted entries on the leaf table. If this is the case,
-    # the Merkle hashes for the vertices in the `hike` can all be compiled.
-    if not vid.isValid:
-      ? db.deletedLeafHasher rc.to(Hike)
-
-    elif rc.isErr:
-      return err((vid,rc.error[1]))
+    # Hash as much as possible of the `hike` starting at the downmost `leg`
+    let ltr = ? db.leafToRootCrawler hike
+    if ltr.legInx < 0:
+      completed.incl hike.root
     else:
-      # Hash as much of the `hike` as possible
-      let
-        hike = rc.value
-        n = ? db.leafToRootHasher hike
-      roots.incl hike.root
-
-      if 0 < n:
-        # Backtrack and register remaining nodes. Note that in case *n == 0*,
-        # the root vertex has not been fully resolved yet.
-        #
-        #   .. unresolved hash keys  |  all set here ..
-        #                            |
-        #                            |
-        # hike.legs: (leg[0], leg[1], ..leg[n-1], leg[n], ..)
-        #               |       |        |          |
-        #               | <---- | <---- | <-------- |
-        #               |       |       |
-        #               | backLink[]    | downMost[] |
-        #
-        if n+1 < hike.legs.len:
-          downMost.del hike.legs[n+1].wp.vid
-        downMost[hike.legs[n].wp.vid] = BackVidValRef(
-          root:  hike.root,
-          onBe:  hike.legs[n].backend,
-          toVid: hike.legs[n-1].wp.vid)
-        for u in (n-1).countDown(1):
-          backLink[hike.legs[u].wp.vid] = BackVidValRef(
-            root:  hike.root,
-            onBe:  hike.legs[u].backend,
-            toVid: hike.legs[u-1].wp.vid)
-      elif n < 0:
-        completed.incl hike.root
+      # Not all could be hashed, merge the rest into `wff` width-first schedule
+      wff.updateWFF(hike, ltr)

-  # At least one full path leaf..root should have succeeded with labelling
-  # for each root.
-  if completed.len < roots.len:
-    ? db.resolveStateRoots backLink
-
-  # Update remaining hashes
-  while 0 < downMost.len:
-    var
-      redo: BackVidTab
-      done: HashSet[VertexID]
-
-    for (vid,val) in downMost.pairs:
-      # Try to convert vertex to a node. This is possible only if all link
-      # references have Merkle hashes.
-      #
-      # Also `db.getVtx(vid)` => not nil as it was fetched earlier, already
-      let rc = db.getVtx(vid).toNode db
-      if rc.isErr:
-        # Cannot complete with this vertex, so do it later
-        redo[vid] = val
-
-      else:
-        # Update Merkle hash
-        let key = rc.value.digestTo(HashKey)
-        db.updateHashKey(val.root, vid, key, val.onBe).isOkOr:
-          return err((vid,error))
-
-        done.incl vid
-
-        # Proceed with back link
-        let nextItem = backLink.getOrVoid val.toVid
-        if nextItem.isValid:
-          redo[val.toVid] = nextItem
-
-    # Make sure that the algorithm proceeds
-    if done.len == 0:
-      let error = HashifyCannotComplete
-      return err((VertexID(0),error))
-
-    # Clean up dups from `backLink` and restart `downMost`
-    for vid in done.items:
-      backLink.del vid
-    downMost = redo
+  # Update unresolved keys left over after delete operations when overlay
+  # vertices have been added and there was no `hike` path to capture them.
+  #
+  # Considering a list of updated paths to these vertices after deleting a
+  # `Leaf` vertex is deemed too expensive and more error prone. So it is
+  # the task to search for unresolved node keys and add glue paths for them
+  # to the width-first schedule.
+  if deleted:
+    var unresolved: HashSet[VertexID]
+    for (vid,lbl) in db.top.kMap.pairs:
+      if not lbl.isValid and
+         vid notin wff and
+         (vid notin db.top.sTab or db.top.sTab.getOrVoid(vid).isValid):
+        unresolved.incl vid
+
+    let glue = unresolved.cloudConnect(db, wff.base)
+    if 0 < glue.unresolved.len:
+      return err((glue.unresolved.toSeq[0],HashifyNodeUnresolved))
+
+    # Add glue items to `wff.base[]` and `wff.pool[]` tables
+    for (vid,val) in glue.paths.base.pairs:
+      # Add vid to `wff.base[]` list
+      wff.base[vid] = val
+      # Move tail of VertexID chain to `wff.pool[]`
+      var toVid = val.toVid
+      while true:
+        let w = glue.paths.pool.getOrVoid toVid
+        if not w.isValid:
+          break
+        wff.base.del toVid
+        wff.pool[toVid] = w
+        toVid = w.toVid
+
+  # Traverse width-first schedule and update remaining hashes.
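+  # Each round processes the current `base[]` level: a vertex whose node
+  # compiles gets a fresh hash key via `vidAttach()` and its `pool[]`
+  # follow-up link is moved to `redo[]` for the next level; a vertex that
+  # does not compile yet is kept in `redo[]` until its children resolve.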
+  while 0 < wff.base.len:
+    var redo: BackVidTab
+    for (vid,val) in wff.base.pairs:
+      block thisVtx:
+        let vtx = db.getVtx vid
+        # Try to convert the vertex to a node. This is possible only if all
+        # link references have Merkle hash keys, already.
+        if not vtx.isValid:
+          # This might happen when proof nodes (see `snap` protocol) are on
+          # an incomplete trie where this `vid` has a key but no vertex yet.
+          # Also, the key (as part of the proof data) must be on the backend
+          # by the way `leafToRootCrawler()` works. So it is enough to verify
+          # the key there.
+          discard db.getKeyBE(vid).valueOr:
+            return err((vid,HashifyNodeUnresolved))
+          break thisVtx
+
+        # Try to resolve the current vertex as node
+        let node = vtx.toNode(db).valueOr:
+          # Cannot complete with this vertex unless updated, so do it later.
+          redo[vid] = val
+          break thisVtx
+        # End block `thisVtx`
+
+        # Could resolve => update Merkle hash
+        let key = node.digestTo(HashKey)
+        db.vidAttach(HashLabel(root: val.root, key: key), vid)
+
+        # Proceed with back link
+        let nextVal = wff.pool.getOrVoid val.toVid
+        if nextVal.isValid:
+          # Make sure that we keep strict hierarchical order
+          if nextVal.toVid in redo:
+            # Push back from `redo[]` to be considered later
+            wff.pool[nextVal.toVid] = redo.getOrVoid nextVal.toVid
+            redo.del nextVal.toVid
+            # And move the next one to `redo[]`
+            wff.pool.del val.toVid
+            redo[val.toVid] = nextVal
+          elif val.toVid notin redo.values.toSeq.mapIt(it.toVid):
+            wff.pool.del val.toVid
+            redo[val.toVid] = nextVal
+
+    # Restart `wff.base[]`
+    wff.base.swap redo
+
+  # Update root nodes
+  for vid in wff.root - db.top.pPrf:
+    # Convert root vertex to a node.
+    let node = db.getVtx(vid).toNode(db,stopEarly=false).valueOr:
+      return err((vid,HashifyRootNodeUnresolved))
+    db.vidAttach(HashLabel(root: vid, key: node.digestTo(HashKey)), vid)
+    completed.incl vid

   db.top.dirty = false
   ok completed

diff --git a/nimbus/db/aristo/aristo_hike.nim b/nimbus/db/aristo/aristo_hike.nim
index c6627137fd..0064139f41 100644
--- a/nimbus/db/aristo/aristo_hike.nim
+++ b/nimbus/db/aristo/aristo_hike.nim
@@ -60,6 +60,10 @@ func legsTo*(hike: Hike; T: type NibblesSeq): T =
   ## Convert back
   hike.getNibblesImpl()

+func legsTo*(hike: Hike; numLegs: int; T: type NibblesSeq): T =
+  ## Variant of `legsTo()`
+  hike.getNibblesImpl(0, numLegs)
+
 # --------

 proc hikeUp*(

diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim
index 0e7bb082a4..049f0adc7a 100644
--- a/nimbus/db/aristo/aristo_merge.nim
+++ b/nimbus/db/aristo/aristo_merge.nim
@@ -68,7 +68,8 @@ proc to(
   ## Return code converter
   if rc.isOk:
     ok true
-  elif rc.error == MergeLeafPathCachedAlready:
+  elif rc.error in {MergeLeafPathCachedAlready,
+                    MergeLeafPathOnBackendAlready}:
     ok false
   else:
     err(rc.error)
@@ -130,7 +131,7 @@ proc insertBranch(
     #   Should have been tackeld by `hikeUp()`, already
     return err(MergeLeafGarbledHike)
   if linkVtx.xPfx.len == n:
-    return err(MergeBrLinkVtxPfxTooShort)
+    return err(MergeBranchLinkVtxPfxTooShort)

   # Provide and install `forkVtx`
   let
@@ -158,7 +159,7 @@ proc insertBranch(
       rc = path.pathToTag()
     if rc.isErr:
       debug "Branch link leaf path garbled", linkID, path
-      return err(MergeBrLinkLeafGarbled)
+      return err(MergeBranchLinkLeafGarbled)

     let
       local = db.vidFetch(pristine = true)
@@ -453,22 +454,35 @@ proc updatePayload(
     payload: PayloadRef;               # Payload value
   ): Result[Hike,AristoError] =
   ## Update leaf vertex if payloads differ
-  let vtx = hike.legs[^1].wp.vtx
+  let leafLeg = hike.legs[^1]

   # Update payloads if they differ
-  if vtx.lData != payload:
-    let vid = hike.legs[^1].wp.vid
+  if leafLeg.wp.vtx.lData != payload:

-    # Will modify top level cache
-    db.top.dirty = true
+    # Update vertex and hike
+    let
+      vid = leafLeg.wp.vid
+      vtx = VertexRef(
+        vType: Leaf,
+        lPfx:  leafLeg.wp.vtx.lPfx,
+        lData: payload)
+    var hike = hike
+    hike.legs[^1].backend = false
+    hike.legs[^1].wp.vtx = vtx

-    vtx.lData = payload
+    # Modify top level cache
+    db.top.dirty = true
     db.top.sTab[vid] = vtx
     db.top.dirty = true # Modified top level cache
     db.top.lTab[leafTie] = vid
     db.clearMerkleKeys(hike, vid)
+    ok hike

-  ok hike
+  elif leafLeg.backend:
+    err(MergeLeafPathOnBackendAlready)
+
+  else:
+    err(MergeLeafPathCachedAlready)

 # ------------------------------------------------------------------------------
 # Private functions: add Merkle proof node
 # ------------------------------------------------------------------------------

 proc mergeNodeImpl(
@@ -501,6 +515,7 @@ proc mergeNodeImpl(
   ##
   ## has no result for all images of the argument `node` under `pAmk`:
   ##
+  # Check for error after RLP decoding
   doAssert node.error == AristoError(0)
   if not rootVid.isValid:
     return err(MergeRootKeyInvalid)
@@ -694,7 +709,8 @@ proc merge*(
     let rc = db.merge(w.leafTie, w.payload)
     if rc.isOk:
       merged.inc
-    elif rc.error == MergeLeafPathCachedAlready:
+    elif rc.error in {MergeLeafPathCachedAlready,
+                      MergeLeafPathOnBackendAlready}:
       dups.inc
     else:
       return (n,dups,rc.error)

diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim
index 840a816e41..c4e9c665ad 100644
--- a/nimbus/db/aristo/aristo_utils.nim
+++ b/nimbus/db/aristo/aristo_utils.nim
@@ -92,19 +92,30 @@ proc toNode*(
     vtx: VertexRef;                    # Vertex to convert
     db: AristoDbRef;                   # Database, top layer
     stopEarly = true;                  # Full list of missing links if `false`
-    beKeyOk = false;                   # Allow fetching DB backend keys
+    beKeyOk = true;                    # Allow fetching DB backend keys
   ): Result[NodeRef,seq[VertexID]] =
   ## Convert argument the vertex `vtx` to a node type. Missing Merkle hash
   ## keys are searched for on the argument database `db`.
   ##
-  ## If backend keys are allowed by passing `beKeyOk` as `true`, there is no
-  ## compact embedding of a small node into another rather than its hash
-  ## reference. In that case, the hash reference will always be used.
-  ##
   ## On error, at least the vertex ID of the first missing Merkle hash key is
   ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
   ## hash keys are returned.
   ##
+  ## If the argument `beKeyOk` is set `false`, keys for node links are accepted
+  ## only from the cache layer. This does not affect a link key for a payload
+  ## storage root.
+  ##
+  proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey =
+    block:
+      let lbl = db.top.kMap.getOrVoid vid
+      if lbl.isValid:
+        return lbl.key
+    if beOk:
+      let rc = db.getKeyBE vid
+      if rc.isOk:
+        return rc.value
+    VOID_HASH_KEY
+
   case vtx.vType:
   of Leaf:
     let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
@@ -126,7 +137,7 @@ proc toNode*(
       for n in 0 ..
15: let vid = vtx.bVid[n] if vid.isValid: - let key = db.getKey vid + let key = db.getKey(vid, beKeyOk) if key.isValid: node.key[n] = key elif stopEarly: @@ -140,13 +151,26 @@ proc toNode*( of Extension: let vid = vtx.eVid - key = db.getKey vid + key = db.getKey(vid, beKeyOk) if not key.isValid: return err(@[vid]) let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid) node.key[0] = key return ok node + +proc subVids*(vtx: VertexRef): seq[VertexID] = + ## Returns the list of all sub-vertex IDs for the argument `vtx` + case vtx.vType: + of Leaf: + discard + of Branch: + for vid in vtx.bVid: + if vid.isValid: + result.add vid + of Extension: + result.add vtx.eVid + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/tests/test_aristo/test_filter.nim b/tests/test_aristo/test_filter.nim index 86d81edf28..df151857ef 100644 --- a/tests/test_aristo/test_filter.nim +++ b/tests/test_aristo/test_filter.nim @@ -341,7 +341,9 @@ proc checkBeOk( cache = if forceCache: true else: not dx[n].top.dirty rc = dx[n].checkBE(relax=relax, cache=cache) xCheckRc rc.error == (0,0): - noisy.say "***", "db check failed", " n=", n, " cache=", cache + noisy.say "***", "db check failed", + " n=", n, "/", dx.len-1, + " cache=", cache true @@ -356,19 +358,22 @@ proc checkFilterTrancoderOk( let rc = dx[n].roFilter.blobify() xCheckRc rc.error == 0: noisy.say "***", "db serialisation failed", - " n=", n, " error=", rc.error + " n=", n, "/", dx.len-1, + " error=", rc.error rc.value let dcdRoundTrip = block: let rc = data.deblobify FilterRef xCheckRc rc.error == 0: noisy.say "***", "db de-serialisation failed", - " n=", n, " error=", rc.error + " n=", n, "/", dx.len-1, + " error=", rc.error rc.value let roFilterExRoundTrip = dx[n].roFilter.isEq(dcdRoundTrip, dx[n], noisy) xCheck roFilterExRoundTrip: noisy.say "***", "checkFilterTrancoderOk failed", + " n=", n, "/", dx.len-1, "\n roFilter=", dx[n].roFilter.pp(dx[n]), "\n dcdRoundTrip=", dcdRoundTrip.pp(dx[n]) diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index da28b0c1dc..af18f2a017 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -17,7 +17,8 @@ import unittest2, stew/endians2, ../../nimbus/db/aristo/[ - aristo_check, aristo_delete, aristo_desc, aristo_get, aristo_merge], + aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get, + aristo_merge], ../../nimbus/db/[aristo, aristo/aristo_init/persistent], ../replay/xcheck, ./test_helpers @@ -156,7 +157,8 @@ proc saveToBackend( block: let rc = db.checkBE(relax=relax) - xCheckRc rc.error == (0,0) + xCheckRc rc.error == (0,0): + noisy.say "***", "saveToBackend (8)", " debugID=", debugID # Update layers to original level tx = db.txBegin().value.to(AristoDbRef).txBegin().value @@ -331,8 +333,12 @@ proc testTxMergeAndDelete*( defer: db.innerCleanUp # Merge leaf data into main trie (w/vertex ID 1) - let kvpLeafs = w.kvpLst.mapRootVid VertexID(1) - for leaf in kvpLeafs: + let kvpLeafs = block: + var lst = w.kvpLst.mapRootVid VertexID(1) + # The list might be reduced for isolation of particular properties, + # e.g. 
lst.setLen(min(5,lst.len)) + lst + for i,leaf in kvpLeafs: let rc = db.merge leaf xCheckRc rc.error == 0 @@ -358,13 +364,20 @@ proc testTxMergeAndDelete*( (leaf, lid) = lvp if doSaveBeOk: - if not tx.saveToBackend( - chunkedMpt=false, relax=relax, noisy=noisy, runID): - return + let saveBeOk = tx.saveToBackend( + chunkedMpt=false, relax=relax, noisy=noisy, runID) + xCheck saveBeOk: + noisy.say "***", "del(2)", + " u=", u, + " n=", n, "/", list.len, + "\n leaf=", leaf.pp(db), + "\n db\n ", db.pp(backendOk=true), + "\n" # Delete leaf - let rc = db.delete leaf - xCheckRc rc.error == (0,0) + block: + let rc = db.delete leaf + xCheckRc rc.error == (0,0) # Update list of remaininf leafs leafsLeft.excl leaf @@ -446,7 +459,7 @@ proc testTxMergeProofAndKvpList*( let rc = db.merge(rootKey, VertexID(1)) xCheckRc rc.error == 0 - proved = db.merge(w.proof, rc.value) # , noisy) + proved = db.merge(w.proof, rc.value) xCheck proved.error in {AristoError(0),MergeHashKeyCachedAlready} xCheck w.proof.len == proved.merged + proved.dups @@ -468,8 +481,8 @@ proc testTxMergeProofAndKvpList*( return when true and false: - noisy.say "***", "proofs(6) <", n, "/", lstLen-1, ">", - " groups=", count, " proved=", proved.pp, " merged=", merged.pp + noisy.say "***", "proofs(9) <", n, "/", list.len-1, ">", + " groups=", count, " proved=", proved, " merged=", merged true # ------------------------------------------------------------------------------
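
A minimal, self-contained sketch of the width-first relabelling idea used by
`hashify()` above. The toy declarations (`Vid`, `Key`, the plain `children`
table, `std/hashes` standing in for the Merkle hash) are assumptions made for
this illustration only and are not part of the Aristo API:

    import std/[hashes, sets, tables]

    type
      Vid = int                  # toy stand-in for Aristo's VertexID
      Key = Hash                 # toy stand-in for HashKey

    proc relabel(children: Table[Vid,seq[Vid]];
                 leafs: HashSet[Vid]): Table[Vid,Key] =
      ## Width-first, leaf-to-root labelling: a vertex gets its key as soon
      ## as all of its children are labelled, mirroring the `base[]`/`redo[]`
      ## sweep of `hashify()`.
      var keys: Table[Vid,Key]
      for v in leafs:            # leafs can be hashed immediately
        keys[v] = hash(v)
      var parent: Table[Vid,Vid] # child -> parent back links
      for (p,subs) in children.pairs:
        for s in subs:
          parent[s] = p
      var base = leafs           # current width-first level
      while 0 < base.len:
        var redo: HashSet[Vid]
        for v in base:
          let p = parent.getOrDefault(v, -1)
          if p < 0 or p in keys:
            continue             # at a root, or parent already done
          var h = hash(p)
          var ready = true
          for s in children[p]:
            if s notin keys:
              ready = false      # a sibling subtree is still unresolved
              break
            h = h !& keys[s]
          if ready:
            keys[p] = !$h
            redo.incl p          # parent becomes a candidate next round
          else:
            redo.incl v          # retry once the siblings are resolved
        if redo == base:
          break                  # no progress: stray or dangling vertices
        base = redo
      keys

    when isMainModule:
      # Tiny trie: 1 -> {2,3}, 2 -> {4,5}; leafs are 3, 4, 5.
      let kids = {1: @[2,3], 2: @[4,5]}.toTable
      doAssert relabel(kids, [3,4,5].toHashSet).len == 5

Unlike the production code, the sketch simply retries a vertex in `redo[]`
instead of keeping a `pool[]` of parent links; the `pool[]`/`base[]` split in
`hashify()` avoids rescanning siblings on every round.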