diff --git a/src/BeeNet.Client/BeeClient.cs b/src/BeeNet.Client/BeeClient.cs
index cf6c2a3..0cb7b55 100644
--- a/src/BeeNet.Client/BeeClient.cs
+++ b/src/BeeNet.Client/BeeClient.cs
@@ -879,7 +879,7 @@ public async Task<string> RefreshAuthAsync(
                 },
                 cancellationToken).ConfigureAwait(false)).Key;
 
-        public async Task<SwarmHash> ResolveSwarmAddressToHashAsync(SwarmAddress address)
+        public async Task<SwarmChunkReference> ResolveAddressToChunkReferenceAsync(SwarmAddress address)
         {
             var chunkStore = new BeeClientChunkStore(this);
             
@@ -887,7 +887,7 @@ public async Task<SwarmHash> ResolveSwarmAddressToHashAsync(SwarmAddress address
                 chunkStore,
                 address.Hash);
             
-            return await rootManifest.ResolveResourceHashAsync(address).ConfigureAwait(false);
+            return await rootManifest.ResolveAddressToChunkReferenceAsync(address).ConfigureAwait(false);
         }
 
         public Task ReuploadContentAsync(
diff --git a/src/BeeNet.Core/Models/IReadOnlyPostageBuckets.cs b/src/BeeNet.Core/Models/IReadOnlyPostageBuckets.cs
new file mode 100644
index 0000000..054f667
--- /dev/null
+++ b/src/BeeNet.Core/Models/IReadOnlyPostageBuckets.cs
@@ -0,0 +1,59 @@
+// Copyright 2021-present Etherna SA
+// This file is part of Bee.Net.
+// 
+// Bee.Net is free software: you can redistribute it and/or modify it under the terms of the
+// GNU Lesser General Public License as published by the Free Software Foundation,
+// either version 3 of the License, or (at your option) any later version.
+// 
+// Bee.Net is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+// See the GNU Lesser General Public License for more details.
+// 
+// You should have received a copy of the GNU Lesser General Public License along with Bee.Net.
+// If not, see <https://www.gnu.org/licenses/>.
+
+using System.Collections.Generic;
+
+namespace Etherna.BeeNet.Models
+{
+    public interface IReadOnlyPostageBuckets
+    {
+        // Properties.
+        /// <summary>
+        /// The highest number of collisions in any bucket
+        /// </summary>
+        uint MaxBucketCollisions { get; }
+        
+        /// <summary>
+        /// The lowest number of collisions in any bucket
+        /// </summary>
+        uint MinBucketCollisions { get; }
+        
+        /// <summary>
+        /// Minimum required postage batch depth
+        /// </summary>
+        int RequiredPostageBatchDepth { get; }
+        
+        /// <summary>
+        /// Total number of chunks added across all buckets
+        /// </summary>
+        long TotalChunks { get; }
+        
+        // Methods.
+        /// <summary>
+        /// Count the buckets, grouped by number of collisions
+        /// </summary>
+        /// <returns>Array indexed by collision count, with the number of buckets as value</returns>
+        int[] CountBucketsByCollisions();
+        
+        /// <summary>
+        /// Get a copy of all buckets
+        /// </summary>
+        /// <returns>A copy of the collision counters, indexed by bucket id</returns>
+        uint[] GetBuckets();
+
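+        /// <summary>
+        /// Get the ids of all buckets having a given number of collisions
+        /// </summary>
+        /// <returns>The bucket ids</returns>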
+        IEnumerable<uint> GetBucketsByCollisions(uint collisions);
+
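+        /// <summary>
+        /// Get the number of collisions recorded on a bucket
+        /// </summary>
+        /// <returns>The bucket's collision counter</returns>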
+        uint GetCollisions(uint bucketId);
+    }
+}
\ No newline at end of file
diff --git a/src/BeeNet.Core/Models/PostageBuckets.cs b/src/BeeNet.Core/Models/PostageBuckets.cs
index a3188cf..19b29da 100644
--- a/src/BeeNet.Core/Models/PostageBuckets.cs
+++ b/src/BeeNet.Core/Models/PostageBuckets.cs
@@ -13,21 +13,27 @@
 // If not, see <https://www.gnu.org/licenses/>.
 
 using System;
-using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Diagnostics.CodeAnalysis;
 using System.Linq;
+using System.Threading;
 
 namespace Etherna.BeeNet.Models
 {
     /// <summary>
     /// A thread safe implementation of postage buckets array
     /// </summary>
-    public class PostageBuckets
+    [SuppressMessage("Reliability", "CA2002:Do not lock on objects with weak identity")]
+    public class PostageBuckets : IReadOnlyPostageBuckets, IDisposable
     {
         // Consts.
         public const int BucketsSize = 1 << PostageBatch.BucketDepth;
         
         // Fields.
-        private readonly ConcurrentDictionary<uint, uint> _buckets; //<index, collisions>
+        private readonly uint[] _buckets;
+        private readonly Dictionary<uint, HashSet<uint>> bucketsByCollisions; //<collisions, bucketId[]>
+        private readonly ReaderWriterLockSlim bucketsLock = new(LockRecursionPolicy.NoRecursion);
+        private bool disposed;
         
         // Constructor.
         public PostageBuckets(
@@ -38,56 +44,173 @@ public PostageBuckets(
                 throw new ArgumentOutOfRangeException(nameof(initialBuckets),
                     $"Initial buckets must have length {BucketsSize}, or be null");
             
-            _buckets = ArrayToDictionary(initialBuckets ?? []);
+            //init "buckets" and reverse index "bucketsByCollisions"
+            _buckets = initialBuckets ?? new uint[BucketsSize];
+            bucketsByCollisions = new Dictionary<uint, HashSet<uint>> { [0] = [] };
+            for (uint i = 0; i < BucketsSize; i++)
+                bucketsByCollisions[0].Add(i);
+
+            //init counters
+            MaxBucketCollisions = 0;
+            MinBucketCollisions = 0;
+            TotalChunks = 0;
+        }
+
+        // Dispose.
+        public void Dispose()
+        {
+            Dispose(true);
+            GC.SuppressFinalize(this);
+        }
+
+        protected virtual void Dispose(bool disposing)
+        {
+            if (disposed) return;
+
+            // Dispose managed resources.
+            if (disposing)
+                bucketsLock.Dispose();
+
+            disposed = true;
         }
 
         // Properties.
-        public ReadOnlySpan<uint> Buckets => DictionaryToArray(_buckets);
-        public uint MaxBucketCount { get; private set; }
+        public uint MaxBucketCollisions { get; private set; }
+        public uint MinBucketCollisions { get; private set; }
+        public int RequiredPostageBatchDepth => CollisionsToRequiredPostageBatchDepth(MaxBucketCollisions);
         public long TotalChunks { get; private set; }
         
         // Methods.
+        public int[] CountBucketsByCollisions()
+        {
+            bucketsLock.EnterReadLock();
+            try
+            {
+                return bucketsByCollisions.Select(pair => pair.Value.Count).ToArray();
+            }
+            finally
+            {
+                bucketsLock.ExitReadLock();
+            }
+        }
+        
+        public uint[] GetBuckets()
+        {
+            bucketsLock.EnterReadLock();
+            try
+            {
+                return _buckets.ToArray();
+            }
+            finally
+            {
+                bucketsLock.ExitReadLock();
+            }
+        }
+
+        public IEnumerable<uint> GetBucketsByCollisions(uint collisions)
+        {
+            bucketsLock.EnterReadLock();
+            try
+            {
+                return bucketsByCollisions.TryGetValue(collisions, out var bucketsSet)
+                    ? bucketsSet.ToArray() //copy, to avoid exposing the set outside of the lock
+                    : Array.Empty<uint>();
+            }
+            finally
+            {
+                bucketsLock.ExitReadLock();
+            }
+        }
+        
         public uint GetCollisions(uint bucketId)
         {
-            _buckets.TryGetValue(bucketId, out var collisions);
-            return collisions;
+            bucketsLock.EnterReadLock();
+            try
+            {
+                return _buckets[bucketId];
+            }
+            finally
+            {
+                bucketsLock.ExitReadLock();
+            }
         }
 
         public void IncrementCollisions(uint bucketId)
         {
-            _buckets.AddOrUpdate(
-                bucketId,
-                _ =>
-                {
-                    TotalChunks++;
-                    if (1 > MaxBucketCount)
-                        MaxBucketCount = 1;
-                    return 1;
-                },
-                (_, c) =>
-                {
-                    TotalChunks++;
-                    if (c + 1 > MaxBucketCount)
-                        MaxBucketCount = c + 1;
-                    return c + 1;
-                });
+            /*
+             * A write lock is required because the operation must update "_buckets", "bucketsByCollisions"
+             * and the counters atomically. A ConcurrentDictionary would offer finer-grained locking on single
+             * values, but it doesn't support atomic operations involving other objects, like the counters
+             * and "bucketsByCollisions".
+             */
+            bucketsLock.EnterWriteLock();
+            try
+            {
+                // Update collections.
+                _buckets[bucketId]++;
+                
+                bucketsByCollisions.TryAdd(_buckets[bucketId], []);
+                bucketsByCollisions[_buckets[bucketId] - 1].Remove(bucketId);
+                bucketsByCollisions[_buckets[bucketId]].Add(bucketId);
+                
+                // Update counters.
+                if (_buckets[bucketId] > MaxBucketCollisions)
+                    MaxBucketCollisions = _buckets[bucketId];
+                    
+                MinBucketCollisions = bucketsByCollisions.OrderBy(p => p.Key)
+                    .First(p => p.Value.Count > 0)
+                    .Key;
+                    
+                TotalChunks++;
+            }
+            finally
+            {
+                bucketsLock.ExitWriteLock();
+            }
         }
 
-        public void ResetBucketCollisions(uint bucketId) =>
-            _buckets.AddOrUpdate(bucketId, _ => 0, (_, _) => 0);
+        public void ResetBucketCollisions(uint bucketId)
+        {
+            bucketsLock.EnterWriteLock();
+            try
+            {
+                // Update collections.
+                var oldCollisions = _buckets[bucketId];
+                _buckets[bucketId] = 0;
+                
+                bucketsByCollisions[oldCollisions].Remove(bucketId);
+                bucketsByCollisions[0].Add(bucketId);
+            
+                // Update counters.
+                MaxBucketCollisions = bucketsByCollisions.OrderByDescending(p => p.Key)
+                    .First(p => p.Value.Count > 0)
+                    .Key;
+                
+                MinBucketCollisions = 0;
+            }
+            finally
+            {
+                bucketsLock.ExitWriteLock();
+            }
+        }
         
-        // Helpers.
-        private static ConcurrentDictionary<uint, uint> ArrayToDictionary(uint[] buckets) =>
-            new(buckets.Select((c, i) => (c, (uint)i))
-                .ToDictionary<(uint value, uint index), uint, uint>(pair => pair.index, pair => pair.value));
-
-        private static uint[] DictionaryToArray(ConcurrentDictionary<uint, uint> dictionary)
+        // Static methods.
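+        /// <summary>
+        /// Calculate the minimum postage batch depth able to hold the given number of collisions per bucket.
+        /// A batch of depth d provides 2^(d - BucketDepth) slots per bucket, so the result is
+        /// ceil(log2(collisions)) + BucketDepth, never lower than the minimum batch depth.
+        /// </summary>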
+        public static int CollisionsToRequiredPostageBatchDepth(uint collisions)
         {
-            var outArray = new uint[BucketsSize];
-            for (uint i = 0; i < BucketsSize; i++)
-                if (dictionary.TryGetValue(i, out var value))
-                    outArray[i] = value;
-            return outArray;
+            if (collisions == 0)
+                return PostageBatch.MinDepth;
+            return Math.Max(
+                (int)Math.Ceiling(Math.Log2(collisions)) + PostageBatch.BucketDepth,
+                PostageBatch.MinDepth);
+        }
+        
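+        /// <summary>
+        /// Inverse conversion: the maximum number of chunks per bucket that a batch of the given depth
+        /// can accept, i.e. 2^(depth - BucketDepth).
+        /// </summary>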
+        public static uint PostageBatchDepthToMaxCollisions(int postageBatchDepth)
+        {
+#pragma warning disable CA1512 //only supported from .net 8
+            if (postageBatchDepth < PostageBatch.MinDepth)
+                throw new ArgumentOutOfRangeException(nameof(postageBatchDepth));
+#pragma warning restore CA1512
+
+            return (uint)Math.Pow(2, postageBatchDepth - PostageBatch.BucketDepth);
         }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Core/Models/SwarmChunkReference.cs b/src/BeeNet.Core/Models/SwarmChunkReference.cs
new file mode 100644
index 0000000..e746b17
--- /dev/null
+++ b/src/BeeNet.Core/Models/SwarmChunkReference.cs
@@ -0,0 +1,9 @@
+namespace Etherna.BeeNet.Models
+{
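+    /// <summary>
+    /// A reference to a chunk: its hash, its optional XOR encryption key, and whether child references
+    /// are recursively encrypted
+    /// </summary>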
+    public class SwarmChunkReference(SwarmHash hash, XorEncryptKey? encryptionKey, bool useRecursiveEncryption)
+    {
+        public XorEncryptKey? EncryptionKey { get; } = encryptionKey;
+        public SwarmHash Hash { get; } = hash;
+        public bool UseRecursiveEncryption { get; } = useRecursiveEncryption;
+    }
+}
\ No newline at end of file
diff --git a/src/BeeNet.Util/Manifest/XorEncryptKey.cs b/src/BeeNet.Core/Models/XorEncryptKey.cs
similarity index 72%
rename from src/BeeNet.Util/Manifest/XorEncryptKey.cs
rename to src/BeeNet.Core/Models/XorEncryptKey.cs
index b1c88f0..5ef2099 100644
--- a/src/BeeNet.Util/Manifest/XorEncryptKey.cs
+++ b/src/BeeNet.Core/Models/XorEncryptKey.cs
@@ -12,10 +12,11 @@
 // You should have received a copy of the GNU Lesser General Public License along with Bee.Net.
 // If not, see <https://www.gnu.org/licenses/>.
 
+using Nethereum.Hex.HexConvertors.Extensions;
 using System;
 using System.Security.Cryptography;
 
-namespace Etherna.BeeNet.Manifest
+namespace Etherna.BeeNet.Models
 {
     public class XorEncryptKey
     {
@@ -34,6 +35,23 @@ public XorEncryptKey(byte[] bytes)
             
             this.bytes = bytes;
         }
+        
+        public XorEncryptKey(string key)
+        {
+            ArgumentNullException.ThrowIfNull(key, nameof(key));
+            
+            try
+            {
+                bytes = key.HexToByteArray();
+            }
+            catch (FormatException)
+            {
+                throw new ArgumentException("Invalid key", nameof(key));
+            }
+            
+            if (!IsValidKey(bytes))
+                throw new ArgumentOutOfRangeException(nameof(key));
+        }
 
         // Builders.
         public static XorEncryptKey BuildNewRandom()
@@ -61,5 +79,14 @@ public void EncryptDecrypt(Span<byte> data)
             for (var i = 0; i < data.Length; i++)
                 data[i] = (byte)(data[i] ^ bytes[i % bytes.Length]);
         }
+        
+        public override string ToString() => bytes.ToHex();
+        
+        // Static methods.
+        public static bool IsValidKey(byte[] value)
+        {
+            ArgumentNullException.ThrowIfNull(value, nameof(value));
+            return value.Length == KeySize;
+        }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Bmt/SwarmChunkBmtHasher.cs b/src/BeeNet.Util/Hashing/Bmt/SwarmChunkBmtHasher.cs
index 2e472a9..cec1ece 100644
--- a/src/BeeNet.Util/Hashing/Bmt/SwarmChunkBmtHasher.cs
+++ b/src/BeeNet.Util/Hashing/Bmt/SwarmChunkBmtHasher.cs
@@ -22,7 +22,7 @@ namespace Etherna.BeeNet.Hashing.Bmt
     internal static class SwarmChunkBmtHasher
     {
         // Static methods.
-        public static SwarmHash Hash(byte[] span, byte[] data)
+        public static SwarmHash Hash(byte[] span, byte[] data, IHasher? hasher = null)
         {
             ArgumentNullException.ThrowIfNull(span, nameof(span));
             ArgumentNullException.ThrowIfNull(data, nameof(data));
@@ -39,7 +39,7 @@ public static SwarmHash Hash(byte[] span, byte[] data)
             }
             
             // Build the merkle tree.
-            var hasher = new Hasher();
+            hasher ??= new Hasher();
             var bmt = new SwarmChunkBmt(hasher);
             bmt.BuildTree(segments);
             var result = bmt.Root.Hash;
diff --git a/src/BeeNet.Util/Hashing/Pipeline/ChunkAggregatorPipelineStage.cs b/src/BeeNet.Util/Hashing/Pipeline/ChunkAggregatorPipelineStage.cs
index 4d66004..9f73724 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/ChunkAggregatorPipelineStage.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/ChunkAggregatorPipelineStage.cs
@@ -22,13 +22,16 @@
 
 namespace Etherna.BeeNet.Hashing.Pipeline
 {
-    internal delegate Task<SwarmHash> HashChunkDelegateAsync(byte[] span, byte[] data);
-    
     internal sealed class ChunkAggregatorPipelineStage : IHasherPipelineStage
     {
         // Private classes.
-        private sealed class ChunkHeader(SwarmHash hash, ReadOnlyMemory<byte> span, bool isParityChunk)
+        private sealed class ChunkHeader(
+            SwarmHash hash,
+            ReadOnlyMemory<byte> span,
+            XorEncryptKey? chunkKey,
+            bool isParityChunk)
         {
+            public XorEncryptKey? ChunkKey { get; } = chunkKey;
             public SwarmHash Hash { get; } = hash;
             public ReadOnlyMemory<byte> Span { get; } = span;
             public bool IsParityChunk { get; } = isParityChunk;
@@ -38,18 +41,23 @@ private sealed class ChunkHeader(SwarmHash hash, ReadOnlyMemory<byte> span, bool
         private readonly SemaphoreSlim feedChunkMutex = new(1, 1);
         private readonly Dictionary<long, HasherPipelineFeedArgs> feedingBuffer = new();
         private readonly List<List<ChunkHeader>> chunkLevels; //[level][chunk]
-        private readonly HashChunkDelegateAsync hashChunkDelegate;
         private readonly byte maxChildrenChunks;
+        private readonly ChunkBmtPipelineStage shortBmtPipelineStage;
+        private readonly bool useRecursiveEncryption;
         
         private long feededChunkNumberId;
 
         // Constructor.
         public ChunkAggregatorPipelineStage(
-            HashChunkDelegateAsync hashChunkDelegate)
+            ChunkBmtPipelineStage shortBmtPipelineStage,
+            bool useRecursiveEncryption)
         {
             chunkLevels = [];
-            this.hashChunkDelegate = hashChunkDelegate;
-            maxChildrenChunks = SwarmChunkBmt.SegmentsCount;
+            maxChildrenChunks = (byte)(useRecursiveEncryption
+                ? SwarmChunkBmt.SegmentsCount / 2 //write chunk key after chunk hash
+                : SwarmChunkBmt.SegmentsCount);
+            this.shortBmtPipelineStage = shortBmtPipelineStage;
+            this.useRecursiveEncryption = useRecursiveEncryption;
         }
         
         // Dispose.
@@ -58,6 +66,9 @@ public void Dispose()
             feedChunkMutex.Dispose();
         }
         
+        // Properties.
+        public long MissedOptimisticHashing => shortBmtPipelineStage.MissedOptimisticHashing;
+        
         // Methods.
         public async Task FeedAsync(HasherPipelineFeedArgs args)
         {
@@ -83,6 +94,7 @@ await AddChunkToLevelAsync(
                         new ChunkHeader(
                             processingChunk.Hash!.Value,
                             processingChunk.Span,
+                            processingChunk.ChunkKey,
                             false)).ConfigureAwait(false);
                 }
             }
@@ -92,7 +104,7 @@ await AddChunkToLevelAsync(
             }
         }
 
-        public async Task<SwarmHash> SumAsync()
+        public async Task<SwarmChunkReference> SumAsync()
         {
             bool rootChunkFound = false;
             for (int i = 0; !rootChunkFound; i++)
@@ -118,7 +130,7 @@ public async Task<SwarmHash> SumAsync()
             
             var rootChunk = chunkLevels.Last()[0];
 
-            return rootChunk.Hash;
+            return new(rootChunk.Hash, rootChunk.ChunkKey, useRecursiveEncryption);
         }
 
         // Helpers.
@@ -151,19 +163,32 @@ private async Task WrapFullLevelAsync(int level)
                     .Aggregate((a,c) => a + c)); //sum of ulongs. Linq doesn't have it
             
             // Build total data from total span, and all the hashes in level.
+            // If chunks are compacted, append the encryption key after the chunk hash.
             var totalData = totalSpan.Concat(
-                levelChunks.SelectMany(c => c.Hash.ToByteArray()))
+                levelChunks.SelectMany(c => useRecursiveEncryption
+                    ? c.Hash.ToByteArray().Concat(c.ChunkKey!.Bytes.ToArray())
+                    : c.Hash.ToByteArray()))
                 .ToArray();
 
             // Run hashing on the new chunk, and add it to next level.
+            var hashingResult = await HashIntermediateChunkAsync(totalSpan, totalData).ConfigureAwait(false);
             await AddChunkToLevelAsync(
                 level + 1,
                 new ChunkHeader(
-                    await hashChunkDelegate(totalSpan, totalData).ConfigureAwait(false),
+                    hashingResult.Hash,
                     totalSpan,
+                    hashingResult.EncryptionKey,
                     false)).ConfigureAwait(false);
             
             levelChunks.Clear();
         }
+        
+        // Helpers.
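+        /// <summary>
+        /// Hash an intermediate (non-leaf) chunk by feeding it through the short BMT pipeline stage.
+        /// </summary>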
+        private async Task<SwarmChunkReference> HashIntermediateChunkAsync(byte[] span, byte[] data)
+        {
+            var args = new HasherPipelineFeedArgs(span: span, data: data);
+            await shortBmtPipelineStage.FeedAsync(args).ConfigureAwait(false);
+            return new(args.Hash!.Value, args.ChunkKey, useRecursiveEncryption);
+        }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/ChunkBmtPipelineStage.cs b/src/BeeNet.Util/Hashing/Pipeline/ChunkBmtPipelineStage.cs
index 43b15b3..f85897b 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/ChunkBmtPipelineStage.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/ChunkBmtPipelineStage.cs
@@ -13,8 +13,11 @@
 // If not, see <https://www.gnu.org/licenses/>.
 
 using Etherna.BeeNet.Hashing.Bmt;
+using Etherna.BeeNet.Hashing.Postage;
 using Etherna.BeeNet.Models;
 using System;
+using System.Buffers.Binary;
+using System.Collections.Generic;
 using System.Threading.Tasks;
 
 namespace Etherna.BeeNet.Hashing.Pipeline
@@ -22,15 +25,38 @@ namespace Etherna.BeeNet.Hashing.Pipeline
     /// <summary>
     /// Calculate hash of each chunk
     /// </summary>
-    internal sealed class ChunkBmtPipelineStage(
-        IHasherPipelineStage nextStage)
-        : IHasherPipelineStage
+    internal sealed class ChunkBmtPipelineStage : IHasherPipelineStage
     {
+        // Fields.
+        private readonly ushort compactLevel;
+        private readonly IHasherPipelineStage nextStage;
+        private readonly IPostageStampIssuer stampIssuer;
+
+        private long _missedOptimisticHashing;
+
+        // Constructor.
+        /// <summary>
+        /// Calculate hash of each chunk
+        /// </summary>
+        public ChunkBmtPipelineStage(
+            ushort compactLevel,
+            IHasherPipelineStage nextStage,
+            IPostageStampIssuer stampIssuer)
+        {
+            this.compactLevel = compactLevel;
+            this.nextStage = nextStage;
+            this.stampIssuer = stampIssuer;
+        }
+
         // Dispose.
         public void Dispose()
         {
             nextStage.Dispose();
         }
+        
+        // Properties.
+        public long MissedOptimisticHashing =>
+            _missedOptimisticHashing + nextStage.MissedOptimisticHashing;
 
         // Methods.
         public async Task FeedAsync(HasherPipelineFeedArgs args)
@@ -39,14 +65,163 @@ public async Task FeedAsync(HasherPipelineFeedArgs args)
                 throw new InvalidOperationException("Data can't be shorter than span size here");
             if (args.Data.Length > SwarmChunk.SpanAndDataSize)
                 throw new InvalidOperationException("Data can't be longer than chunk + span size here");
+            
+            // Create an instance for this specific task. Hasher is not thread safe.
+            var hasher = new Hasher();
 
-            args.Hash = SwarmChunkBmtHasher.Hash(
+            var plainChunkHash = SwarmChunkBmtHasher.Hash(
                 args.Data[..SwarmChunk.SpanSize].ToArray(),
-                args.Data[SwarmChunk.SpanSize..].ToArray());
+                args.Data[SwarmChunk.SpanSize..].ToArray(),
+                hasher);
+            if (compactLevel == 0)
+            {
+                /* If no chunk compaction is involved, simply calculate the chunk hash and proceed. */
+                args.Hash = plainChunkHash;
+            }
+            else
+            {
+                // Search best chunk key.
+                var bestChunkResult = await GetBestChunkAsync(args, plainChunkHash, hasher).ConfigureAwait(false);
+                
+                args.ChunkKey = bestChunkResult.ChunkKey;
+                args.Data = bestChunkResult.EncryptedData;
+                args.Hash = bestChunkResult.Hash;
+            }
 
             await nextStage.FeedAsync(args).ConfigureAwait(false);
         }
 
-        public Task<SwarmHash> SumAsync() => nextStage.SumAsync();
+        public Task<SwarmChunkReference> SumAsync() => nextStage.SumAsync();
+        
+        // Helpers.
+        private static void EncryptDecryptChunkData(XorEncryptKey chunkKey, byte[] data)
+        {
+            // Don't encrypt the span: its plaintext (the chunk length) is known, so encrypting it would expose part of the key.
+            chunkKey.EncryptDecrypt(data.AsSpan()[SwarmChunk.SpanSize..]);
+        }
+        
+        private async Task<CompactedChunkAttemptResult> GetBestChunkAsync(
+            HasherPipelineFeedArgs args,
+            SwarmHash plainChunkHash,
+            Hasher hasher)
+        {
+            /*
+             * If chunk compaction is involved, use optimistic calculation.
+             * 
+             * Calculate an encryption key, and try to find a bucket with optimal collisions.
+             * Before proceeding with the chunk and its key, wait optimistically until the previous chunk
+             * has been completed. Then verify whether the same bucket has received new collisions.
+             * If not, proceed; otherwise, search for the best chunk key again.
+             *
+             * The chunk key is calculated from the plain chunk hash, replacing its last 2 bytes
+             * with the attempt counter (ushort), and hashing again:
+             * 
+             *     chunkKey = Keccak(plainChunkHash[..^2] + attempt)
+             *
+             * The encrypted chunk is obtained by encrypting the data with the chunk key.
+             *
+             * The optimistic algorithm searches for the first suitable chunk, trying incremental attempts
+             * up to a maximum given by the "compactLevel" parameter.
+             *
+             * The best chunk is one that fits in a bucket with the lowest possible number of collisions.
+             *
+             * A cache keeps the generated chunk key and relative bucket id for each attempt. This avoids
+             * repeating the hash calculations in case the search has to be run again.
+             */
+                
+            // Run optimistically before prev chunk completion.
+            var encryptionCache = new Dictionary<ushort /*attempt*/, CompactedChunkAttemptResult>();
+            var (bestKeyAttempt, expectedCollisions) = SearchFirstBestChunkKey(args, encryptionCache, plainChunkHash, hasher);
+
+            // If there isn't any prev chunk to wait, proceed with result.
+            if (args.PrevChunkSemaphore == null)
+                return encryptionCache[bestKeyAttempt];
+
+            try
+            {
+                // Otherwise wait until prev chunk has been processed.
+                await args.PrevChunkSemaphore.WaitAsync().ConfigureAwait(false);
+                
+                // ** Here chunks enter one at a time, and in order. **
+            
+                // Check the optimistic result, and keep if valid.
+                var bestBucketId = encryptionCache[bestKeyAttempt].Hash.ToBucketId();
+                var actualCollisions = stampIssuer.Buckets.GetCollisions(bestBucketId);
+            
+                if (actualCollisions == expectedCollisions)
+                    return encryptionCache[bestKeyAttempt];
+
+                // If it has been invalidated, run the search again.
+                _missedOptimisticHashing++;
+                var (newBestKeyAttempt, _) = SearchFirstBestChunkKey(args, encryptionCache, plainChunkHash, hasher);
+                return encryptionCache[newBestKeyAttempt];
+            }
+            finally
+            {
+                //release prev chunk semaphore to reuse it
+                args.PrevChunkSemaphore.Release();
+            }
+        }
+        
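+        /// <summary>
+        /// Try incremental chunk keys, returning the first attempt that hits a bucket with the minimum
+        /// known collisions, or else the attempt with the fewest collisions found within "compactLevel" tries.
+        /// </summary>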
+        private (ushort BestKeyAttempt, uint ExpectedCollisions) SearchFirstBestChunkKey(
+            HasherPipelineFeedArgs args,
+            Dictionary<ushort /*attempt*/, CompactedChunkAttemptResult> optimisticCache,
+            SwarmHash plainChunkHash,
+            Hasher hasher)
+        {
+            // Init.
+            ushort bestAttempt = 0;
+            uint bestCollisions = 0;
+            
+            var plainChunkHashArray = plainChunkHash.ToByteArray();
+                
+            // Search best chunk key.
+            for (ushort i = 0; i < compactLevel; i++)
+            {
+                uint collisions;
+                
+                if (optimisticCache.TryGetValue(i, out var cachedValues))
+                {
+                    collisions = stampIssuer.Buckets.GetCollisions(cachedValues.Hash.ToBucketId());
+                }
+                else
+                {
+                    // Create key.
+                    BinaryPrimitives.WriteUInt16BigEndian(plainChunkHashArray.AsSpan()[^2..], i);
+                    var chunkKey = new XorEncryptKey(hasher.ComputeHash(plainChunkHashArray));
+                    
+                    // Encrypt data.
+                    var encryptedData = args.Data.ToArray();
+                    EncryptDecryptChunkData(chunkKey, encryptedData);
+                    
+                    // Calculate hash, bucket id, and save in cache.
+                    var encryptedHash = SwarmChunkBmtHasher.Hash(
+                        encryptedData[..SwarmChunk.SpanSize],
+                        encryptedData[SwarmChunk.SpanSize..],
+                        hasher);
+                    optimisticCache[i] = new(chunkKey, encryptedData, encryptedHash);
+
+                    // Check key collisions.
+                    collisions = stampIssuer.Buckets.GetCollisions(encryptedHash.ToBucketId());
+                }
+                
+                // The first attempt initializes the best result.
+                if (i == 0)
+                    bestCollisions = collisions;
+                
+                // Check if collisions are optimal.
+                if (collisions == stampIssuer.Buckets.MinBucketCollisions)
+                    return (i, collisions);
+                
+                // Otherwise, keep it if it improves on the best found so far.
+                if (collisions < bestCollisions)
+                {
+                    bestAttempt = i;
+                    bestCollisions = collisions;
+                }
+            }
+
+            return (bestAttempt, bestCollisions);
+        }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/ChunkFeederPipelineStage.cs b/src/BeeNet.Util/Hashing/Pipeline/ChunkFeederPipelineStage.cs
index 0b24cf6..d26342b 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/ChunkFeederPipelineStage.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/ChunkFeederPipelineStage.cs
@@ -14,7 +14,9 @@
 
 using Etherna.BeeNet.Models;
 using System;
+using System.Collections.Concurrent;
 using System.Collections.Generic;
+using System.Diagnostics.CodeAnalysis;
 using System.IO;
 using System.Threading;
 using System.Threading.Tasks;
@@ -25,34 +27,54 @@ namespace Etherna.BeeNet.Hashing.Pipeline
     /// Produce chunked data with span prefix for successive stages.
     /// Also controls the parallelism on chunk elaboration
     /// </summary>
-    internal sealed class ChunkFeederPipelineStage(
-        IHasherPipelineStage nextStage,
-        int chunkConcurrency)
-        : IHasherPipeline
+    internal sealed class ChunkFeederPipelineStage : IHasherPipeline
     {
         // Fields.
+        private readonly SemaphoreSlim chunkConcurrencySemaphore;
+        private readonly ConcurrentQueue<SemaphoreSlim> chunkSemaphorePool;
+        private readonly IHasherPipelineStage nextStage;
         private readonly List<Task> nextStageTasks = new();
-        private readonly SemaphoreSlim semaphore = new(chunkConcurrency, chunkConcurrency);
         
         private long passedBytes;
-        
-        // Constructor.
-        public ChunkFeederPipelineStage(IHasherPipelineStage nextStage)
-            : this(nextStage, Environment.ProcessorCount)
-        { }
+
+        // Constructors.
+        [SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope")]
+        public ChunkFeederPipelineStage(
+            IHasherPipelineStage nextStage,
+            int? chunkConcurrency = default)
+        {
+            chunkConcurrency ??= Environment.ProcessorCount;
+            
+            this.nextStage = nextStage;
+            chunkConcurrencySemaphore = new(chunkConcurrency.Value, chunkConcurrency.Value);
+            chunkSemaphorePool = new ConcurrentQueue<SemaphoreSlim>();
+            
+            //init semaphore pool
+            /*
+             * Allocate twice as many semaphores as the chunk concurrency.
+             * This avoids a race condition where a chunk completes its hashing, its semaphore is reassigned
+             * and locked by another chunk, and only after that the direct child of the first chunk tries to
+             * wait on its parent.
+             */
+            for (int i = 0; i < chunkConcurrency * 2; i++)
+                chunkSemaphorePool.Enqueue(new SemaphoreSlim(1, 1));
+        }
 
         // Dispose.
         public void Dispose()
         {
             nextStage.Dispose();
-            semaphore.Dispose();
+            chunkConcurrencySemaphore.Dispose();
+            while (chunkSemaphorePool.TryDequeue(out var semaphore))
+                semaphore.Dispose();
         }
         
         // Properties.
         public bool IsUsable { get; private set; } = true;
 
+        public long MissedOptimisticHashing => nextStage.MissedOptimisticHashing;
+
         // Methods.
-        public async Task<SwarmHash> HashDataAsync(byte[] data)
+        public async Task<SwarmChunkReference> HashDataAsync(byte[] data)
         {
             ArgumentNullException.ThrowIfNull(data, nameof(data));
 
@@ -60,7 +82,7 @@ public async Task<SwarmHash> HashDataAsync(byte[] data)
             return await HashDataAsync(memoryStream).ConfigureAwait(false);
         }
         
-        public async Task<SwarmHash> HashDataAsync(Stream dataStream)
+        public async Task<SwarmChunkReference> HashDataAsync(Stream dataStream)
         {
             ArgumentNullException.ThrowIfNull(dataStream, nameof(dataStream));
 
@@ -73,6 +95,7 @@ public async Task<SwarmHash> HashDataAsync(Stream dataStream)
             // Slicing the stream permits to avoid to load all the stream in memory at the same time.
             var chunkBuffer = new byte[SwarmChunk.DataSize];
             int chunkReadSize;
+            SemaphoreSlim? prevChunkSemaphore = null;
             do
             {
                 chunkReadSize = await dataStream.ReadAsync(chunkBuffer).ConfigureAwait(false);
@@ -90,11 +113,24 @@ public async Task<SwarmHash> HashDataAsync(Stream dataStream)
                         (ulong)chunkReadSize);
                 
                     // Invoke next stage with parallelism on chunks.
-                    await semaphore.WaitAsync().ConfigureAwait(false);
+                    //control concurrency
+                    await chunkConcurrencySemaphore.WaitAsync().ConfigureAwait(false);
+                    
+                    //take a chunk semaphore from the pool and acquire it
+#pragma warning disable CA2000
+                    if (!chunkSemaphorePool.TryDequeue(out var chunkSemaphore))
+                        throw new InvalidOperationException("Semaphore pool exhausted");
+#pragma warning restore CA2000
+                    await chunkSemaphore.WaitAsync().ConfigureAwait(false);
+                    
+                    //build args
                     var feedArgs = new HasherPipelineFeedArgs(
                         span: chunkData[..SwarmChunk.SpanSize],
                         data: chunkData,
-                        numberId: passedBytes / SwarmChunk.DataSize);
+                        numberId: passedBytes / SwarmChunk.DataSize,
+                        prevChunkSemaphore: prevChunkSemaphore);
+                    
+                    //run task
                     nextStageTasks.Add(
                         Task.Run(async () =>
                         {
@@ -104,10 +140,18 @@ public async Task<SwarmHash> HashDataAsync(Stream dataStream)
                             }
                             finally
                             {
-                                semaphore.Release();
+                                //release the chunk semaphore and return it to the pool
+                                chunkSemaphore.Release();
+                                chunkSemaphorePool.Enqueue(chunkSemaphore);
+                                
+                                //free a concurrency slot for the next chunk
+                                chunkConcurrencySemaphore.Release();
                             }
                         }));
                     
+                    //set current chunk semaphore as prev for next chunk
+                    prevChunkSemaphore = chunkSemaphore;
+                    
                     passedBytes += chunkReadSize;
                 }
             } while (chunkReadSize == SwarmChunk.DataSize);
diff --git a/src/BeeNet.Util/Hashing/Pipeline/ChunkStoreWriterPipelineStage.cs b/src/BeeNet.Util/Hashing/Pipeline/ChunkStoreWriterPipelineStage.cs
index e1a5d98..edeadb8 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/ChunkStoreWriterPipelineStage.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/ChunkStoreWriterPipelineStage.cs
@@ -31,6 +31,9 @@ public void Dispose()
         {
             nextStage?.Dispose();
         }
+        
+        // Properties.
+        public long MissedOptimisticHashing => nextStage?.MissedOptimisticHashing ?? 0;
 
         // Methods.
         public async Task FeedAsync(HasherPipelineFeedArgs args)
@@ -50,7 +53,7 @@ public async Task FeedAsync(HasherPipelineFeedArgs args)
                 await nextStage.FeedAsync(args).ConfigureAwait(false);
         }
         
-        public Task<SwarmHash> SumAsync() =>
+        public Task<SwarmChunkReference> SumAsync() =>
             nextStage?.SumAsync() ?? throw new InvalidOperationException();
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/CompactedChunkAttemptResult.cs b/src/BeeNet.Util/Hashing/Pipeline/CompactedChunkAttemptResult.cs
new file mode 100644
index 0000000..df4ed46
--- /dev/null
+++ b/src/BeeNet.Util/Hashing/Pipeline/CompactedChunkAttemptResult.cs
@@ -0,0 +1,17 @@
+using Etherna.BeeNet.Manifest;
+using Etherna.BeeNet.Models;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Etherna.BeeNet.Hashing.Pipeline
+{
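+    /// <summary>
+    /// The outcome of a single chunk compaction attempt: the candidate encryption key,
+    /// the encrypted data and the resulting hash
+    /// </summary>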
+    [SuppressMessage("Performance", "CA1819:Properties should not return arrays")]
+    public class CompactedChunkAttemptResult(
+        XorEncryptKey chunkKey,
+        byte[] encryptedData,
+        SwarmHash hash)
+    {
+        public XorEncryptKey ChunkKey { get; } = chunkKey;
+        public byte[] EncryptedData { get; } = encryptedData;
+        public SwarmHash Hash { get; } = hash;
+    }
+}
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineBuilder.cs b/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineBuilder.cs
index 2b4b3d9..73e4a49 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineBuilder.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineBuilder.cs
@@ -28,19 +28,27 @@ public static IHasherPipeline BuildNewHasherPipeline(
             IPostageStamper postageStamper,
             RedundancyLevel redundancyLevel,
             bool isEncrypted,
-            string? chunkStoreDirectory) =>
+            string? chunkStoreDirectory,
+            ushort compactLevel,
+            int? chunkConcurrency) =>
             BuildNewHasherPipeline(
                 chunkStoreDirectory is null ? new FakeChunkStore() : new LocalDirectoryChunkStore(chunkStoreDirectory),
                 postageStamper,
                 redundancyLevel,
-                isEncrypted);
+                isEncrypted,
+                compactLevel,
+                chunkConcurrency);
         
         public static IHasherPipeline BuildNewHasherPipeline(
             IChunkStore chunkStore,
             IPostageStamper postageStamper,
             RedundancyLevel redundancyLevel,
-            bool isEncrypted)
+            bool isEncrypted,
+            ushort compactLevel,
+            int? chunkConcurrency)
         {
+            ArgumentNullException.ThrowIfNull(postageStamper, nameof(postageStamper));
+            
             if (redundancyLevel != RedundancyLevel.None)
                 throw new NotImplementedException();
 
@@ -52,29 +60,19 @@ public static IHasherPipeline BuildNewHasherPipeline(
             else
             {
                 //build stages
-                var shortPipelineStage = BuildNewShortHasherPipeline(chunkStore, postageStamper);
-                
                 var chunkAggregatorStage = new ChunkAggregatorPipelineStage(
-                    async (span, data) =>
-                    {
-                        var args = new HasherPipelineFeedArgs(span: span, data: data);
-                        await shortPipelineStage.FeedAsync(args).ConfigureAwait(false);
-                        return args.Hash!.Value;
-                    }
-                );
+                    new ChunkBmtPipelineStage(
+                        compactLevel,
+                        new ChunkStoreWriterPipelineStage(chunkStore, postageStamper, null),
+                        postageStamper.StampIssuer),
+                    compactLevel > 0);
+                
                 var storeWriterStage = new ChunkStoreWriterPipelineStage(chunkStore, postageStamper, chunkAggregatorStage);
-                bmtStage = new ChunkBmtPipelineStage(storeWriterStage);
+                
+                bmtStage = new ChunkBmtPipelineStage(compactLevel, storeWriterStage, postageStamper.StampIssuer);
             }
             
-            return new ChunkFeederPipelineStage(bmtStage);
-        }
-        
-        public static IHasherPipelineStage BuildNewShortHasherPipeline(
-            IChunkStore chunkStore,
-            IPostageStamper postageStamper)
-        {
-            var storeWriter = new ChunkStoreWriterPipelineStage(chunkStore, postageStamper, null);
-            return new ChunkBmtPipelineStage(storeWriter);
+            return new ChunkFeederPipelineStage(bmtStage, chunkConcurrency);
         }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineFeedArgs.cs b/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineFeedArgs.cs
index e323a5a..dae12f2 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineFeedArgs.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/HasherPipelineFeedArgs.cs
@@ -14,20 +14,22 @@
 
 using Etherna.BeeNet.Models;
 using System;
+using System.Threading;
 
 namespace Etherna.BeeNet.Hashing.Pipeline
 {
     public sealed class HasherPipelineFeedArgs
     {
         // Fields.
-        private readonly byte[] _data;
         private readonly byte[]? _span;
+        private byte[] _data;
         
         // Constructor.
         public HasherPipelineFeedArgs(
             byte[] data,
             byte[]? span = null,
-            long numberId = 0)
+            long numberId = 0,
+            SemaphoreSlim? prevChunkSemaphore = null)
         {
             ArgumentNullException.ThrowIfNull(data, nameof(data));
             
@@ -42,13 +44,23 @@ public HasherPipelineFeedArgs(
             _data = data;
             _span = span;
             NumberId = numberId;
+            PrevChunkSemaphore = prevChunkSemaphore;
         }
         
         // Properties.
+        /// <summary>
+        /// The optional chunk encryption key
+        /// </summary>
+        public XorEncryptKey? ChunkKey { get; internal set; }
+
         /// <summary>
         /// Data can include the span too, but it may be encrypted if the pipeline is encrypted
         /// </summary>
-        public ReadOnlyMemory<byte> Data => _data;
+        public ReadOnlyMemory<byte> Data
+        {
+            get => _data;
+            internal set => _data = value.ToArray();
+        }
         
         /// <summary>
         /// Hash generated by BMT
@@ -61,8 +73,13 @@ public HasherPipelineFeedArgs(
         public ReadOnlyMemory<byte> Span => _span;
         
         /// <summary>
-        /// Ordered Id, from 0 to n with the last chunk
+        /// Sequential id, from 0 for the first chunk to n for the last
         /// </summary>
         public long NumberId { get; }
+
+        /// <summary>
+        /// Semaphore of the previous chunk. It remains acquired while that chunk is still being processed.
+        /// </summary>
+        public SemaphoreSlim? PrevChunkSemaphore { get; }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/IHasherPipeline.cs b/src/BeeNet.Util/Hashing/Pipeline/IHasherPipeline.cs
index 67b4869..40cf2a6 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/IHasherPipeline.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/IHasherPipeline.cs
@@ -21,20 +21,24 @@ namespace Etherna.BeeNet.Hashing.Pipeline
 {
     public interface IHasherPipeline : IDisposable
     {
+        // Properties.
         bool IsUsable { get; }
         
+        long MissedOptimisticHashing { get; }
+        
+        // Methods.
         /// <summary>
         /// Consume a byte array and returns a Swarm hash as result
         /// </summary>
         /// <param name="data">Input data</param>
         /// <returns>Resulting swarm hash</returns>
-        Task<SwarmHash> HashDataAsync(byte[] data);
+        Task<SwarmChunkReference> HashDataAsync(byte[] data);
 
         /// <summary>
         /// Consume a stream slicing it in chunk size parts, and returns a Swarm hash as result
         /// </summary>
         /// <param name="dataStream">Input data stream</param>
         /// <returns>Resulting swarm hash</returns>
-        Task<SwarmHash> HashDataAsync(Stream dataStream);
+        Task<SwarmChunkReference> HashDataAsync(Stream dataStream);
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Pipeline/IHasherPipelineStage.cs b/src/BeeNet.Util/Hashing/Pipeline/IHasherPipelineStage.cs
index ea578c8..d25e038 100644
--- a/src/BeeNet.Util/Hashing/Pipeline/IHasherPipelineStage.cs
+++ b/src/BeeNet.Util/Hashing/Pipeline/IHasherPipelineStage.cs
@@ -20,7 +20,11 @@ namespace Etherna.BeeNet.Hashing.Pipeline
 {
     public interface IHasherPipelineStage : IDisposable
     {
+        // Properties.
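+        /// <summary>
+        /// Number of times an optimistic compaction result was invalidated by a concurrent bucket update
+        /// and had to be recalculated
+        /// </summary>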
+        long MissedOptimisticHashing { get; }
+        
+        // Methods.
         Task FeedAsync(HasherPipelineFeedArgs args);
-        Task<SwarmHash> SumAsync();
+        Task<SwarmChunkReference> SumAsync();
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Postage/FakePostageStampIssuer.cs b/src/BeeNet.Util/Hashing/Postage/FakePostageStampIssuer.cs
index a1b77ff..b50a2ed 100644
--- a/src/BeeNet.Util/Hashing/Postage/FakePostageStampIssuer.cs
+++ b/src/BeeNet.Util/Hashing/Postage/FakePostageStampIssuer.cs
@@ -13,22 +13,18 @@
 // If not, see <https://www.gnu.org/licenses/>.
 
 using Etherna.BeeNet.Models;
-using System;
 
 namespace Etherna.BeeNet.Hashing.Postage
 {
     public class FakePostageStampIssuer : IPostageStampIssuer
     {
-        public ReadOnlySpan<uint> Buckets => Array.Empty<uint>();
+        public IReadOnlyPostageBuckets Buckets { get; } = new PostageBuckets();
         public uint BucketUpperBound { get; }
         public bool HasSaturated { get; }
         public PostageBatch PostageBatch => PostageBatch.MaxDepthInstance;
-        public uint MaxBucketCount { get; }
+        public uint MaxBucketCollisions { get; }
         public long TotalChunks { get; }
 
-        public StampBucketIndex IncrementBucketCount(SwarmHash hash) =>
-            new StampBucketIndex(0, 0);
-
-        public ulong GetCollisions(uint bucketId) => 0;
+        public StampBucketIndex IncrementBucketCount(SwarmHash hash) => new(0, 0);
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Postage/IPostageStampIssuer.cs b/src/BeeNet.Util/Hashing/Postage/IPostageStampIssuer.cs
index 6432b18..fec1006 100644
--- a/src/BeeNet.Util/Hashing/Postage/IPostageStampIssuer.cs
+++ b/src/BeeNet.Util/Hashing/Postage/IPostageStampIssuer.cs
@@ -13,40 +13,27 @@
 // If not, see <https://www.gnu.org/licenses/>.
 
 using Etherna.BeeNet.Models;
-using System;
 
 namespace Etherna.BeeNet.Hashing.Postage
 {
     public interface IPostageStampIssuer
     {
         // Properties.
-        /// <summary>
-        /// Collision Buckets: counts per neighbourhoods
-        /// </summary>
-        public ReadOnlySpan<uint> Buckets { get; }
-
-        public uint BucketUpperBound { get; }
+        IReadOnlyPostageBuckets Buckets { get; }
+        
+        uint BucketUpperBound { get; }
         
         /// <summary>
         /// True if batch is mutable and BucketUpperBound has been it
         /// </summary>
-        public bool HasSaturated { get; }
+        bool HasSaturated { get; }
 
         /// <summary>
         /// The batch stamps are issued from
         /// </summary>
-        public PostageBatch PostageBatch { get; }
-        
-        /// <summary>
-        /// The count of the fullest bucket
-        /// </summary>
-        public uint MaxBucketCount { get; }
-
-        long TotalChunks { get; }
+        PostageBatch PostageBatch { get; }
 
         // Methods.
         StampBucketIndex IncrementBucketCount(SwarmHash hash);
-        
-        ulong GetCollisions(uint bucketId);
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Hashing/Postage/PostageStampIssuer.cs b/src/BeeNet.Util/Hashing/Postage/PostageStampIssuer.cs
index ac8bc8a..dd55036 100644
--- a/src/BeeNet.Util/Hashing/Postage/PostageStampIssuer.cs
+++ b/src/BeeNet.Util/Hashing/Postage/PostageStampIssuer.cs
@@ -35,16 +35,12 @@ public PostageStampIssuer(
         }
 
         // Properties.
-        public ReadOnlySpan<uint> Buckets => _buckets.Buckets;
+        public IReadOnlyPostageBuckets Buckets => _buckets;
         public uint BucketUpperBound { get; }
         public bool HasSaturated { get; private set; }
         public PostageBatch PostageBatch { get; }
-        public uint MaxBucketCount => _buckets.MaxBucketCount;
-        public long TotalChunks => _buckets.TotalChunks;
 
         // Methods.
-        public ulong GetCollisions(uint bucketId) => _buckets.GetCollisions(bucketId);
-        
         public StampBucketIndex IncrementBucketCount(SwarmHash hash)
         {
             var bucketId = hash.ToBucketId();
diff --git a/src/BeeNet.Util/Hashing/Store/ChunkJoiner.cs b/src/BeeNet.Util/Hashing/Store/ChunkJoiner.cs
index 01e1232..9c7d387 100644
--- a/src/BeeNet.Util/Hashing/Store/ChunkJoiner.cs
+++ b/src/BeeNet.Util/Hashing/Store/ChunkJoiner.cs
@@ -13,6 +13,7 @@
 // If not, see <https://www.gnu.org/licenses/>.
 
 using Etherna.BeeNet.Models;
+using System;
 using System.Collections.Generic;
 using System.Threading.Tasks;
 
@@ -22,20 +23,40 @@ public class ChunkJoiner(
         IReadOnlyChunkStore chunkStore)
     {
         // Methods.
-        public async Task<IEnumerable<byte>> GetJoinedChunkDataAsync(SwarmHash hash)
+        public async Task<IEnumerable<byte>> GetJoinedChunkDataAsync(SwarmChunkReference chunkReference)
         {
-            var chunk = await chunkStore.GetAsync(hash).ConfigureAwait(false);
-            var totalDataLength = SwarmChunk.SpanToLength(chunk.Span.Span);
+            ArgumentNullException.ThrowIfNull(chunkReference, nameof(chunkReference));
+            
+            var chunk = await chunkStore.GetAsync(chunkReference.Hash).ConfigureAwait(false);
             
+            var dataArray = chunk.Data.ToArray();
+            chunkReference.EncryptionKey?.EncryptDecrypt(dataArray);
+            
+            var totalDataLength = SwarmChunk.SpanToLength(chunk.Span.Span);
             if (totalDataLength <= SwarmChunk.DataSize)
-                return chunk.Data.ToArray();
+                return dataArray;
             
             var joinedData = new List<byte>();
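+            // Each child reference is a chunk hash, optionally followed by its XOR encryption key
+            // when recursive encryption is used.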
-                
-            for (int i = 0; i < chunk.Data.Length; i += SwarmHash.HashSize)
+            for (int i = 0; i < dataArray.Length;)
             {
-                var childHash = new SwarmHash(chunk.Data[i..(i + SwarmHash.HashSize)].ToArray());
-                joinedData.AddRange(await GetJoinedChunkDataAsync(childHash).ConfigureAwait(false));
+                //read hash
+                var childHash = new SwarmHash(dataArray[i..(i + SwarmHash.HashSize)]);
+                i += SwarmHash.HashSize;
+                
+                //read encryption key
+                XorEncryptKey? childEncryptionKey = null;
+                if (chunkReference.UseRecursiveEncryption)
+                {
+                    childEncryptionKey = new XorEncryptKey(dataArray[i..(i + XorEncryptKey.KeySize)]);
+                    i += XorEncryptKey.KeySize;
+                }
+                
+                //add joined data recursively
+                joinedData.AddRange(await GetJoinedChunkDataAsync(
+                    new SwarmChunkReference(
+                        childHash,
+                        childEncryptionKey,
+                        chunkReference.UseRecursiveEncryption)).ConfigureAwait(false));
             }
             
             return joinedData;
diff --git a/src/BeeNet.Util/IBeeClient.cs b/src/BeeNet.Util/IBeeClient.cs
index a6be056..df2fd1b 100644
--- a/src/BeeNet.Util/IBeeClient.cs
+++ b/src/BeeNet.Util/IBeeClient.cs
@@ -536,7 +536,7 @@ Task<string> RefreshAuthAsync(
             int expiry,
             CancellationToken cancellationToken = default);
 
-        Task<SwarmHash> ResolveSwarmAddressToHashAsync(SwarmAddress address);
+        Task<SwarmChunkReference> ResolveAddressToChunkReferenceAsync(SwarmAddress address);
 
         /// <summary>Reupload a root hash to the network</summary>
         /// <param name="hash">Root hash of content (can be of any type: collection, file, chunk)</param>
diff --git a/src/BeeNet.Util/Manifest/IReadOnlyMantarayNode.cs b/src/BeeNet.Util/Manifest/IReadOnlyMantarayNode.cs
index 9861ed4..0a70e52 100644
--- a/src/BeeNet.Util/Manifest/IReadOnlyMantarayNode.cs
+++ b/src/BeeNet.Util/Manifest/IReadOnlyMantarayNode.cs
@@ -29,6 +29,6 @@ public interface IReadOnlyMantarayNode
         
         // Methods.
         Task<IReadOnlyDictionary<string,string>> GetResourceMetadataAsync(string path);
-        Task<SwarmHash> ResolveResourceHashAsync(string path);
+        Task<SwarmChunkReference> ResolveChunkReferenceAsync(string path);
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Manifest/ManifestEntry.cs b/src/BeeNet.Util/Manifest/ManifestEntry.cs
index dfe06ac..e107190 100644
--- a/src/BeeNet.Util/Manifest/ManifestEntry.cs
+++ b/src/BeeNet.Util/Manifest/ManifestEntry.cs
@@ -20,10 +20,12 @@ namespace Etherna.BeeNet.Manifest
     public class ManifestEntry
     {
         // Consts.
+        public const string ChunkEncryptKeyKey = "ChunkEncryptKey";
         public const string ContentTypeKey = "Content-Type";
         public const string FilenameKey = "Filename";
         public const string WebsiteErrorDocPathKey = "website-error-document";
         public const string WebsiteIndexDocPathKey = "website-index-document";
+        public const string UseRecursiveEncryptionKey = "recursiveEncrypt";
         
         // Constructor.
         private ManifestEntry(
diff --git a/src/BeeNet.Util/Manifest/MantarayManifest.cs b/src/BeeNet.Util/Manifest/MantarayManifest.cs
index a03ce96..aa68674 100644
--- a/src/BeeNet.Util/Manifest/MantarayManifest.cs
+++ b/src/BeeNet.Util/Manifest/MantarayManifest.cs
@@ -19,18 +19,20 @@
 
 namespace Etherna.BeeNet.Manifest
 {
+    public delegate IHasherPipeline BuildHasherPipeline();
+    
     public class MantarayManifest : IReadOnlyMantarayManifest
     {
         // Consts.
         public static readonly string RootPath = SwarmAddress.Separator.ToString();
         
         // Fields.
-        private readonly Func<IHasherPipeline> hasherBuilder;
+        private readonly BuildHasherPipeline hasherBuilder;
         private readonly MantarayNode _rootNode;
 
         // Constructors.
         public MantarayManifest(
-            Func<IHasherPipeline> hasherBuilder,
+            BuildHasherPipeline hasherBuilder,
             bool isEncrypted)
             : this(hasherBuilder,
                 new MantarayNode(isEncrypted
@@ -39,7 +41,7 @@ public MantarayManifest(
         { }
 
         public MantarayManifest(
-            Func<IHasherPipeline> hasherBuilder,
+            BuildHasherPipeline hasherBuilder,
             MantarayNode rootNode)
         {
             this.hasherBuilder = hasherBuilder;
@@ -58,10 +60,10 @@ public void Add(string path, ManifestEntry entry)
             _rootNode.Add(path, entry);
         }
 
-        public async Task<SwarmHash> GetHashAsync()
+        public async Task<SwarmChunkReference> GetHashAsync()
         {
             await _rootNode.ComputeHashAsync(hasherBuilder).ConfigureAwait(false);
-            return _rootNode.Hash;
+            return new SwarmChunkReference(_rootNode.Hash, null, false);
         }
     }
 }
\ No newline at end of file
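
A sketch of the hashing flow under the changed signature: GetHashAsync now yields a SwarmChunkReference (with a null key for the root manifest chunk) instead of a bare SwarmHash. The pipeline factory is left abstract, since its construction belongs to the hasher pipeline builder and is not shown in this patch.

    using System.Collections.Generic;
    using System.Threading.Tasks;
    using Etherna.BeeNet.Manifest;
    using Etherna.BeeNet.Models;

    static class ManifestHashingSample
    {
        public static async Task<SwarmHash> HashSingleFileManifestAsync(
            BuildHasherPipeline hasherBuilder, // any delegate returning an IHasherPipeline
            SwarmHash fileHash,
            string fileName)
        {
            var manifest = new MantarayManifest(hasherBuilder, isEncrypted: false);

            manifest.Add(
                fileName,
                ManifestEntry.NewFile(fileHash, new Dictionary<string, string>
                {
                    [ManifestEntry.FilenameKey] = fileName
                }));

            var reference = await manifest.GetHashAsync().ConfigureAwait(false);
            return reference.Hash;
        }
    }
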
diff --git a/src/BeeNet.Util/Manifest/MantarayNode.cs b/src/BeeNet.Util/Manifest/MantarayNode.cs
index 346926e..fa62f48 100644
--- a/src/BeeNet.Util/Manifest/MantarayNode.cs
+++ b/src/BeeNet.Util/Manifest/MantarayNode.cs
@@ -14,7 +14,6 @@
 
 using Etherna.BeeNet.Extensions;
 using Etherna.BeeNet.Hashing;
-using Etherna.BeeNet.Hashing.Pipeline;
 using Etherna.BeeNet.Models;
 using System;
 using System.Collections.Generic;
@@ -143,7 +142,7 @@ public void Add(string path, ManifestEntry entry)
             }
         }
 
-        public async Task ComputeHashAsync(Func<IHasherPipeline> hasherPipelineBuilder)
+        public async Task ComputeHashAsync(BuildHasherPipeline hasherPipelineBuilder)
         {
             ArgumentNullException.ThrowIfNull(hasherPipelineBuilder, nameof(hasherPipelineBuilder));
             
@@ -156,7 +155,8 @@ public async Task ComputeHashAsync(Func<IHasherPipeline> hasherPipelineBuilder)
 
             // Marshal current node, and set its hash.
             using var hasherPipeline = hasherPipelineBuilder();
-            _hash = await hasherPipeline.HashDataAsync(ToByteArray()).ConfigureAwait(false);
+            var hashingResult = await hasherPipeline.HashDataAsync(ToByteArray()).ConfigureAwait(false);
+            _hash = hashingResult.Hash;
             
             // Clean forks.
             _forks.Clear();
@@ -194,7 +194,7 @@ public Task<IReadOnlyDictionary<string, string>> GetResourceMetadataAsync(string
             throw new NotImplementedException();
         }
 
-        public Task<SwarmHash> ResolveResourceHashAsync(string path)
+        public Task<SwarmChunkReference> ResolveChunkReferenceAsync(string path)
         {
             //this will be implemented probably into a base class
             throw new NotImplementedException();
diff --git a/src/BeeNet.Util/Manifest/ReferencedMantarayManifest.cs b/src/BeeNet.Util/Manifest/ReferencedMantarayManifest.cs
index b7b5fb8..97a2fc9 100644
--- a/src/BeeNet.Util/Manifest/ReferencedMantarayManifest.cs
+++ b/src/BeeNet.Util/Manifest/ReferencedMantarayManifest.cs
@@ -43,17 +43,15 @@ public async Task<IReadOnlyDictionary<string, string>> GetResourceMetadataAsync(
             if (!_rootNode.IsDecoded)
                 await _rootNode.DecodeFromChunkAsync().ConfigureAwait(false);
 
-            return await RootNode.GetResourceMetadataAsync(
-                address.Path?.ToString() ?? "").ConfigureAwait(false);
+            return await RootNode.GetResourceMetadataAsync(address.Path).ConfigureAwait(false);
         }
 
-        public async Task<SwarmHash> ResolveResourceHashAsync(SwarmAddress address)
+        public async Task<SwarmChunkReference> ResolveAddressToChunkReferenceAsync(SwarmAddress address)
         {
             if (!_rootNode.IsDecoded)
                 await _rootNode.DecodeFromChunkAsync().ConfigureAwait(false);
 
-            return await RootNode.ResolveResourceHashAsync(
-                address.Path?.ToString() ?? "").ConfigureAwait(false);
+            return await RootNode.ResolveChunkReferenceAsync(address.Path).ConfigureAwait(false);
         }
     }
 }
\ No newline at end of file
diff --git a/src/BeeNet.Util/Manifest/ReferencedMantarayNode.cs b/src/BeeNet.Util/Manifest/ReferencedMantarayNode.cs
index f9185dc..6486ecf 100644
--- a/src/BeeNet.Util/Manifest/ReferencedMantarayNode.cs
+++ b/src/BeeNet.Util/Manifest/ReferencedMantarayNode.cs
@@ -43,12 +43,20 @@ public ReferencedMantarayNode(
             Hash = chunkHash;
             _metadata = metadata ?? new Dictionary<string, string>();
             NodeTypeFlags = nodeTypeFlags;
+
+            // Read metadata.
+            if (_metadata.TryGetValue(ManifestEntry.ChunkEncryptKeyKey, out var encryptKeyStr))
+                EntryEncryptionKey = new XorEncryptKey(encryptKeyStr);
+            if (_metadata.TryGetValue(ManifestEntry.UseRecursiveEncryptionKey, out var useRecursiveEncryptStr))
+                EntryUseRecursiveEncryption = bool.Parse(useRecursiveEncryptStr);
         }
         
         // Properties.
+        public XorEncryptKey? EntryEncryptionKey { get; }
         public SwarmHash? EntryHash => IsDecoded
             ? _entryHash
             : throw new InvalidOperationException("Node is not decoded from chunk");
+        public bool EntryUseRecursiveEncryption { get; }
         public IReadOnlyDictionary<char, ReferencedMantarayNodeFork> Forks => IsDecoded
             ? _forks
             : throw new InvalidOperationException("Node is not decoded from chunk");
@@ -94,17 +102,17 @@ public async Task<IReadOnlyDictionary<string, string>> GetResourceMetadataAsync(
             ArgumentNullException.ThrowIfNull(path, nameof(path));
 
             // If the path is empty
-            if (path.Length == 0)
+            if (path.Length == 0 || path == SwarmAddress.Separator.ToString())
             {
                 //try to lookup for index document suffix
                 if (!_forks.TryGetValue(SwarmAddress.Separator, out var rootFork) ||
                     rootFork.Prefix != SwarmAddress.Separator.ToString())
                     throw new KeyNotFoundException($"Final path {path} can't be found");
                 
-                if (!rootFork.Node.Metadata.TryGetValue(ManifestEntry.WebsiteIndexDocPathKey, out var suffix))
+                if (!rootFork.Node.Metadata.TryGetValue(ManifestEntry.WebsiteIndexDocPathKey, out var indexDocPath))
                     throw new KeyNotFoundException($"Index document can't be found");
 
-                path += suffix;
+                path = indexDocPath;
             }
 
             // Find the child fork.
@@ -124,26 +132,29 @@ public async Task<IReadOnlyDictionary<string, string>> GetResourceMetadataAsync(
             return await fork.Node.GetResourceMetadataAsync(childSubPath).ConfigureAwait(false);
         }
 
-        public async Task<SwarmHash> ResolveResourceHashAsync(string path)
+        public async Task<SwarmChunkReference> ResolveChunkReferenceAsync(string path)
         {
             ArgumentNullException.ThrowIfNull(path, nameof(path));
 
             // If the path is empty
-            if (path.Length == 0)
+            if (path.Length == 0 || path == SwarmAddress.Separator.ToString())
             {
                 //if entry is not null, return it
                 if (EntryHash.HasValue && EntryHash != SwarmHash.Zero)
-                    return EntryHash.Value;
+                    return new SwarmChunkReference(
+                        EntryHash.Value,
+                        EntryEncryptionKey,
+                        EntryUseRecursiveEncryption);
                 
                 //try to lookup for index document suffix
                 if (!_forks.TryGetValue(SwarmAddress.Separator, out var rootFork) ||
                     rootFork.Prefix != SwarmAddress.Separator.ToString())
                     throw new KeyNotFoundException($"Final path {path} can't be found");
                 
-                if (!rootFork.Node.Metadata.TryGetValue(ManifestEntry.WebsiteIndexDocPathKey, out var suffix))
+                if (!rootFork.Node.Metadata.TryGetValue(ManifestEntry.WebsiteIndexDocPathKey, out var indexDocPath))
                     throw new KeyNotFoundException($"Index document can't be found");
 
-                path += suffix;
+                path = indexDocPath;
             }
             
             // Find the child fork.
@@ -154,7 +165,7 @@ public async Task<SwarmHash> ResolveResourceHashAsync(string path)
             if (!fork.Node.IsDecoded)
                 await fork.Node.DecodeFromChunkAsync().ConfigureAwait(false);
 
-            return await fork.Node.ResolveResourceHashAsync(path[fork.Prefix.Length..]).ConfigureAwait(false);
+            return await fork.Node.ResolveChunkReferenceAsync(path[fork.Prefix.Length..]).ConfigureAwait(false);
         }
         
         // Helpers.
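
A minimal sketch of the metadata read-back performed in the constructor above: the same keys written at upload time are parsed into the node's entry encryption key and recursion flag.

    using System.Collections.Generic;
    using Etherna.BeeNet.Manifest;
    using Etherna.BeeNet.Models;

    static class EntryEncryptionMetadataSample
    {
        public static (XorEncryptKey? Key, bool UseRecursiveEncryption) Read(
            IReadOnlyDictionary<string, string> metadata)
        {
            XorEncryptKey? key = null;
            if (metadata.TryGetValue(ManifestEntry.ChunkEncryptKeyKey, out var keyStr))
                key = new XorEncryptKey(keyStr);

            var useRecursiveEncryption = false;
            if (metadata.TryGetValue(ManifestEntry.UseRecursiveEncryptionKey, out var recursiveStr))
                useRecursiveEncryption = bool.Parse(recursiveStr);

            return (key, useRecursiveEncryption);
        }
    }
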
diff --git a/src/BeeNet.Util/Services/CalculatorService.cs b/src/BeeNet.Util/Services/CalculatorService.cs
index fd799e9..cf734b6 100644
--- a/src/BeeNet.Util/Services/CalculatorService.cs
+++ b/src/BeeNet.Util/Services/CalculatorService.cs
@@ -32,9 +32,11 @@ public async Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
             string directoryPath,
             string? indexFilename = null,
             string? errorFilename = null,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null, 
             IChunkStore? chunkStore = null)
         {
             // Checks.
@@ -51,6 +53,8 @@ public async Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
                 new FakeSigner(),
                 postageStampIssuer,
                 new MemoryStampStore());
+
+            long totalMissedOptimisticHashing = 0;
             
             // Try set index document.
             if (indexFilename is null &&
@@ -63,7 +67,9 @@ public async Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
                     chunkStore,
                     postageStamper,
                     redundancyLevel,
-                    encrypt),
+                    encrypt,
+                    0,
+                    chunkCuncorrency),
                 encrypt);
 
             // Iterate through the files in the supplied directory.
@@ -77,22 +83,33 @@ public async Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
                     chunkStore,
                     postageStamper,
                     redundancyLevel,
-                    encrypt);
+                    encrypt,
+                    compactLevel,
+                    chunkCuncorrency);
                 
                 var fileContentType = FileContentTypeProvider.GetContentType(file);
                 var fileName = Path.GetFileName(file);
                 using var fileStream = File.OpenRead(file);
 
-                var fileHash = await fileHasherPipeline.HashDataAsync(fileStream).ConfigureAwait(false);
+                var fileHashingResult = await fileHasherPipeline.HashDataAsync(fileStream).ConfigureAwait(false);
+                totalMissedOptimisticHashing += fileHasherPipeline.MissedOptimisticHashing;
                 
                 // Add file entry to dir manifest.
+                var fileEntryMetadata = new Dictionary<string, string>
+                {
+                    [ManifestEntry.ContentTypeKey] = fileContentType,
+                    [ManifestEntry.FilenameKey] = fileName
+                };
+                if (fileHashingResult.EncryptionKey != null)
+                    fileEntryMetadata.Add(ManifestEntry.ChunkEncryptKeyKey, fileHashingResult.EncryptionKey.ToString());
+                if (compactLevel > 0)
+                    fileEntryMetadata.Add(ManifestEntry.UseRecursiveEncryptionKey, true.ToString());
+                
                 dirManifest.Add(
                     Path.GetRelativePath(directoryPath, file),
-                    ManifestEntry.NewFile(fileHash, new Dictionary<string, string>
-                    {
-                        [ManifestEntry.ContentTypeKey] = fileContentType,
-                        [ManifestEntry.FilenameKey] = fileName
-                    }));
+                    ManifestEntry.NewFile(
+                        fileHashingResult.Hash,
+                        fileEntryMetadata));
             }
             
             // Store website information.
@@ -111,11 +128,12 @@ public async Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
             }
 
             // Get manifest hash.
-            var manifestHash = await dirManifest.GetHashAsync().ConfigureAwait(false);
+            var chunkHashingResult = await dirManifest.GetHashAsync().ConfigureAwait(false);
             
             // Return result.
             return new UploadEvaluationResult(
-                manifestHash,
+                chunkHashingResult.Hash,
+                totalMissedOptimisticHashing,
                 postageStampIssuer);
         }
 
@@ -123,9 +141,11 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
             byte[] data,
             string fileContentType,
             string? fileName,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null, 
             IChunkStore? chunkStore = null)
         {
             using var stream = new MemoryStream(data);
@@ -133,9 +153,11 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
                 stream,
                 fileContentType,
                 fileName,
+                compactLevel,
                 encrypt,
                 redundancyLevel,
                 postageStampIssuer,
+                chunkCuncorrency,
                 chunkStore).ConfigureAwait(false);
         }
 
@@ -143,9 +165,11 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
             Stream stream,
             string fileContentType,
             string? fileName,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null, 
             IChunkStore? chunkStore = null)
         {
             chunkStore ??= new FakeChunkStore();
@@ -161,9 +185,11 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
                 chunkStore,
                 postageStamper,
                 redundancyLevel,
-                encrypt);
-            var fileHash = await fileHasherPipeline.HashDataAsync(stream).ConfigureAwait(false);
-            fileName ??= fileHash.ToString(); //if missing, set file name with its address
+                encrypt,
+                compactLevel,
+                chunkCuncorrency);
+            var fileHashingResult = await fileHasherPipeline.HashDataAsync(stream).ConfigureAwait(false);
+            fileName ??= fileHashingResult.Hash.ToString(); //if missing, set file name with its address
             
             // Create manifest.
             var manifest = new MantarayManifest(
@@ -171,7 +197,9 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
                     chunkStore,
                     postageStamper,
                     redundancyLevel,
-                    encrypt),
+                    encrypt,
+                    0,
+                    chunkCuncorrency),
                 encrypt);
 
             manifest.Add(
@@ -181,22 +209,29 @@ public async Task<UploadEvaluationResult> EvaluateFileUploadAsync(
                     {
                         [ManifestEntry.WebsiteIndexDocPathKey] = fileName,
                     }));
+
+            var fileEntryMetadata = new Dictionary<string, string>
+            {
+                [ManifestEntry.ContentTypeKey] = fileContentType,
+                [ManifestEntry.FilenameKey] = fileName
+            };
+            if (fileHashingResult.EncryptionKey != null)
+                fileEntryMetadata.Add(ManifestEntry.ChunkEncryptKeyKey, fileHashingResult.EncryptionKey.ToString());
+            if (compactLevel > 0)
+                fileEntryMetadata.Add(ManifestEntry.UseRecursiveEncryptionKey, true.ToString());
             
             manifest.Add(
                 fileName,
                 ManifestEntry.NewFile(
-                    fileHash,
-                    new Dictionary<string, string>
-                    {
-                        [ManifestEntry.ContentTypeKey] = fileContentType,
-                        [ManifestEntry.FilenameKey] = fileName
-                    }));
+                    fileHashingResult.Hash,
+                    fileEntryMetadata));
 
-            var manifestHash = await manifest.GetHashAsync().ConfigureAwait(false);
+            var chunkHashingResult = await manifest.GetHashAsync().ConfigureAwait(false);
             
             // Return result.
             return new UploadEvaluationResult(
-                manifestHash,
+                chunkHashingResult.Hash,
+                fileHasherPipeline.MissedOptimisticHashing,
                 postageStampIssuer);
         }
 
@@ -224,10 +259,10 @@ public async Task<Stream> GetFileStreamFromChunksAsync(
                 chunkStore,
                 address.Hash);
             
-            var resourceHash = await rootManifest.ResolveResourceHashAsync(address).ConfigureAwait(false);
+            var chunkReference = await rootManifest.ResolveAddressToChunkReferenceAsync(address).ConfigureAwait(false);
             
             var memoryStream = new MemoryStream();
-            var resourceData = await chunkJoiner.GetJoinedChunkDataAsync(resourceHash).ConfigureAwait(false);
+            var resourceData = await chunkJoiner.GetJoinedChunkDataAsync(chunkReference).ConfigureAwait(false);
             memoryStream.Write(resourceData.ToArray());
             memoryStream.Position = 0;
             
@@ -238,24 +273,30 @@ public Task<SwarmHash> WriteDataChunksAsync(
             byte[] data,
             string outputDirectory,
             bool createDirectory = true,
+            ushort compactLevel = 0,
             bool encrypt = false,
-            RedundancyLevel redundancyLevel = RedundancyLevel.None)
+            RedundancyLevel redundancyLevel = RedundancyLevel.None,
+            int? chunkCuncorrency = null)
         {
             using var stream = new MemoryStream(data);
             return WriteDataChunksAsync(
                 stream,
                 outputDirectory,
                 createDirectory,
+                compactLevel,
                 encrypt,
-                redundancyLevel);
+                redundancyLevel,
+                chunkCuncorrency);
         }
 
         public async Task<SwarmHash> WriteDataChunksAsync(
             Stream stream,
             string outputDirectory,
             bool createDirectory = true,
+            ushort compactLevel = 0,
             bool encrypt = false,
-            RedundancyLevel redundancyLevel = RedundancyLevel.None)
+            RedundancyLevel redundancyLevel = RedundancyLevel.None,
+            int? chunkCuncorrency = null)
         {
             var chunkStore = new LocalDirectoryChunkStore(outputDirectory, createDirectory);
             
@@ -264,11 +305,13 @@ public async Task<SwarmHash> WriteDataChunksAsync(
                 chunkStore,
                 new FakePostageStamper(),
                 redundancyLevel,
-                encrypt);
-            var fileHash = await fileHasherPipeline.HashDataAsync(stream).ConfigureAwait(false);
+                encrypt,
+                compactLevel,
+                chunkCuncorrency);
+            var fileHashingResult = await fileHasherPipeline.HashDataAsync(stream).ConfigureAwait(false);
             
             // Return file hash.
-            return fileHash;
+            return fileHashingResult.Hash;
         }
     }
 }
\ No newline at end of file
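
A usage sketch of the extended evaluation API. Constructing the service directly is an assumption (its constructor is not part of this patch); the call shape and the result members are the ones introduced above.

    using System;
    using System.Threading.Tasks;
    using Etherna.BeeNet.Services;

    static class UploadEvaluationSample
    {
        public static async Task RunAsync(byte[] fileData)
        {
            var calculator = new CalculatorService(); // construction assumed

            var result = await calculator.EvaluateFileUploadAsync(
                fileData,
                fileContentType: "application/octet-stream",
                fileName: "sample.bin",
                compactLevel: 100, // >0 adds ChunkEncryptKey/recursiveEncrypt metadata to the file entry
                encrypt: false);

            Console.WriteLine($"Root hash: {result.Hash}");
            Console.WriteLine($"Missed optimistic hashings: {result.MissedOptimisticHashing}");
            Console.WriteLine($"Required batch size (bytes): {result.RequiredPostageBatchByteSize}");
        }
    }
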
diff --git a/src/BeeNet.Util/Services/ICalculatorService.cs b/src/BeeNet.Util/Services/ICalculatorService.cs
index e83595e..1ccd757 100644
--- a/src/BeeNet.Util/Services/ICalculatorService.cs
+++ b/src/BeeNet.Util/Services/ICalculatorService.cs
@@ -29,18 +29,22 @@ public interface ICalculatorService
         /// <param name="directoryPath">The directory to upload</param>
         /// <param name="indexFilename">The index default file</param>
         /// <param name="errorFilename">The error default file</param>
+        /// <param name="compactLevel">Chunk compact level [0, 65535]</param>
         /// <param name="encrypt">True to encrypt</param>
         /// <param name="redundancyLevel">Choose the redundancy level</param>
         /// <param name="postageStampIssuer">Custom postage stamp issuer</param>
+        /// <param name="chunkCuncorrency">Number of concurrent chunk hashing tasks. Null to use the default</param>
         /// <param name="chunkStore">Optional custom chunk store</param>
         /// <returns>The upload evaluation result</returns>
         Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
             string directoryPath,
             string? indexFilename = null,
             string? errorFilename = null,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null,
             IChunkStore? chunkStore = null);
         
         /// <summary>
@@ -49,18 +53,22 @@ Task<UploadEvaluationResult> EvaluateDirectoryUploadAsync(
         /// <param name="data">The file data in byte array</param>
         /// <param name="fileContentType">The file content type</param>
         /// <param name="fileName">The file name</param>
+        /// <param name="compactLevel">Chunk compact level [0, 65535]</param>
         /// <param name="encrypt">True to encrypt</param>
         /// <param name="redundancyLevel">Choose the redundancy level</param>
         /// <param name="postageStampIssuer">Custom postage stamp issuer</param>
+        /// <param name="chunkCuncorrency">Number of concurrent chunk hashing tasks. Null to use the default</param>
         /// <param name="chunkStore">Optional custom chunk store</param>
         /// <returns>The upload evaluation result</returns>
         Task<UploadEvaluationResult> EvaluateFileUploadAsync(
             byte[] data,
             string fileContentType,
             string? fileName,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null,
             IChunkStore? chunkStore = null);
         
         /// <summary>
@@ -69,18 +77,22 @@ Task<UploadEvaluationResult> EvaluateFileUploadAsync(
         /// <param name="stream">The file stream</param>
         /// <param name="fileContentType">The file content type</param>
         /// <param name="fileName">The file name</param>
+        /// <param name="compactLevel">Chunk compact level [0, 65535]</param>
         /// <param name="encrypt">True to encrypt</param>
         /// <param name="redundancyLevel">Choose the redundancy level</param>
         /// <param name="postageStampIssuer">Custom postage stamp issuer</param>
+        /// <param name="chunkCuncorrency">Number of concurrent chunk hashing tasks. Null to use the default</param>
         /// <param name="chunkStore">Optional custom chunk store</param>
         /// <returns>The upload evaluation result</returns>
         Task<UploadEvaluationResult> EvaluateFileUploadAsync(
             Stream stream,
             string fileContentType,
             string? fileName,
+            ushort compactLevel = 0,
             bool encrypt = false,
             RedundancyLevel redundancyLevel = RedundancyLevel.None,
             IPostageStampIssuer? postageStampIssuer = null,
+            int? chunkCuncorrency = null,
             IChunkStore? chunkStore = null);
 
         /// <summary>
@@ -109,15 +121,19 @@ Task<Stream> GetFileStreamFromChunksAsync(
         /// <param name="data">The data byte array input</param>
         /// <param name="outputDirectory">The output directory path</param>
         /// <param name="createDirectory">If true, create if directory doesn't exist</param>
+        /// <param name="compactLevel">Chunk compact level [0, 65535]</param>
         /// <param name="encrypt">True to encrypt</param>
         /// <param name="redundancyLevel">Choose the redundancy level</param>
+        /// <param name="chunkCuncorrency">Number of concurrent chunk hashing tasks. Null to use the default</param>
         /// <returns>The data root hash</returns>
         Task<SwarmHash> WriteDataChunksAsync(
             byte[] data,
             string outputDirectory,
             bool createDirectory = true,
+            ushort compactLevel = 0,
             bool encrypt = false,
-            RedundancyLevel redundancyLevel = RedundancyLevel.None);
+            RedundancyLevel redundancyLevel = RedundancyLevel.None,
+            int? chunkCuncorrency = null);
         
         /// <summary>
         /// Write data chunks on a local directory, without any manifest
@@ -125,14 +141,18 @@ Task<SwarmHash> WriteDataChunksAsync(
         /// <param name="stream">The data stream input</param>
         /// <param name="outputDirectory">The output directory path</param>
         /// <param name="createDirectory">If true, create if directory doesn't exist</param>
+        /// <param name="compactLevel">Chunk compact level [0, 65535]</param>
         /// <param name="encrypt">True to encrypt</param>
         /// <param name="redundancyLevel">Choose the redundancy level</param>
+        /// <param name="chunkCuncorrency">Number of concurrent chunk hashing tasks. Null to use the default</param>
         /// <returns>The data root hash</returns>
         Task<SwarmHash> WriteDataChunksAsync(
             Stream stream,
             string outputDirectory,
             bool createDirectory = true,
+            ushort compactLevel = 0,
             bool encrypt = false,
-            RedundancyLevel redundancyLevel = RedundancyLevel.None);
+            RedundancyLevel redundancyLevel = RedundancyLevel.None,
+            int? chunkCuncorrency = null);
     }
 }
\ No newline at end of file
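
A matching sketch for the chunk-only write path (no manifest), using the extended signature of WriteDataChunksAsync; the output directory is illustrative.

    using System.Threading.Tasks;
    using Etherna.BeeNet.Models;
    using Etherna.BeeNet.Services;

    static class WriteDataChunksSample
    {
        public static Task<SwarmHash> WriteAsync(ICalculatorService calculator, byte[] data) =>
            calculator.WriteDataChunksAsync(
                data,
                outputDirectory: "output/chunks",
                createDirectory: true,
                compactLevel: 0,
                encrypt: false,
                redundancyLevel: RedundancyLevel.None);
    }
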
diff --git a/src/BeeNet.Util/Services/UploadEvaluationResult.cs b/src/BeeNet.Util/Services/UploadEvaluationResult.cs
index e61dd9b..715f41c 100644
--- a/src/BeeNet.Util/Services/UploadEvaluationResult.cs
+++ b/src/BeeNet.Util/Services/UploadEvaluationResult.cs
@@ -23,9 +23,11 @@ public class UploadEvaluationResult
         // Constructor.
         internal UploadEvaluationResult(
             SwarmHash hash,
+            long missedOptimisticHashing,
             IPostageStampIssuer postageStampIssuer)
         {
             Hash = hash;
+            MissedOptimisticHashing = missedOptimisticHashing;
             PostageStampIssuer = postageStampIssuer;
         }
 
@@ -34,6 +36,8 @@ internal UploadEvaluationResult(
         /// The upload resulting hash
         /// </summary>
         public SwarmHash Hash { get; }
+        
+        public long MissedOptimisticHashing { get; }
 
         public IPostageStampIssuer PostageStampIssuer { get; }
 
@@ -41,7 +45,7 @@ internal UploadEvaluationResult(
         /// Total batch space consumed in bytes
         /// </summary>
         public long ConsumedSize =>
-            PostageStampIssuer.MaxBucketCount *
+            PostageStampIssuer.Buckets.MaxBucketCollisions *
             (long)Math.Pow(2, PostageBatch.BucketDepth) *
             SwarmChunk.DataSize;
         
@@ -49,19 +53,11 @@ internal UploadEvaluationResult(
         /// Available postage batch space after the upload, with minimum batch depth
         /// </summary>
         public long RemainingPostageBatchSize => RequiredPostageBatchByteSize - ConsumedSize;
-
-        /// <summary>
-        /// Minimum required postage batch depth to handle the upload
-        /// </summary>
-        public int RequiredPostageBatchDepth =>
-            Math.Max(
-                (int)Math.Ceiling(Math.Log2(PostageStampIssuer.MaxBucketCount)) + PostageBatch.BucketDepth,
-                PostageBatch.MinDepth);
         
         /// <summary>
         /// Minimum required postage batch byte size
         /// </summary>
         public long RequiredPostageBatchByteSize =>
-            (long)(Math.Pow(2, RequiredPostageBatchDepth) * SwarmChunk.DataSize);
+            (long)(Math.Pow(2, PostageStampIssuer.Buckets.RequiredPostageBatchDepth) * SwarmChunk.DataSize);
     }
 }
\ No newline at end of file
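
A worked instance of the sizing formulas above, assuming Swarm's usual constants (PostageBatch.BucketDepth = 16, SwarmChunk.DataSize = 4096 bytes):

    // With a fullest bucket of 3 collisions and a required batch depth of 20:
    long consumedSize = 3L * (1L << 16) * 4096;       // MaxBucketCollisions * 2^BucketDepth * DataSize = 768 MiB
    long requiredSize = (1L << 20) * 4096;            // 2^RequiredPostageBatchDepth * DataSize = 4 GiB
    long remainingSize = requiredSize - consumedSize; // mirrors RemainingPostageBatchSize
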