From 5b3f632ce0a78538ea2b42377e60b58d01e61e0c Mon Sep 17 00:00:00 2001 From: Timofey Koolin Date: Fri, 15 Sep 2023 02:20:27 +0300 Subject: [PATCH] value lru to rwmutex --- go.mod | 2 +- go.sum | 4 +- internal/cache/disk.go | 79 +++--- internal/cache/memory.go | 42 +-- internal/cache/value_lru.go | 111 ++++---- internal/cache/value_lru_test.go | 257 +++++++++++------- vendor/github.com/rekby/safemutex/README.md | 20 ++ vendor/github.com/rekby/safemutex/callback.go | 3 + vendor/github.com/rekby/safemutex/mutex.go | 21 +- .../rekby/safemutex/mutex_allow_pointers.go | 33 --- .../github.com/rekby/safemutex/mutex_base.go | 16 +- .../rekby/safemutex/mutex_try_lock.go | 78 ++++++ vendor/modules.txt | 2 +- 13 files changed, 409 insertions(+), 259 deletions(-) delete mode 100644 vendor/github.com/rekby/safemutex/mutex_allow_pointers.go diff --git a/go.mod b/go.mod index 18604dfa..9de48a7e 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/jonboulle/clockwork v0.4.0 github.com/letsencrypt/pebble/v2 v2.4.0 github.com/rekby/fastuuid v0.9.0 - github.com/rekby/safemutex v0.2.0 + github.com/rekby/safemutex v0.2.1 golang.org/x/time v0.3.0 ) diff --git a/go.sum b/go.sum index 1451d75f..b0ac9620 100644 --- a/go.sum +++ b/go.sum @@ -218,8 +218,8 @@ github.com/rekby/fastuuid v0.9.0 h1:iQk8V/AyqSrgQAtKRdqx/CVep+CaKwaSWeerw1yEP3Q= github.com/rekby/fastuuid v0.9.0/go.mod h1:qP8Lh0BH2+4rNGVRDHmDpkvE/ZuLUhjmKpRWjx+WesY= github.com/rekby/fixenv v0.3.1 h1:zOPocbQmcsxSIjiVu5U+9JAfeu6WeLN7a9ryZkGTGJY= github.com/rekby/fixenv v0.3.1/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= -github.com/rekby/safemutex v0.2.0 h1:iEfcPqsR3EApwWHwdHvp+srN9Wfna+IG8bSpN467Jmk= -github.com/rekby/safemutex v0.2.0/go.mod h1:6I/yJdmctX0RmxEp00RzYBJJXl3ona8PsBiIDqg0v+U= +github.com/rekby/safemutex v0.2.1 h1:4PzkGz1dlDpsI1+601WCqxlN2xbiNuQIjYZCOZI+Xp4= +github.com/rekby/safemutex v0.2.1/go.mod h1:6I/yJdmctX0RmxEp00RzYBJJXl3ona8PsBiIDqg0v+U= github.com/rekby/zapcontext v0.0.4 h1:85600nHTteGCLcuOhGp/SzXHymm9QcCA5sn+MPKCodY= github.com/rekby/zapcontext v0.0.4/go.mod h1:lTIxvHAwWXBZBPPfEvmAEXPbVEcTwd52VaASZWZWcxI= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/internal/cache/disk.go b/internal/cache/disk.go index dba10203..3e5eebaa 100644 --- a/internal/cache/disk.go +++ b/internal/cache/disk.go @@ -3,72 +3,73 @@ package cache import ( "context" "github.com/rekby/lets-proxy2/internal/log" + "github.com/rekby/safemutex" + zc "github.com/rekby/zapcontext" + "go.uber.org/zap" "go.uber.org/zap/zapcore" - "io/ioutil" "os" "path/filepath" "strings" - "sync" - - zc "github.com/rekby/zapcontext" - "go.uber.org/zap" ) type DiskCache struct { Dir string - mu sync.RWMutex + mu safemutex.RWMutex[struct{}] } func (c *DiskCache) filepath(key string) string { return filepath.Join(c.Dir, diskCacheSanitizeKey(key)) } -func (c *DiskCache) Get(ctx context.Context, key string) ([]byte, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - filePath := c.filepath(key) - - logLevel := zapcore.DebugLevel - res, err := ioutil.ReadFile(filePath) - if err != nil { - if os.IsNotExist(err) { - err = ErrCacheMiss - } else { - logLevel = zapcore.ErrorLevel +func (c *DiskCache) Get(ctx context.Context, key string) (res []byte, err error) { + c.mu.RLock(func(synced struct{}) { + filePath := c.filepath(key) + + logLevel := zapcore.DebugLevel + res, err = os.ReadFile(filePath) + if err != nil { + if os.IsNotExist(err) { + err = ErrCacheMiss + } else { + logLevel = zapcore.ErrorLevel + } } - } - 
log.LevelParamCtx(ctx, logLevel, "Got from disk cache", zap.String("dir", c.Dir), zap.String("key", key), - zap.String("file", filePath), zap.Error(err)) + log.LevelParamCtx(ctx, logLevel, "Got from disk cache", zap.String("dir", c.Dir), zap.String("key", key), + zap.String("file", filePath), zap.Error(err)) + }) return res, err } -func (c *DiskCache) Put(ctx context.Context, key string, data []byte) error { - c.mu.Lock() - defer c.mu.Unlock() +func (c *DiskCache) Put(ctx context.Context, key string, data []byte) (err error) { + c.mu.Lock(func(synced struct{}) struct{} { + zc.L(ctx).Debug("Put to disk cache", zap.String("dir", c.Dir), zap.String("key", key)) + err = os.WriteFile(c.filepath(key), data, 0600) + zc.L(ctx).Debug("Put to disk cache result.", zap.String("dir", c.Dir), zap.String("key", key), + zap.Error(err)) + + return synced + }) - zc.L(ctx).Debug("Put to disk cache", zap.String("dir", c.Dir), zap.String("key", key)) - err := ioutil.WriteFile(c.filepath(key), data, 0600) - zc.L(ctx).Debug("Put to disk cache result.", zap.String("dir", c.Dir), zap.String("key", key), - zap.Error(err)) return err } -func (c *DiskCache) Delete(ctx context.Context, key string) error { - c.mu.Lock() - defer c.mu.Unlock() +func (c *DiskCache) Delete(ctx context.Context, key string) (err error) { + c.mu.Lock(func(synced struct{}) struct{} { + zc.L(ctx).Debug("Delete from cache", zap.String("dir", c.Dir), zap.String("key", key)) + err = os.Remove(c.filepath(key)) + zc.L(ctx).Debug("Delete from cache result", zap.String("dir", c.Dir), zap.String("key", key), + zap.Error(err)) + + if os.IsNotExist(err) { + err = nil + } - zc.L(ctx).Debug("Delete from cache", zap.String("dir", c.Dir), zap.String("key", key)) - err := os.Remove(c.filepath(key)) - zc.L(ctx).Debug("Delete from cache result", zap.String("dir", c.Dir), zap.String("key", key), - zap.Error(err)) + return synced + }) - if os.IsNotExist(err) { - err = nil - } return err } diff --git a/internal/cache/memory.go b/internal/cache/memory.go index 2a31fbf1..eee20e43 100644 --- a/internal/cache/memory.go +++ b/internal/cache/memory.go @@ -2,8 +2,7 @@ package cache import ( "context" - "sync" - + "github.com/rekby/safemutex" zc "github.com/rekby/zapcontext" "go.uber.org/zap" ) @@ -11,14 +10,13 @@ import ( type MemoryCache struct { Name string // use for log - mu sync.RWMutex - m map[string][]byte + stateMutex safemutex.RWMutexWithPointers[map[string][]byte] } func NewMemoryCache(name string) *MemoryCache { return &MemoryCache{ - Name: name, - m: make(map[string][]byte), + Name: name, + stateMutex: safemutex.RWNewWithPointers(map[string][]byte{}), } } @@ -28,12 +26,14 @@ func (c *MemoryCache) Get(ctx context.Context, key string) (data []byte, err err zap.String("key", key), zap.Int("data_len", len(data)), zap.Error(err)) }() - c.mu.RLock() - defer c.mu.RUnlock() - if resp, exist := c.m[key]; exist { - return resp, nil - } - return nil, ErrCacheMiss + c.stateMutex.RLock(func(synced map[string][]byte) { + if resp, exist := synced[key]; exist { + data = resp + return + } + err = ErrCacheMiss + }) + return data, err } func (c *MemoryCache) Put(ctx context.Context, key string, data []byte) (err error) { @@ -42,9 +42,13 @@ func (c *MemoryCache) Put(ctx context.Context, key string, data []byte) (err err zap.String("key", key), zap.Int("data_len", len(data)), zap.Error(err)) }() - c.mu.Lock() - defer c.mu.Unlock() - c.m[key] = data + localCopy := make([]byte, len(data)) + copy(localCopy, data) + c.stateMutex.Lock(func(synced map[string][]byte) map[string][]byte { 
+		synced[key] = localCopy
+		return synced
+	})
+
 	return nil
 }
 
@@ -54,10 +58,10 @@ func (c *MemoryCache) Delete(ctx context.Context, key string) (err error) {
 		zap.String("key", key), zap.Error(err))
 	}()
 
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	delete(c.m, key)
+	c.stateMutex.Lock(func(synced map[string][]byte) map[string][]byte {
+		delete(synced, key)
+		return synced
+	})
 	return nil
 }
diff --git a/internal/cache/value_lru.go b/internal/cache/value_lru.go
index f38c7770..e99698eb 100644
--- a/internal/cache/value_lru.go
+++ b/internal/cache/value_lru.go
@@ -2,9 +2,9 @@ package cache
 
 import (
 	"context"
+	"github.com/rekby/safemutex"
 	"math"
 	"sort"
-	"sync"
 	"sync/atomic"
 
 	zc "github.com/rekby/zapcontext"
@@ -28,14 +28,19 @@ type MemoryValueLRU struct {
 	CleanCount int
 
 	lastTime uint64
-	mu       sync.RWMutex
-	m        map[string]*memoryValueLRUItem // stored always non nil item
+	mu       safemutex.RWMutexWithPointers[memoryValueLRUSynced]
+}
+
+type memoryValueLRUSynced struct {
+	Items map[string]*memoryValueLRUItem // always stores non-nil items
 }
 
 func NewMemoryValueLRU(name string) *MemoryValueLRU {
 	return &MemoryValueLRU{
-		Name: name,
-		m:    make(map[string]*memoryValueLRUItem, defaultMemoryLimitSize+1),
+		Name: name,
+		mu: safemutex.RWNewWithPointers(memoryValueLRUSynced{
+			Items: make(map[string]*memoryValueLRUItem, defaultMemoryLimitSize+1),
+		}),
 		MaxSize:    defaultMemoryLimitSize,
 		CleanCount: defaultLRUCleanCount,
 	}
@@ -47,14 +52,16 @@ func (c *MemoryValueLRU) Get(ctx context.Context, key string) (value interface{}
 		zap.String("key", key), zap.Reflect("value", value), zap.Error(err))
 	}()
 
-	c.mu.RLock()
-	defer c.mu.RUnlock()
+	c.mu.RLock(func(synced memoryValueLRUSynced) {
+		if resp, exist := synced.Items[key]; exist {
+			resp.lastUsedTime.Store(c.time())
+			value = resp.value
+			return
+		}
+		err = ErrCacheMiss
+	})
 
-	if resp, exist := c.m[key]; exist {
-		resp.lastUsedTime.Store(c.time())
-		return resp.value, nil
-	}
-	return nil, ErrCacheMiss
+	return value, err
 }
 
@@ -63,13 +70,16 @@ func (c *MemoryValueLRU) Put(ctx context.Context, key string, value interface{})
 		zap.String("key", key), zap.Reflect("data_len", value), zap.Error(err))
 	}()
 
-	c.mu.Lock()
-	c.m[key] = &memoryValueLRUItem{key: key, value: value, lastUsedTime: newUint64Atomic(c.time())}
-	if len(c.m) > c.MaxSize {
-		// handlepanic: no external call
-		go c.clean()
-	}
-	c.mu.Unlock()
+	c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced {
+		synced.Items[key] = &memoryValueLRUItem{key: key, value: value, lastUsedTime: newUint64Atomic(c.time())}
+		if len(synced.Items) > c.MaxSize {
+			// handlepanic: no external call
+			go c.clean()
+		}
+
+		return synced
+	})
+
 	return nil
 }
 
@@ -79,10 +89,10 @@ func (c *MemoryValueLRU) Delete(ctx context.Context, key string) (err error) {
 		zap.String("key", key), zap.Error(err))
 	}()
 
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	delete(c.m, key)
+	c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced {
+		delete(synced.Items, key)
+		return synced
+	})
 	return nil
 }
 
@@ -97,20 +107,19 @@ func (c *MemoryValueLRU) time() uint64 {
 }
 
 func (c *MemoryValueLRU) renumberTime() {
-	c.mu.Lock()
-
-	items := c.getSortedItems()
-	for i, item := range items {
-		item.lastUsedTime.Store(uint64(i))
-	}
-
-	c.mu.Unlock()
+	c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced {
+		items := c.getSortedItems(&synced)
+		for i, item := range items {
+			item.lastUsedTime.Store(uint64(i))
+		}
+		return synced
+	})
 }
 
 // must called 
from locked state -func (c *MemoryValueLRU) getSortedItems() []*memoryValueLRUItem { - items := make([]*memoryValueLRUItem, 0, len(c.m)) - for _, item := range c.m { +func (c *MemoryValueLRU) getSortedItems(synced *memoryValueLRUSynced) []*memoryValueLRUItem { + items := make([]*memoryValueLRUItem, 0, len(synced.Items)) + for _, item := range synced.Items { items = append(items, item) } @@ -121,28 +130,30 @@ func (c *MemoryValueLRU) getSortedItems() []*memoryValueLRUItem { } func (c *MemoryValueLRU) clean() { - c.mu.Lock() - defer c.mu.Unlock() + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + if c.CleanCount == 0 { + return synced + } - if c.CleanCount == 0 { - return - } + if len(synced.Items) <= c.MaxSize { + return synced + } - if len(c.m) <= c.MaxSize { - return - } + if c.CleanCount >= c.MaxSize { + synced.Items = make(map[string]*memoryValueLRUItem, c.MaxSize+1) + return synced + } - if c.CleanCount >= c.MaxSize { - c.m = make(map[string]*memoryValueLRUItem, c.MaxSize+1) - return - } + items := c.getSortedItems(&synced) - items := c.getSortedItems() + for i := 0; i < c.CleanCount; i++ { + delete(synced.Items, items[i].key) + } - for i := 0; i < c.CleanCount; i++ { - delete(c.m, items[i].key) - } + return synced + }) } + func newUint64Atomic(val uint64) atomic.Uint64 { var v atomic.Uint64 v.Store(val) diff --git a/internal/cache/value_lru_test.go b/internal/cache/value_lru_test.go index b47a6bb3..f87ed4ae 100644 --- a/internal/cache/value_lru_test.go +++ b/internal/cache/value_lru_test.go @@ -115,123 +115,175 @@ func TestValueLRULimitClean(t *testing.T) { c.MaxSize = 5 c.CleanCount = 0 - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 6) - td.CmpDeeply(c.m["1"].value, 1) - td.CmpDeeply(c.m["2"].value, 2) - td.CmpDeeply(c.m["3"].value, 3) - td.CmpDeeply(c.m["4"].value, 4) - td.CmpDeeply(c.m["5"].value, 5) - td.CmpDeeply(c.m["6"].value, 6) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 6) + td.CmpDeeply(synced.Items["1"].value, 1) + td.CmpDeeply(synced.Items["2"].value, 2) + td.CmpDeeply(synced.Items["3"].value, 3) + td.CmpDeeply(synced.Items["4"].value, 4) + td.CmpDeeply(synced.Items["5"].value, 5) + td.CmpDeeply(synced.Items["6"].value, 6) + }) c.MaxSize = 5 c.CleanCount = 3 - 
c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 5) - td.CmpDeeply(c.m["1"].value, 1) - td.CmpDeeply(c.m["2"].value, 2) - td.CmpDeeply(c.m["3"].value, 3) - td.CmpDeeply(c.m["4"].value, 4) - td.CmpDeeply(c.m["5"].value, 5) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 5) + td.CmpDeeply(synced.Items["1"].value, 1) + td.CmpDeeply(synced.Items["2"].value, 2) + td.CmpDeeply(synced.Items["3"].value, 3) + td.CmpDeeply(synced.Items["4"].value, 4) + td.CmpDeeply(synced.Items["5"].value, 5) + }) c.MaxSize = 5 c.CleanCount = 2 - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 4) - td.Nil(c.m["1"]) - td.Nil(c.m["2"]) - td.CmpDeeply(c.m["3"].value, 3) - td.CmpDeeply(c.m["4"].value, 4) - td.CmpDeeply(c.m["5"].value, 5) - td.CmpDeeply(c.m["6"].value, 6) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 4) + td.Nil(synced.Items["1"]) + td.Nil(synced.Items["2"]) + td.CmpDeeply(synced.Items["3"].value, 3) + td.CmpDeeply(synced.Items["4"].value, 4) + td.CmpDeeply(synced.Items["5"].value, 5) + td.CmpDeeply(synced.Items["6"].value, 6) + + }) // reverse c.MaxSize = 5 c.CleanCount = 2 - c.m = 
make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(6)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(5)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(4)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(3)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(2)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(1)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(6)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(5)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(4)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(3)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(2)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(1)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 4) - td.CmpDeeply(c.m["1"].value, 1) - td.CmpDeeply(c.m["2"].value, 2) - td.CmpDeeply(c.m["3"].value, 3) - td.CmpDeeply(c.m["4"].value, 4) - td.Nil(c.m["5"]) - td.Nil(c.m["6"]) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 4) + td.CmpDeeply(synced.Items["1"].value, 1) + td.CmpDeeply(synced.Items["2"].value, 2) + td.CmpDeeply(synced.Items["3"].value, 3) + td.CmpDeeply(synced.Items["4"].value, 4) + td.Nil(synced.Items["5"]) + td.Nil(synced.Items["6"]) + }) c.MaxSize = 5 c.CleanCount = 5 - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 0) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 0) + }) c.MaxSize = 5 c.CleanCount = 6 - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: 
"3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + return synced + }) + c.clean() - td.CmpDeeply(len(c.m), 0) + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 0) + }) // update used time on get c.MaxSize = 5 c.CleanCount = 3 - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} - c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} - c.m["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(1)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(2)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(3)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(4)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(5)} + synced.Items["6"] = &memoryValueLRUItem{key: "6", value: 6, lastUsedTime: newUint64Atomic(6)} + return synced + }) + _, _ = c.Get(ctx, "6") _, _ = c.Get(ctx, "2") _, _ = c.Get(ctx, "3") _, _ = c.Get(ctx, "5") _, _ = c.Get(ctx, "1") _, _ = c.Get(ctx, "4") + c.clean() - td.CmpDeeply(len(c.m), 3) - td.Nil(c.m["6"]) - td.Nil(c.m["2"]) - td.Nil(c.m["3"]) - td.CmpDeeply(c.m["5"].value, 5) - td.CmpDeeply(c.m["1"].value, 1) - td.CmpDeeply(c.m["4"].value, 4) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 3) + td.Nil(synced.Items["6"]) + td.Nil(synced.Items["2"]) + td.Nil(synced.Items["3"]) + td.CmpDeeply(synced.Items["5"].value, 5) + td.CmpDeeply(synced.Items["1"].value, 1) + td.CmpDeeply(synced.Items["4"].value, 4) + }) } func TestLimitValueRenumberItems(t *testing.T) { @@ -241,24 +293,29 @@ func TestLimitValueRenumberItems(t *testing.T) { td := testdeep.NewT(t) var c = NewMemoryValueLRU("test") - c.m = make(map[string]*memoryValueLRUItem) - c.m["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(100)} - c.m["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(200)} - 
c.m["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(300)} - c.m["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(400)} - c.m["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(500)} + c.mu.Lock(func(synced memoryValueLRUSynced) memoryValueLRUSynced { + synced.Items = make(map[string]*memoryValueLRUItem) + synced.Items["1"] = &memoryValueLRUItem{key: "1", value: 1, lastUsedTime: newUint64Atomic(100)} + synced.Items["2"] = &memoryValueLRUItem{key: "2", value: 2, lastUsedTime: newUint64Atomic(200)} + synced.Items["3"] = &memoryValueLRUItem{key: "3", value: 3, lastUsedTime: newUint64Atomic(300)} + synced.Items["4"] = &memoryValueLRUItem{key: "4", value: 4, lastUsedTime: newUint64Atomic(400)} + synced.Items["5"] = &memoryValueLRUItem{key: "5", value: 5, lastUsedTime: newUint64Atomic(500)} + return synced + }) c.lastTime = math.MaxUint64/2 - 1 _ = c.Put(ctx, "6", 6) time.Sleep(time.Millisecond * 10) - td.CmpDeeply(len(c.m), 6) - - c.mu.RLock() - defer c.mu.RLock() - td.CmpDeeply(c.m["1"].lastUsedTime.Load(), uint64(0)) - td.CmpDeeply(c.m["2"].lastUsedTime.Load(), uint64(1)) - td.CmpDeeply(c.m["3"].lastUsedTime.Load(), uint64(2)) - td.CmpDeeply(c.m["4"].lastUsedTime.Load(), uint64(3)) - td.CmpDeeply(c.m["5"].lastUsedTime.Load(), uint64(4)) - td.CmpDeeply(c.m["6"].lastUsedTime.Load(), uint64(5)) + + c.mu.RLock(func(synced memoryValueLRUSynced) { + td.CmpDeeply(len(synced.Items), 6) + + td.CmpDeeply(synced.Items["1"].lastUsedTime.Load(), uint64(0)) + td.CmpDeeply(synced.Items["2"].lastUsedTime.Load(), uint64(1)) + td.CmpDeeply(synced.Items["3"].lastUsedTime.Load(), uint64(2)) + td.CmpDeeply(synced.Items["4"].lastUsedTime.Load(), uint64(3)) + td.CmpDeeply(synced.Items["5"].lastUsedTime.Load(), uint64(4)) + td.CmpDeeply(synced.Items["6"].lastUsedTime.Load(), uint64(5)) + }) + } diff --git a/vendor/github.com/rekby/safemutex/README.md b/vendor/github.com/rekby/safemutex/README.md index edf6ac03..57aad1c7 100644 --- a/vendor/github.com/rekby/safemutex/README.md +++ b/vendor/github.com/rekby/safemutex/README.md @@ -41,3 +41,23 @@ func main() { }) } ``` + + +# Benchmark result + +Safe mutexes are optimized for performance and give an additional load within the measurement error. 
+
+```
+BenchmarkSyncMutexLock
+BenchmarkSyncMutexLock-10                	88132146	    13.57 ns/op	       0 B/op	       0 allocs/op
+BenchmarkSafeMutexLock
+BenchmarkSafeMutexLock-10                	88716652	    13.56 ns/op	       0 B/op	       0 allocs/op
+BenchmarkSafeMutexWithPointersLock
+BenchmarkSafeMutexWithPointersLock-10    	87819339	    13.64 ns/op	       0 B/op	       0 allocs/op
+BenchmarkSyncRWMutexLock
+BenchmarkSyncRWMutexLock-10              	64879916	    18.52 ns/op	       0 B/op	       0 allocs/op
+BenchmarkSafeRWMutexLock
+BenchmarkSafeRWMutexLock-10              	64612960	    18.50 ns/op	       0 B/op	       0 allocs/op
+BenchmarkSafeRWMutexWithPointersLock
+BenchmarkSafeRWMutexWithPointersLock-10  	64686685	    18.58 ns/op	       0 B/op	       0 allocs/op
+```
diff --git a/vendor/github.com/rekby/safemutex/callback.go b/vendor/github.com/rekby/safemutex/callback.go
index 14904885..965340e6 100644
--- a/vendor/github.com/rekby/safemutex/callback.go
+++ b/vendor/github.com/rekby/safemutex/callback.go
@@ -1,4 +1,7 @@
 package safemutex
 
+// ReadCallback receives the current value saved in the mutex
+type ReadCallback[T any] func(synced T)
+
 // ReadWriteCallback receive current value, saved in mutex and return new value
 type ReadWriteCallback[T any] func(synced T) T
diff --git a/vendor/github.com/rekby/safemutex/mutex.go b/vendor/github.com/rekby/safemutex/mutex.go
index bbee8048..c82b9d6c 100644
--- a/vendor/github.com/rekby/safemutex/mutex.go
+++ b/vendor/github.com/rekby/safemutex/mutex.go
@@ -2,6 +2,7 @@ package safemutex
 
 import (
 	"reflect"
+	"sync"
 )
 
 // Mutex contains guarded value inside, access to value allowed inside callbacks only
@@ -10,15 +11,16 @@
 // Mutex deny to save value with any type of pointers, which allow accidentally change internal state.
 // it will panic if T contains any pointer.
 type Mutex[T any] struct {
-	mutexBase[T]
-	initialized bool
+	mutexBase[T, sync.Mutex]
+	initOnce    sync.Once
+	initialized bool // for tests only
 }
 
 // New create Mutex with initial value and default options.
 // New call internal checks for T and panic if checks failed, see MutexOptions for details
 func New[T any](value T) Mutex[T] {
 	res := Mutex[T]{
-		mutexBase: mutexBase[T]{
+		mutexBase: mutexBase[T, sync.Mutex]{
 			value: value,
 		},
 	}
@@ -32,7 +34,7 @@
 
 // Lock - call f within locked mutex.
// it will panic if value type not pass internal checks -// it will panic with ErrPoisoned if previous call exited without return value: +// it will panic with ErrPoisoned if previous locked call exited without return value: // with panic or runtime.Goexit() func (m *Mutex[T]) Lock(f ReadWriteCallback[T]) { m.m.Lock() @@ -45,15 +47,14 @@ func (m *Mutex[T]) Lock(f ReadWriteCallback[T]) { func (m *Mutex[T]) validateLocked() { m.baseValidateLocked() - if m.initialized { - return - } + m.initOnce.Do(m.initLocked) +} - // check pointers +func (m *Mutex[T]) initLocked() { if checkTypeCanContainPointers(reflect.TypeOf(m.value)) { - panic(errContainPointers) + m.errWrap.err = errContainPointers + panic(m.errWrap) } - m.initialized = true } diff --git a/vendor/github.com/rekby/safemutex/mutex_allow_pointers.go b/vendor/github.com/rekby/safemutex/mutex_allow_pointers.go deleted file mode 100644 index e841ddb2..00000000 --- a/vendor/github.com/rekby/safemutex/mutex_allow_pointers.go +++ /dev/null @@ -1,33 +0,0 @@ -package safemutex - -// MutexWithPointers contains guarded value inside, access to value allowed inside callbacks only -// it allow to guarantee not access to the value without lock the mutex -// zero value is usable as mutex with default options and zero value of guarded type -type MutexWithPointers[T any] struct { - mutexBase[T] -} - -// NewWithPointers create Mutex with initial value and default options. -// NewWithPointers call internal checks for T and panic if checks failed, see MutexOptions for details -func NewWithPointers[T any](value T) MutexWithPointers[T] { - res := MutexWithPointers[T]{ - mutexBase: mutexBase[T]{ - value: value, - }, - } - - //nolint:govet - //goland:noinspection GoVetCopyLock - return res -} - -// Lock - call f within locked mutex. -// it will panic with ErrPoisoned if previous call exited without return value: -// with panic or runtime.Goexit() -func (m *MutexWithPointers[T]) Lock(f ReadWriteCallback[T]) { - m.m.Lock() - defer m.m.Unlock() - - m.baseValidateLocked() - m.callLocked(f) -} diff --git a/vendor/github.com/rekby/safemutex/mutex_base.go b/vendor/github.com/rekby/safemutex/mutex_base.go index 3be55f6a..d4f92417 100644 --- a/vendor/github.com/rekby/safemutex/mutex_base.go +++ b/vendor/github.com/rekby/safemutex/mutex_base.go @@ -2,20 +2,28 @@ package safemutex import "sync" -type mutexBase[T any] struct { - m sync.Mutex +type mutexVariant interface { + sync.Mutex | sync.RWMutex +} + +type mutexBase[T any, M mutexVariant] struct { + m M value T errWrap errWrap } -func (m *mutexBase[T]) baseValidateLocked() { +func (m *mutexBase[T, M]) baseValidateLocked() { if m.errWrap.err != nil { panic(m.errWrap) } } -func (m *mutexBase[T]) callLocked(f ReadWriteCallback[T]) { +func (m *mutexBase[T, M]) callLocked(f ReadWriteCallback[T]) { m.errWrap.err = ErrPoisoned m.value = f(m.value) m.errWrap.err = nil } + +func (m *mutexBase[T, M]) callReadLocked(f ReadCallback[T]) { + f(m.value) +} diff --git a/vendor/github.com/rekby/safemutex/mutex_try_lock.go b/vendor/github.com/rekby/safemutex/mutex_try_lock.go index be8fccf2..50d499dd 100644 --- a/vendor/github.com/rekby/safemutex/mutex_try_lock.go +++ b/vendor/github.com/rekby/safemutex/mutex_try_lock.go @@ -23,6 +23,46 @@ func (m *Mutex[T]) TryLock(f ReadWriteCallback[T]) bool { return true } +// TryLock - call f within locked mutex if locked successfully. 
+// returns true if the mutex was locked successfully
+// returns false if the mutex is already locked
+// it will panic if the value type does not pass internal checks
+// it will panic with ErrPoisoned if the lock was acquired and a previous locked call exited without returning a value:
+// with panic or runtime.Goexit()
+//
+// Available since go 1.19 only
+func (m *RWMutex[T]) TryLock(f ReadWriteCallback[T]) bool {
+	locked := m.m.TryLock()
+	if !locked {
+		return false
+	}
+	defer m.m.Unlock()
+
+	m.validateLocked()
+	m.callLocked(f)
+	return true
+}
+
+// TryRLock - call f within read locked mutex if locked successfully.
+// returns true if the mutex was read locked successfully
+// returns false if the mutex is already locked for writing
+// it will panic if the value type does not pass internal checks
+// it will panic with ErrPoisoned if the lock was acquired and a previous locked call exited without returning a value:
+// with panic or runtime.Goexit()
+//
+// Available since go 1.19 only
+func (m *RWMutex[T]) TryRLock(f ReadCallback[T]) bool {
+	locked := m.m.TryRLock()
+	if !locked {
+		return false
+	}
+	defer m.m.RUnlock()
+
+	m.validateLocked()
+	m.callReadLocked(f)
+	return true
+}
+
 // TryLock - call f within locked mutex if locked successfully.
 // returned true if locked successfully
 // return true if mutex already locked
@@ -41,3 +81,41 @@ func (m *MutexWithPointers[T]) TryLock(f ReadWriteCallback[T]) bool {
 	m.callLocked(f)
 	return true
 }
+
+// TryLock - call f within locked mutex if locked successfully.
+// returns true if the mutex was locked successfully
+// returns false if the mutex is already locked
+// it will panic with ErrPoisoned if the lock was acquired and a previous locked call exited without returning a value:
+// with panic or runtime.Goexit()
+//
+// Available since go 1.19 only
+func (m *RWMutexWithPointers[T]) TryLock(f ReadWriteCallback[T]) bool {
+	locked := m.m.TryLock()
+	if !locked {
+		return false
+	}
+	defer m.m.Unlock()
+
+	m.baseValidateLocked()
+	m.callLocked(f)
+	return true
+}
+
+// TryRLock - call f within read locked mutex if locked successfully.
+// returns true if the mutex was read locked successfully
+// returns false if the mutex is already locked for writing
+// it will panic with ErrPoisoned if the lock was acquired and a previous locked call exited without returning a value:
+// with panic or runtime.Goexit()
+//
+// Available since go 1.19 only
+func (m *RWMutexWithPointers[T]) TryRLock(f ReadCallback[T]) bool {
+	locked := m.m.TryRLock()
+	if !locked {
+		return false
+	}
+	defer m.m.RUnlock()
+
+	m.baseValidateLocked()
+	m.callReadLocked(f)
+	return true
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 47c59ffa..4bf58a9d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -139,7 +139,7 @@ github.com/rekby/fastuuid/internal/ibytes
 # github.com/rekby/fixenv v0.3.1
 ## explicit; go 1.18
 github.com/rekby/fixenv
-# github.com/rekby/safemutex v0.2.0
+# github.com/rekby/safemutex v0.2.1
 ## explicit; go 1.18
 github.com/rekby/safemutex
 # github.com/rekby/zapcontext v0.0.4
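
For readers new to rekby/safemutex, here is a minimal sketch of the callback-style locking this patch migrates the caches to. The `visitCounter` type is hypothetical (it is not part of the patch); the `safemutex` calls mirror the usage visible in the diff: `RWNewWithPointers` wraps map-typed state, `Lock` receives the guarded value and stores the returned value back, and `RLock` grants read-only access through a callback that cannot replace the state.

```go
// Sketch of the callback-style locking used by the converted caches.
// visitCounter is illustrative only -- it does not exist in the patch.
package main

import (
	"fmt"

	"github.com/rekby/safemutex"
)

type visitCounter struct {
	// Maps contain pointers internally, so the ...WithPointers variant is
	// used for map-typed state (as MemoryCache does above).
	mu safemutex.RWMutexWithPointers[map[string]int]
}

func newVisitCounter() *visitCounter {
	return &visitCounter{
		mu: safemutex.RWNewWithPointers(map[string]int{}),
	}
}

// Inc mutates the guarded map: Lock hands the current value to the callback
// and stores whatever the callback returns.
func (c *visitCounter) Inc(key string) {
	c.mu.Lock(func(synced map[string]int) map[string]int {
		synced[key]++
		return synced
	})
}

// Get reads under a shared lock: the read callback returns nothing, so a
// read lock has no way to replace the guarded state.
func (c *visitCounter) Get(key string) (n int) {
	c.mu.RLock(func(synced map[string]int) {
		n = synced[key]
	})
	return n
}

func main() {
	c := newVisitCounter()
	c.Inc("/index")
	c.Inc("/index")
	fmt.Println(c.Get("/index")) // 2

	// TryLock (added in safemutex v0.2.1) runs the callback only when the
	// lock is immediately available and reports whether it ran.
	ran := c.mu.TryLock(func(synced map[string]int) map[string]int {
		synced["/about"]++
		return synced
	})
	fmt.Println("TryLock ran:", ran) // true here: nothing else holds the lock
}
```

This is the property the conversion buys: the guarded value is reachable only inside a callback, so unlocked access and forgotten `Unlock` calls become impossible by construction rather than a code-review concern.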