diff --git a/cache.go b/cache.go
index 59166a6..6f4e214 100644
--- a/cache.go
+++ b/cache.go
@@ -42,8 +42,9 @@ type CacheEntry struct {
 }
 
 type RequestError struct {
-	Error error
-	Code  int
+	Error    error
+	Code     int
+	UserInfo interface{}
 }
 
 type CacheDriver struct {
@@ -65,10 +66,14 @@ type CacheDriver struct {
 }
 
 func (cache *CacheDriver) MissCount() int {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
 	return cache.missCount
 }
 
 func (cache *CacheDriver) HitCount() int {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
 	return cache.hitCount
 }
 
@@ -85,9 +90,120 @@ func (cache *CacheDriver) ExtendedCapacity() int {
 }
 
 func (cache *CacheDriver) NumEntries() int {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
 	return cache.numEntries
 }
 
+// LazyRemove removes the entry with keyVal from the cache. It does not remove the entry immediately, but it marks it as removed.
+func (cache *CacheDriver) LazyRemove(keyVal interface{}) error {
+
+	key, err := cache.toMapKey(keyVal)
+	if err != nil {
+		return err
+	}
+
+	cache.lock.Lock()
+
+	if entry, ok := cache.table[key]; ok {
+		cache.lock.Unlock()
+		entry.lock.Lock()
+		defer entry.lock.Unlock()
+
+		if entry.expirationTime.Before(time.Now()) {
+			return fmt.Errorf("entry expired")
+		}
+
+		if entry.state != COMPUTING && entry.state != AVAILABLE {
+			// In this way, when the entry is accessed again, it will be removed
+			entry.timestamp = time.Now()
+			entry.expirationTime = entry.timestamp
+			cache.lock.Lock()
+			cache.becomeLru(entry)
+			cache.lock.Unlock()
+			return nil
+		}
+
+		if entry.state == AVAILABLE {
+			return fmt.Errorf("entry is available state")
+		}
+
+		return fmt.Errorf("entry is computing state")
+	}
+
+	cache.lock.Unlock()
+
+	return nil
+}
+
+func (cache *CacheDriver) Touch(keyVal interface{}) error {
+
+	key, err := cache.toMapKey(keyVal)
+	if err != nil {
+		return err
+	}
+
+	cache.lock.Lock()
+
+	if entry, ok := cache.table[key]; ok {
+		cache.lock.Unlock()
+		entry.lock.Lock()
+		defer entry.lock.Unlock()
+
+		currentTime := time.Now()
+		if entry.expirationTime.Before(currentTime) {
+			return fmt.Errorf("entry expired")
+		}
+
+		if entry.state != COMPUTING && entry.state != AVAILABLE {
+			// Renew the entry's ttl and make it the most recently used
+			entry.timestamp = currentTime
+			entry.expirationTime = entry.timestamp.Add(cache.ttl)
+			cache.lock.Lock()
+			cache.becomeMru(entry)
+			cache.lock.Unlock()
+			return nil
+		}
+
+		if entry.state == AVAILABLE {
+			return fmt.Errorf("entry is available state")
+		}
+
+		return fmt.Errorf("entry is computing state")
+	}
+
+	cache.lock.Unlock()
+
+	return nil
+}
+
+// Contains returns true if the cache contains keyVal. It does not update the entry timestamp and consequently it does not change the eviction order.
+func (cache *CacheDriver) Contains(keyVal interface{}) (bool, error) {
+
+	key, err := cache.toMapKey(keyVal)
+	if err != nil {
+		return false, err
+	}
+
+	cache.lock.Lock()
+
+	if entry, ok := cache.table[key]; ok {
+		cache.lock.Unlock()
+		entry.lock.Lock()
+		defer entry.lock.Unlock()
+
+		if entry.state != AVAILABLE {
+			return true, nil
+		}
+
+		return false, nil
+	}
+
+	cache.lock.Unlock()
+
+	return false, nil
+}
+
 // New Creates a new cache. Parameters are:
 //
 // capacity: maximum number of entries that cache can manage without evicting the least recently used
@@ -192,17 +308,65 @@ func (cache *CacheDriver) insertAsMru(entry *CacheEntry) {
 	cache.head.next = entry
 }
 
+func (cache *CacheDriver) insertAsLru(entry *CacheEntry) {
+	entry.prev = cache.head.prev
+	entry.next = &cache.head
+	cache.head.prev.next = entry
+	cache.head.prev = entry
+}
+
 // Auto deletion of lru queue
 func (entry *CacheEntry) selfDeleteFromLRUList() {
 	entry.prev.next = entry.next
 	entry.next.prev = entry.prev
 }
 
+func (cache *CacheDriver) isLru(entry *CacheEntry) bool {
+	return entry.next == &cache.head
+}
+
+func (cache *CacheDriver) isMru(entry *CacheEntry) bool {
+	return entry.prev == &cache.head
+}
+
+func (cache *CacheDriver) isKeyLru(keyVal interface{}) (bool, error) {
+
+	key, err := cache.toMapKey(keyVal)
+	if err != nil {
+		return false, err
+	}
+
+	if entry, ok := cache.table[key]; ok {
+		return cache.isLru(entry), nil
+	}
+
+	return false, nil
+}
+
+func (cache *CacheDriver) isKeyMru(keyVal interface{}) (bool, error) {
+
+	key, err := cache.toMapKey(keyVal)
+	if err != nil {
+		return false, err
+	}
+
+	if entry, ok := cache.table[key]; ok {
+		return cache.isMru(entry), nil
+	}
+
+	return false, nil
+}
+
 func (cache *CacheDriver) becomeMru(entry *CacheEntry) {
 	entry.selfDeleteFromLRUList()
 	cache.insertAsMru(entry)
 }
 
+func (cache *CacheDriver) becomeLru(entry *CacheEntry) {
+	entry.selfDeleteFromLRUList()
+	cache.insertAsLru(entry)
+}
+
 // Rewove the last item in the list (lru); mutex must be taken. The entry becomes AVAILABLE
 func (cache *CacheDriver) evictLruEntry() (*CacheEntry, error) {
 	entry := cache.head.prev // <-- LRU entry
diff --git a/cache_test.go b/cache_test.go
index c461ad8..2b5bed3 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -573,3 +573,76 @@ func TestConcurrencyAndCompress(t *testing.T) {
 		wg.Wait()
 	}
 }
+
+func TestCacheDriver_LazyRemove(t *testing.T) {
+
+	cache, tbl := createCacheWithCapEntriesInside()
+	N := len(tbl)
+	requests := make([]*RequestEntry, 0, N)
+	for req := range tbl {
+		requests = append(requests, req)
+	}
+
+	var lastRequest *RequestEntry
+	for i := 0; i < 100; i++ {
+		j := rand.Intn(N)
+		req := requests[j]
+		lastRequest = req
+		_, _ = cache.RetrieveFromCacheOrCompute(req, "Req", "UReq")
+		isMru, err := cache.isKeyMru(req)
+		assert.Nil(t, err)
+		assert.True(t, isMru)
+	}
+
+	err := cache.LazyRemove(lastRequest)
+	assert.Nil(t, err)
+	assert.False(t, cache.has(lastRequest))
+}
+
+func TestCacheDriver_Contains(t *testing.T) {
+
+	cache, tbl := createCacheWithCapEntriesInside()
+	N := len(tbl)
+	requests := make([]*RequestEntry, 0, N)
+	for req := range tbl {
+		requests = append(requests, req)
+	}
+
+	for i := 0; i < N; i++ {
+		req := requests[i]
+		_, _ = cache.RetrieveFromCacheOrCompute(req, "Req", "UReq")
+	}
+
+	for i := 0; i < N; i++ {
+		req := requests[i]
+		ok, err := cache.Contains(req)
+		assert.Nil(t, err)
+		assert.True(t, ok)
+	}
+}
+
+func TestCacheDriver_Touch(t *testing.T) {
+
+	cache, tbl := createCacheWithCapEntriesInside()
+	N := len(tbl)
+	requests := make([]*RequestEntry, 0, N)
+	for req := range tbl {
+		requests = append(requests, req)
+	}
+
+	for i := 0; i < N; i++ {
+		req := requests[i]
+		_, _ = cache.RetrieveFromCacheOrCompute(req, "Req", "UReq")
+	}
+
+	for i := 0; i < N; i++ {
+		req := requests[i]
+		err := cache.Touch(req)
+		assert.Nil(t, err)
+
+		mru, err := cache.isKeyMru(req)
+
+		assert.Nil(t, err)
+		assert.True(t, mru)
+	}
+}