diff --git a/LICENSE b/LICENSE index b70b651..2c247fa 100644 --- a/LICENSE +++ b/LICENSE @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file +THE SOFTWARE. diff --git a/README.md b/README.md index e9129b1..b59b5a8 100644 --- a/README.md +++ b/README.md @@ -58,14 +58,14 @@ type KeyInfo struct { } ``` -### FlushMru(), FlushMfu(), FlushAll() +### FlushMRU(), FlushMFU(), FlushAll() ```go -err := c.FlushMru() -err := c.FlushMfu() +err := c.FlushMRU() +err := c.FlushMFU() err := c.FlushAll() ``` -Flush commands flush all keys from the respective cache. `FlushAll` is faster than combining `FlushMru` and `FlushMfu`. +Flush commands flush all keys from the respective cache. `FlushAll` is faster than combining `FlushMRU` and `FlushMFU`. ### Pause(), Resume() ```go @@ -73,7 +73,14 @@ c.Pause() c.Resume() ``` -Pause and Resume allow auto evictions to be suspended and resumed, respectively. If eviction logging is enabled and evictions are paused, bicache will log accordingly. +Pause and Resume allow auto evictions to be suspended and resumed, respectively. If eviction logging is enabled and evictions are paused, bicache will log accordingly. + +### Close() +```go +c.Close() +``` + +Close should be called when a \*Bicache is done being used, before removing any references to it, to ensure any background tasks have returned and that it can be cleanly garbage collected. ### Stats() ```go @@ -84,10 +91,10 @@ Returns a \*bicache.Stats. ```go type Stats struct { - MfuSize uint // Number of acive MFU keys. - MruSize uint // Number of active MRU keys. - MfuUsedP uint // MFU used in percent. - MruUsedP uint // MRU used in percent. + MFUSize uint // Number of active MFU keys. + MRUSize uint // Number of active MRU keys. + MFUUsedP uint // MFU used in percent. + MRUUsedP uint // MRU used in percent. Hits uint64 // Cache hits. Misses uint64 // Cache misses. Evictions uint64 // Cache evictions. @@ -102,7 +109,7 @@ j, _ := json.Marshal(stats) fmt.Println(string(j)) ``` ``` -{"MfuSize":0,"MruSize":3,"MfuUsedP":0,"MruUsedP":4,"Hits":3,"Misses":0,"Evictions":0,"Overflows":0} +{"MFUSize":0,"MRUSize":3,"MFUUsedP":0,"MRUUsedP":4,"Hits":3,"Misses":0,"Evictions":0,"Overflows":0} ``` # Design @@ -134,7 +141,7 @@ Tested with Go 1.7+. ### Shard counts -Shards must be sized in powers of 2. Shards are relatively inexpensive to manage but should not be arbitrarily high. Shard sizing should be relative to desired cache sizes and workload; more key space and greater write concurrency/rates are better suited with more shards. "Normal" sizes might be 8 shards for simple testing and 1024 shards for production workloads that experience tens of thousands (or more) of cache lookups a second. +Shards must be sized in powers of 2. Shards are relatively inexpensive to manage but should not be arbitrarily high. Shard sizing should be relative to desired cache sizes and workload; more key space and greater write concurrency/rates are better suited with more shards. "Normal" sizes might be 8 shards for simple testing and 1024 shards for production workloads that experience tens of thousands (or more) of cache lookups a second.
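As a rough illustration of the shard sizing guidance above, a minimal sketch (the `nextPow2` helper is hypothetical, not part of bicache) that rounds a desired shard count up to the next power of 2 before constructing the cache:

```go
// nextPow2 is a hypothetical helper that rounds n up to the next
// power of 2, since bicache requires power-of-2 shard counts.
func nextPow2(n int) int {
	p := 1
	for p < n {
		p <<= 1
	}
	return p
}

c, _ := bicache.New(&bicache.Config{
	MFUSize:    50000,
	MRUSize:    100000,
	ShardCount: nextPow2(900), // Rounds up to 1024 for a busy production workload.
})
```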
### Cache sizes @@ -173,8 +180,8 @@ import ( func main() { c, _ := bicache.New(&bicache.Config{ - MfuSize: 24, // MFU capacity in keys - MruSize: 64, // MRU capacity in keys + MFUSize: 24, // MFU capacity in keys + MRUSize: 64, // MRU capacity in keys ShardCount: 16, // Shard count. Defaults to 512 if unset. AutoEvict: 30000, // Run TTL evictions + MRU->MFU promotions / evictions automatically every 30s. EvictLog: true, // Emit eviction timing logs. @@ -208,5 +215,5 @@ john 5535 [109 121 32 118 97 108 117 101] -{"MfuSize":0,"MruSize":3,"MfuUsedP":0,"MruUsedP":4,"Hits":3,"Misses":0,"Evictions":0,"Overflows":0} +{"MFUSize":0,"MRUSize":3,"MFUUsedP":0,"MRUUsedP":4,"Hits":3,"Misses":0,"Evictions":0,"Overflows":0} ``` diff --git a/bicache.go b/bicache.go index a1d7a70..2ff0c89 100644 --- a/bicache.go +++ b/bicache.go @@ -4,6 +4,7 @@ package bicache import ( "container/list" + "context" "errors" "log" "math" @@ -24,6 +25,7 @@ type Bicache struct { ShardCount uint32 Size int paused uint32 + done context.CancelFunc } // Shard implements a cache unit @@ -61,8 +63,8 @@ type counters struct { // defers the operation until each Set is called // on the bicache. type Config struct { - MfuSize uint - MruSize uint + MFUSize uint + MRUSize uint AutoEvict uint EvictLog bool ShardCount int @@ -89,10 +91,10 @@ type cacheData struct { // Stats holds Bicache // statistics data. type Stats struct { - MfuSize uint // Number of acive MFU keys. - MruSize uint // Number of active MRU keys. - MfuUsedP uint // MFU used in percent. - MruUsedP uint // MRU used in percent. + MFUSize uint // Number of active MFU keys. + MRUSize uint // Number of active MRU keys. + MFUUsedP uint // MFU used in percent. + MRUUsedP uint // MRU used in percent. Hits uint64 // Cache hits. Misses uint64 // Cache misses. Evictions uint64 // Cache evictions. @@ -100,14 +102,14 @@ type Stats struct { } // New takes a *Config and returns -// an initialized *Shard. +// an initialized *Bicache. func New(c *Config) (*Bicache, error) { // Check that ShardCount is a power of 2. if (c.ShardCount & (c.ShardCount - 1)) != 0 { return nil, errors.New("Shard count must be a power of 2") } - if c.MruSize <= 0 { + if c.MRUSize <= 0 { return nil, errors.New("MRU size must be > 0") } @@ -119,15 +121,15 @@ func New(c *Config) (*Bicache, error) { shards := make([]*Shard, c.ShardCount) // Get cache sizes for each shard. - mfuSize := int(math.Ceil(float64(c.MfuSize) / float64(c.ShardCount))) - mruSize := int(math.Ceil(float64(c.MruSize) / float64(c.ShardCount))) + mfuSize := int(math.Ceil(float64(c.MFUSize) / float64(c.ShardCount))) + mruSize := int(math.Ceil(float64(c.MRUSize) / float64(c.ShardCount))) // Init shards.
for i := 0; i < c.ShardCount; i++ { shards[i] = &Shard{ cacheMap: make(map[string]*entry, mfuSize+mruSize), - mfuCache: sll.New(mfuSize), - mruCache: sll.New(mruSize), + mfuCache: sll.New(), + mruCache: sll.New(), mfuCap: uint(mfuSize), mruCap: uint(mruSize), ttlMap: make(map[string]time.Time), @@ -137,10 +139,13 @@ func New(c *Config) (*Bicache, error) { } } + ctx, cf := context.WithCancel(context.Background()) + cache := &Bicache{ shards: shards, ShardCount: uint32(c.ShardCount), Size: (mfuSize + mruSize) * c.ShardCount, + done: cf, } // Initialize a background goroutine @@ -149,15 +154,24 @@ func New(c *Config) (*Bicache, error) { if c.AutoEvict > 0 { cache.autoEvict = true iter := time.Duration(c.AutoEvict) - go bgAutoEvict(cache, iter, c) + go bgAutoEvict(ctx, cache, iter, c) } return cache, nil } +// Close stops background tasks and +// releases any resources. This should be +// called before removing a reference to +// a *Bicache if it's desired to be garbage +// collected cleanly. +func (b *Bicache) Close() { + b.done() +} + // bgAutoEvict calls evictTTL and promoteEvict for all shards // sequentially on the configured iter time interval. -func bgAutoEvict(b *Bicache, iter time.Duration, c *Config) { +func bgAutoEvict(ctx context.Context, b *Bicache, iter time.Duration, c *Config) { ttlTachy := tachymeter.New(&tachymeter.Config{Size: c.ShardCount}) promoTachy := tachymeter.New(&tachymeter.Config{Size: c.ShardCount}) interval := time.NewTicker(time.Millisecond * iter) @@ -168,66 +182,71 @@ func bgAutoEvict(b *Bicache, iter time.Duration, c *Config) { var ttlStats, promoStats *tachymeter.Metrics - // On the auto eviction interval, - // we loop through each shard - // and trigger a TTL and promotion/eviction. - for _ = range interval.C { - // Skip this interval if - // evictions are paused. - if atomic.LoadUint32(&b.paused) == 1 { - if c.EvictLog { - log.Printf("[Bicache] Evictions Paused") - } - continue - } - - for _, s := range b.shards { - // Run ttl evictions. - start = time.Now() - evicted = 0 - - // At the very first check, nearestExpire - // was set to the Bicache initialization time. - // This is certain to run at least once. - // The first and real nearest expire will be set - // in any SetTTL call that's made. - if s.nearestExpire.Before(start.Add(iter)) { - evicted = s.evictTTL() + for { + select { + case <-ctx.Done(): + return + case <-interval.C: + // Skip this interval if + // evictions are paused. + if atomic.LoadUint32(&b.paused) == 1 { + if c.EvictLog { + log.Printf("[Bicache] Evictions Paused") + } + continue } - if c.EvictLog && evicted > 0 { - ttlTachy.AddTime(time.Since(start)) + // On the auto eviction interval, + // we loop through each shard + // and trigger a TTL and promotion/eviction. + for _, s := range b.shards { + // Run ttl evictions. + start = time.Now() + evicted = 0 + + // At the very first check, nearestExpire + // was set to the Bicache initialization time. + // This is certain to run at least once. + // The first and real nearest expire will be set + // in any SetTTL call that's made. + if s.nearestExpire.Before(start.Add(iter)) { + evicted = s.evictTTL() + } + + if c.EvictLog && evicted > 0 { + ttlTachy.AddTime(time.Since(start)) + } + + // Run promotions/overflow evictions. + start = time.Now() + s.promoteEvict() + + if c.EvictLog { + promoTachy.AddTime(time.Since(start)) + } } - // Run promotions/overflow evictions. - start = time.Now() - s.promoteEvict() + // Calc eviction/promo stats. 
+ ttlStats = ttlTachy.Calc() + promoStats = promoTachy.Calc() if c.EvictLog { - promoTachy.AddTime(time.Since(start)) - } - } - - // Calc eviction/promo stats. - ttlStats = ttlTachy.Calc() - promoStats = promoTachy.Calc() - - if c.EvictLog { - // Log TTL stats if a - // TTL eviction was triggered. - if ttlStats.Count > 0 { - log.Printf("[Bicache EvictTTL] cumulative: %s | min: %s | max: %s\n", - ttlStats.Time.Cumulative, ttlStats.Time.Min, ttlStats.Time.Max) + // Log TTL stats if a + // TTL eviction was triggered. + if ttlStats.Count > 0 { + log.Printf("[Bicache EvictTTL] cumulative: %s | min: %s | max: %s\n", + ttlStats.Time.Cumulative, ttlStats.Time.Min, ttlStats.Time.Max) + } + + // Log PromoteEvict stats. + log.Printf("[Bicache PromoteEvict] cumulative: %s | min: %s | max: %s\n", + promoStats.Time.Cumulative, promoStats.Time.Min, promoStats.Time.Max) } - // Log PromoteEvict stats. - log.Printf("[Bicache PromoteEvict] cumulative: %s | min: %s | max: %s\n", - promoStats.Time.Cumulative, promoStats.Time.Min, promoStats.Time.Max) + // Reset tachymeter. + ttlTachy.Reset() + promoTachy.Reset() } - - // Reset tachymeter. - ttlTachy.Reset() - promoTachy.Reset() } } @@ -239,8 +258,8 @@ func (b *Bicache) Stats() *Stats { for _, s := range b.shards { s.RLock() - stats.MfuSize += s.mfuCache.Len() - stats.MruSize += s.mruCache.Len() + stats.MFUSize += s.mfuCache.Len() + stats.MRUSize += s.mruCache.Len() s.RUnlock() mfuCap += float64(s.mfuCap) @@ -252,12 +271,12 @@ func (b *Bicache) Stats() *Stats { stats.Overflows += atomic.LoadUint64(&s.counters.overflows) } - stats.MruUsedP = uint(float64(stats.MruSize) / mruCap * 100) + stats.MRUUsedP = uint(float64(stats.MRUSize) / mruCap * 100) // Prevent incorrect stats in MRU-only mode. if mfuCap > 0 { - stats.MfuUsedP = uint(float64(stats.MfuSize) / mfuCap * 100) + stats.MFUUsedP = uint(float64(stats.MFUSize) / mfuCap * 100) } else { - stats.MfuUsedP = 0 + stats.MFUUsedP = 0 } return stats @@ -307,22 +326,14 @@ func (s *Shard) evictTTL() int { delete(s.ttlMap, k.Value.(string)) switch n.state { case 0: - s.mruCache.RemoveAsync(n.node) + s.mruCache.Remove(n.node) case 1: - s.mfuCache.RemoveAsync(n.node) + s.mfuCache.Remove(n.node) } evicted++ } } - // Sync the MRU and MFU - // in parallel. - var wg sync.WaitGroup - wg.Add(2) - go bgSync(&wg, s.mruCache) - go bgSync(&wg, s.mfuCache) - wg.Wait() - // Update the nearest expire. // If the last TTL'd key was just expired, // this will be left at the initially set value @@ -343,7 +354,7 @@ func (s *Shard) evictTTL() int { } // promoteEvict checks if the MRU exceeds the -// Config.MruSize (overflow count) If so, the top +// Config.MRUSize (overflow count) If so, the top // MRU scores are checked against the MFU. If any of the top MRU scores // are greater than the lowest MFU scores, they are promoted // to the MFU (if possible). Any remaining overflow count @@ -359,12 +370,9 @@ func (s *Shard) promoteEvict() { // LRU-only behavior. if s.mfuCap == 0 { s.Lock() - s.evictFromMruTail(mruOverflow) + s.evictFromMRUTail(mruOverflow) s.Unlock() - s.RLock() - s.mruCache.Sync() - s.RUnlock() return } @@ -419,7 +427,7 @@ func (s *Shard) promoteEvict() { // Remove from the MRU and // push to the MFU tail. // Update cache state. - s.mruCache.RemoveAsync(node) + s.mruCache.Remove(node) s.mfuCache.PushTailNode(node) s.cacheMap[node.Value.(*cacheData).k].state = 1 @@ -430,10 +438,6 @@ func (s *Shard) promoteEvict() { // all the overflow, return. if promoted == mruOverflow { s.Unlock() - // Synchronize the MRU cache. 
- s.RLock() - s.mruCache.Sync() - s.RUnlock() return } } @@ -453,48 +457,45 @@ promoteByScore: // were promoted by score. var promotedByScore int - // WaitGroup for cache bgSync(). - var wg sync.WaitGroup - // We're here on two conditions: // 1) The MFU was full. We need to handle all mruToPromoteEvict (canPromote == 0). // 2) We promoted some mruToPromoteEvict and have leftovers (canPromote > 0). // Get top MRU scores and bottom MFU scores to compare. - bottomMfu := s.mfuCache.LowScores(mruOverflow) + bottomMFU := s.mfuCache.LowScores(mruOverflow) // If the lowest MFU score is higher than the lowest // score to promote, none of these are eligible. - if len(bottomMfu) == 0 || bottomMfu[0].Score >= mruToPromoteEvict[remainderPosition].Score { - goto evictFromMruTail + if len(bottomMFU) == 0 || bottomMFU[0].Score >= mruToPromoteEvict[remainderPosition].Score { + goto evictFromMRUTail } // Otherwise, scan for a replacement. s.Lock() scorePromote: for _, mruNode := range mruToPromoteEvict[remainderPosition:] { - for i, mfuNode := range bottomMfu { + for i, mfuNode := range bottomMFU { if mruNode.Score > mfuNode.Score { // Push the evicted MFU node to the head // of the MRU and update state. - s.mfuCache.RemoveAsync(mfuNode) + s.mfuCache.Remove(mfuNode) s.mruCache.PushHeadNode(mfuNode) s.cacheMap[mfuNode.Value.(*cacheData).k].state = 0 // Promote the MRU node to the MFU and // update state. - s.mruCache.RemoveAsync(mruNode) + s.mruCache.Remove(mruNode) s.mfuCache.PushTailNode(mruNode) s.cacheMap[mruNode.Value.(*cacheData).k].state = 1 promotedByScore++ // Remove the replaced MFU node from the - // bottomMfu list so it's not attempted twice. - bottomMfu = append(bottomMfu[:i], bottomMfu[i+1:]...) + // bottomMFU list so it's not attempted twice. + bottomMFU = append(bottomMFU[:i], bottomMFU[i+1:]...) break } - if i == len(bottomMfu)-1 { + if i == len(bottomMFU)-1 { break scorePromote } } @@ -503,7 +504,7 @@ scorePromote: s.Unlock() -evictFromMruTail: +evictFromMRUTail: s.Lock() @@ -511,31 +512,22 @@ evictFromMruTail: toEvict := mruOverflow - promotedByScore // Evict this many from the MRU tail. if toEvict > 0 { - s.evictFromMruTail(toEvict) + s.evictFromMRUTail(toEvict) } s.Unlock() - - // Sync the MRU and MFU - // in parallel. - s.RLock() - wg.Add(2) - go bgSync(&wg, s.mruCache) - go bgSync(&wg, s.mfuCache) - wg.Wait() - s.RUnlock() } -// evictFromMruTail evicts n keys from the tail +// evictFromMRUTail evicts n keys from the tail // of the MRU cache. -func (s *Shard) evictFromMruTail(n int) { +func (s *Shard) evictFromMRUTail(n int) { ttlStart := len(s.ttlMap) for i := 0; i < n; i++ { node := s.mruCache.Tail() delete(s.cacheMap, node.Value.(*cacheData).k) delete(s.ttlMap, node.Value.(*cacheData).k) - s.mruCache.RemoveTailAsync() + s.mruCache.RemoveTail() } // Update the ttlCount. @@ -559,17 +551,10 @@ func (s *Shard) decrementTTLCount(n uint64) { if s.ttlCount-n > s.ttlCount { atomic.StoreUint64(&s.ttlCount, 0) } else { - atomic.StoreUint64(&s.ttlCount, s.ttlCount-n) + atomic.AddUint64(&s.ttlCount, ^uint64(n-1)) } // Increment the evictions count // by n, regardless. atomic.AddUint64(&s.counters.evictions, n) } - -// Sll Sync handler with a WaitGroup -// for background parallelization. -func bgSync(wg *sync.WaitGroup, s *sll.Sll) { - s.Sync() - wg.Done() -} diff --git a/bicache_test.go b/bicache_test.go index 77b70f3..c58b851 100644 --- a/bicache_test.go +++ b/bicache_test.go @@ -21,8 +21,8 @@ func TestNew(t *testing.T) { // minimum for the number of shards specified. 
for _, n := range configExpected { c, _ := bicache.New(&bicache.Config{ - MfuSize: uint(n[0]), - MruSize: uint(n[1]), + MFUSize: uint(n[0]), + MRUSize: uint(n[1]), ShardCount: n[2], }) @@ -34,8 +34,8 @@ func TestNew(t *testing.T) { func TestStats(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 3000, }) @@ -53,20 +53,20 @@ func TestStats(t *testing.T) { stats := c.Stats() - if stats.MfuSize != 10 { - t.Errorf("Expected MFU size 10, got %d", stats.MfuSize) + if stats.MFUSize != 10 { + t.Errorf("Expected MFU size 10, got %d", stats.MFUSize) } - if stats.MruSize != 30 { - t.Errorf("Expected MRU size 30, got %d", stats.MruSize) + if stats.MRUSize != 30 { + t.Errorf("Expected MRU size 30, got %d", stats.MRUSize) } - if stats.MfuUsedP != 100 { - t.Errorf("Expected MFU usedp 100, got %d", stats.MfuUsedP) + if stats.MFUUsedP != 100 { + t.Errorf("Expected MFU usedp 100, got %d", stats.MFUUsedP) } - if stats.MruUsedP != 100 { - t.Errorf("Expected MRU usedp 100, got %d", stats.MruUsedP) + if stats.MRUUsedP != 100 { + t.Errorf("Expected MRU usedp 100, got %d", stats.MRUUsedP) } if stats.Hits != 100 { @@ -88,8 +88,8 @@ func TestStats(t *testing.T) { func TestEvictTtl(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 1000, }) @@ -111,7 +111,7 @@ func TestEvictTtl(t *testing.T) { stats := c.Stats() - if stats.MruSize != 1 || stats.Evictions != 1 { + if stats.MRUSize != 1 || stats.Evictions != 1 { t.Error("Unexpected stats") } } @@ -119,8 +119,8 @@ func TestEvictTtl(t *testing.T) { func TestPromoteEvict(t *testing.T) { // Also covers MRU tail eviction. c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 5000, }) @@ -150,11 +150,11 @@ func TestPromoteEvict(t *testing.T) { stats := c.Stats() - if stats.MfuSize != 3 { + if stats.MFUSize != 3 { t.Error("Unexpected MFU count") } - if stats.MruSize != 30 { + if stats.MRUSize != 30 { t.Error("Unexpected MRU count") } diff --git a/examples/bicache-example/README.md b/examples/bicache-example/README.md index a2be9dc..b28d026 100644 --- a/examples/bicache-example/README.md +++ b/examples/bicache-example/README.md @@ -1,8 +1,8 @@ Excerpts: ```go c, _ := bicache.New(&bicache.Config{ - MfuSize: 50000, - MruSize: 25000, + MFUSize: 50000, + MRUSize: 25000, AutoEvict: 1000, }) @@ -50,7 +50,7 @@ Max: 281.841µs Min: 82ns Rate/sec.: 5196912.04 -{"MfuSize":0,"MruSize":100000,"MfuUsedP":0,"MruUsedP":200,"Hits":100010,"Misses":0,"Evictions":0} +{"MFUSize":0,"MRUSize":100000,"MFUUsedP":0,"MRUUsedP":200,"Hits":100010,"Misses":0,"Evictions":0} ``` -Note: timing data via [tachymeter](https://github.com/jamiealquiza/tachymeter) \ No newline at end of file +Note: timing data via [tachymeter](https://github.com/jamiealquiza/tachymeter) diff --git a/examples/bicache-example/main.go b/examples/bicache-example/main.go index 398a8f2..9d9b4df 100644 --- a/examples/bicache-example/main.go +++ b/examples/bicache-example/main.go @@ -12,8 +12,8 @@ import ( func main() { c, _ := bicache.New(&bicache.Config{ - MfuSize: 50000, - MruSize: 50000, + MFUSize: 50000, + MRUSize: 50000, AutoEvict: 1000, ShardCount: 512, }) diff --git a/examples/bicache-loadtest/main.go b/examples/bicache-loadtest/main.go index 71c39e1..4cc563a 100644 --- a/examples/bicache-loadtest/main.go +++ b/examples/bicache-loadtest/main.go @@ -24,8 +24,8 @@ func main() { flag.Parse() 
c, _ := bicache.New(&bicache.Config{ - MfuSize: uint(*mfu), - MruSize: uint(*mru), + MFUSize: uint(*mfu), + MRUSize: uint(*mru), AutoEvict: uint(*evict * 1000), EvictLog: true, ShardCount: 1024, @@ -42,7 +42,7 @@ func main() { ticker := time.NewTicker(10 * time.Second) - for _ = range ticker.C { + for range ticker.C { fmt.Printf("\n> Writes:\n") writeT.Calc().Dump() diff --git a/methods.go b/methods.go index 312b6c1..3d9a4f8 100644 --- a/methods.go +++ b/methods.go @@ -214,8 +214,8 @@ func (b *Bicache) List(n int) ListResults { return lr } -// FlushMru flushes all MRU entries. -func (b *Bicache) FlushMru() error { +// FlushMRU flushes all MRU entries. +func (b *Bicache) FlushMRU() error { // Traverse shards. for _, s := range b.shards { s.Lock() @@ -228,7 +228,7 @@ func (b *Bicache) FlushMru() error { } } - s.mruCache = sll.New(int(s.mruCap)) + s.mruCache = sll.New() s.Unlock() } @@ -236,8 +236,8 @@ func (b *Bicache) FlushMru() error { return nil } -// FlushMfu flushes all MFU entries. -func (b *Bicache) FlushMfu() error { +// FlushMFU flushes all MFU entries. +func (b *Bicache) FlushMFU() error { // Traverse shards. for _, s := range b.shards { s.Lock() @@ -250,7 +250,7 @@ func (b *Bicache) FlushMfu() error { } } - s.mfuCache = sll.New(int(s.mfuCap)) + s.mfuCache = sll.New() s.Unlock() } @@ -260,7 +260,7 @@ func (b *Bicache) FlushMfu() error { // FlushAll flushes all cache entries. // Flush all is much faster than combining both a -// FlushMru and FlushMfu call. +// FlushMRU and FlushMFU call. func (b *Bicache) FlushAll() error { // Traverse and reset shard caches. for _, s := range b.shards { @@ -272,8 +272,8 @@ func (b *Bicache) FlushAll() error { s.nearestExpire = time.Now().Add(time.Second * 2147483647) // Create new caches. - s.mfuCache = sll.New(int(s.mfuCap)) - s.mruCache = sll.New(int(s.mruCap)) + s.mfuCache = sll.New() + s.mruCache = sll.New() s.Unlock() } diff --git a/methods_test.go b/methods_test.go index 2437015..d57862c 100644 --- a/methods_test.go +++ b/methods_test.go @@ -15,8 +15,8 @@ func BenchmarkGet(b *testing.B) { b.StopTimer() c, _ := bicache.New(&bicache.Config{ - MfuSize: 10000, - MruSize: 600000, + MFUSize: 10000, + MRUSize: 600000, ShardCount: 1024, AutoEvict: 30000, }) @@ -38,8 +38,8 @@ func BenchmarkSet(b *testing.B) { b.StopTimer() c, _ := bicache.New(&bicache.Config{ - MfuSize: 10000, - MruSize: 600000, + MFUSize: 10000, + MRUSize: 600000, ShardCount: 1024, AutoEvict: 30000, }) @@ -60,8 +60,8 @@ func BenchmarkSetTTL(b *testing.B) { b.StopTimer() c, _ := bicache.New(&bicache.Config{ - MfuSize: 10000, - MruSize: 600000, + MFUSize: 10000, + MRUSize: 600000, ShardCount: 1024, AutoEvict: 30000, }) @@ -82,8 +82,8 @@ func BenchmarkDel(b *testing.B) { b.StopTimer() c, _ := bicache.New(&bicache.Config{ - MfuSize: 10000, - MruSize: 600000, + MFUSize: 10000, + MRUSize: 600000, ShardCount: 1024, AutoEvict: 30000, }) @@ -108,8 +108,8 @@ func BenchmarkList(b *testing.B) { b.StopTimer() c, _ := bicache.New(&bicache.Config{ - MfuSize: 10000, - MruSize: 600000, + MFUSize: 10000, + MRUSize: 600000, ShardCount: 1024, AutoEvict: 30000, }) @@ -134,8 +134,8 @@ func BenchmarkList(b *testing.B) { func TestSetGet(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 10000, }) @@ -162,8 +162,8 @@ func TestSetGet(t *testing.T) { func TestSetTTL(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 1000, }) @@ -187,8 
+187,8 @@ func TestSetTTL(t *testing.T) { func TestDel(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 10000, }) @@ -202,15 +202,15 @@ func TestDel(t *testing.T) { stats := c.Stats() - if stats.MruSize != 0 { - t.Errorf("Expected MRU size 0, got %d", stats.MruSize) + if stats.MRUSize != 0 { + t.Errorf("Expected MRU size 0, got %d", stats.MRUSize) } } func TestList(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 2, AutoEvict: 1000, }) @@ -256,10 +256,10 @@ func TestList(t *testing.T) { } } -func TestFlushMru(t *testing.T) { +func TestFlushMRU(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 1, AutoEvict: 1000, }) @@ -270,23 +270,23 @@ func TestFlushMru(t *testing.T) { // Check before. stats := c.Stats() - if stats.MruSize != 30 { - t.Errorf("Expected MFU size of 30, got %d", stats.MfuSize) + if stats.MRUSize != 30 { + t.Errorf("Expected MFU size of 30, got %d", stats.MFUSize) } - c.FlushMru() + c.FlushMRU() // Check after. stats = c.Stats() - if stats.MruSize != 0 { - t.Errorf("Expected MRU size of 0, got %d", stats.MruSize) + if stats.MRUSize != 0 { + t.Errorf("Expected MRU size of 0, got %d", stats.MRUSize) } } -func TestFlushMfu(t *testing.T) { +func TestFlushMFU(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 1, AutoEvict: 1000, }) @@ -321,23 +321,23 @@ func TestFlushMfu(t *testing.T) { // Check before. stats := c.Stats() - if stats.MfuSize != 3 { - t.Errorf("Expected MFU size of 3, got %d", stats.MfuSize) + if stats.MFUSize != 3 { + t.Errorf("Expected MFU size of 3, got %d", stats.MFUSize) } - c.FlushMfu() + c.FlushMFU() // Check after. stats = c.Stats() - if stats.MfuSize != 0 { - t.Errorf("Expected MFU size of 0, got %d", stats.MfuSize) + if stats.MFUSize != 0 { + t.Errorf("Expected MFU size of 0, got %d", stats.MFUSize) } } func TestFlushAll(t *testing.T) { c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 1, AutoEvict: 1000, }) @@ -368,12 +368,12 @@ func TestFlushAll(t *testing.T) { // Check before. stats := c.Stats() - if stats.MfuSize != 3 { - t.Errorf("Expected MFU size of 3, got %d", stats.MfuSize) + if stats.MFUSize != 3 { + t.Errorf("Expected MFU size of 3, got %d", stats.MFUSize) } - if stats.MruSize != 30 { - t.Errorf("Expected MFU size of 30, got %d", stats.MfuSize) + if stats.MRUSize != 30 { + t.Errorf("Expected MFU size of 30, got %d", stats.MFUSize) } c.FlushAll() @@ -381,12 +381,12 @@ func TestFlushAll(t *testing.T) { // Check after. 
stats = c.Stats() - if stats.MfuSize != 0 { - t.Errorf("Expected MFU size of 0, got %d", stats.MfuSize) + if stats.MFUSize != 0 { + t.Errorf("Expected MFU size of 0, got %d", stats.MFUSize) } - if stats.MruSize != 0 { - t.Errorf("Expected MRU size of 0, got %d", stats.MruSize) + if stats.MRUSize != 0 { + t.Errorf("Expected MRU size of 0, got %d", stats.MRUSize) } } @@ -394,8 +394,8 @@ func TestIntegrity(t *testing.T) { words := []string{"&c", "'d", "'em", "'ll", "'m", "'mid", "'midst", "'mongst", "'prentice", "'re", "'s", "'sblood", "'sbodikins", "'sdeath", "'sfoot", "'sheart", "'shun", "'slid", "'slife", "'slight", "'snails", "'strewth", "'t", "'til", "'tis", "'twas", "'tween", "'twere", "'twill", "'twixt", "'twould", "'un", "'ve", "1080", "10th", "1st", "2", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "a", "a'", "a's", "a/c", "a1", "aa", "aaa", "aah", "aahed", "aahing", "aahs", "aal", "aalii", "aaliis", "aals", "aam", "aardvark", "aardvarks", "aardwolf", "aardwolves", "aargh", "aaron", "aaronic", "aarrgh", "aarrghh", "aas", "aasvogel", "aasvogels", "ab", "aba", "abac", "abaca", "abacas", "abacate", "abacaxi", "abacay", "abaci", "abacinate", "abacination", "abacisci", "abaciscus", "abacist", "aback", "abacli", "abacot", "abacterial", "abactinal", "abactinally", "abaction", "abactor", "abaculi", "abaculus", "abacus", "abacuses", "abada", "abaddon", "abadejo", "abadengo", "abadia", "abaff", "abaft", "abaisance", "abaised", "abaiser", "abaisse", "abaissed", "abaka", "abakas", "abalation", "abalienate", "abalienated", "abalienating", "abalienation", "abalone", "abalones", "abamp", "abampere", "abamperes", "abamps", "aband", "abandon", "abandonable", "abandoned", "abandonedly", "abandonee", "abandoner", "abandoners", "abandoning", "abandonment", "abandonments", "abandons", "abandum", "abanet", "abanga", "abannition", "abapical", "abaptiston", "abaptistum", "abarthrosis", "abarticular", "abarticulation", "abas", "abase", "abased", "abasedly", "abasedness", "abasement", "abasements", "abaser", "abasers", "abases", "abash", "abashed", "abashedly", "abashedness", "abashes", "abashing", "abashless", "abashlessly", "abashment", "abashments", "abasia", "abasias", "abasic", "abasing", "abasio", "abask", "abassi", "abastard", "abastardize", "abastral", "abatable", "abatage", "abate", "abated", "abatement", "abatements", "abater", "abaters", "abates", "abatic", "abating", "abatis", "abatised", "abatises", "abatjour", "abatjours", "abaton", "abator", "abators", "abattage", "abattis", "abattised", "abattises", "abattoir", "abattoirs", "abattu", "abattue", "abature", "abaue", "abave", "abaxial", "abaxile", "abay", "abayah", "abaze", "abb", "abba", "abbacies", "abbacomes", "abbacy", "abbandono", "abbas", "abbasi", "abbasid", "abbassi", "abbate", "abbatial", "abbatical", "abbatie", "abbaye", "abbe", "abbes", "abbess", "abbesses", "abbest", "abbevillian", "abbey", "abbey's", "abbeys", "abbeystead", "abbeystede", "abboccato", "abbogada", "abbot", "abbot's", "abbotcies", "abbotcy", "abbotnullius", "abbotric", "abbots", "abbotship", "abbotships", "abbott", "abbozzo", "abbr", "abbrev", "abbreviatable", "abbreviate", "abbreviated", "abbreviately", "abbreviates", "abbreviating", "abbreviation", "abbreviations", "abbreviator", "abbreviators", "abbreviatory", "abbreviature", "abbroachment", "abby", "abc", "abcess", "abcissa", "abcoulomb", "abd", "abdal", "abdali", "abdaria", "abdat", "abdest", "abdicable", "abdicant", "abdicate", "abdicated", "abdicates", "abdicating", "abdication", "abdications", 
"abdicative", "abdicator", "abditive", "abditory", "abdom", "abdomen", "abdomen's", "abdomens", "abdomina", "abdominal", "abdominales", "abdominalia", "abdominalian", "abdominally", "abdominals", "abdominoanterior", "abdominocardiac", "abdominocentesis", "abdominocystic", "abdominogenital", "abdominohysterectomy", "abdominohysterotomy", "abdominoposterior", "abdominoscope", "abdominoscopy", "abdominothoracic", "abdominous", "abdominovaginal", "abdominovesical", "abduce", "abduced", "abducens", "abducent", "abducentes", "abduces", "abducing", "abduct", "abducted", "abducting", "abduction", "abduction's", "abductions", "abductor", "abductor's", "abductores", "abductors", "abducts", "abeam", "abear", "abearance", "abecedaire", "abecedaria", "abecedarian", "abecedarians", "abecedaries", "abecedarium", "abecedarius", "abecedary", "abed", "abede", "abedge", "abegge", "abeigh", "abel", "abele", "abeles", "abelian", "abelite", "abelmosk", "abelmosks", "abelmusk", "abeltree", "abend", "abends", "abenteric", "abepithymia", "aberdavine", "aberdeen", "aberdevine", "aberduvine", "abernethy", "aberr", "aberrance", "aberrancies", "aberrancy", "aberrant", "aberrantly", "aberrants", "aberrate", "aberrated", "aberrating", "aberration", "aberrational", "aberrations", "aberrative", "aberrator", "aberrometer", "aberroscope", "aberuncate", "aberuncator", "abesse", "abessive", "abet", "abetment", "abetments", "abets", "abettal", "abettals", "abetted", "abetter", "abetters", "abetting", "abettor", "abettors", "abevacuation", "abey", "abeyance", "abeyances", "abeyancies", "abeyancy", "abeyant", "abfarad", "abfarads", "abhenries", "abhenry", "abhenrys", "abhinaya", "abhiseka", "abhominable", "abhor", "abhorred", "abhorrence", "abhorrences", "abhorrency", "abhorrent", "abhorrently", "abhorrer", "abhorrers", "abhorrible", "abhorring", "abhors", "abib", "abichite", "abidal", "abidance", "abidances", "abidden", "abide", "abided", "abider", "abiders", "abides", "abidi", "abiding", "abidingly", "abidingness", "abiegh", "abience", "abient", "abietate", "abietene", "abietic", "abietin", "abietineous", "abietinic", "abietite", "abigail", "abigails", "abigailship", "abigeat", "abigei", "abigeus", "abilao", "abilene", "abiliment", "abilitable", "abilities", "ability", "ability's", "abilla", "abilo", "abime", "abintestate", "abiogeneses", "abiogenesis", "abiogenesist", "abiogenetic", "abiogenetical", "abiogenetically", "abiogenist", "abiogenous", "abiogeny", "abiological", "abiologically", "abiology", "abioses", "abiosis", "abiotic", "abiotical", "abiotically", "abiotrophic", "abiotrophy", "abir", "abirritant", "abirritate", "abirritated", "abirritating", "abirritation", "abirritative", "abiston", "abit", "abiuret", "abject", "abjectedness", "abjection", "abjections"} c, _ := bicache.New(&bicache.Config{ - MfuSize: 10, - MruSize: 30, + MFUSize: 10, + MRUSize: 30, ShardCount: 8, AutoEvict: 2000, }) @@ -454,8 +454,8 @@ func TestIntegrity(t *testing.T) { } c, _ = bicache.New(&bicache.Config{ - MfuSize: 1, - MruSize: 30, + MFUSize: 1, + MRUSize: 30, ShardCount: 1, AutoEvict: 2000, }) diff --git a/sll/README.md b/sll/README.md index e62c2be..174ea23 100644 --- a/sll/README.md +++ b/sll/README.md @@ -4,7 +4,5 @@ # Sll A scored linked list. Sll implements a pointer-based doubly linked list with the addition of methods to fetch nodes by score (high or low) and arbitrarily move nodes between lists. A node score is incremented with each `Read()` method called while retrieving the node's value. -Sll is somewhat specialized. 
The added overhead of scoring would possibly make Sll a poor choice in the case the functionality is not required; Sll simply bakes in some accounting overhead that would otherwise exist in external data structures. - - See [GoDoc](https://godoc.org/github.com/jamiealquiza/bicache/sll) for reference. - See [`sll-example`](./sll-example) for example usage. diff --git a/sll/examples/sll-example/README.md b/sll/examples/sll-example/README.md index 1c4b4cf..ccd01b6 100644 --- a/sll/examples/sll-example/README.md +++ b/sll/examples/sll-example/README.md @@ -41,4 +41,4 @@ eight -> six -> five -> four -> three -> two -> one -> [ read score list ] one two three four five six eight -``` \ No newline at end of file +``` diff --git a/sll/examples/sll-example/main.go b/sll/examples/sll-example/main.go index e5ea335..b34dc95 100644 --- a/sll/examples/sll-example/main.go +++ b/sll/examples/sll-example/main.go @@ -28,7 +28,7 @@ import ( ) func main() { - s := sll.New(10) + s := sll.New() objects := []string{ "one", diff --git a/sll/heap.go b/sll/heap.go new file mode 100644 index 0000000..1062c54 --- /dev/null +++ b/sll/heap.go @@ -0,0 +1,69 @@ +package sll + +// MinHeap implements a min-heap heap.Interface. +type MinHeap []*Node + +func (mh MinHeap) Len() int { return len(mh) } + +func (mh MinHeap) Less(i, j int) bool { + return mh[i].Score < mh[j].Score +} + +func (mh MinHeap) Swap(i, j int) { + mh[i], mh[j] = mh[j], mh[i] +} + +// Push adds an item to the heap. +func (mh *MinHeap) Push(x interface{}) { + item := x.(*Node) + *mh = append(*mh, item) +} + +// Pop removes and returns the root node from the heap. +func (mh *MinHeap) Pop() interface{} { + old := *mh + n := len(old) + item := old[n-1] + *mh = old[0 : n-1] + return item +} + +// Peek returns the root node from the heap. +func (mh *MinHeap) Peek() interface{} { + s := *mh + return s[0] +} + +// MaxHeap implements a max-heap heap.Interface. +type MaxHeap []*Node + +func (mh MaxHeap) Len() int { return len(mh) } + +func (mh MaxHeap) Less(i, j int) bool { + return mh[i].Score > mh[j].Score +} + +func (mh MaxHeap) Swap(i, j int) { + mh[i], mh[j] = mh[j], mh[i] +} + +// Push adds an item to the heap. +func (mh *MaxHeap) Push(x interface{}) { + item := x.(*Node) + *mh = append(*mh, item) +} + +// Pop removes and returns the root node from the heap. +func (mh *MaxHeap) Pop() interface{} { + old := *mh + n := len(old) + item := old[n-1] + *mh = old[0 : n-1] + return item +} + +// Peek returns the root node from the heap. +func (mh *MaxHeap) Peek() interface{} { + s := *mh + return s[0] +} diff --git a/sll/sll.go b/sll/sll.go index 5cf8570..c6ca3b5 100644 --- a/sll/sll.go +++ b/sll/sll.go @@ -1,14 +1,15 @@ package sll import ( + "container/heap" "sort" "sync/atomic" ) // Sll is a scored linked list. type Sll struct { - root *Node - scores nodeScoreList + root *Node + len uint64 } // Node is a scored linked list node. @@ -20,6 +21,7 @@ type Node struct { Value interface{} } +// Next returns the next node in the *Sll. func (n *Node) Next() *Node { if n.next != n.list.root { return n.next @@ -28,6 +30,7 @@ func (n *Node) Next() *Node { return nil } +// Prev returns the previous node in the *Sll. func (n *Node) Prev() *Node { if n.prev != n.list.root { return n.prev @@ -36,14 +39,18 @@ func (n *Node) Prev() *Node { return nil } -// New creates a new *Sll. New takes an -// integer length to pre-allocate a nodeScoreList -// of capacity l. This reduces append latencies if -// many elements are inserted into a new list. 
-func New(l int) *Sll { +// Copy returns a copy of a *Node. +func (n *Node) Copy() *Node { + return &Node{ + Score: n.Score, + Value: n.Value, + } +} + +// New creates a new *Sll. +func New() *Sll { ll := &Sll{ - root: &Node{}, - scores: make(nodeScoreList, 0, l), + root: &Node{}, } ll.root.next, ll.root.prev = ll.root, ll.root @@ -51,9 +58,9 @@ func New(l int) *Sll { return ll } -// nodeScoreList holds a slice of *Node -// for sorting by score. -type nodeScoreList []*Node +// NodeScoreList is a slice of *Node +// sorted by ascending scores. +type NodeScoreList []*Node // Read returns a *Node Value and increments the score. func (n *Node) Read() interface{} { @@ -61,23 +68,23 @@ func (n *Node) Read() interface{} { return n.Value } -// nodeScoreList methods to satisfy the sort interface. +// NodeScoreList methods to satisfy the sort interface. -func (nsl nodeScoreList) Len() int { +func (nsl NodeScoreList) Len() int { return len(nsl) } -func (nsl nodeScoreList) Less(i, j int) bool { +func (nsl NodeScoreList) Less(i, j int) bool { return atomic.LoadUint64(&nsl[i].Score) < atomic.LoadUint64(&nsl[j].Score) } -func (nsl nodeScoreList) Swap(i, j int) { +func (nsl NodeScoreList) Swap(i, j int) { nsl[i], nsl[j] = nsl[j], nsl[i] } // Len returns the count of nodes in the *Sll. func (ll *Sll) Len() uint { - return uint(len(ll.scores)) + return uint(ll.len) } // Head returns the head *Node. @@ -90,25 +97,57 @@ func (ll *Sll) Tail() *Node { return ll.root.next } +// Copy returns a copy of a *Sll. +func (ll *Sll) Copy() *Sll { + newll := New() + + for node := ll.Head(); node != nil; node = node.Prev() { + c := node.Copy() + newll.PushTailNode(c) + } + + return newll +} + // HighScores takes an integer and returns the // respective number of *Nodes with the higest scores // sorted in ascending order. -func (ll *Sll) HighScores(r int) nodeScoreList { - sort.Sort(ll.scores) - // Return what's available - // if more is being requested - // than exists. - if r > len(ll.scores) { - scores := make(nodeScoreList, len(ll.scores)) - copy(scores, ll.scores) - return scores +func (ll *Sll) HighScores(k int) NodeScoreList { + h := &MinHeap{} + + if ll.Len() == 0 { + return NodeScoreList(*h) } - // We return a copy because the - // underlying array order will - // possibly change. - scores := make(nodeScoreList, r) - copy(scores, ll.scores[len(ll.scores)-r:]) + heap.Init(h) + + // Add the first k nodes + // to the heap. In a high scores selection, + // we traverse from the head toward the + // tail with the assumption that head nodes + // are more probable to have higher + // scores than tail nodes. + node := ll.Head() + for i := 0; i < k && node != nil; i++ { + heap.Push(h, node) + node = node.Prev() + } + + var min = h.Peek().(*Node).Score + + // Iterate the rest of the list + // while maintaining the current + // heap len. + for ; node != nil; node = node.Prev() { + if node.Score > min { + heap.Push(h, node) + heap.Pop(h) + min = h.Peek().(*Node).Score + } + } + + scores := NodeScoreList(*h) + sort.Sort(scores) return scores } @@ -116,22 +155,39 @@ func (ll *Sll) HighScores(r int) nodeScoreList { // LowScores takes an integer and returns the // respective number of *Nodes with the lowest scores // sorted in ascending order. -func (ll *Sll) LowScores(r int) nodeScoreList { - sort.Sort(ll.scores) - // Return what's available - // if more is being requested - // than exists. 
- if r > len(ll.scores) { - scores := make(nodeScoreList, len(ll.scores)) - copy(scores, ll.scores) - return scores +func (ll *Sll) LowScores(k int) NodeScoreList { + h := &MaxHeap{} + + if ll.Len() == 0 { + return NodeScoreList(*h) + } + + // In a low scores selection, + // we traverse from the tail toward the + // head with the assumption that tail nodes + // are more probable to have lower + // scores than head nodes. + node := ll.Tail() + for i := 0; i < k && node != nil; i++ { + heap.Push(h, node) + node = node.Next() } - // We return a copy because the - // underlying array order will - // possibly change. - scores := make(nodeScoreList, r) - copy(scores, ll.scores[:r]) + var max = h.Peek().(*Node).Score + + // Iterate the rest of the list + // while maintaining the current + // heap len. + for ; node != nil; node = node.Next() { + if node.Score < max { + heap.Push(h, node) + heap.Pop(h) + max = h.Peek().(*Node).Score + } + } + + scores := NodeScoreList(*h) + sort.Sort(scores) return scores } @@ -148,7 +204,7 @@ func insertAt(n, at *Node) { // pull removes a *Node from // its position in the *Sll, but // doesn't remove the node from -// the nodeScoreList. This is used for +// the NodeScoreList. This is used for // repositioning nodes. func pull(n *Node) { // Link next/prev nodes. @@ -194,8 +250,7 @@ func (ll *Sll) PushHead(v interface{}) *Node { list: ll, } - // Add to scores and insert. - ll.scores = append(ll.scores, n) + atomic.AddUint64(&ll.len, 1) insertAt(n, ll.root.prev) return n @@ -210,8 +265,7 @@ func (ll *Sll) PushTail(v interface{}) *Node { list: ll, } - // Add to scores and insert. - ll.scores = append(ll.scores, n) + atomic.AddUint64(&ll.len, 1) insertAt(n, ll.root) return n @@ -222,8 +276,7 @@ func (ll *Sll) PushTail(v interface{}) *Node { func (ll *Sll) PushHeadNode(n *Node) { n.list = ll - // Add to scores and insert. - ll.scores = append(ll.scores, n) + atomic.AddUint64(&ll.len, 1) insertAt(n, ll.root.prev) } @@ -232,8 +285,8 @@ func (ll *Sll) PushHeadNode(n *Node) { func (ll *Sll) PushTailNode(n *Node) { n.list = ll - // Add to scores and insert. - ll.scores = append(ll.scores, n) + // Increment len. + atomic.AddUint64(&ll.len, 1) insertAt(n, ll.root) } @@ -241,29 +294,12 @@ func (ll *Sll) PushTailNode(n *Node) { func (ll *Sll) Remove(n *Node) { // Link next/prev nodes. n.next.prev, n.prev.next = n.prev, n.next - // Remove references. - n.next, n.prev = nil, nil - //Update scores. - ll.removeFromScores(n) -} -// RemoveAsync removes a *Node from the *Sll -// and marks the node for removal. This is -// useful if a batch of many nodes are being -// removed, at the cost of the node score list -// being out of sync. -// The node score list must be updated -// with a subsequent call of the Sync() method -// once all desired nodes have been removed. -func (ll *Sll) RemoveAsync(n *Node) { - // Link next/prev nodes. - n.next.prev, n.prev.next = n.prev, n.next // Remove references. n.next, n.prev = nil, nil - // Unset the parent list. - // This is used as a removal marker - // in the Sync() function. - n.list = nil + + // Decrement len. + atomic.AddUint64(&ll.len, ^uint64(0)) } // RemoveHead removes the current *Sll.head. @@ -275,78 +311,3 @@ func (ll *Sll) RemoveHead() { func (ll *Sll) RemoveTail() { ll.Remove(ll.root.next) } - -// RemoveHeadAsync removes the current *Sll.head -// using the RemoveAsync method. -func (ll *Sll) RemoveHeadAsync() { - ll.RemoveAsync(ll.root.prev) -} - -// RemoveTailAsync removes the current *Sll.tail -// using the RemoveAsync method. 
-func (ll *Sll) RemoveTailAsync() { - ll.RemoveAsync(ll.root.next) -} - -// Sync traverses the node score list -// and removes any marked for removal. -// This is typically called subsequent to -// many AsyncRemove ops. -func (ll *Sll) Sync() { - // Prep an allocation-free filter slice. - newScoreList := ll.scores[:0] - - // Traverse and exclude nodes - // marked for removal. - for n := range ll.scores { - if ll.scores[n].list == ll { - newScoreList = append(newScoreList, ll.scores[n]) - } else { - // If a node is marked for removal, - // nil the entry to avoid leaks. - ll.scores[n] = nil - } - } - - // Update the ll.scores. - ll.scores = newScoreList -} - -// removeFromScores removes n from the nodeScoreList scores. -func (ll *Sll) removeFromScores(n *Node) { - // Unrolling with 5 elements - // has cut CPU-cached small element - // slice search times in half. Needs further testing. - // This will cause an out of bounds crash if the - // element we're searching for somehow doesn't exist - // (as a result of some other bug). - var i int - for p := 0; p < len(ll.scores); p += 5 { - if ll.scores[p] == n { - i = p - break - } - if ll.scores[p+1] == n { - i = p + 1 - break - } - if ll.scores[p+2] == n { - i = p + 2 - break - } - if ll.scores[p+3] == n { - i = p + 3 - break - } - if ll.scores[p+4] == n { - i = p + 4 - break - } - } - - // Set the item to nil - // to remove the reference in the - // underlying slice array. - ll.scores[i] = nil - ll.scores = append(ll.scores[:i], ll.scores[i+1:]...) -} diff --git a/sll/sll_test.go b/sll/sll_test.go index ed75de8..184be5b 100644 --- a/sll/sll_test.go +++ b/sll/sll_test.go @@ -1,13 +1,15 @@ package sll_test import ( + "math/rand" "testing" + // "fmt" "github.com/jamiealquiza/bicache/sll" ) func TestHead(t *testing.T) { - s := sll.New(5) + s := sll.New() node := s.PushHead("value") if s.Head() != node { @@ -16,7 +18,7 @@ func TestHead(t *testing.T) { } func TestTail(t *testing.T) { - s := sll.New(5) + s := sll.New() node := s.PushTail("value") if s.Tail() != node { @@ -25,7 +27,7 @@ func TestTail(t *testing.T) { } func TestRead(t *testing.T) { - s := sll.New(5) + s := sll.New() s.PushHead("value") if s.Head().Read() != "value" { @@ -34,7 +36,7 @@ func TestRead(t *testing.T) { } func TestPushHead(t *testing.T) { - s := sll.New(5) + s := sll.New() s.PushHead("value") if s.Head().Read() != "value" { @@ -43,7 +45,7 @@ func TestPushHead(t *testing.T) { } func TestPushTail(t *testing.T) { - s := sll.New(5) + s := sll.New() s.PushTail("value") if s.Tail().Read() != "value" { @@ -52,7 +54,7 @@ func TestPushTail(t *testing.T) { } func TestNext(t *testing.T) { - s := sll.New(5) + s := sll.New() firstVal := "first" secondVal := "second" @@ -66,7 +68,7 @@ func TestNext(t *testing.T) { } func TestPrev(t *testing.T) { - s := sll.New(5) + s := sll.New() firstVal := "first" secondVal := "second" @@ -80,7 +82,7 @@ func TestPrev(t *testing.T) { } func TestLen(t *testing.T) { - s := sll.New(10) + s := sll.New() for i := 0; i < 5; i++ { s.PushTail(i) @@ -92,7 +94,7 @@ func TestLen(t *testing.T) { } func TestHighScores(t *testing.T) { - s := sll.New(10) + s := sll.New() nodes := map[int]*sll.Node{} @@ -107,10 +109,21 @@ func TestHighScores(t *testing.T) { nodes[4].Read() nodes[4].Read() - // Should result in [2, 4, 3, 1, 5] with read scores - // 3, 2, 0, 0, 0 respectively. + // Should result in [0,4,3] with read scores + // 0,2,3 respectively. 
+ scores := s.HighScores(3) + // for node := range nodes { + // fmt.Printf("node %d: %d\n", node, nodes[node].Score) + // } + // + // fmt.Println("-") + // + // for _, node := range scores { + // fmt.Printf("node %d: %d\n", node.Value, node.Score) + // } + if scores[0] != nodes[2] { t.Errorf("Expected scores position 0 node with value 2, got %d", scores[0].Read()) } @@ -125,7 +138,7 @@ func TestHighScores(t *testing.T) { } func TestLowScores(t *testing.T) { - s := sll.New(3) + s := sll.New() nodes := map[int]*sll.Node{} @@ -144,6 +157,18 @@ func TestLowScores(t *testing.T) { // with read scores of 0, 2, 3 respectively. scores := s.LowScores(3) + /* + for node := range nodes { + fmt.Printf("node %d: %d\n", node, nodes[node].Score) + } + + fmt.Println("-") + + for _, node := range scores { + fmt.Printf("node %d: %d\n", node.Value, node.Score) + } + */ + if scores[0] != nodes[2] { t.Errorf("Expected scores position 0 node with value 2, got %d", scores[2].Read()) } @@ -157,8 +182,65 @@ func TestLowScores(t *testing.T) { } } +func benchmarkHeapScores(b *testing.B, l int) { + b.N = 1 + b.StopTimer() + + // Create/populate an sll. + s := sll.New() + for i := 0; i < l; i++ { + s.PushTail(i) + } + + // Perform 3*sll.Len() reads + // on random nodes to produce + // random node score counts. + node := s.Tail() + for i := 0; i < 3*l; i++ { + node.Read() + for j := 0; j < rand.Intn(10); j++ { + node = node.Next() + if node == nil { + node = s.Tail() + } + } + } + + b.ResetTimer() + b.StartTimer() + + for n := 0; n < b.N; n++ { + // Call HighScores for + // 1/20th the sll len. + s.HighScores(l / 20) + } +} + +func BenchmarkHeapScores200K(b *testing.B) { benchmarkHeapScores(b, 200000) } +func BenchmarkHeapScores2M(b *testing.B) { benchmarkHeapScores(b, 2000000) } + +func TestScoresEmpty(t *testing.T) { + s := sll.New() + + hScores := s.HighScores(5) + lScores := s.LowScores(5) + + // We don't really care about the + // len; if it's really broken, + // we'd probably have crashed + // by now. 
+ + if len(hScores) != 0 { + t.Errorf("Expected scores len of 0, got %d", len(hScores)) + } + + if len(lScores) != 0 { + t.Errorf("Expected scores len of 0, got %d", len(lScores)) + } +} + func TestMoveToHead(t *testing.T) { - s := sll.New(10) + s := sll.New() for i := 0; i < 10; i++ { s.PushTail(i) @@ -185,7 +267,7 @@ func TestMoveToHead(t *testing.T) { } func TestMoveToTail(t *testing.T) { - s := sll.New(10) + s := sll.New() for i := 0; i < 10; i++ { s.PushTail(i) @@ -212,8 +294,8 @@ func TestMoveToTail(t *testing.T) { } func TestPushHeadNode(t *testing.T) { - s1 := sll.New(3) - s2 := sll.New(3) + s1 := sll.New() + s2 := sll.New() s1.PushTail("target") node := s1.Tail() @@ -235,8 +317,8 @@ func TestPushHeadNode(t *testing.T) { } func TestPushTailNode(t *testing.T) { - s1 := sll.New(3) - s2 := sll.New(3) + s1 := sll.New() + s2 := sll.New() s1.PushTail("target") node := s1.Tail() @@ -258,7 +340,7 @@ func TestPushTailNode(t *testing.T) { } func TestRemove(t *testing.T) { - s := sll.New(3) + s := sll.New() nodes := map[int]*sll.Node{} @@ -289,68 +371,8 @@ func TestRemove(t *testing.T) { } } -func TestSync(t *testing.T) { - s := sll.New(3) - - first := s.PushTail("value") - second := s.PushTail("value") - s.PushTail("value") - - s.RemoveAsync(second) - - if s.Len() != 3 { - t.Errorf("Expected len 3, got %d", s.Len()) - } - - if s.Tail().Next() != first { - t.Error("Unexpected list order") - } - - s.Sync() - - if s.Len() != 2 { - t.Errorf("Expected len 2, got %d", s.Len()) - } -} - -func TestRemoveAsync(t *testing.T) { - s := sll.New(3) - - nodes := map[int]*sll.Node{} - - for i := 0; i < 3; i++ { - nodes[i] = s.PushTail(i) - } - - s.RemoveAsync(nodes[1]) - - if s.Tail().Next().Read() != 0 { - t.Errorf(`Expected node with value "0", got "%d"`, s.Tail().Next().Read()) - } - - if s.Len() != 3 { - t.Errorf("Expected len 3, got %d", s.Len()) - } - - s.Sync() - - scores := s.HighScores(3) - - if s.Len() != 2 { - t.Errorf("Expected len 2, got %d", s.Len()) - } - - if scores[0] != s.Tail() { - t.Error("Unexpected node in scores position 0") - } - - if scores[1] != s.Tail().Next() { - t.Error("Unexpected node in scores position 1") - } -} - func TestRemoveHead(t *testing.T) { - s := sll.New(3) + s := sll.New() s.PushTail("value") target := s.PushTail("value") @@ -364,7 +386,7 @@ func TestRemoveHead(t *testing.T) { } func TestRemoveTail(t *testing.T) { - s := sll.New(3) + s := sll.New() s.PushTail("value") target := s.PushTail("value") @@ -376,51 +398,3 @@ func TestRemoveTail(t *testing.T) { t.Error("Unexpected tail node") } } - -func TestRemoveHeadAsync(t *testing.T) { - s := sll.New(3) - - s.PushTail("value") - target := s.PushTail("value") - s.PushTail("value") - - s.RemoveHeadAsync() - - if s.Head() != target { - t.Error("Unexpected head node") - } - - if s.Len() != 3 { - t.Errorf("Expected len 3, got %d", s.Len()) - } - - s.Sync() - - if s.Len() != 2 { - t.Errorf("Expected len 2, got %d", s.Len()) - } -} - -func TestRemoveTailAsync(t *testing.T) { - s := sll.New(3) - - s.PushTail("value") - target := s.PushTail("value") - s.PushTail("value") - - s.RemoveTailAsync() - - if s.Tail() != target { - t.Error("Unexpected tail node") - } - - if s.Len() != 3 { - t.Errorf("Expected len 3, got %d", s.Len()) - } - - s.Sync() - - if s.Len() != 2 { - t.Errorf("Expected len 2, got %d", s.Len()) - } -} diff --git a/vendor/github.com/jamiealquiza/fnv/README.md b/vendor/github.com/jamiealquiza/fnv/README.md index dd5afb9..45fc6de 100644 --- a/vendor/github.com/jamiealquiza/fnv/README.md +++ 
b/vendor/github.com/jamiealquiza/fnv/README.md @@ -10,4 +10,4 @@ BenchmarkHash32a-4 300000000 4.85 ns/op 0 B/op BenchmarkHash32-4 300000000 4.78 ns/op 0 B/op 0 allocs/op BenchmarkHash64a-4 300000000 6.01 ns/op 0 B/op 0 allocs/op BenchmarkHash64-4 300000000 5.18 ns/op 0 B/op 0 allocs/op -``` \ No newline at end of file +```
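For context on the sll changes above, a minimal usage sketch of the heap-backed score selection, assuming the `New()`, `PushTail()`, `Read()`, and `HighScores()` signatures shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/jamiealquiza/bicache/sll"
)

func main() {
	s := sll.New()

	// Push a few nodes, then read some of them to bump their scores.
	nodes := make([]*sll.Node, 0, 5)
	for i := 0; i < 5; i++ {
		nodes = append(nodes, s.PushTail(i))
	}
	nodes[2].Read()
	nodes[2].Read()
	nodes[4].Read()

	// HighScores/LowScores now select the top/bottom k nodes with a
	// bounded heap rather than sorting a full score list; results are
	// returned in ascending score order.
	for _, n := range s.HighScores(2) {
		fmt.Println(n.Value, n.Score)
	}
}
```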