ccache-2.0.3/.gitignore

vendor/

ccache-2.0.3/Gopkg.lock

# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[[projects]]
  digest = "1:65c6a2822d8653e4f780c259a86d1b444c0b1ce7601b500deb985387bfe6bdec"
  name = "github.com/karlseguin/expect"
  packages = [
    ".",
    "build",
    "mock",
  ]
  pruneopts = "UT"
  revision = "4fcda73748276dc72bcc09729bdb56242093c12c"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  digest = "1:d594bb9f2a18ba4da7ab1368f4debf59f6b77cc7046705553f966837c12059f1"
  name = "github.com/wsxiaoys/terminal"
  packages = ["color"]
  pruneopts = "UT"
  revision = "0940f3fc43a0ed42d04916b1c04578462c650b09"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = ["github.com/karlseguin/expect"]
  solver-name = "gps-cdcl"
  solver-version = 1

ccache-2.0.3/Gopkg.toml

[[constraint]]
  name = "github.com/karlseguin/expect"
  version = "1.0.1"

[prune]
  go-tests = true
  unused-packages = true

ccache-2.0.3/Makefile

t:
	go test ./...

f:
	go fmt ./...

ccache-2.0.3/bucket.go

package ccache

import (
	"sync"
	"time"
)

// bucket is one shard of the cache's key space; the embedded RWMutex
// guards the lookup map.
type bucket struct {
	sync.RWMutex
	lookup map[string]*Item
}

func (b *bucket) itemCount() int {
	b.RLock()
	defer b.RUnlock()
	return len(b.lookup)
}

func (b *bucket) get(key string) *Item {
	b.RLock()
	defer b.RUnlock()
	return b.lookup[key]
}

// set stores a new item and returns it along with any existing item it displaced.
func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
	expires := time.Now().Add(duration).UnixNano()
	item := newItem(key, value, expires)
	b.Lock()
	defer b.Unlock()
	existing := b.lookup[key]
	b.lookup[key] = item
	return item, existing
}

func (b *bucket) delete(key string) *Item {
	b.Lock()
	defer b.Unlock()
	item := b.lookup[key]
	delete(b.lookup, key)
	return item
}

func (b *bucket) clear() {
	b.Lock()
	defer b.Unlock()
	b.lookup = make(map[string]*Item)
}

ccache-2.0.3/bucket_test.go

package ccache

import (
"github.com/karlseguin/expect" "testing" "time" ) type BucketTests struct { } func Test_Bucket(t *testing.T) { Expectify(new(BucketTests), t) } func (_ *BucketTests) GetMissFromBucket() { bucket := testBucket() Expect(bucket.get("invalid")).To.Equal(nil) } func (_ *BucketTests) GetHitFromBucket() { bucket := testBucket() item := bucket.get("power") assertValue(item, "9000") } func (_ *BucketTests) DeleteItemFromBucket() { bucket := testBucket() bucket.delete("power") Expect(bucket.get("power")).To.Equal(nil) } func (_ *BucketTests) SetsANewBucketItem() { bucket := testBucket() item, existing := bucket.set("spice", TestValue("flow"), time.Minute) assertValue(item, "flow") item = bucket.get("spice") assertValue(item, "flow") Expect(existing).To.Equal(nil) } func (_ *BucketTests) SetsAnExistingItem() { bucket := testBucket() item, existing := bucket.set("power", TestValue("9001"), time.Minute) assertValue(item, "9001") item = bucket.get("power") assertValue(item, "9001") assertValue(existing, "9000") } func testBucket() *bucket { b := &bucket{lookup: make(map[string]*Item)} b.lookup["power"] = &Item{ key: "power", value: TestValue("9000"), } return b } func assertValue(item *Item, expected string) { value := item.value.(TestValue) Expect(value).To.Equal(TestValue(expected)) } type TestValue string func (v TestValue) Expires() time.Time { return time.Now() } ccache-2.0.3/cache.go000066400000000000000000000123301343426306000143140ustar00rootroot00000000000000// An LRU cached aimed at high concurrency package ccache import ( "container/list" "hash/fnv" "sync/atomic" "time" ) type Cache struct { *Configuration list *list.List size int64 buckets []*bucket bucketMask uint32 deletables chan *Item promotables chan *Item donec chan struct{} } // Create a new cache with the specified configuration // See ccache.Configure() for creating a configuration func New(config *Configuration) *Cache { c := &Cache{ list: list.New(), Configuration: config, bucketMask: uint32(config.buckets) - 1, buckets: make([]*bucket, config.buckets), } for i := 0; i < int(config.buckets); i++ { c.buckets[i] = &bucket{ lookup: make(map[string]*Item), } } c.restart() return c } func (c *Cache) ItemCount() int { count := 0 for _, b := range c.buckets { count += b.itemCount() } return count } // Get an item from the cache. Returns nil if the item wasn't found. // This can return an expired item. Use item.Expired() to see if the item // is expired and item.TTL() to see how long until the item expires (which // will be negative for an already expired item). func (c *Cache) Get(key string) *Item { item := c.bucket(key).get(key) if item == nil { return nil } if item.expires > time.Now().UnixNano() { c.promote(item) } return item } // Used when the cache was created with the Track() configuration option. // Avoid otherwise func (c *Cache) TrackingGet(key string) TrackedItem { item := c.Get(key) if item == nil { return NilTracked } item.track() return item } // Set the value in the cache for the specified duration func (c *Cache) Set(key string, value interface{}, duration time.Duration) { c.set(key, value, duration) } // Replace the value if it exists, does not set if it doesn't. // Returns true if the item existed an was replaced, false otherwise. 
// Replace does not reset item's TTL
func (c *Cache) Replace(key string, value interface{}) bool {
	item := c.bucket(key).get(key)
	if item == nil {
		return false
	}
	c.Set(key, value, item.TTL())
	return true
}

// Attempts to get the value from the cache and calls fetch on a miss (missing
// or stale item). If fetch returns an error, no value is cached and the error
// is returned back to the caller.
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
	item := c.Get(key)
	if item != nil && !item.Expired() {
		return item, nil
	}
	value, err := fetch()
	if err != nil {
		return nil, err
	}
	return c.set(key, value, duration), nil
}

// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *Cache) Delete(key string) bool {
	item := c.bucket(key).delete(key)
	if item != nil {
		c.deletables <- item
		return true
	}
	return false
}

// this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *Cache) Clear() {
	for _, bucket := range c.buckets {
		bucket.clear()
	}
	c.size = 0
	c.list = list.New()
}

// Stops the background worker. Operations performed on the cache after Stop
// is called are likely to panic
func (c *Cache) Stop() {
	close(c.promotables)
	<-c.donec
}

func (c *Cache) restart() {
	c.deletables = make(chan *Item, c.deleteBuffer)
	c.promotables = make(chan *Item, c.promoteBuffer)
	c.donec = make(chan struct{})
	go c.worker()
}

func (c *Cache) deleteItem(bucket *bucket, item *Item) {
	bucket.delete(item.key) //stop other GETs from getting it
	c.deletables <- item
}

func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item {
	item, existing := c.bucket(key).set(key, value, duration)
	if existing != nil {
		c.deletables <- existing
	}
	c.promote(item)
	return item
}

func (c *Cache) bucket(key string) *bucket {
	h := fnv.New32a()
	h.Write([]byte(key))
	return c.buckets[h.Sum32()&c.bucketMask]
}

func (c *Cache) promote(item *Item) {
	c.promotables <- item
}

func (c *Cache) worker() {
	defer close(c.donec)
	for {
		select {
		case item, ok := <-c.promotables:
			if ok == false {
				goto drain
			}
			if c.doPromote(item) && c.size > c.maxSize {
				c.gc()
			}
		case item := <-c.deletables:
			c.doDelete(item)
		}
	}

drain:
	// apply any pending deletes before shutting down
	for {
		select {
		case item := <-c.deletables:
			c.doDelete(item)
		default:
			close(c.deletables)
			return
		}
	}
}

func (c *Cache) doDelete(item *Item) {
	if item.element == nil {
		item.promotions = -2
	} else {
		c.size -= item.size
		if c.onDelete != nil {
			c.onDelete(item)
		}
		c.list.Remove(item.element)
	}
}

func (c *Cache) doPromote(item *Item) bool {
	//already deleted
	if item.promotions == -2 {
		return false
	}
	if item.element != nil { //not a new item
		if item.shouldPromote(c.getsPerPromote) {
			c.list.MoveToFront(item.element)
			item.promotions = 0
		}
		return false
	}
	c.size += item.size
	item.element = c.list.PushFront(item)
	return true
}

func (c *Cache) gc() {
	element := c.list.Back()
	for i := 0; i < c.itemsToPrune; i++ {
		if element == nil {
			return
		}
		prev := element.Prev()
		item := element.Value.(*Item)
		if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
			c.bucket(item.key).delete(item.key)
			c.size -= item.size
			c.list.Remove(element)
			if c.onDelete != nil {
				c.onDelete(item)
			}
			item.promotions = -2
		}
		element = prev
	}
}

ccache-2.0.3/cache_test.go

package ccache

import (
	"strconv"
	"testing"
	"time"

"github.com/karlseguin/expect" ) type CacheTests struct{} func Test_Cache(t *testing.T) { Expectify(new(CacheTests), t) } func (_ CacheTests) DeletesAValue() { cache := New(Configure()) Expect(cache.ItemCount()).To.Equal(0) cache.Set("spice", "flow", time.Minute) cache.Set("worm", "sand", time.Minute) Expect(cache.ItemCount()).To.Equal(2) cache.Delete("spice") Expect(cache.Get("spice")).To.Equal(nil) Expect(cache.Get("worm").Value()).To.Equal("sand") Expect(cache.ItemCount()).To.Equal(1) } func (_ CacheTests) OnDeleteCallbackCalled() { onDeleteFnCalled := false onDeleteFn := func(item *Item) { if item.key == "spice" { onDeleteFnCalled = true } } cache := New(Configure().OnDelete(onDeleteFn)) cache.Set("spice", "flow", time.Minute) cache.Set("worm", "sand", time.Minute) time.Sleep(time.Millisecond * 10) // Run once to init cache.Delete("spice") time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items Expect(cache.Get("spice")).To.Equal(nil) Expect(cache.Get("worm").Value()).To.Equal("sand") Expect(onDeleteFnCalled).To.Equal(true) } func (_ CacheTests) FetchesExpiredItems() { cache := New(Configure()) fn := func() (interface{}, error) { return "moo-moo", nil } cache.Set("beef", "moo", time.Second*-1) Expect(cache.Get("beef").Value()).To.Equal("moo") out, _ := cache.Fetch("beef", time.Second, fn) Expect(out.Value()).To.Equal("moo-moo") } func (_ CacheTests) GCsTheOldestItems() { cache := New(Configure().ItemsToPrune(10)) for i := 0; i < 500; i++ { cache.Set(strconv.Itoa(i), i, time.Minute) } //let the items get promoted (and added to our list) time.Sleep(time.Millisecond * 10) gcCache(cache) Expect(cache.Get("9")).To.Equal(nil) Expect(cache.Get("10").Value()).To.Equal(10) Expect(cache.ItemCount()).To.Equal(490) } func (_ CacheTests) PromotedItemsDontGetPruned() { cache := New(Configure().ItemsToPrune(10).GetsPerPromote(1)) for i := 0; i < 500; i++ { cache.Set(strconv.Itoa(i), i, time.Minute) } time.Sleep(time.Millisecond * 10) //run the worker once to init the list cache.Get("9") time.Sleep(time.Millisecond * 10) gcCache(cache) Expect(cache.Get("9").Value()).To.Equal(9) Expect(cache.Get("10")).To.Equal(nil) Expect(cache.Get("11").Value()).To.Equal(11) } func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() { cache := New(Configure().ItemsToPrune(10).Track()) for i := 0; i < 10; i++ { cache.Set(strconv.Itoa(i), i, time.Minute) } item := cache.TrackingGet("0") time.Sleep(time.Millisecond * 10) gcCache(cache) Expect(cache.Get("0").Value()).To.Equal(0) Expect(cache.Get("1")).To.Equal(nil) item.Release() gcCache(cache) Expect(cache.Get("0")).To.Equal(nil) } func (_ CacheTests) RemovesOldestItemWhenFull() { onDeleteFnCalled := false onDeleteFn := func(item *Item) { if item.key == "0" { onDeleteFnCalled = true } } cache := New(Configure().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn)) for i := 0; i < 7; i++ { cache.Set(strconv.Itoa(i), i, time.Minute) } time.Sleep(time.Millisecond * 10) Expect(cache.Get("0")).To.Equal(nil) Expect(cache.Get("1")).To.Equal(nil) Expect(cache.Get("2").Value()).To.Equal(2) Expect(onDeleteFnCalled).To.Equal(true) Expect(cache.ItemCount()).To.Equal(5) } func (_ CacheTests) RemovesOldestItemWhenFullBySizer() { cache := New(Configure().MaxSize(9).ItemsToPrune(2)) for i := 0; i < 7; i++ { cache.Set(strconv.Itoa(i), &SizedItem{i, 2}, time.Minute) } time.Sleep(time.Millisecond * 10) Expect(cache.Get("0")).To.Equal(nil) Expect(cache.Get("1")).To.Equal(nil) Expect(cache.Get("2")).To.Equal(nil) Expect(cache.Get("3")).To.Equal(nil) 
Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4) } func (_ CacheTests) SetUpdatesSizeOnDelta() { cache := New(Configure()) cache.Set("a", &SizedItem{0, 2}, time.Minute) cache.Set("b", &SizedItem{0, 3}, time.Minute) time.Sleep(time.Millisecond * 5) checkSize(cache, 5) cache.Set("b", &SizedItem{0, 3}, time.Minute) time.Sleep(time.Millisecond * 5) checkSize(cache, 5) cache.Set("b", &SizedItem{0, 4}, time.Minute) time.Sleep(time.Millisecond * 5) checkSize(cache, 6) cache.Set("b", &SizedItem{0, 2}, time.Minute) time.Sleep(time.Millisecond * 5) checkSize(cache, 4) cache.Delete("b") time.Sleep(time.Millisecond * 100) checkSize(cache, 2) } func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() { cache := New(Configure()) cache.Set("1", &SizedItem{1, 2}, time.Minute) cache.Set("2", &SizedItem{1, 2}, time.Minute) cache.Set("3", &SizedItem{1, 2}, time.Minute) cache.Replace("4", &SizedItem{1, 2}) time.Sleep(time.Millisecond * 5) checkSize(cache, 6) } func (_ CacheTests) ReplaceChangesSize() { cache := New(Configure()) cache.Set("1", &SizedItem{1, 2}, time.Minute) cache.Set("2", &SizedItem{1, 2}, time.Minute) cache.Replace("2", &SizedItem{1, 2}) time.Sleep(time.Millisecond * 5) checkSize(cache, 4) cache.Replace("2", &SizedItem{1, 1}) time.Sleep(time.Millisecond * 5) checkSize(cache, 3) cache.Replace("2", &SizedItem{1, 3}) time.Sleep(time.Millisecond * 5) checkSize(cache, 5) } type SizedItem struct { id int s int64 } func (s *SizedItem) Size() int64 { return s.s } func checkSize(cache *Cache, sz int64) { cache.Stop() Expect(cache.size).To.Equal(sz) cache.restart() } func gcCache(cache *Cache) { cache.Stop() cache.gc() cache.restart() } ccache-2.0.3/configuration.go000066400000000000000000000062571343426306000161330ustar00rootroot00000000000000package ccache type Configuration struct { maxSize int64 buckets int itemsToPrune int deleteBuffer int promoteBuffer int getsPerPromote int32 tracking bool onDelete func(item *Item) } // Creates a configuration object with sensible defaults // Use this as the start of the fluent configuration: // e.g.: ccache.New(ccache.Configure().MaxSize(10000)) func Configure() *Configuration { return &Configuration{ buckets: 16, itemsToPrune: 500, deleteBuffer: 1024, getsPerPromote: 3, promoteBuffer: 1024, maxSize: 5000, tracking: false, } } // The max size for the cache // [5000] func (c *Configuration) MaxSize(max int64) *Configuration { c.maxSize = max return c } // Keys are hashed into % bucket count to provide greater concurrency (every set // requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) // [16] func (c *Configuration) Buckets(count uint32) *Configuration { if count == 0 || ((count&(^count+1)) == count) == false { count = 16 } c.buckets = int(count) return c } // The number of items to prune when memory is low // [500] func (c *Configuration) ItemsToPrune(count uint32) *Configuration { c.itemsToPrune = int(count) return c } // The size of the queue for items which should be promoted. If the queue fills // up, promotions are skipped // [1024] func (c *Configuration) PromoteBuffer(size uint32) *Configuration { c.promoteBuffer = int(size) return c } // The size of the queue for items which should be deleted. If the queue fills // up, calls to Delete() will block func (c *Configuration) DeleteBuffer(size uint32) *Configuration { c.deleteBuffer = int(size) return c } // Give a large cache with a high read / write ratio, it's usually unnecessary // to promote an item on every Get. 
// GetsPerPromote specifies the number of Gets
// a key must have before being promoted
// [3]
func (c *Configuration) GetsPerPromote(count int32) *Configuration {
	c.getsPerPromote = count
	return c
}

// Typically, a cache is agnostic about how cached values are used. This is fine
// for typical cache usage, where you fetch an item from the cache, do something
// (write it out) and nothing else.
// However, if callers are going to keep a reference to a cached item for a long
// time, things get messy. Specifically, the cache can evict the item while
// references still exist. Technically, this isn't an issue. However, if you reload
// the item back into the cache, you end up with 2 objects representing the same
// data. This is a waste of space and could lead to weird behavior (the type of
// problem an identity map is meant to solve).
// By turning tracking on and using the cache's TrackingGet, the cache
// won't evict items which you haven't called Release() on. It's a simple reference
// counter.
func (c *Configuration) Track() *Configuration {
	c.tracking = true
	return c
}

// OnDelete allows setting a callback function to react to item deletion.
// This typically allows cleanup of resources, such as calling Close() on
// cached objects that require some kind of tear-down.
func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration {
	c.onDelete = callback
	return c
}

ccache-2.0.3/configuration_test.go

package ccache

import (
	. "github.com/karlseguin/expect"
	"testing"
)

type ConfigurationTests struct{}

func Test_Configuration(t *testing.T) {
	Expectify(new(ConfigurationTests), t)
}

func (_ *ConfigurationTests) BucketsPowerOf2() {
	for i := uint32(0); i < 31; i++ {
		c := Configure().Buckets(i)
		if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
			Expect(c.buckets).ToEqual(int(i))
		} else {
			Expect(c.buckets).ToEqual(16)
		}
	}
}

ccache-2.0.3/item.go

package ccache

import (
	"container/list"
	"sync/atomic"
	"time"
)

type Sized interface {
	Size() int64
}

type TrackedItem interface {
	Value() interface{}
	Release()
	Expired() bool
	TTL() time.Duration
	Expires() time.Time
	Extend(duration time.Duration)
}

type nilItem struct{}

func (n *nilItem) Value() interface{} { return nil }
func (n *nilItem) Release()           {}

func (i *nilItem) Expired() bool {
	return true
}

func (i *nilItem) TTL() time.Duration {
	return time.Minute
}

func (i *nilItem) Expires() time.Time {
	return time.Time{}
}

func (i *nilItem) Extend(duration time.Duration) {
}

var NilTracked = new(nilItem)

type Item struct {
	key        string
	group      string
	promotions int32
	refCount   int32
	expires    int64
	size       int64
	value      interface{}
	element    *list.Element
}

func newItem(key string, value interface{}, expires int64) *Item {
	size := int64(1)
	if sized, ok := value.(Sized); ok {
		size = sized.Size()
	}
	return &Item{
		key:        key,
		value:      value,
		promotions: 0,
		size:       size,
		expires:    expires,
	}
}

func (i *Item) shouldPromote(getsPerPromote int32) bool {
	i.promotions += 1
	return i.promotions == getsPerPromote
}

func (i *Item) Value() interface{} {
	return i.value
}

func (i *Item) track() {
	atomic.AddInt32(&i.refCount, 1)
}

func (i *Item) Release() {
	atomic.AddInt32(&i.refCount, -1)
}

func (i *Item) Expired() bool {
	expires := atomic.LoadInt64(&i.expires)
	return expires < time.Now().UnixNano()
}

// TTL returns the time until the item expires; it is negative for an
// item that has already expired.
func (i *Item) TTL() time.Duration {
	expires := atomic.LoadInt64(&i.expires)
	return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
}
func (i *Item) Expires() time.Time {
	expires := atomic.LoadInt64(&i.expires)
	return time.Unix(0, expires)
}

func (i *Item) Extend(duration time.Duration) {
	atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
}

ccache-2.0.3/item_test.go

package ccache

import (
	"math"
	"testing"
	"time"

	. "github.com/karlseguin/expect"
)

type ItemTests struct{}

func Test_Item(t *testing.T) {
	Expectify(new(ItemTests), t)
}

func (_ *ItemTests) Promotability() {
	item := &Item{promotions: 4}
	Expect(item.shouldPromote(5)).To.Equal(true)
	Expect(item.shouldPromote(5)).To.Equal(false)
}

func (_ *ItemTests) Expired() {
	now := time.Now().UnixNano()
	item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
	item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
	Expect(item1.Expired()).To.Equal(false)
	Expect(item2.Expired()).To.Equal(true)
}

func (_ *ItemTests) TTL() {
	now := time.Now().UnixNano()
	item1 := &Item{expires: now + int64(time.Second)}
	item2 := &Item{expires: now - int64(time.Second)}
	Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
	Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
}

func (_ *ItemTests) Expires() {
	now := time.Now().UnixNano()
	item := &Item{expires: now + (10)}
	Expect(item.Expires().UnixNano()).To.Equal(now + 10)
}

func (_ *ItemTests) Extend() {
	item := &Item{expires: time.Now().UnixNano() + 10}
	item.Extend(time.Minute * 2)
	Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
}

ccache-2.0.3/layeredbucket.go

package ccache

import (
	"sync"
	"time"
)

type layeredBucket struct {
	sync.RWMutex
	buckets map[string]*bucket
}

func (b *layeredBucket) itemCount() int {
	count := 0
	b.RLock()
	defer b.RUnlock()
	for _, b := range b.buckets {
		count += b.itemCount()
	}
	return count
}

func (b *layeredBucket) get(primary, secondary string) *Item {
	bucket := b.getSecondaryBucket(primary)
	if bucket == nil {
		return nil
	}
	return bucket.get(secondary)
}

func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
	b.RLock()
	bucket, exists := b.buckets[primary]
	b.RUnlock()
	if exists == false {
		return nil
	}
	return bucket
}

func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
	b.Lock()
	bkt, exists := b.buckets[primary]
	if exists == false {
		bkt = &bucket{lookup: make(map[string]*Item)}
		b.buckets[primary] = bkt
	}
	b.Unlock()
	item, existing := bkt.set(secondary, value, duration)
	item.group = primary
	return item, existing
}

func (b *layeredBucket) delete(primary, secondary string) *Item {
	b.RLock()
	bucket, exists := b.buckets[primary]
	b.RUnlock()
	if exists == false {
		return nil
	}
	return bucket.delete(secondary)
}

func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
	b.RLock()
	bucket, exists := b.buckets[primary]
	b.RUnlock()
	if exists == false {
		return false
	}

	bucket.Lock()
	defer bucket.Unlock()

	if l := len(bucket.lookup); l == 0 {
		return false
	}
	for key, item := range bucket.lookup {
		delete(bucket.lookup, key)
		deletables <- item
	}
	return true
}

func (b *layeredBucket) clear() {
	b.Lock()
	defer b.Unlock()
	for _, bucket := range b.buckets {
		bucket.clear()
	}
	b.buckets = make(map[string]*bucket)
}

ccache-2.0.3/layeredcache.go

// An LRU cache aimed at high concurrency
package ccache

import (
	"container/list"
"hash/fnv" "sync/atomic" "time" ) type LayeredCache struct { *Configuration list *list.List buckets []*layeredBucket bucketMask uint32 size int64 deletables chan *Item promotables chan *Item donec chan struct{} } // Create a new layered cache with the specified configuration. // A layered cache used a two keys to identify a value: a primary key // and a secondary key. Get, Set and Delete require both a primary and // secondary key. However, DeleteAll requires only a primary key, deleting // all values that share the same primary key. // Layered Cache is useful as an HTTP cache, where an HTTP purge might // delete multiple variants of the same resource: // primary key = "user/44" // secondary key 1 = ".json" // secondary key 2 = ".xml" // See ccache.Configure() for creating a configuration func Layered(config *Configuration) *LayeredCache { c := &LayeredCache{ list: list.New(), Configuration: config, bucketMask: uint32(config.buckets) - 1, buckets: make([]*layeredBucket, config.buckets), deletables: make(chan *Item, config.deleteBuffer), } for i := 0; i < int(config.buckets); i++ { c.buckets[i] = &layeredBucket{ buckets: make(map[string]*bucket), } } c.restart() return c } func (c *LayeredCache) ItemCount() int { count := 0 for _, b := range c.buckets { count += b.itemCount() } return count } // Get an item from the cache. Returns nil if the item wasn't found. // This can return an expired item. Use item.Expired() to see if the item // is expired and item.TTL() to see how long until the item expires (which // will be negative for an already expired item). func (c *LayeredCache) Get(primary, secondary string) *Item { item := c.bucket(primary).get(primary, secondary) if item == nil { return nil } if item.expires > time.Now().UnixNano() { c.promote(item) } return item } // Get the secondary cache for a given primary key. This operation will // never return nil. In the case where the primary key does not exist, a // new, underlying, empty bucket will be created and returned. func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache { primaryBkt := c.bucket(primary) bkt := primaryBkt.getSecondaryBucket(primary) primaryBkt.Lock() if bkt == nil { bkt = &bucket{lookup: make(map[string]*Item)} primaryBkt.buckets[primary] = bkt } primaryBkt.Unlock() return &SecondaryCache{ bucket: bkt, pCache: c, } } // Used when the cache was created with the Track() configuration option. // Avoid otherwise func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem { item := c.Get(primary, secondary) if item == nil { return NilTracked } item.track() return item } // Set the value in the cache for the specified duration func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) { c.set(primary, secondary, value, duration) } // Replace the value if it exists, does not set if it doesn't. // Returns true if the item existed an was replaced, false otherwise. // Replace does not reset item's TTL nor does it alter its position in the LRU func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool { item := c.bucket(primary).get(primary, secondary) if item == nil { return false } c.Set(primary, secondary, value, item.TTL()) return true } // Attempts to get the value from the cache and calles fetch on a miss. // If fetch returns an error, no value is cached and the error is returned back // to the caller. 
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
	item := c.Get(primary, secondary)
	if item != nil {
		return item, nil
	}
	value, err := fetch()
	if err != nil {
		return nil, err
	}
	return c.set(primary, secondary, value, duration), nil
}

// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *LayeredCache) Delete(primary, secondary string) bool {
	item := c.bucket(primary).delete(primary, secondary)
	if item != nil {
		c.deletables <- item
		return true
	}
	return false
}

// Deletes all items that share the same primary key
func (c *LayeredCache) DeleteAll(primary string) bool {
	return c.bucket(primary).deleteAll(primary, c.deletables)
}

// this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *LayeredCache) Clear() {
	for _, bucket := range c.buckets {
		bucket.clear()
	}
	c.size = 0
	c.list = list.New()
}

func (c *LayeredCache) Stop() {
	close(c.promotables)
	<-c.donec
}

func (c *LayeredCache) restart() {
	c.promotables = make(chan *Item, c.promoteBuffer)
	c.donec = make(chan struct{})
	go c.worker()
}

func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
	item, existing := c.bucket(primary).set(primary, secondary, value, duration)
	if existing != nil {
		c.deletables <- existing
	}
	c.promote(item)
	return item
}

func (c *LayeredCache) bucket(key string) *layeredBucket {
	h := fnv.New32a()
	h.Write([]byte(key))
	return c.buckets[h.Sum32()&c.bucketMask]
}

func (c *LayeredCache) promote(item *Item) {
	c.promotables <- item
}

func (c *LayeredCache) worker() {
	defer close(c.donec)
	for {
		select {
		case item, ok := <-c.promotables:
			if ok == false {
				return
			}
			if c.doPromote(item) && c.size > c.maxSize {
				c.gc()
			}
		case item := <-c.deletables:
			if item.element == nil {
				atomic.StoreInt32(&item.promotions, -2)
			} else {
				c.size -= item.size
				if c.onDelete != nil {
					c.onDelete(item)
				}
				c.list.Remove(item.element)
			}
		}
	}
}

func (c *LayeredCache) doPromote(item *Item) bool {
	// deleted before it ever got promoted
	if atomic.LoadInt32(&item.promotions) == -2 {
		return false
	}
	if item.element != nil { //not a new item
		if item.shouldPromote(c.getsPerPromote) {
			c.list.MoveToFront(item.element)
			atomic.StoreInt32(&item.promotions, 0)
		}
		return false
	}
	c.size += item.size
	item.element = c.list.PushFront(item)
	return true
}

func (c *LayeredCache) gc() {
	element := c.list.Back()
	for i := 0; i < c.itemsToPrune; i++ {
		if element == nil {
			return
		}
		prev := element.Prev()
		item := element.Value.(*Item)
		if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
			c.bucket(item.group).delete(item.group, item.key)
			c.size -= item.size
			c.list.Remove(element)
			item.promotions = -2
		}
		element = prev
	}
}

ccache-2.0.3/layeredcache_test.go

package ccache

import (
	"strconv"
	"testing"
	"time"

"github.com/karlseguin/expect" ) type LayeredCacheTests struct{} func Test_LayeredCache(t *testing.T) { Expectify(new(LayeredCacheTests), t) } func (_ *LayeredCacheTests) GetsANonExistantValue() { cache := newLayered() Expect(cache.Get("spice", "flow")).To.Equal(nil) Expect(cache.ItemCount()).To.Equal(0) } func (_ *LayeredCacheTests) SetANewValue() { cache := newLayered() cache.Set("spice", "flow", "a value", time.Minute) Expect(cache.Get("spice", "flow").Value()).To.Equal("a value") Expect(cache.Get("spice", "stop")).To.Equal(nil) Expect(cache.ItemCount()).To.Equal(1) } func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() { cache := newLayered() cache.Set("spice", "flow", "value-a", time.Minute) cache.Set("spice", "must", "value-b", time.Minute) cache.Set("leto", "sister", "ghanima", time.Minute) Expect(cache.Get("spice", "flow").Value()).To.Equal("value-a") Expect(cache.Get("spice", "must").Value()).To.Equal("value-b") Expect(cache.Get("spice", "worm")).To.Equal(nil) Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima") Expect(cache.Get("leto", "brother")).To.Equal(nil) Expect(cache.Get("baron", "friend")).To.Equal(nil) Expect(cache.ItemCount()).To.Equal(3) } func (_ *LayeredCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() { cache := newLayered() Expect(cache.Replace("spice", "flow", "value-a")).To.Equal(false) Expect(cache.Get("spice", "flow")).To.Equal(nil) } func (_ *LayeredCacheTests) ReplaceUpdatesTheValue() { cache := newLayered() cache.Set("spice", "flow", "value-a", time.Minute) Expect(cache.Replace("spice", "flow", "value-b")).To.Equal(true) Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b") Expect(cache.ItemCount()).To.Equal(1) //not sure how to test that the TTL hasn't changed sort of a sleep.. 
}

func (_ *LayeredCacheTests) DeletesAValue() {
	cache := newLayered()
	cache.Set("spice", "flow", "value-a", time.Minute)
	cache.Set("spice", "must", "value-b", time.Minute)
	cache.Set("leto", "sister", "ghanima", time.Minute)
	cache.Delete("spice", "flow")
	Expect(cache.Get("spice", "flow")).To.Equal(nil)
	Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
	Expect(cache.Get("spice", "worm")).To.Equal(nil)
	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
	Expect(cache.ItemCount()).To.Equal(2)
}

func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
	onDeleteFnCalled := false
	onDeleteFn := func(item *Item) {
		if item.group == "spice" && item.key == "flow" {
			onDeleteFnCalled = true
		}
	}

	cache := Layered(Configure().OnDelete(onDeleteFn))
	cache.Set("spice", "flow", "value-a", time.Minute)
	cache.Set("spice", "must", "value-b", time.Minute)
	cache.Set("leto", "sister", "ghanima", time.Minute)

	time.Sleep(time.Millisecond * 10) // Run once to init
	cache.Delete("spice", "flow")
	time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items

	Expect(cache.Get("spice", "flow")).To.Equal(nil)
	Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
	Expect(cache.Get("spice", "worm")).To.Equal(nil)
	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
	Expect(onDeleteFnCalled).To.Equal(true)
}

func (_ *LayeredCacheTests) DeletesALayer() {
	cache := newLayered()
	cache.Set("spice", "flow", "value-a", time.Minute)
	cache.Set("spice", "must", "value-b", time.Minute)
	cache.Set("leto", "sister", "ghanima", time.Minute)
	cache.DeleteAll("spice")
	Expect(cache.Get("spice", "flow")).To.Equal(nil)
	Expect(cache.Get("spice", "must")).To.Equal(nil)
	Expect(cache.Get("spice", "worm")).To.Equal(nil)
	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
}

func (_ LayeredCacheTests) GCsTheOldestItems() {
	cache := Layered(Configure().ItemsToPrune(10))
	cache.Set("xx", "a", 23, time.Minute)
	for i := 0; i < 500; i++ {
		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
	}
	cache.Set("xx", "b", 9001, time.Minute)
	//let the items get promoted (and added to our list)
	time.Sleep(time.Millisecond * 10)
	gcLayeredCache(cache)
	Expect(cache.Get("xx", "a")).To.Equal(nil)
	Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
	Expect(cache.Get("8", "a")).To.Equal(nil)
	Expect(cache.Get("9", "a").Value()).To.Equal(9)
	Expect(cache.Get("10", "a").Value()).To.Equal(10)
}

func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
	cache := Layered(Configure().ItemsToPrune(10).GetsPerPromote(1))
	for i := 0; i < 500; i++ {
		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
	}
	time.Sleep(time.Millisecond * 10) //run the worker once to init the list
	cache.Get("9", "a")
	time.Sleep(time.Millisecond * 10)
	gcLayeredCache(cache)
	Expect(cache.Get("9", "a").Value()).To.Equal(9)
	Expect(cache.Get("10", "a")).To.Equal(nil)
	Expect(cache.Get("11", "a").Value()).To.Equal(11)
}

func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
	cache := Layered(Configure().ItemsToPrune(10).Track())
	for i := 0; i < 10; i++ {
		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
	}
	item := cache.TrackingGet("0", "a")
	time.Sleep(time.Millisecond * 10)
	gcLayeredCache(cache)
	Expect(cache.Get("0", "a").Value()).To.Equal(0)
	Expect(cache.Get("1", "a")).To.Equal(nil)
	item.Release()
	gcLayeredCache(cache)
	Expect(cache.Get("0", "a")).To.Equal(nil)
}

func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
	cache := Layered(Configure().MaxSize(5).ItemsToPrune(1))
	cache.Set("xx", "a", 23, time.Minute)
	for i := 0; i < 7; i++ {
		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
	}
	cache.Set("xx", "b", 9001, time.Minute)
	time.Sleep(time.Millisecond * 10)
	Expect(cache.Get("xx", "a")).To.Equal(nil)
	Expect(cache.Get("0", "a")).To.Equal(nil)
	Expect(cache.Get("1", "a")).To.Equal(nil)
	Expect(cache.Get("2", "a")).To.Equal(nil)
	Expect(cache.Get("3", "a").Value()).To.Equal(3)
	Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
}

func newLayered() *LayeredCache {
	return Layered(Configure())
}

func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
	cache := Layered(Configure().MaxSize(9).ItemsToPrune(2))
	for i := 0; i < 7; i++ {
		cache.Set("pri", strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
	}
	time.Sleep(time.Millisecond * 10)
	Expect(cache.Get("pri", "0")).To.Equal(nil)
	Expect(cache.Get("pri", "1")).To.Equal(nil)
	Expect(cache.Get("pri", "2")).To.Equal(nil)
	Expect(cache.Get("pri", "3")).To.Equal(nil)
	Expect(cache.Get("pri", "4").Value().(*SizedItem).id).To.Equal(4)
}

func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
	cache := Layered(Configure())
	cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
	cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 5)
	cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 5)
	cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute)
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 6)
	cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute)
	cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute)
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 7)
	cache.Delete("pri", "b")
	time.Sleep(time.Millisecond * 10)
	checkLayeredSize(cache, 5)
}

func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
	cache := Layered(Configure())
	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
	cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
	cache.Replace("sec", "3", &SizedItem{1, 2})
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 6)
}

func (_ LayeredCacheTests) ReplaceChangesSize() {
	cache := Layered(Configure())
	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)

	cache.Replace("pri", "2", &SizedItem{1, 2})
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 4)

	cache.Replace("pri", "2", &SizedItem{1, 1})
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 3)

	cache.Replace("pri", "2", &SizedItem{1, 3})
	time.Sleep(time.Millisecond * 5)
	checkLayeredSize(cache, 5)
}

func checkLayeredSize(cache *LayeredCache, sz int64) {
	cache.Stop()
	Expect(cache.size).To.Equal(sz)
	cache.restart()
}

func gcLayeredCache(cache *LayeredCache) {
	cache.Stop()
	cache.gc()
	cache.restart()
}

ccache-2.0.3/license.txt

Copyright (c) 2013 Karl Seguin.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

ccache-2.0.3/readme.md

# CCache

CCache is an LRU Cache, written in Go, focused on supporting high concurrency.

Lock contention on the list is reduced by:

* Introducing a window which limits the frequency that an item can get promoted
* Using a buffered channel to queue promotions for a single worker
* Garbage collecting within the same thread as the worker

## Setup

First, download the project:

    go get github.com/karlseguin/ccache

## Configuration

Next, import and create a `Cache` instance:

```go
import (
	"github.com/karlseguin/ccache"
)

var cache = ccache.New(ccache.Configure())
```

`Configure` exposes a chainable API:

```go
var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100))
```

The most likely configuration options to tweak are:

* `MaxSize(int)` - the maximum size of the cache (default: 5000)
* `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3)
* `ItemsToPrune(int)` - the number of items to prune when we hit `MaxSize`. Freeing up more than 1 slot at a time improves performance (default: 500)

Configurations that change the internals of the cache, which aren't as likely to need tweaking:

* `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16).
* `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024)
* `DeleteBuffer(int)` - the size of the buffer to use to queue deletions (default: 1024)

## Usage

Once the cache is set up, you can `Get`, `Set` and `Delete` items from it. A `Get` returns an `*Item`:

### Get

```go
item := cache.Get("user:4")
if item == nil {
	//handle
} else {
	user := item.Value().(*User)
}
```

The returned `*Item` exposes a number of methods:

* `Value() interface{}` - the value cached
* `Expired() bool` - whether the item is expired or not
* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items)
* `Expires() time.Time` - the time the item will expire

By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source.

### Set

`Set` expects the key, value and ttl:

```go
cache.Set("user:4", user, time.Minute * 10)
```

### Fetch

There's also a `Fetch` which mixes a `Get` and a `Set`:

```go
item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) {
	//code to fetch the data in case of a miss
	//should return the data to cache and the error, if any
})
```
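In practice the fetch callback wraps whatever loads the data on a miss. A minimal sketch, assuming a hypothetical `loadUser` helper that returns a `*User` and an error:

```go
item, err := cache.Fetch("user:4", time.Minute*10, func() (interface{}, error) {
	user, err := loadUser(4) // e.g. a database lookup; hypothetical helper
	if err != nil {
		return nil, err // on error, nothing is cached and err is returned
	}
	return user, nil
})
if err == nil {
	user := item.Value().(*User)
	_ = user
}
```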
### Delete

`Delete` expects the key to delete. It's OK to call `Delete` on a non-existent key:

```go
cache.Delete("user:4")
```

### Extend

The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
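For example, this pushes the expiry of a cached item (reusing the "user:4" key from above) to ten minutes from now:

```go
item := cache.Get("user:4")
if item != nil {
	item.Extend(time.Minute * 10) // now expires 10 minutes from now
}
```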
### Replace

The value of an item can be updated to a new value without renewing the item's TTL or its position in the LRU:

```go
cache.Replace("user:4", user)
```

`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.

### Stop

The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache.

## Tracking

CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintain a long-lived reference to data.

When you configure your cache with `Track()`:

```go
cache = ccache.New(ccache.Configure().Track())
```

The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them:

```go
item := cache.TrackingGet("user:4")
user := item.Value() //will be nil if "user:4" didn't exist in the cache
item.Release() //can be called even if item.Value() returned nil
```

In practice, `Release` wouldn't be called until later, at some other place in your code.

There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyway. More importantly, it helps ensure that your code returns consistent data. Without tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.

## LayeredCache

CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against either both the primary and secondary key, or the primary key only (removing all values that share the same primary key).

`LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request.

`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, and exposes a slightly different API:

```go
cache := ccache.Layered(ccache.Configure())

cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5)
cache.Set("/users/goku", "type:xml", "<value_to_cache>", time.Minute * 5)

json := cache.Get("/users/goku", "type:json")
xml := cache.Get("/users/goku", "type:xml")

cache.Delete("/users/goku", "type:json")
cache.Delete("/users/goku", "type:xml")
// OR
cache.DeleteAll("/users/goku")
```

## SecondaryCache

In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code.

The `SecondaryCache` is retrieved with:

```go
cache := ccache.Layered(ccache.Configure())
sCache := cache.GetOrCreateSecondaryCache("/users/goku")
sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
```

The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. One difference is that `GetOrCreateSecondaryCache` never returns nil: for a non-existent primary key it returns a new, empty secondary cache.

## Size

By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache.

However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes.
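For example, a value can report its own size by satisfying that method (the `Entry` type here is purely illustrative):

```go
type Entry struct {
	payload []byte
}

// Size tells ccache how much of MaxSize this entry consumes.
func (e *Entry) Size() int64 {
	return int64(len(e.payload))
}
```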
"github.com/karlseguin/expect" "strconv" "testing" "time" ) type SecondaryCacheTests struct{} func Test_SecondaryCache(t *testing.T) { Expectify(new(SecondaryCacheTests), t) } func (_ SecondaryCacheTests) GetsANonExistantValue() { cache := newLayered().GetOrCreateSecondaryCache("foo") Expect(cache).Not.To.Equal(nil) } func (_ SecondaryCacheTests) SetANewValue() { cache := newLayered() cache.Set("spice", "flow", "a value", time.Minute) sCache := cache.GetOrCreateSecondaryCache("spice") Expect(sCache.Get("flow").Value()).To.Equal("a value") Expect(sCache.Get("stop")).To.Equal(nil) } func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() { cache := newLayered() cache.Set("spice", "flow", "a value", time.Minute) sCache := cache.GetOrCreateSecondaryCache("spice") sCache.Set("orinoco", "another value", time.Minute) Expect(sCache.Get("orinoco").Value()).To.Equal("another value") Expect(cache.Get("spice", "orinoco").Value()).To.Equal("another value") } func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() { cache := newLayered() sCache := cache.GetOrCreateSecondaryCache("spice") sCache.Set("flow", "a value", time.Minute) Expect(sCache.Get("flow").Value()).To.Equal("a value") Expect(cache.Get("spice", "flow").Value()).To.Equal("a value") } func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() { cache := newLayered() cache.Set("spice", "flow", "a value", time.Minute) cache.Set("spice", "sister", "ghanima", time.Minute) sCache := cache.GetOrCreateSecondaryCache("spice") cache.Delete("spice", "flow") Expect(cache.Get("spice", "flow")).To.Equal(nil) Expect(sCache.Get("flow")).To.Equal(nil) sCache.Delete("sister") Expect(cache.Get("spice", "sister")).To.Equal(nil) Expect(sCache.Get("sister")).To.Equal(nil) } func (_ SecondaryCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() { cache := newLayered() sCache := cache.GetOrCreateSecondaryCache("spice") Expect(sCache.Replace("flow", "value-a")).To.Equal(false) Expect(cache.Get("spice", "flow")).To.Equal(nil) } func (_ SecondaryCacheTests) ReplaceUpdatesTheValue() { cache := newLayered() cache.Set("spice", "flow", "value-a", time.Minute) sCache := cache.GetOrCreateSecondaryCache("spice") Expect(sCache.Replace("flow", "value-b")).To.Equal(true) Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b") } func (_ SecondaryCacheTests) FetchReturnsAnExistingValue() { cache := newLayered() cache.Set("spice", "flow", "value-a", time.Minute) sCache := cache.GetOrCreateSecondaryCache("spice") val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil }) Expect(val.Value().(string)).To.Equal("value-a") } func (_ SecondaryCacheTests) FetchReturnsANewValue() { cache := newLayered() sCache := cache.GetOrCreateSecondaryCache("spice") val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil }) Expect(val.Value().(string)).To.Equal("a fetched value") } func (_ SecondaryCacheTests) TrackerDoesNotCleanupHeldInstance() { cache := Layered(Configure().ItemsToPrune(10).Track()) for i := 0; i < 10; i++ { cache.Set(strconv.Itoa(i), "a", i, time.Minute) } sCache := cache.GetOrCreateSecondaryCache("0") item := sCache.TrackingGet("a") time.Sleep(time.Millisecond * 10) gcLayeredCache(cache) Expect(cache.Get("0", "a").Value()).To.Equal(0) Expect(cache.Get("1", "a")).To.Equal(nil) item.Release() gcLayeredCache(cache) Expect(cache.Get("0", "a")).To.Equal(nil) }